basic_custom_chatbot.py
"""
This example demonstrates how to create a custom chatbot with custom personality using the Atomic Agents library.
"""
import os

import instructor
import openai
from rich.console import Console

from atomic_agents.agents.base_agent import BaseAgent, BaseAgentConfig
from atomic_agents.lib.components.agent_memory import AgentMemory
from atomic_agents.lib.components.system_prompt_generator import SystemPromptGenerator, SystemPromptInfo

# Define system prompt information including background, steps, and output instructions
system_prompt = SystemPromptInfo(
    background=[
        'This assistant is a general-purpose AI designed to be helpful and friendly.',
    ],
    steps=[
        'Understand the user\'s input and provide a relevant response.',
        'Respond to the user.'
    ],
    output_instructions=[
        'Provide helpful and relevant information to assist the user.',
        'Be friendly and respectful in all interactions.',
        'Always answer in rhyming verse.'
    ]
)
# Initialize the system prompt generator with the system prompt info defined above
system_prompt_generator = SystemPromptGenerator(system_prompt)
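# Optional: render the assembled system prompt to verify the background, steps, and output
# instructions above. A minimal sketch, left commented out; the `generate_prompt()` method
# name is an assumption and may differ across atomic-agents versions.
# print(system_prompt_generator.generate_prompt())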
# Initialize chat memory to store conversation history
memory = AgentMemory()
# Define initial memory with a greeting message from the assistant
initial_memory = [
    {'role': 'assistant', 'content': 'How do you do? What can I do for you? Tell me, pray, what is your need today?'}
]
# Load the initial memory into the chat memory
memory.load(initial_memory)
# Create a chat agent with the specified model, system prompt generator, and memory
# For all supported clients such as Anthropic & Gemini, have a look at the `instructor` library documentation.
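# A hedged sketch of swapping in a different provider via instructor, e.g. Anthropic.
# Assumes the `anthropic` package is installed and ANTHROPIC_API_KEY is set; the model
# name is illustrative and should be adjusted to one available on your account.
#
#     import anthropic
#     client = instructor.from_anthropic(anthropic.Anthropic())
#     # ...then pass client=client and e.g. model='claude-3-haiku-20240307' to BaseAgentConfig.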
base_url = None # Replace with your OpenAI API base URL
api_key = None # Replace with your OpenAI API key
agent = BaseAgent(
    config=BaseAgentConfig(
        client=instructor.from_openai(openai.OpenAI(
            base_url=base_url or os.getenv('OPENAI_BASE_URL'),
            api_key=api_key or os.getenv('OPENAI_API_KEY')
        )),
        system_prompt_generator=system_prompt_generator,
        model='gpt-3.5-turbo',
        memory=memory,
    )
)
# Main chat loop for testing the chat agent
console = Console()
console.print(f'Agent: {initial_memory[0]["content"]}')
while True:
    user_input = input('You: ')
    if user_input.lower() in ['/exit', '/quit']:
        print('Exiting chat...')
        break
    response = agent.run(agent.input_schema(chat_message=user_input))
    console.print(f'Agent: {response.chat_message}')
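# Optional: a non-interactive smoke test using the same agent, handy when no stdin is
# available (e.g. in CI). It only reuses calls already shown above; the prompt text is
# illustrative.
#
#     reply = agent.run(agent.input_schema(chat_message='Tell me a joke about Python.'))
#     console.print(f'Agent: {reply.chat_message}')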