From d374b1d6f8c09c0963c3b0e260bed2d90397488a Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Mon, 13 Jan 2025 21:46:51 -0800 Subject: [PATCH 01/10] v1, make assistant agent declarative --- .../agents/_assistant_agent.py | 101 +++++++++--- .../agents/_base_chat_agent.py | 16 +- .../agents/_user_proxy_agent.py | 47 ++++-- .../tutorial/declarative.ipynb | 144 ++++++++++++++++-- .../_buffered_chat_completion_context.py | 23 ++- .../model_context/_chat_completion_context.py | 8 +- .../_unbounded_chat_completion_context.py | 20 ++- 7 files changed, 309 insertions(+), 50 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index f60a44dbb90c..923c49a698f2 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -2,6 +2,7 @@ import json import logging import warnings +from typing_extensions import Self from typing import ( Any, AsyncGenerator, @@ -13,7 +14,7 @@ Sequence, ) -from autogen_core import CancellationToken, FunctionCall +from autogen_core import CancellationToken, FunctionCall, Component, ComponentModel from autogen_core.model_context import ( ChatCompletionContext, UnboundedChatCompletionContext, @@ -27,6 +28,7 @@ UserMessage, ) from autogen_core.tools import FunctionTool, Tool +from pydantic import BaseModel from .. import EVENT_LOGGER_NAME from ..base import Handoff as HandoffBase @@ -47,7 +49,20 @@ event_logger = logging.getLogger(EVENT_LOGGER_NAME) -class AssistantAgent(BaseChatAgent): +class AssistantAgentConfig(BaseModel): + """The declarative configuration for the assistant agent.""" + name: str + model_client: ComponentModel + tools: List[Any] | None = None + handoffs: List[HandoffBase | str] | None = None + model_context: ComponentModel | None = None + description: str + system_message: str | None = None + reflect_on_tool_use: bool + tool_call_summary_format: str + + +class AssistantAgent(BaseChatAgent, Component[AssistantAgentConfig]): """An agent that provides assistance with tool use. The :meth:`on_messages` returns a :class:`~autogen_agentchat.base.Response` @@ -226,12 +241,17 @@ async def main() -> None: See `o1 beta limitations `_ for more details. 
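    The agent is also a declarative component: it can be dumped to a
    configuration object and rebuilt from one. A minimal sketch of the round
    trip (note that tools are not serialized in this version, so a reloaded
    agent starts without them):

    .. code-block:: python

        from autogen_agentchat.agents import AssistantAgent
        from autogen_ext.models.openai import OpenAIChatCompletionClient

        agent = AssistantAgent(
            name="assistant",
            model_client=OpenAIChatCompletionClient(model="gpt-4o"),
        )

        config = agent.dump_component()  # serialize to a ComponentModel
        restored = AssistantAgent.load_component(config)  # rebuild the agent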
""" + component_type = "agent" + component_config_schema = AssistantAgentConfig + component_provider_override = "autogen_agentchat.agents.AssistantAgent" + def __init__( self, name: str, model_client: ChatCompletionClient, *, - tools: List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None, + tools: List[Tool | Callable[..., Any] | + Callable[..., Awaitable[Any]]] | None = None, handoffs: List[HandoffBase | str] | None = None, model_context: ChatCompletionContext | None = None, description: str = "An agent that provides assistance with ability to use tools.", @@ -250,7 +270,8 @@ def __init__( self._tools: List[Tool] = [] if tools is not None: if model_client.model_info["function_calling"] is False: - raise ValueError("The model does not support function calling.") + raise ValueError( + "The model does not support function calling.") for tool in tools: if isinstance(tool, Tool): self._tools.append(tool) @@ -259,7 +280,8 @@ def __init__( description = tool.__doc__ else: description = "" - self._tools.append(FunctionTool(tool, description=description)) + self._tools.append(FunctionTool( + tool, description=description)) else: raise ValueError(f"Unsupported tool type: {type(tool)}") # Check if tool names are unique. @@ -271,7 +293,8 @@ def __init__( self._handoffs: Dict[str, HandoffBase] = {} if handoffs is not None: if model_client.model_info["function_calling"] is False: - raise ValueError("The model does not support function calling, which is needed for handoffs.") + raise ValueError( + "The model does not support function calling, which is needed for handoffs.") for handoff in handoffs: if isinstance(handoff, str): handoff = HandoffBase(target=handoff) @@ -279,11 +302,13 @@ def __init__( self._handoff_tools.append(handoff.handoff_tool) self._handoffs[handoff.name] = handoff else: - raise ValueError(f"Unsupported handoff type: {type(handoff)}") + raise ValueError( + f"Unsupported handoff type: {type(handoff)}") # Check if handoff tool names are unique. handoff_tool_names = [tool.name for tool in self._handoff_tools] if len(handoff_tool_names) != len(set(handoff_tool_names)): - raise ValueError(f"Handoff names must be unique: {handoff_tool_names}") + raise ValueError( + f"Handoff names must be unique: {handoff_tool_names}") # Check if handoff tool names not in tool names. if any(name in tool_names for name in handoff_tool_names): raise ValueError( @@ -311,7 +336,8 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: async for message in self.on_messages_stream(messages, cancellation_token): if isinstance(message, Response): return message - raise AssertionError("The stream should have returned the final result.") + raise AssertionError( + "The stream should have returned the final result.") async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken @@ -337,14 +363,17 @@ async def on_messages_stream( # Check if the response is a string and return it. if isinstance(result.content, str): yield Response( - chat_message=TextMessage(content=result.content, source=self.name, models_usage=result.usage), + chat_message=TextMessage( + content=result.content, source=self.name, models_usage=result.usage), inner_messages=inner_messages, ) return # Process tool calls. 
- assert isinstance(result.content, list) and all(isinstance(item, FunctionCall) for item in result.content) - tool_call_msg = ToolCallRequestEvent(content=result.content, source=self.name, models_usage=result.usage) + assert isinstance(result.content, list) and all( + isinstance(item, FunctionCall) for item in result.content) + tool_call_msg = ToolCallRequestEvent( + content=result.content, source=self.name, models_usage=result.usage) event_logger.debug(tool_call_msg) # Add the tool call message to the output. inner_messages.append(tool_call_msg) @@ -352,7 +381,8 @@ async def on_messages_stream( # Execute the tool calls. results = await asyncio.gather(*[self._execute_tool_call(call, cancellation_token) for call in result.content]) - tool_call_result_msg = ToolCallExecutionEvent(content=results, source=self.name) + tool_call_result_msg = ToolCallExecutionEvent( + content=results, source=self.name) event_logger.debug(tool_call_result_msg) await self._model_context.add_message(FunctionExecutionResultMessage(content=results)) inner_messages.append(tool_call_result_msg) @@ -372,7 +402,8 @@ async def on_messages_stream( ) # Return the output messages to signal the handoff. yield Response( - chat_message=HandoffMessage(content=handoffs[0].message, target=handoffs[0].target, source=self.name), + chat_message=HandoffMessage( + content=handoffs[0].message, target=handoffs[0].target, source=self.name), inner_messages=inner_messages, ) return @@ -386,7 +417,8 @@ async def on_messages_stream( await self._model_context.add_message(AssistantMessage(content=result.content, source=self.name)) # Yield the response. yield Response( - chat_message=TextMessage(content=result.content, source=self.name, models_usage=result.usage), + chat_message=TextMessage( + content=result.content, source=self.name, models_usage=result.usage), inner_messages=inner_messages, ) else: @@ -402,7 +434,8 @@ async def on_messages_stream( ) tool_call_summary = "\n".join(tool_call_summaries) yield Response( - chat_message=ToolCallSummaryMessage(content=tool_call_summary, source=self.name), + chat_message=ToolCallSummaryMessage( + content=tool_call_summary, source=self.name), inner_messages=inner_messages, ) @@ -413,9 +446,11 @@ async def _execute_tool_call( try: if not self._tools + self._handoff_tools: raise ValueError("No tools are available.") - tool = next((t for t in self._tools + self._handoff_tools if t.name == tool_call.name), None) + tool = next((t for t in self._tools + + self._handoff_tools if t.name == tool_call.name), None) if tool is None: - raise ValueError(f"The tool '{tool_call.name}' is not available.") + raise ValueError( + f"The tool '{tool_call.name}' is not available.") arguments = json.loads(tool_call.arguments) result = await tool.run_json(arguments, cancellation_token) result_as_str = tool.return_value_as_string(result) @@ -437,3 +472,33 @@ async def load_state(self, state: Mapping[str, Any]) -> None: assistant_agent_state = AssistantAgentState.model_validate(state) # Load the model context state. 
await self._model_context.load_state(assistant_agent_state.llm_context) + + def _to_config(self) -> AssistantAgentConfig: + """Convert the assistant agent to a declarative config.""" + return AssistantAgentConfig( + name=self.name, + model_client=self._model_client.dump_component(), + tools=[], + handoffs=list(self._handoffs.values()), + model_context=self._model_context.dump_component(), + description=self.description, + system_message=self._system_messages[0].content if self._system_messages else None, + reflect_on_tool_use=self._reflect_on_tool_use, + tool_call_summary_format=self._tool_call_summary_format, + ) + + @classmethod + def _from_config(cls, config) -> Self: + """Create an assistant agent from a declarative config.""" + return cls( + name=config.name, + model_client=ChatCompletionClient.load_component( + config.model_client), + tools=[], + handoffs=config.handoffs, + model_context=None, + description=config.description, + system_message=config.system_message, + reflect_on_tool_use=config.reflect_on_tool_use, + tool_call_summary_format=config.tool_call_summary_format, + ) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py index 42b7cb78a007..d737b528aa8c 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py @@ -1,7 +1,8 @@ from abc import ABC, abstractmethod from typing import Any, AsyncGenerator, List, Mapping, Sequence -from autogen_core import CancellationToken +from autogen_core import CancellationToken, ComponentBase +from pydantic import BaseModel from ..base import ChatAgent, Response, TaskResult from ..messages import ( @@ -13,7 +14,7 @@ from ..state import BaseState -class BaseChatAgent(ChatAgent, ABC): +class BaseChatAgent(ChatAgent, ABC, ComponentBase[BaseModel]): """Base class for a chat agent. This abstract class provides a base implementation for a :class:`ChatAgent`. @@ -35,10 +36,13 @@ class BaseChatAgent(ChatAgent, ABC): This design principle must be followed when creating a new agent. 
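    Subclasses can opt in to declarative serialization by setting
    ``component_config_schema`` and implementing ``_to_config`` and
    ``_from_config``, as :class:`UserProxyAgent` does in this package. A sketch
    of the pattern (``EchoAgentConfig`` and ``EchoAgent`` are illustrative
    names; the abstract chat methods are omitted for brevity):

    .. code-block:: python

        from pydantic import BaseModel
        from typing_extensions import Self


        class EchoAgentConfig(BaseModel):
            name: str
            description: str


        class EchoAgent(BaseChatAgent):
            # on_messages, on_messages_stream, on_reset, etc. omitted.
            component_config_schema = EchoAgentConfig

            def _to_config(self) -> EchoAgentConfig:
                return EchoAgentConfig(name=self.name, description=self.description)

            @classmethod
            def _from_config(cls, config: EchoAgentConfig) -> Self:
                return cls(name=config.name, description=config.description)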
""" + component_type = "agent" + def __init__(self, name: str, description: str) -> None: self._name = name if self._name.isidentifier() is False: - raise ValueError("The agent name must be a valid Python identifier.") + raise ValueError( + "The agent name must be a valid Python identifier.") self._description = description @property @@ -129,7 +133,8 @@ async def run( input_messages.append(msg) output_messages.append(msg) else: - raise ValueError(f"Invalid message type in sequence: {type(msg)}") + raise ValueError( + f"Invalid message type in sequence: {type(msg)}") response = await self.on_messages(input_messages, cancellation_token) if response.inner_messages is not None: output_messages += response.inner_messages @@ -168,7 +173,8 @@ async def run_stream( output_messages.append(msg) yield msg else: - raise ValueError(f"Invalid message type in sequence: {type(msg)}") + raise ValueError( + f"Invalid message type in sequence: {type(msg)}") async for message in self.on_messages_stream(input_messages, cancellation_token): if isinstance(message, Response): yield message.chat_message diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py index 89e0b61a50ee..de3ad612b12f 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py @@ -6,7 +6,9 @@ from typing import Any, AsyncGenerator, Awaitable, Callable, ClassVar, Generator, Optional, Sequence, Union, cast from aioconsole import ainput # type: ignore -from autogen_core import CancellationToken +from autogen_core import CancellationToken, Component +from pydantic import BaseModel +from typing_extensions import Self from ..base import Response from ..messages import AgentEvent, ChatMessage, HandoffMessage, TextMessage, UserInputRequestedEvent @@ -20,13 +22,22 @@ # TODO: ainput doesn't seem to play nicely with jupyter. # No input window appears in this case. async def cancellable_input(prompt: str, cancellation_token: Optional[CancellationToken]) -> str: - task: asyncio.Task[str] = asyncio.create_task(ainput(prompt)) # type: ignore + task: asyncio.Task[str] = asyncio.create_task( + ainput(prompt)) # type: ignore if cancellation_token is not None: cancellation_token.link_future(task) return await task -class UserProxyAgent(BaseChatAgent): +class UserProxyAgentConfig(BaseModel): + """Declarative configuration for the UserProxyAgent.""" + + name: str + description: str = "A human user" + input_func: str | None = None + + +class UserProxyAgent(BaseChatAgent, Component[UserProxyAgentConfig]): """An agent that can represent a human user through an input function. This agent can be used to represent a human user in a chat system by providing a custom input function. @@ -111,23 +122,30 @@ async def cancellable_user_agent(): print(f"BaseException: {e}") """ + component_type = "agent" + component_provider_override = "autogen_agentchat.agents.UserProxyAgent" + component_config_schema = UserProxyAgentConfig + class InputRequestContext: def __init__(self) -> None: raise RuntimeError( "InputRequestContext cannot be instantiated. It is a static class that provides context management for user input requests." 
) - _INPUT_REQUEST_CONTEXT_VAR: ClassVar[ContextVar[str]] = ContextVar("_INPUT_REQUEST_CONTEXT_VAR") + _INPUT_REQUEST_CONTEXT_VAR: ClassVar[ContextVar[str]] = ContextVar( + "_INPUT_REQUEST_CONTEXT_VAR") @classmethod @contextmanager def populate_context(cls, ctx: str) -> Generator[None, Any, None]: """:meta private:""" - token = UserProxyAgent.InputRequestContext._INPUT_REQUEST_CONTEXT_VAR.set(ctx) + token = UserProxyAgent.InputRequestContext._INPUT_REQUEST_CONTEXT_VAR.set( + ctx) try: yield finally: - UserProxyAgent.InputRequestContext._INPUT_REQUEST_CONTEXT_VAR.reset(token) + UserProxyAgent.InputRequestContext._INPUT_REQUEST_CONTEXT_VAR.reset( + token) @classmethod def request_id(cls) -> str: @@ -161,7 +179,8 @@ def _get_latest_handoff(self, messages: Sequence[ChatMessage]) -> Optional[Hando if messages[-1].target == self.name: return messages[-1] else: - raise RuntimeError(f"Handoff message target does not match agent name: {messages[-1].source}") + raise RuntimeError( + f"Handoff message target does not match agent name: {messages[-1].source}") return None async def _get_input(self, prompt: str, cancellation_token: Optional[CancellationToken]) -> str: @@ -186,7 +205,8 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: async for message in self.on_messages_stream(messages, cancellation_token): if isinstance(message, Response): return message - raise AssertionError("The stream should have returned the final result.") + raise AssertionError( + "The stream should have returned the final result.") async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken @@ -201,7 +221,8 @@ async def on_messages_stream( request_id = str(uuid.uuid4()) - input_requested_event = UserInputRequestedEvent(request_id=request_id, source=self.name) + input_requested_event = UserInputRequestedEvent( + request_id=request_id, source=self.name) yield input_requested_event with UserProxyAgent.InputRequestContext.populate_context(request_id): user_input = await self._get_input(prompt, cancellation_token) @@ -220,3 +241,11 @@ async def on_messages_stream( async def on_reset(self, cancellation_token: Optional[CancellationToken] = None) -> None: """Reset agent state.""" pass + + def _to_config(self) -> UserProxyAgentConfig: + # TODO: Add ability to serialie input_func + return UserProxyAgentConfig(name=self.name, description=self.description, input_func=None) + + @classmethod + def _from_config(cls, config: UserProxyAgentConfig) -> Self: + return cls(name=config.name, description=config.description, input_func=None) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb index 274135c1155c..510f4951f4a3 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb @@ -33,65 +33,183 @@ "metadata": {}, "outputs": [], "source": [ - "from autogen_agentchat.conditions import MaxMessageTermination, StopMessageTermination\n", + "# from autogen_agentchat.conditions import MaxMessageTermination, StopMessageTermination\n", "\n", - "max_termination = MaxMessageTermination(5)\n", - "stop_termination = StopMessageTermination()" + "# max_termination = MaxMessageTermination(5)\n", + "# stop_termination = StopMessageTermination()\n", + "\n", + "# or_termination = 
max_termination | stop_termination\n", + "\n", + "# or_term_config = or_termination.dump_component()\n", + "# print(\"Config: \",or_term_config)\n", + "\n", + "# new_or_termination = or_termination.load_component(or_term_config)\n", + "# print(\"Object: \",new_or_termination)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Agent Example \n", + "\n", + "In the example below, we will define an agent in python, export this to a dictionary/json and also demonstrate how the agent object can be loaded from the dictionary/json." ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, + "outputs": [], + "source": [ + "from autogen_agentchat.agents import AssistantAgent, UserProxyAgent\n", + "from autogen_agentchat.messages import TextMessage\n", + "from autogen_agentchat.ui import Console\n", + "from autogen_core import CancellationToken\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "\n", + "# Define a tool that searches the web for information.\n", + "async def web_search(query: str) -> str:\n", + " \"\"\"Find information on the web\"\"\"\n", + " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", + "\n", + "\n", + "# Create an agent that uses the OpenAI GPT-4o model.\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " # api_key=\"YOUR_API_KEY\",\n", + ")\n", + "agent = AssistantAgent(\n", + " name=\"assistant\",\n", + " model_client=model_client,\n", + " handoffs=[\"flights_refunder\", \"user\"],\n", + " tools=[web_search],\n", + " system_message=\"Use tools to solve tasks.\",\n", + ")\n", + "\n", + "user_proxy = UserProxyAgent(name=\"user\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "provider='autogen_agentchat.conditions.MaxMessageTermination' component_type='termination' version=1 component_version=1 description=None config={'max_messages': 5}\n" + "provider='autogen_agentchat.agents.UserProxyAgent' component_type='agent' version=1 component_version=1 description=None config={'name': 'user', 'description': 'A human user'}\n", + "\n" ] } ], "source": [ - "print(max_termination.dump_component())" + "up_config = user_proxy.dump_component()\n", + "print(up_config)\n", + "up_new = user_proxy.load_component(up_config)\n", + "print(up_new)" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "{'provider': 'autogen_agentchat.conditions.MaxMessageTermination', 'component_type': 'termination', 'version': 1, 'component_version': 1, 'description': None, 'config': {'max_messages': 5}}\n" + "provider='autogen_agentchat.agents.AssistantAgent' component_type='agent' version=1 component_version=1 description=None config={'name': 'assistant', 'model_client': {'provider': 'autogen_ext.models.openai.OpenAIChatCompletionClient', 'component_type': 'model', 'version': 1, 'component_version': 1, 'config': {'model': 'gpt-4o'}}, 'tools': [], 'handoffs': [{'target': 'flights_refunder', 'description': 'Handoff to flights_refunder.', 'name': 'transfer_to_flights_refunder', 'message': 'Transferred to flights_refunder, adopting the role of flights_refunder immediately.'}, {'target': 'user', 'description': 'Handoff to user.', 'name': 'transfer_to_user', 'message': 'Transferred to user, adopting the role of user immediately.'}], 'model_context': {'provider': 
'autogen_core.model_context.UnboundedChatCompletionContext', 'component_type': 'chat_completion_context', 'version': 1, 'component_version': 1, 'config': {}}, 'description': 'An agent that provides assistance with ability to use tools.', 'system_message': 'Use tools to solve tasks.', 'reflect_on_tool_use': False, 'tool_call_summary_format': '{result}'}\n", + "\n" ] } ], "source": [ - "print(max_termination.dump_component().model_dump())" + "agent_config = agent.dump_component() \n", + "print(agent_config)\n", + "agent_new = agent.load_component(agent_config)\n", + "print(agent_new)" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "ComponentModel(provider='autogen_agentchat.base.OrTerminationCondition', component_type='termination', version=1, component_version=1, description=None, config={'conditions': [{'provider': 'autogen_agentchat.conditions.MaxMessageTermination', 'component_type': 'termination', 'version': 1, 'component_version': 1, 'config': {'max_messages': 5}}, {'provider': 'autogen_agentchat.conditions.StopMessageTermination', 'component_type': 'termination', 'version': 1, 'component_version': 1, 'config': {}}]})" + "{'name': 'assistant',\n", + " 'model_client': {'provider': 'autogen_ext.models.openai.OpenAIChatCompletionClient',\n", + " 'component_type': 'model',\n", + " 'version': 1,\n", + " 'component_version': 1,\n", + " 'config': {'model': 'gpt-4o'}},\n", + " 'tools': [],\n", + " 'handoffs': [{'target': 'flights_refunder',\n", + " 'description': 'Handoff to flights_refunder.',\n", + " 'name': 'transfer_to_flights_refunder',\n", + " 'message': 'Transferred to flights_refunder, adopting the role of flights_refunder immediately.'},\n", + " {'target': 'user',\n", + " 'description': 'Handoff to user.',\n", + " 'name': 'transfer_to_user',\n", + " 'message': 'Transferred to user, adopting the role of user immediately.'}],\n", + " 'model_context': {'provider': 'autogen_core.model_context.UnboundedChatCompletionContext',\n", + " 'component_type': 'chat_completion_context',\n", + " 'version': 1,\n", + " 'component_version': 1,\n", + " 'config': {}},\n", + " 'description': 'An agent that provides assistance with ability to use tools.',\n", + " 'system_message': 'Use tools to solve tasks.',\n", + " 'reflect_on_tool_use': False,\n", + " 'tool_call_summary_format': '{result}'}" ] }, - "execution_count": 4, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "or_termination = max_termination | stop_termination\n", - "or_termination.dump_component()" + "agent_config.config" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "ComponentModel(provider='autogen_ext.models.openai.OpenAIChatCompletionClient', component_type='model', version=1, component_version=1, description=None, config={'model': 'gpt-4o'})" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from autogen_core.models import UserMessage\n", + "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", + "\n", + "# Create an OpenAI model client.\n", + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\", \n", + ")\n", + "model_client.dump_component()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_core.model_context import UnboundedChatCompletionContext" ] } ], diff 
--git a/python/packages/autogen-core/src/autogen_core/model_context/_buffered_chat_completion_context.py b/python/packages/autogen-core/src/autogen_core/model_context/_buffered_chat_completion_context.py index f66197246e91..10802afa329a 100644 --- a/python/packages/autogen-core/src/autogen_core/model_context/_buffered_chat_completion_context.py +++ b/python/packages/autogen-core/src/autogen_core/model_context/_buffered_chat_completion_context.py @@ -1,10 +1,19 @@ from typing import List +from pydantic import BaseModel + from ..models import FunctionExecutionResultMessage, LLMMessage from ._chat_completion_context import ChatCompletionContext +from .._component_config import Component +from typing_extensions import Self + + +class BufferedChatCompletionContextConfig(BaseModel): + buffer_size: int + initial_messages: List[LLMMessage] | None = None -class BufferedChatCompletionContext(ChatCompletionContext): +class BufferedChatCompletionContext(ChatCompletionContext, Component[BufferedChatCompletionContextConfig]): """A buffered chat completion context that keeps a view of the last n messages, where n is the buffer size. The buffer size is set at initialization. @@ -13,6 +22,9 @@ class BufferedChatCompletionContext(ChatCompletionContext): initial_messages (List[LLMMessage] | None): The initial messages. """ + component_config_schema = BufferedChatCompletionContextConfig + component_provider_override = "autogen_core.model_context.BufferedChatCompletionContext" + def __init__(self, buffer_size: int, initial_messages: List[LLMMessage] | None = None) -> None: super().__init__(initial_messages) if buffer_size <= 0: @@ -21,9 +33,16 @@ def __init__(self, buffer_size: int, initial_messages: List[LLMMessage] | None = async def get_messages(self) -> List[LLMMessage]: """Get at most `buffer_size` recent messages.""" - messages = self._messages[-self._buffer_size :] + messages = self._messages[-self._buffer_size:] # Handle the first message is a function call result message. if messages and isinstance(messages[0], FunctionExecutionResultMessage): # Remove the first message from the list. messages = messages[1:] return messages + + def _to_config(self) -> BufferedChatCompletionContextConfig: + return BufferedChatCompletionContextConfig(buffer_size=self._buffer_size, initial_messages=self._messages) + + @classmethod + def _from_config(cls, config) -> Self: + return cls(**config.model_dump()) diff --git a/python/packages/autogen-core/src/autogen_core/model_context/_chat_completion_context.py b/python/packages/autogen-core/src/autogen_core/model_context/_chat_completion_context.py index 33b1dac7fa18..656ff87801a2 100644 --- a/python/packages/autogen-core/src/autogen_core/model_context/_chat_completion_context.py +++ b/python/packages/autogen-core/src/autogen_core/model_context/_chat_completion_context.py @@ -4,9 +4,10 @@ from pydantic import BaseModel, Field from ..models import LLMMessage +from .._component_config import ComponentBase -class ChatCompletionContext(ABC): +class ChatCompletionContext(ABC, ComponentBase[BaseModel]): """An abstract base class for defining the interface of a chat completion context. A chat completion context lets agents store and retrieve LLM messages. It can be implemented with different recall strategies. @@ -15,6 +16,8 @@ class ChatCompletionContext(ABC): initial_messages (List[LLMMessage] | None): The initial messages. 
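    The built-in contexts are declarative components and can be round-tripped
    through a configuration. A brief sketch:

    .. code-block:: python

        from autogen_core.model_context import BufferedChatCompletionContext

        context = BufferedChatCompletionContext(buffer_size=5)
        config = context.dump_component()  # provider plus {"buffer_size": 5, ...}
        restored = BufferedChatCompletionContext.load_component(config)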
""" + component_type = "chat_completion_context" + def __init__(self, initial_messages: List[LLMMessage] | None = None) -> None: self._messages: List[LLMMessage] = initial_messages or [] @@ -33,7 +36,8 @@ async def save_state(self) -> Mapping[str, Any]: return ChatCompletionContextState(messages=self._messages).model_dump() async def load_state(self, state: Mapping[str, Any]) -> None: - self._messages = ChatCompletionContextState.model_validate(state).messages + self._messages = ChatCompletionContextState.model_validate( + state).messages class ChatCompletionContextState(BaseModel): diff --git a/python/packages/autogen-core/src/autogen_core/model_context/_unbounded_chat_completion_context.py b/python/packages/autogen-core/src/autogen_core/model_context/_unbounded_chat_completion_context.py index dff45bfc92d8..374fb36cb394 100644 --- a/python/packages/autogen-core/src/autogen_core/model_context/_unbounded_chat_completion_context.py +++ b/python/packages/autogen-core/src/autogen_core/model_context/_unbounded_chat_completion_context.py @@ -1,12 +1,30 @@ from typing import List +from pydantic import BaseModel + from ..models import LLMMessage from ._chat_completion_context import ChatCompletionContext +from .._component_config import Component +from typing_extensions import Self + + +class UnboundedChatCompletionContextConfig(BaseModel): + pass -class UnboundedChatCompletionContext(ChatCompletionContext): +class UnboundedChatCompletionContext(ChatCompletionContext, Component[UnboundedChatCompletionContextConfig]): """An unbounded chat completion context that keeps a view of the all the messages.""" + component_config_schema = UnboundedChatCompletionContextConfig + component_provider_override = "autogen_core.model_context.UnboundedChatCompletionContext" + async def get_messages(self) -> List[LLMMessage]: """Get at most `buffer_size` recent messages.""" return self._messages + + def _to_config(self) -> UnboundedChatCompletionContextConfig: + return UnboundedChatCompletionContextConfig() + + @classmethod + def _from_config(cls, config) -> Self: + return cls() From 9a89869366d174e4866f2bdf844078f61a3c4e90 Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Tue, 14 Jan 2025 19:26:12 -0800 Subject: [PATCH 02/10] make head tail context declarative --- .../_head_and_tail_chat_completion_context.py | 31 +++++++++++++++++-- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/python/packages/autogen-core/src/autogen_core/model_context/_head_and_tail_chat_completion_context.py b/python/packages/autogen-core/src/autogen_core/model_context/_head_and_tail_chat_completion_context.py index 2518f456b632..5e5fff0edf28 100644 --- a/python/packages/autogen-core/src/autogen_core/model_context/_head_and_tail_chat_completion_context.py +++ b/python/packages/autogen-core/src/autogen_core/model_context/_head_and_tail_chat_completion_context.py @@ -1,11 +1,21 @@ from typing import List +from pydantic import BaseModel + from .._types import FunctionCall from ..models import AssistantMessage, FunctionExecutionResultMessage, LLMMessage, UserMessage from ._chat_completion_context import ChatCompletionContext +from .._component_config import Component +from typing_extensions import Self + + +class HeadAndTailChatCompletionContextConfig(BaseModel): + head_size: int + tail_size: int + initial_messages: List[LLMMessage] | None = None -class HeadAndTailChatCompletionContext(ChatCompletionContext): +class HeadAndTailChatCompletionContext(ChatCompletionContext, Component[HeadAndTailChatCompletionContextConfig]): """A chat 
completion context that keeps a view of the first n and last m messages, where n is the head size and m is the tail size. The head and tail sizes are set at initialization. @@ -16,6 +26,9 @@ class HeadAndTailChatCompletionContext(ChatCompletionContext): initial_messages (List[LLMMessage] | None): The initial messages. """ + component_config_schema = HeadAndTailChatCompletionContextConfig + component_provider_override = "autogen_core.model_context.HeadAndTailChatCompletionContext" + def __init__(self, head_size: int, tail_size: int, initial_messages: List[LLMMessage] | None = None) -> None: super().__init__(initial_messages) if head_size <= 0: @@ -38,7 +51,7 @@ async def get_messages(self) -> List[LLMMessage]: # Remove the last message from the head. head_messages = head_messages[:-1] - tail_messages = self._messages[-self._tail_size :] + tail_messages = self._messages[-self._tail_size:] # Handle the first message is a function call result message. if tail_messages and isinstance(tail_messages[0], FunctionExecutionResultMessage): # Remove the first message from the tail. @@ -50,5 +63,17 @@ async def get_messages(self) -> List[LLMMessage]: # return all messages. return self._messages - placeholder_messages = [UserMessage(content=f"Skipped {num_skipped} messages.", source="System")] + placeholder_messages = [UserMessage( + content=f"Skipped {num_skipped} messages.", source="System")] return head_messages + placeholder_messages + tail_messages + + def _to_config(self) -> HeadAndTailChatCompletionContextConfig: + return HeadAndTailChatCompletionContextConfig( + head_size=self._head_size, tail_size=self._tail_size, initial_messages=self._messages + ) + + @classmethod + def _from_config(cls, config) -> Self: + return cls( + head_size=config.head_size, tail_size=config.tail_size, initial_messages=config.initial_messages + ) From 4795fee6fe6ce6b1fad25c2cb987392b66dc64ca Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Tue, 14 Jan 2025 21:30:04 -0800 Subject: [PATCH 03/10] update and formatting --- .../agents/_assistant_agent.py | 58 +++----- .../agents/_base_chat_agent.py | 9 +- .../agents/_user_proxy_agent.py | 21 +-- .../tests/test_assistant_agent.py | 38 +++++- .../tests/test_declarative_components.py | 37 +++++ .../tutorial/declarative.ipynb | 127 ++++-------------- .../_buffered_chat_completion_context.py | 8 +- .../model_context/_chat_completion_context.py | 5 +- .../_head_and_tail_chat_completion_context.py | 15 +-- .../_unbounded_chat_completion_context.py | 6 +- .../models/openai/_openai_client.py | 2 +- .../autogen-ext/test_filesurfer_agent.html | 9 ++ 12 files changed, 155 insertions(+), 180 deletions(-) create mode 100644 python/packages/autogen-ext/test_filesurfer_agent.html diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index 923c49a698f2..bc01c903149a 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -2,7 +2,6 @@ import json import logging import warnings -from typing_extensions import Self from typing import ( Any, AsyncGenerator, @@ -14,7 +13,7 @@ Sequence, ) -from autogen_core import CancellationToken, FunctionCall, Component, ComponentModel +from autogen_core import CancellationToken, Component, ComponentModel, FunctionCall from autogen_core.model_context import ( ChatCompletionContext, 
UnboundedChatCompletionContext, @@ -29,6 +28,7 @@ ) from autogen_core.tools import FunctionTool, Tool from pydantic import BaseModel +from typing_extensions import Self from .. import EVENT_LOGGER_NAME from ..base import Handoff as HandoffBase @@ -51,6 +51,7 @@ class AssistantAgentConfig(BaseModel): """The declarative configuration for the assistant agent.""" + name: str model_client: ComponentModel tools: List[Any] | None = None @@ -250,8 +251,7 @@ def __init__( name: str, model_client: ChatCompletionClient, *, - tools: List[Tool | Callable[..., Any] | - Callable[..., Awaitable[Any]]] | None = None, + tools: List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None, handoffs: List[HandoffBase | str] | None = None, model_context: ChatCompletionContext | None = None, description: str = "An agent that provides assistance with ability to use tools.", @@ -270,8 +270,7 @@ def __init__( self._tools: List[Tool] = [] if tools is not None: if model_client.model_info["function_calling"] is False: - raise ValueError( - "The model does not support function calling.") + raise ValueError("The model does not support function calling.") for tool in tools: if isinstance(tool, Tool): self._tools.append(tool) @@ -280,8 +279,7 @@ def __init__( description = tool.__doc__ else: description = "" - self._tools.append(FunctionTool( - tool, description=description)) + self._tools.append(FunctionTool(tool, description=description)) else: raise ValueError(f"Unsupported tool type: {type(tool)}") # Check if tool names are unique. @@ -293,8 +291,7 @@ def __init__( self._handoffs: Dict[str, HandoffBase] = {} if handoffs is not None: if model_client.model_info["function_calling"] is False: - raise ValueError( - "The model does not support function calling, which is needed for handoffs.") + raise ValueError("The model does not support function calling, which is needed for handoffs.") for handoff in handoffs: if isinstance(handoff, str): handoff = HandoffBase(target=handoff) @@ -302,13 +299,11 @@ def __init__( self._handoff_tools.append(handoff.handoff_tool) self._handoffs[handoff.name] = handoff else: - raise ValueError( - f"Unsupported handoff type: {type(handoff)}") + raise ValueError(f"Unsupported handoff type: {type(handoff)}") # Check if handoff tool names are unique. handoff_tool_names = [tool.name for tool in self._handoff_tools] if len(handoff_tool_names) != len(set(handoff_tool_names)): - raise ValueError( - f"Handoff names must be unique: {handoff_tool_names}") + raise ValueError(f"Handoff names must be unique: {handoff_tool_names}") # Check if handoff tool names not in tool names. if any(name in tool_names for name in handoff_tool_names): raise ValueError( @@ -336,8 +331,7 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: async for message in self.on_messages_stream(messages, cancellation_token): if isinstance(message, Response): return message - raise AssertionError( - "The stream should have returned the final result.") + raise AssertionError("The stream should have returned the final result.") async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken @@ -363,17 +357,14 @@ async def on_messages_stream( # Check if the response is a string and return it. 
if isinstance(result.content, str): yield Response( - chat_message=TextMessage( - content=result.content, source=self.name, models_usage=result.usage), + chat_message=TextMessage(content=result.content, source=self.name, models_usage=result.usage), inner_messages=inner_messages, ) return # Process tool calls. - assert isinstance(result.content, list) and all( - isinstance(item, FunctionCall) for item in result.content) - tool_call_msg = ToolCallRequestEvent( - content=result.content, source=self.name, models_usage=result.usage) + assert isinstance(result.content, list) and all(isinstance(item, FunctionCall) for item in result.content) + tool_call_msg = ToolCallRequestEvent(content=result.content, source=self.name, models_usage=result.usage) event_logger.debug(tool_call_msg) # Add the tool call message to the output. inner_messages.append(tool_call_msg) @@ -381,8 +372,7 @@ async def on_messages_stream( # Execute the tool calls. results = await asyncio.gather(*[self._execute_tool_call(call, cancellation_token) for call in result.content]) - tool_call_result_msg = ToolCallExecutionEvent( - content=results, source=self.name) + tool_call_result_msg = ToolCallExecutionEvent(content=results, source=self.name) event_logger.debug(tool_call_result_msg) await self._model_context.add_message(FunctionExecutionResultMessage(content=results)) inner_messages.append(tool_call_result_msg) @@ -402,8 +392,7 @@ async def on_messages_stream( ) # Return the output messages to signal the handoff. yield Response( - chat_message=HandoffMessage( - content=handoffs[0].message, target=handoffs[0].target, source=self.name), + chat_message=HandoffMessage(content=handoffs[0].message, target=handoffs[0].target, source=self.name), inner_messages=inner_messages, ) return @@ -417,8 +406,7 @@ async def on_messages_stream( await self._model_context.add_message(AssistantMessage(content=result.content, source=self.name)) # Yield the response. 
yield Response( - chat_message=TextMessage( - content=result.content, source=self.name, models_usage=result.usage), + chat_message=TextMessage(content=result.content, source=self.name, models_usage=result.usage), inner_messages=inner_messages, ) else: @@ -434,8 +422,7 @@ async def on_messages_stream( ) tool_call_summary = "\n".join(tool_call_summaries) yield Response( - chat_message=ToolCallSummaryMessage( - content=tool_call_summary, source=self.name), + chat_message=ToolCallSummaryMessage(content=tool_call_summary, source=self.name), inner_messages=inner_messages, ) @@ -446,11 +433,9 @@ async def _execute_tool_call( try: if not self._tools + self._handoff_tools: raise ValueError("No tools are available.") - tool = next((t for t in self._tools + - self._handoff_tools if t.name == tool_call.name), None) + tool = next((t for t in self._tools + self._handoff_tools if t.name == tool_call.name), None) if tool is None: - raise ValueError( - f"The tool '{tool_call.name}' is not available.") + raise ValueError(f"The tool '{tool_call.name}' is not available.") arguments = json.loads(tool_call.arguments) result = await tool.run_json(arguments, cancellation_token) result_as_str = tool.return_value_as_string(result) @@ -488,12 +473,11 @@ def _to_config(self) -> AssistantAgentConfig: ) @classmethod - def _from_config(cls, config) -> Self: + def _from_config(cls, config: AssistantAgentConfig) -> Self: """Create an assistant agent from a declarative config.""" return cls( name=config.name, - model_client=ChatCompletionClient.load_component( - config.model_client), + model_client=ChatCompletionClient.load_component(config.model_client), tools=[], handoffs=config.handoffs, model_context=None, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py index d737b528aa8c..97b9de76242c 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py @@ -41,8 +41,7 @@ class BaseChatAgent(ChatAgent, ABC, ComponentBase[BaseModel]): def __init__(self, name: str, description: str) -> None: self._name = name if self._name.isidentifier() is False: - raise ValueError( - "The agent name must be a valid Python identifier.") + raise ValueError("The agent name must be a valid Python identifier.") self._description = description @property @@ -133,8 +132,7 @@ async def run( input_messages.append(msg) output_messages.append(msg) else: - raise ValueError( - f"Invalid message type in sequence: {type(msg)}") + raise ValueError(f"Invalid message type in sequence: {type(msg)}") response = await self.on_messages(input_messages, cancellation_token) if response.inner_messages is not None: output_messages += response.inner_messages @@ -173,8 +171,7 @@ async def run_stream( output_messages.append(msg) yield msg else: - raise ValueError( - f"Invalid message type in sequence: {type(msg)}") + raise ValueError(f"Invalid message type in sequence: {type(msg)}") async for message in self.on_messages_stream(input_messages, cancellation_token): if isinstance(message, Response): yield message.chat_message diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py index de3ad612b12f..6cc0be8c7098 100644 --- 
a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py @@ -22,8 +22,7 @@ # TODO: ainput doesn't seem to play nicely with jupyter. # No input window appears in this case. async def cancellable_input(prompt: str, cancellation_token: Optional[CancellationToken]) -> str: - task: asyncio.Task[str] = asyncio.create_task( - ainput(prompt)) # type: ignore + task: asyncio.Task[str] = asyncio.create_task(ainput(prompt)) # type: ignore if cancellation_token is not None: cancellation_token.link_future(task) return await task @@ -132,20 +131,17 @@ def __init__(self) -> None: "InputRequestContext cannot be instantiated. It is a static class that provides context management for user input requests." ) - _INPUT_REQUEST_CONTEXT_VAR: ClassVar[ContextVar[str]] = ContextVar( - "_INPUT_REQUEST_CONTEXT_VAR") + _INPUT_REQUEST_CONTEXT_VAR: ClassVar[ContextVar[str]] = ContextVar("_INPUT_REQUEST_CONTEXT_VAR") @classmethod @contextmanager def populate_context(cls, ctx: str) -> Generator[None, Any, None]: """:meta private:""" - token = UserProxyAgent.InputRequestContext._INPUT_REQUEST_CONTEXT_VAR.set( - ctx) + token = UserProxyAgent.InputRequestContext._INPUT_REQUEST_CONTEXT_VAR.set(ctx) try: yield finally: - UserProxyAgent.InputRequestContext._INPUT_REQUEST_CONTEXT_VAR.reset( - token) + UserProxyAgent.InputRequestContext._INPUT_REQUEST_CONTEXT_VAR.reset(token) @classmethod def request_id(cls) -> str: @@ -179,8 +175,7 @@ def _get_latest_handoff(self, messages: Sequence[ChatMessage]) -> Optional[Hando if messages[-1].target == self.name: return messages[-1] else: - raise RuntimeError( - f"Handoff message target does not match agent name: {messages[-1].source}") + raise RuntimeError(f"Handoff message target does not match agent name: {messages[-1].source}") return None async def _get_input(self, prompt: str, cancellation_token: Optional[CancellationToken]) -> str: @@ -205,8 +200,7 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: async for message in self.on_messages_stream(messages, cancellation_token): if isinstance(message, Response): return message - raise AssertionError( - "The stream should have returned the final result.") + raise AssertionError("The stream should have returned the final result.") async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken @@ -221,8 +215,7 @@ async def on_messages_stream( request_id = str(uuid.uuid4()) - input_requested_event = UserInputRequestedEvent( - request_id=request_id, source=self.name) + input_requested_event = UserInputRequestedEvent(request_id=request_id, source=self.name) yield input_requested_event with UserProxyAgent.InputRequestContext.populate_context(request_id): user_input = await self._get_input(prompt, cancellation_token) diff --git a/python/packages/autogen-agentchat/tests/test_assistant_agent.py b/python/packages/autogen-agentchat/tests/test_assistant_agent.py index ca079ce407b4..78fa11514be5 100644 --- a/python/packages/autogen-agentchat/tests/test_assistant_agent.py +++ b/python/packages/autogen-agentchat/tests/test_assistant_agent.py @@ -508,4 +508,40 @@ async def test_model_context(monkeypatch: pytest.MonkeyPatch) -> None: # Check if the mock client is called with only the last two messages. 
assert len(mock.calls) == 1 - assert len(mock.calls[0]) == 3 # 2 message from the context + 1 system message + # 2 message from the context + 1 system message + assert len(mock.calls[0]) == 3 + + +@pytest.mark.asyncio +async def test_assistant_agent_declarative(monkeypatch: pytest.MonkeyPatch) -> None: + model = "gpt-4o-2024-05-13" + chat_completions = [ + ChatCompletion( + id="id1", + choices=[ + Choice( + finish_reason="stop", + index=0, + message=ChatCompletionMessage(content="Response to message 3", role="assistant"), + ) + ], + created=0, + model=model, + object="chat.completion", + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=15), + ), + ] + mock = _MockChatCompletion(chat_completions) + monkeypatch.setattr(AsyncCompletions, "create", mock.mock_create) + model_context = BufferedChatCompletionContext(buffer_size=2) + agent = AssistantAgent( + "test_agent", + model_client=OpenAIChatCompletionClient(model=model, api_key=""), + model_context=model_context, + ) + + agent_config = agent.dump_component() + assert agent_config.provider == "autogen_agentchat.agents.AssistantAgent" + + agent2 = AssistantAgent.load_component(agent_config) + assert agent2.name == agent.name diff --git a/python/packages/autogen-agentchat/tests/test_declarative_components.py b/python/packages/autogen-agentchat/tests/test_declarative_components.py index 35cf54f86416..4d7ba3f38bfb 100644 --- a/python/packages/autogen-agentchat/tests/test_declarative_components.py +++ b/python/packages/autogen-agentchat/tests/test_declarative_components.py @@ -11,6 +11,11 @@ TokenUsageTermination, ) from autogen_core import ComponentLoader, ComponentModel +from autogen_core.model_context import ( + BufferedChatCompletionContext, + HeadAndTailChatCompletionContext, + UnboundedChatCompletionContext, +) @pytest.mark.asyncio @@ -92,3 +97,35 @@ async def test_termination_declarative() -> None: # Test loading complex composition loaded_composite = ComponentLoader.load_component(composite_config) assert isinstance(loaded_composite, AndTerminationCondition) + + +@pytest.mark.asyncio +async def test_chat_completion_context_declarative() -> None: + unbounded_context = UnboundedChatCompletionContext() + buffered_context = BufferedChatCompletionContext(buffer_size=5) + head_tail_context = HeadAndTailChatCompletionContext(head_size=3, tail_size=2) + + # Test serialization + unbounded_config = unbounded_context.dump_component() + assert unbounded_config.provider == "autogen_core.model_context.UnboundedChatCompletionContext" + + buffered_config = buffered_context.dump_component() + assert buffered_config.provider == "autogen_core.model_context.BufferedChatCompletionContext" + assert buffered_config.config["buffer_size"] == 5 + + head_tail_config = head_tail_context.dump_component() + assert head_tail_config.provider == "autogen_core.model_context.HeadAndTailChatCompletionContext" + assert head_tail_config.config["head_size"] == 3 + assert head_tail_config.config["tail_size"] == 2 + + # Test deserialization + loaded_unbounded = ComponentLoader.load_component(unbounded_config, UnboundedChatCompletionContext) + assert isinstance(loaded_unbounded, UnboundedChatCompletionContext) + + loaded_buffered = ComponentLoader.load_component(buffered_config, BufferedChatCompletionContext) + + assert isinstance(loaded_buffered, BufferedChatCompletionContext) + + loaded_head_tail = ComponentLoader.load_component(head_tail_config, HeadAndTailChatCompletionContext) + + assert isinstance(loaded_head_tail, HeadAndTailChatCompletionContext) 
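The round-trip pattern these tests exercise also applies to composed termination
conditions; a sketch mirroring the notebook changes below:

```python
from autogen_agentchat.conditions import MaxMessageTermination, StopMessageTermination

# Composing two conditions yields a component that serializes like any other.
or_termination = MaxMessageTermination(5) | StopMessageTermination()

config = or_termination.dump_component()          # serialize to a ComponentModel
restored = or_termination.load_component(config)  # rebuild the OR condition
```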
diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb index 510f4951f4a3..6fd4c17c2eef 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb @@ -31,20 +31,27 @@ "cell_type": "code", "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Config: {\"provider\":\"autogen_agentchat.base.OrTerminationCondition\",\"component_type\":\"termination\",\"version\":1,\"component_version\":1,\"description\":null,\"config\":{\"conditions\":[{\"provider\":\"autogen_agentchat.conditions.MaxMessageTermination\",\"component_type\":\"termination\",\"version\":1,\"component_version\":1,\"config\":{\"max_messages\":5}},{\"provider\":\"autogen_agentchat.conditions.StopMessageTermination\",\"component_type\":\"termination\",\"version\":1,\"component_version\":1,\"config\":{}}]}}\n" + ] + } + ], "source": [ - "# from autogen_agentchat.conditions import MaxMessageTermination, StopMessageTermination\n", + "from autogen_agentchat.conditions import MaxMessageTermination, StopMessageTermination\n", "\n", - "# max_termination = MaxMessageTermination(5)\n", - "# stop_termination = StopMessageTermination()\n", + "max_termination = MaxMessageTermination(5)\n", + "stop_termination = StopMessageTermination()\n", "\n", - "# or_termination = max_termination | stop_termination\n", + "or_termination = max_termination | stop_termination\n", "\n", - "# or_term_config = or_termination.dump_component()\n", - "# print(\"Config: \",or_term_config)\n", + "or_term_config = or_termination.dump_component()\n", + "print(\"Config: \", or_term_config.model_dump_json())\n", "\n", - "# new_or_termination = or_termination.load_component(or_term_config)\n", - "# print(\"Object: \",new_or_termination)" + "new_or_termination = or_termination.load_component(or_term_config)" ] }, { @@ -87,8 +94,7 @@ " tools=[web_search],\n", " system_message=\"Use tools to solve tasks.\",\n", ")\n", - "\n", - "user_proxy = UserProxyAgent(name=\"user\")\n" + "user_proxy = UserProxyAgent(name=\"user\")" ] }, { @@ -100,16 +106,14 @@ "name": "stdout", "output_type": "stream", "text": [ - "provider='autogen_agentchat.agents.UserProxyAgent' component_type='agent' version=1 component_version=1 description=None config={'name': 'user', 'description': 'A human user'}\n", - "\n" + "{\"provider\":\"autogen_agentchat.agents.UserProxyAgent\",\"component_type\":\"agent\",\"version\":1,\"component_version\":1,\"description\":null,\"config\":{\"name\":\"user\",\"description\":\"A human user\"}}\n" ] } ], "source": [ - "up_config = user_proxy.dump_component()\n", - "print(up_config)\n", - "up_new = user_proxy.load_component(up_config)\n", - "print(up_new)" + "user_proxy_config = user_proxy.dump_component() # dump component\n", + "print(user_proxy_config.model_dump_json())\n", + "up_new = user_proxy.load_component(user_proxy_config) # load component" ] }, { @@ -121,95 +125,14 @@ "name": "stdout", "output_type": "stream", "text": [ - "provider='autogen_agentchat.agents.AssistantAgent' component_type='agent' version=1 component_version=1 description=None config={'name': 'assistant', 'model_client': {'provider': 'autogen_ext.models.openai.OpenAIChatCompletionClient', 'component_type': 'model', 
'version': 1, 'component_version': 1, 'config': {'model': 'gpt-4o'}}, 'tools': [], 'handoffs': [{'target': 'flights_refunder', 'description': 'Handoff to flights_refunder.', 'name': 'transfer_to_flights_refunder', 'message': 'Transferred to flights_refunder, adopting the role of flights_refunder immediately.'}, {'target': 'user', 'description': 'Handoff to user.', 'name': 'transfer_to_user', 'message': 'Transferred to user, adopting the role of user immediately.'}], 'model_context': {'provider': 'autogen_core.model_context.UnboundedChatCompletionContext', 'component_type': 'chat_completion_context', 'version': 1, 'component_version': 1, 'config': {}}, 'description': 'An agent that provides assistance with ability to use tools.', 'system_message': 'Use tools to solve tasks.', 'reflect_on_tool_use': False, 'tool_call_summary_format': '{result}'}\n", - "\n" + "{\"provider\":\"autogen_agentchat.agents.AssistantAgent\",\"component_type\":\"agent\",\"version\":1,\"component_version\":1,\"description\":null,\"config\":{\"name\":\"assistant\",\"model_client\":{\"provider\":\"autogen_ext.models.openai.OpenAIChatCompletionClient\",\"component_type\":\"model\",\"version\":1,\"component_version\":1,\"config\":{\"model\":\"gpt-4o\"}},\"tools\":[],\"handoffs\":[{\"target\":\"flights_refunder\",\"description\":\"Handoff to flights_refunder.\",\"name\":\"transfer_to_flights_refunder\",\"message\":\"Transferred to flights_refunder, adopting the role of flights_refunder immediately.\"},{\"target\":\"user\",\"description\":\"Handoff to user.\",\"name\":\"transfer_to_user\",\"message\":\"Transferred to user, adopting the role of user immediately.\"}],\"model_context\":{\"provider\":\"autogen_core.model_context.UnboundedChatCompletionContext\",\"component_type\":\"chat_completion_context\",\"version\":1,\"component_version\":1,\"config\":{}},\"description\":\"An agent that provides assistance with ability to use tools.\",\"system_message\":\"Use tools to solve tasks.\",\"reflect_on_tool_use\":false,\"tool_call_summary_format\":\"{result}\"}}\n" ] } ], "source": [ - "agent_config = agent.dump_component() \n", - "print(agent_config)\n", - "agent_new = agent.load_component(agent_config)\n", - "print(agent_new)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'name': 'assistant',\n", - " 'model_client': {'provider': 'autogen_ext.models.openai.OpenAIChatCompletionClient',\n", - " 'component_type': 'model',\n", - " 'version': 1,\n", - " 'component_version': 1,\n", - " 'config': {'model': 'gpt-4o'}},\n", - " 'tools': [],\n", - " 'handoffs': [{'target': 'flights_refunder',\n", - " 'description': 'Handoff to flights_refunder.',\n", - " 'name': 'transfer_to_flights_refunder',\n", - " 'message': 'Transferred to flights_refunder, adopting the role of flights_refunder immediately.'},\n", - " {'target': 'user',\n", - " 'description': 'Handoff to user.',\n", - " 'name': 'transfer_to_user',\n", - " 'message': 'Transferred to user, adopting the role of user immediately.'}],\n", - " 'model_context': {'provider': 'autogen_core.model_context.UnboundedChatCompletionContext',\n", - " 'component_type': 'chat_completion_context',\n", - " 'version': 1,\n", - " 'component_version': 1,\n", - " 'config': {}},\n", - " 'description': 'An agent that provides assistance with ability to use tools.',\n", - " 'system_message': 'Use tools to solve tasks.',\n", - " 'reflect_on_tool_use': False,\n", - " 'tool_call_summary_format': '{result}'}" - ] - }, - 
"execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "agent_config.config" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "ComponentModel(provider='autogen_ext.models.openai.OpenAIChatCompletionClient', component_type='model', version=1, component_version=1, description=None, config={'model': 'gpt-4o'})" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from autogen_core.models import UserMessage\n", - "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", - "\n", - "# Create an OpenAI model client.\n", - "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o\", \n", - ")\n", - "model_client.dump_component()" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_core.model_context import UnboundedChatCompletionContext" + "agent_config = agent.dump_component() # dump component\n", + "print(agent_config.model_dump_json())\n", + "agent_new = agent.load_component(agent_config) # load component" ] } ], diff --git a/python/packages/autogen-core/src/autogen_core/model_context/_buffered_chat_completion_context.py b/python/packages/autogen-core/src/autogen_core/model_context/_buffered_chat_completion_context.py index 10802afa329a..dcece60b1cd7 100644 --- a/python/packages/autogen-core/src/autogen_core/model_context/_buffered_chat_completion_context.py +++ b/python/packages/autogen-core/src/autogen_core/model_context/_buffered_chat_completion_context.py @@ -1,11 +1,11 @@ from typing import List from pydantic import BaseModel +from typing_extensions import Self +from .._component_config import Component from ..models import FunctionExecutionResultMessage, LLMMessage from ._chat_completion_context import ChatCompletionContext -from .._component_config import Component -from typing_extensions import Self class BufferedChatCompletionContextConfig(BaseModel): @@ -33,7 +33,7 @@ def __init__(self, buffer_size: int, initial_messages: List[LLMMessage] | None = async def get_messages(self) -> List[LLMMessage]: """Get at most `buffer_size` recent messages.""" - messages = self._messages[-self._buffer_size:] + messages = self._messages[-self._buffer_size :] # Handle the first message is a function call result message. if messages and isinstance(messages[0], FunctionExecutionResultMessage): # Remove the first message from the list. 
@@ -44,5 +44,5 @@ def _to_config(self) -> BufferedChatCompletionContextConfig: return BufferedChatCompletionContextConfig(buffer_size=self._buffer_size, initial_messages=self._messages) @classmethod - def _from_config(cls, config) -> Self: + def _from_config(cls, config: BufferedChatCompletionContextConfig) -> Self: return cls(**config.model_dump()) diff --git a/python/packages/autogen-core/src/autogen_core/model_context/_chat_completion_context.py b/python/packages/autogen-core/src/autogen_core/model_context/_chat_completion_context.py index 656ff87801a2..d2b82ec1fb31 100644 --- a/python/packages/autogen-core/src/autogen_core/model_context/_chat_completion_context.py +++ b/python/packages/autogen-core/src/autogen_core/model_context/_chat_completion_context.py @@ -3,8 +3,8 @@ from pydantic import BaseModel, Field -from ..models import LLMMessage from .._component_config import ComponentBase +from ..models import LLMMessage class ChatCompletionContext(ABC, ComponentBase[BaseModel]): @@ -36,8 +36,7 @@ async def save_state(self) -> Mapping[str, Any]: return ChatCompletionContextState(messages=self._messages).model_dump() async def load_state(self, state: Mapping[str, Any]) -> None: - self._messages = ChatCompletionContextState.model_validate( - state).messages + self._messages = ChatCompletionContextState.model_validate(state).messages class ChatCompletionContextState(BaseModel): diff --git a/python/packages/autogen-core/src/autogen_core/model_context/_head_and_tail_chat_completion_context.py b/python/packages/autogen-core/src/autogen_core/model_context/_head_and_tail_chat_completion_context.py index 5e5fff0edf28..a37d5927b19f 100644 --- a/python/packages/autogen-core/src/autogen_core/model_context/_head_and_tail_chat_completion_context.py +++ b/python/packages/autogen-core/src/autogen_core/model_context/_head_and_tail_chat_completion_context.py @@ -1,12 +1,12 @@ from typing import List from pydantic import BaseModel +from typing_extensions import Self +from .._component_config import Component from .._types import FunctionCall from ..models import AssistantMessage, FunctionExecutionResultMessage, LLMMessage, UserMessage from ._chat_completion_context import ChatCompletionContext -from .._component_config import Component -from typing_extensions import Self class HeadAndTailChatCompletionContextConfig(BaseModel): @@ -51,7 +51,7 @@ async def get_messages(self) -> List[LLMMessage]: # Remove the last message from the head. head_messages = head_messages[:-1] - tail_messages = self._messages[-self._tail_size:] + tail_messages = self._messages[-self._tail_size :] # Handle the first message is a function call result message. if tail_messages and isinstance(tail_messages[0], FunctionExecutionResultMessage): # Remove the first message from the tail. @@ -63,8 +63,7 @@ async def get_messages(self) -> List[LLMMessage]: # return all messages. 
return self._messages - placeholder_messages = [UserMessage( - content=f"Skipped {num_skipped} messages.", source="System")] + placeholder_messages = [UserMessage(content=f"Skipped {num_skipped} messages.", source="System")] return head_messages + placeholder_messages + tail_messages def _to_config(self) -> HeadAndTailChatCompletionContextConfig: @@ -73,7 +72,5 @@ def _to_config(self) -> HeadAndTailChatCompletionContextConfig: ) @classmethod - def _from_config(cls, config) -> Self: - return cls( - head_size=config.head_size, tail_size=config.tail_size, initial_messages=config.initial_messages - ) + def _from_config(cls, config: HeadAndTailChatCompletionContextConfig) -> Self: + return cls(head_size=config.head_size, tail_size=config.tail_size, initial_messages=config.initial_messages) diff --git a/python/packages/autogen-core/src/autogen_core/model_context/_unbounded_chat_completion_context.py b/python/packages/autogen-core/src/autogen_core/model_context/_unbounded_chat_completion_context.py index 374fb36cb394..4bc26db46ae6 100644 --- a/python/packages/autogen-core/src/autogen_core/model_context/_unbounded_chat_completion_context.py +++ b/python/packages/autogen-core/src/autogen_core/model_context/_unbounded_chat_completion_context.py @@ -1,11 +1,11 @@ from typing import List from pydantic import BaseModel +from typing_extensions import Self +from .._component_config import Component from ..models import LLMMessage from ._chat_completion_context import ChatCompletionContext -from .._component_config import Component -from typing_extensions import Self class UnboundedChatCompletionContextConfig(BaseModel): @@ -26,5 +26,5 @@ def _to_config(self) -> UnboundedChatCompletionContextConfig: return UnboundedChatCompletionContextConfig() @classmethod - def _from_config(cls, config) -> Self: + def _from_config(cls, config: UnboundedChatCompletionContextConfig) -> Self: return cls() diff --git a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py index b525e6340fd0..04512d7ce9b2 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/openai/_openai_client.py @@ -30,13 +30,13 @@ Image, MessageHandlerContext, ) -from autogen_core.models import FinishReasons from autogen_core.logging import LLMCallEvent from autogen_core.models import ( AssistantMessage, ChatCompletionClient, ChatCompletionTokenLogprob, CreateResult, + FinishReasons, FunctionExecutionResultMessage, LLMMessage, ModelCapabilities, # type: ignore diff --git a/python/packages/autogen-ext/test_filesurfer_agent.html b/python/packages/autogen-ext/test_filesurfer_agent.html new file mode 100644 index 000000000000..8243435009e5 --- /dev/null +++ b/python/packages/autogen-ext/test_filesurfer_agent.html @@ -0,0 +1,9 @@ + + + FileSurfer test file + + +

+    <h1>FileSurfer test H1</h1>
+    <p>FileSurfer test body</p>
+  </body>
+</html>
+ + \ No newline at end of file From 2fb5cdeb5663f756fc103ea6bfbab2d516b244f7 Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Tue, 14 Jan 2025 21:53:19 -0800 Subject: [PATCH 04/10] update assistant, format updates --- .../agents/_assistant_agent.py | 58 ++++------ .../tests/test_assistant_agent.py | 104 ++++++++++-------- .../tutorial/declarative.ipynb | 3 - 3 files changed, 79 insertions(+), 86 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index 8bb6bd93b3f3..6bb53cf09ad5 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -254,8 +254,7 @@ def __init__( name: str, model_client: ChatCompletionClient, *, - tools: List[Tool | Callable[..., Any] | - Callable[..., Awaitable[Any]]] | None = None, + tools: List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None, handoffs: List[HandoffBase | str] | None = None, model_context: ChatCompletionContext | None = None, description: str = "An agent that provides assistance with ability to use tools.", @@ -273,8 +272,7 @@ def __init__( if isinstance(memory, list): self._memory = memory else: - raise TypeError( - f"Expected Memory, List[Memory], or None, got {type(memory)}") + raise TypeError(f"Expected Memory, List[Memory], or None, got {type(memory)}") self._system_messages: List[ SystemMessage | UserMessage | AssistantMessage | FunctionExecutionResultMessage @@ -286,8 +284,7 @@ def __init__( self._tools: List[Tool] = [] if tools is not None: if model_client.model_info["function_calling"] is False: - raise ValueError( - "The model does not support function calling.") + raise ValueError("The model does not support function calling.") for tool in tools: if isinstance(tool, Tool): self._tools.append(tool) @@ -296,8 +293,7 @@ def __init__( description = tool.__doc__ else: description = "" - self._tools.append(FunctionTool( - tool, description=description)) + self._tools.append(FunctionTool(tool, description=description)) else: raise ValueError(f"Unsupported tool type: {type(tool)}") # Check if tool names are unique. @@ -309,8 +305,7 @@ def __init__( self._handoffs: Dict[str, HandoffBase] = {} if handoffs is not None: if model_client.model_info["function_calling"] is False: - raise ValueError( - "The model does not support function calling, which is needed for handoffs.") + raise ValueError("The model does not support function calling, which is needed for handoffs.") for handoff in handoffs: if isinstance(handoff, str): handoff = HandoffBase(target=handoff) @@ -318,13 +313,11 @@ def __init__( self._handoff_tools.append(handoff.handoff_tool) self._handoffs[handoff.name] = handoff else: - raise ValueError( - f"Unsupported handoff type: {type(handoff)}") + raise ValueError(f"Unsupported handoff type: {type(handoff)}") # Check if handoff tool names are unique. handoff_tool_names = [tool.name for tool in self._handoff_tools] if len(handoff_tool_names) != len(set(handoff_tool_names)): - raise ValueError( - f"Handoff names must be unique: {handoff_tool_names}") + raise ValueError(f"Handoff names must be unique: {handoff_tool_names}") # Check if handoff tool names not in tool names. 
if any(name in tool_names for name in handoff_tool_names): raise ValueError( @@ -352,8 +345,7 @@ async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: async for message in self.on_messages_stream(messages, cancellation_token): if isinstance(message, Response): return message - raise AssertionError( - "The stream should have returned the final result.") + raise AssertionError("The stream should have returned the final result.") async def on_messages_stream( self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken @@ -390,17 +382,14 @@ async def on_messages_stream( # Check if the response is a string and return it. if isinstance(result.content, str): yield Response( - chat_message=TextMessage( - content=result.content, source=self.name, models_usage=result.usage), + chat_message=TextMessage(content=result.content, source=self.name, models_usage=result.usage), inner_messages=inner_messages, ) return # Process tool calls. - assert isinstance(result.content, list) and all( - isinstance(item, FunctionCall) for item in result.content) - tool_call_msg = ToolCallRequestEvent( - content=result.content, source=self.name, models_usage=result.usage) + assert isinstance(result.content, list) and all(isinstance(item, FunctionCall) for item in result.content) + tool_call_msg = ToolCallRequestEvent(content=result.content, source=self.name, models_usage=result.usage) event_logger.debug(tool_call_msg) # Add the tool call message to the output. inner_messages.append(tool_call_msg) @@ -408,8 +397,7 @@ async def on_messages_stream( # Execute the tool calls. results = await asyncio.gather(*[self._execute_tool_call(call, cancellation_token) for call in result.content]) - tool_call_result_msg = ToolCallExecutionEvent( - content=results, source=self.name) + tool_call_result_msg = ToolCallExecutionEvent(content=results, source=self.name) event_logger.debug(tool_call_result_msg) await self._model_context.add_message(FunctionExecutionResultMessage(content=results)) inner_messages.append(tool_call_result_msg) @@ -429,8 +417,7 @@ async def on_messages_stream( ) # Return the output messages to signal the handoff. yield Response( - chat_message=HandoffMessage( - content=handoffs[0].message, target=handoffs[0].target, source=self.name), + chat_message=HandoffMessage(content=handoffs[0].message, target=handoffs[0].target, source=self.name), inner_messages=inner_messages, ) return @@ -444,8 +431,7 @@ async def on_messages_stream( await self._model_context.add_message(AssistantMessage(content=result.content, source=self.name)) # Yield the response. 
yield Response( - chat_message=TextMessage( - content=result.content, source=self.name, models_usage=result.usage), + chat_message=TextMessage(content=result.content, source=self.name, models_usage=result.usage), inner_messages=inner_messages, ) else: @@ -461,8 +447,7 @@ async def on_messages_stream( ) tool_call_summary = "\n".join(tool_call_summaries) yield Response( - chat_message=ToolCallSummaryMessage( - content=tool_call_summary, source=self.name), + chat_message=ToolCallSummaryMessage(content=tool_call_summary, source=self.name), inner_messages=inner_messages, ) @@ -473,11 +458,9 @@ async def _execute_tool_call( try: if not self._tools + self._handoff_tools: raise ValueError("No tools are available.") - tool = next((t for t in self._tools + - self._handoff_tools if t.name == tool_call.name), None) + tool = next((t for t in self._tools + self._handoff_tools if t.name == tool_call.name), None) if tool is None: - raise ValueError( - f"The tool '{tool_call.name}' is not available.") + raise ValueError(f"The tool '{tool_call.name}' is not available.") arguments = json.loads(tool_call.arguments) result = await tool.run_json(arguments, cancellation_token) result_as_str = tool.return_value_as_string(result) @@ -509,7 +492,9 @@ def _to_config(self) -> AssistantAgentConfig: handoffs=list(self._handoffs.values()), model_context=self._model_context.dump_component(), description=self.description, - system_message=self._system_messages[0].content if self._system_messages else None, + system_message=self._system_messages[0].content + if self._system_messages and isinstance(self._system_messages[0].content, str) + else None, reflect_on_tool_use=self._reflect_on_tool_use, tool_call_summary_format=self._tool_call_summary_format, ) @@ -519,8 +504,7 @@ def _from_config(cls, config: AssistantAgentConfig) -> Self: """Create an assistant agent from a declarative config.""" return cls( name=config.name, - model_client=ChatCompletionClient.load_component( - config.model_client), + model_client=ChatCompletionClient.load_component(config.model_client), tools=[], handoffs=config.handoffs, model_context=None, diff --git a/python/packages/autogen-agentchat/tests/test_assistant_agent.py b/python/packages/autogen-agentchat/tests/test_assistant_agent.py index 3abefca00961..bf885af81734 100644 --- a/python/packages/autogen-agentchat/tests/test_assistant_agent.py +++ b/python/packages/autogen-agentchat/tests/test_assistant_agent.py @@ -97,8 +97,7 @@ async def test_run_with_tools(monkeypatch: pytest.MonkeyPatch) -> None: created=0, model=model, object="chat.completion", - usage=CompletionUsage( - prompt_tokens=10, completion_tokens=5, total_tokens=0), + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=0), ), ChatCompletion( id="id2", @@ -106,15 +105,13 @@ async def test_run_with_tools(monkeypatch: pytest.MonkeyPatch) -> None: Choice( finish_reason="stop", index=0, - message=ChatCompletionMessage( - content="pass", role="assistant"), + message=ChatCompletionMessage(content="pass", role="assistant"), ) ], created=0, model=model, object="chat.completion", - usage=CompletionUsage( - prompt_tokens=10, completion_tokens=5, total_tokens=0), + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=0), ), ChatCompletion( id="id2", @@ -122,15 +119,13 @@ async def test_run_with_tools(monkeypatch: pytest.MonkeyPatch) -> None: Choice( finish_reason="stop", index=0, - message=ChatCompletionMessage( - content="TERMINATE", role="assistant"), + 
message=ChatCompletionMessage(content="TERMINATE", role="assistant"), ) ], created=0, model=model, object="chat.completion", - usage=CompletionUsage( - prompt_tokens=10, completion_tokens=5, total_tokens=0), + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=0), ), ] mock = _MockChatCompletion(chat_completions) @@ -174,8 +169,7 @@ async def test_run_with_tools(monkeypatch: pytest.MonkeyPatch) -> None: agent2 = AssistantAgent( "tool_use_agent", model_client=OpenAIChatCompletionClient(model=model, api_key=""), - tools=[_pass_function, _fail_function, FunctionTool( - _echo_function, description="Echo")], + tools=[_pass_function, _fail_function, FunctionTool(_echo_function, description="Echo")], ) await agent2.load_state(state) state2 = await agent2.save_state() @@ -211,20 +205,17 @@ async def test_run_with_tools_and_reflection(monkeypatch: pytest.MonkeyPatch) -> created=0, model=model, object="chat.completion", - usage=CompletionUsage( - prompt_tokens=10, completion_tokens=5, total_tokens=0), + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=0), ), ChatCompletion( id="id2", choices=[ - Choice(finish_reason="stop", index=0, message=ChatCompletionMessage( - content="Hello", role="assistant")) + Choice(finish_reason="stop", index=0, message=ChatCompletionMessage(content="Hello", role="assistant")) ], created=0, model=model, object="chat.completion", - usage=CompletionUsage( - prompt_tokens=10, completion_tokens=5, total_tokens=0), + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=0), ), ChatCompletion( id="id2", @@ -236,8 +227,7 @@ async def test_run_with_tools_and_reflection(monkeypatch: pytest.MonkeyPatch) -> created=0, model=model, object="chat.completion", - usage=CompletionUsage( - prompt_tokens=10, completion_tokens=5, total_tokens=0), + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=0), ), ] mock = _MockChatCompletion(chat_completions) @@ -245,8 +235,7 @@ async def test_run_with_tools_and_reflection(monkeypatch: pytest.MonkeyPatch) -> agent = AssistantAgent( "tool_use_agent", model_client=OpenAIChatCompletionClient(model=model, api_key=""), - tools=[_pass_function, _fail_function, FunctionTool( - _echo_function, description="Echo")], + tools=[_pass_function, _fail_function, FunctionTool(_echo_function, description="Echo")], reflect_on_tool_use=True, ) result = await agent.run(task="task") @@ -322,8 +311,7 @@ async def test_handoffs(monkeypatch: pytest.MonkeyPatch) -> None: created=0, model=model, object="chat.completion", - usage=CompletionUsage( - prompt_tokens=42, completion_tokens=43, total_tokens=85), + usage=CompletionUsage(prompt_tokens=42, completion_tokens=43, total_tokens=85), ), ] mock = _MockChatCompletion(chat_completions) @@ -375,15 +363,13 @@ async def test_multi_modal_task(monkeypatch: pytest.MonkeyPatch) -> None: Choice( finish_reason="stop", index=0, - message=ChatCompletionMessage( - content="Hello", role="assistant"), + message=ChatCompletionMessage(content="Hello", role="assistant"), ) ], created=0, model=model, object="chat.completion", - usage=CompletionUsage( - prompt_tokens=10, completion_tokens=5, total_tokens=0), + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=0), ), ] mock = _MockChatCompletion(chat_completions) @@ -404,8 +390,7 @@ async def test_invalid_model_capabilities() -> None: model_client = OpenAIChatCompletionClient( model=model, api_key="", - model_info={"vision": False, "function_calling": False, - "json_output": False, 
"family": ModelFamily.UNKNOWN}, + model_info={"vision": False, "function_calling": False, "json_output": False, "family": ModelFamily.UNKNOWN}, ) with pytest.raises(ValueError): @@ -420,8 +405,7 @@ async def test_invalid_model_capabilities() -> None: ) with pytest.raises(ValueError): - agent = AssistantAgent( - name="assistant", model_client=model_client, handoffs=["agent2"]) + agent = AssistantAgent(name="assistant", model_client=model_client, handoffs=["agent2"]) with pytest.raises(ValueError): agent = AssistantAgent(name="assistant", model_client=model_client) @@ -440,15 +424,13 @@ async def test_list_chat_messages(monkeypatch: pytest.MonkeyPatch) -> None: Choice( finish_reason="stop", index=0, - message=ChatCompletionMessage( - content="Response to message 1", role="assistant"), + message=ChatCompletionMessage(content="Response to message 1", role="assistant"), ) ], created=0, model=model, object="chat.completion", - usage=CompletionUsage( - prompt_tokens=10, completion_tokens=5, total_tokens=15), + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=15), ), ] mock = _MockChatCompletion(chat_completions) @@ -501,15 +483,13 @@ async def test_model_context(monkeypatch: pytest.MonkeyPatch) -> None: Choice( finish_reason="stop", index=0, - message=ChatCompletionMessage( - content="Response to message 3", role="assistant"), + message=ChatCompletionMessage(content="Response to message 3", role="assistant"), ) ], created=0, model=model, object="chat.completion", - usage=CompletionUsage( - prompt_tokens=10, completion_tokens=5, total_tokens=15), + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=15), ), ] mock = _MockChatCompletion(chat_completions) @@ -544,15 +524,13 @@ async def test_run_with_memory(monkeypatch: pytest.MonkeyPatch) -> None: Choice( finish_reason="stop", index=0, - message=ChatCompletionMessage( - content="Hello", role="assistant"), + message=ChatCompletionMessage(content="Hello", role="assistant"), ) ], created=0, model=model, object="chat.completion", - usage=CompletionUsage( - prompt_tokens=10, completion_tokens=5, total_tokens=0), + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=0), ), ] b64_image_str = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVR4nGP4//8/AAX+Av4N70a4AAAAAElFTkSuQmCC" @@ -603,8 +581,7 @@ async def test_run_with_memory(monkeypatch: pytest.MonkeyPatch) -> None: result = await agent.run(task="test task") assert len(result.messages) > 0 - memory_event = next( - (msg for msg in result.messages if isinstance(msg, MemoryQueryEvent)), None) + memory_event = next((msg for msg in result.messages if isinstance(msg, MemoryQueryEvent)), None) assert memory_event is not None assert len(memory_event.content) > 0 assert isinstance(memory_event.content[0], MemoryContent) @@ -615,3 +592,38 @@ class BadMemory: assert not isinstance(BadMemory(), Memory) assert isinstance(ListMemory(), Memory) + + +@pytest.mark.asyncio +async def test_assistant_agent_declarative(monkeypatch: pytest.MonkeyPatch) -> None: + model = "gpt-4o-2024-05-13" + chat_completions = [ + ChatCompletion( + id="id1", + choices=[ + Choice( + finish_reason="stop", + index=0, + message=ChatCompletionMessage(content="Response to message 3", role="assistant"), + ) + ], + created=0, + model=model, + object="chat.completion", + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=15), + ), + ] + mock = _MockChatCompletion(chat_completions) + monkeypatch.setattr(AsyncCompletions, "create", mock.mock_create) + 
model_context = BufferedChatCompletionContext(buffer_size=2) + agent = AssistantAgent( + "test_agent", + model_client=OpenAIChatCompletionClient(model=model, api_key=""), + model_context=model_context, + ) + + agent_config = agent.dump_component() + assert agent_config.provider == "autogen_agentchat.agents.AssistantAgent" + + agent2 = AssistantAgent.load_component(agent_config) + assert agent2.name == agent.name diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb index 6fd4c17c2eef..d425604b8287 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb @@ -70,9 +70,6 @@ "outputs": [], "source": [ "from autogen_agentchat.agents import AssistantAgent, UserProxyAgent\n", - "from autogen_agentchat.messages import TextMessage\n", - "from autogen_agentchat.ui import Console\n", - "from autogen_core import CancellationToken\n", "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", "\n", From ddbe5fd716059aea8d8008f5a9f87a99cd3903e5 Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Tue, 14 Jan 2025 22:54:26 -0800 Subject: [PATCH 05/10] make websurfer declarative --- .../tutorial/declarative.ipynb | 18 +++++ .../web_surfer/_multimodal_web_surfer.py | 70 +++++++++++++++++-- .../autogen-ext/tests/test_websurfer_agent.py | 41 ++++++++++- 3 files changed, 122 insertions(+), 7 deletions(-) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb index d425604b8287..51e290e82826 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb @@ -131,6 +131,24 @@ "print(agent_config.model_dump_json())\n", "agent_new = agent.load_component(agent_config) # load component" ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# from autogen_ext.agents.web_surfer import MultimodalWebSurfer\n", + "\n", + "# agent = MultimodalWebSurfer(\n", + "# name=\"web_surfer\",\n", + "# model_client=model_client,\n", + "# headless=False,\n", + "# )\n", + "\n", + "# web_surfer_config = agent.dump_component() # dump component\n", + "# print(web_surfer_config.model_dump_json())" + ] } ], "metadata": { diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py index d266a2086529..1517edfa95cc 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py @@ -17,6 +17,7 @@ Sequence, cast, ) +from typing_extensions import Self from urllib.parse import quote_plus import aiofiles @@ -24,7 +25,7 @@ from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage, TextMessage -from autogen_core import EVENT_LOGGER_NAME, CancellationToken, FunctionCall +from autogen_core import 
EVENT_LOGGER_NAME, CancellationToken, FunctionCall, Component, ComponentModel from autogen_core import Image as AGImage from autogen_core.models import ( AssistantMessage, @@ -36,6 +37,7 @@ ) from PIL import Image from playwright.async_api import BrowserContext, Download, Page, Playwright, async_playwright +from pydantic import BaseModel from ._events import WebSurferEvent from ._prompts import WEB_SURFER_OCR_PROMPT, WEB_SURFER_QA_PROMPT, WEB_SURFER_QA_SYSTEM_MESSAGE, WEB_SURFER_TOOL_PROMPT @@ -58,7 +60,23 @@ from .playwright_controller import PlaywrightController -class MultimodalWebSurfer(BaseChatAgent): +class MultimodalWebSurferConfig(BaseModel): + name: str + model_client: ComponentModel + downloads_folder: str | None = None + description: str | None = None + debug_dir: str | None = None + headless: bool = True + start_page: str | None = "https://www.bing.com/" + animate_actions: bool = False + to_save_screenshots: bool = False + use_ocr: bool = False + browser_channel: str | None = None + browser_data_dir: str | None = None + to_resize_viewport: bool = True + + +class MultimodalWebSurfer(BaseChatAgent, Component[MultimodalWebSurferConfig]): """ MultimodalWebSurfer is a multimodal agent that acts as a web surfer that can search the web and visit web pages. @@ -144,6 +162,10 @@ async def main() -> None: asyncio.run(main()) """ + component_type = "agent" + component_config_schema = MultimodalWebSurferConfig + component_provider_override = "autogen_ext.agents.web_surfer.MultimodalWebSurfer" + DEFAULT_DESCRIPTION = """ A helpful assistant with access to a web browser. Ask them to perform web searches, open pages, and interact with content (e.g., clicking links, scrolling the viewport, etc., filling in form fields, etc.). @@ -242,7 +264,8 @@ def _download_handler(download: Download) -> None: TOOL_SLEEP, TOOL_HOVER, ] - self.n_lines_page_text = 50 # Number of lines of text to extract from the page in the absence of OCR + # Number of lines of text to extract from the page in the absence of OCR + self.n_lines_page_text = 50 self.did_lazy_init = False # flag to check if we have initialized the browser async def _lazy_init( @@ -346,7 +369,8 @@ async def on_reset(self, cancellation_token: CancellationToken) -> None: if self.to_save_screenshots: current_timestamp = "_" + int(time.time()).__str__() screenshot_png_name = "screenshot" + current_timestamp + ".png" - await self._page.screenshot(path=os.path.join(self.debug_dir, screenshot_png_name)) # type: ignore + # type: ignore + await self._page.screenshot(path=os.path.join(self.debug_dir, screenshot_png_name)) self.logger.info( WebSurferEvent( source=self.name, @@ -704,7 +728,8 @@ async def _execute_tool( if self.to_save_screenshots: current_timestamp = "_" + int(time.time()).__str__() screenshot_png_name = "screenshot" + current_timestamp + ".png" - async with aiofiles.open(os.path.join(self.debug_dir, screenshot_png_name), "wb") as file: # type: ignore + # type: ignore + async with aiofiles.open(os.path.join(self.debug_dir, screenshot_png_name), "wb") as file: await file.write(new_screenshot) # type: ignore self.logger.info( WebSurferEvent( @@ -861,3 +886,38 @@ async def _summarize_page( scaled_screenshot.close() assert isinstance(response.content, str) return response.content + + def _to_config(self) -> MultimodalWebSurferConfig: + return MultimodalWebSurferConfig( + name=self.name, + model_client=self._model_client.dump_component(), + downloads_folder=self.downloads_folder, + description=self.description, + debug_dir=self.debug_dir, + 
headless=self.headless, + start_page=self.start_page, + animate_actions=self.animate_actions, + to_save_screenshots=self.to_save_screenshots, + use_ocr=self.use_ocr, + browser_channel=self.browser_channel, + browser_data_dir=self.browser_data_dir, + to_resize_viewport=self.to_resize_viewport, + ) + + @classmethod + def _from_config(cls, config: MultimodalWebSurferConfig) -> Self: + return cls( + name=config.name, + model_client=ChatCompletionClient.load_component(config.model_client), + downloads_folder=config.downloads_folder, + description=config.description or cls.DEFAULT_DESCRIPTION, + debug_dir=config.debug_dir, + headless=config.headless, + start_page=config.start_page or cls.DEFAULT_START_PAGE, + animate_actions=config.animate_actions, + to_save_screenshots=config.to_save_screenshots, + use_ocr=config.use_ocr, + browser_channel=config.browser_channel, + browser_data_dir=config.browser_data_dir, + to_resize_viewport=config.to_resize_viewport, + ) diff --git a/python/packages/autogen-ext/tests/test_websurfer_agent.py b/python/packages/autogen-ext/tests/test_websurfer_agent.py index d8a36e4d9549..0d18d3727f01 100644 --- a/python/packages/autogen-ext/tests/test_websurfer_agent.py +++ b/python/packages/autogen-ext/tests/test_websurfer_agent.py @@ -128,8 +128,10 @@ async def test_run_websurfer(monkeypatch: pytest.MonkeyPatch) -> None: assert result.messages[2].content == "Hello" # check internal web surfer state assert len(agent._chat_history) == 2 # pyright: ignore[reportPrivateUsage] - assert agent._chat_history[0].content == "task" # pyright: ignore[reportPrivateUsage] - assert agent._chat_history[1].content == "Hello" # pyright: ignore[reportPrivateUsage] + # pyright: ignore[reportPrivateUsage] + assert agent._chat_history[0].content == "task" + # pyright: ignore[reportPrivateUsage] + assert agent._chat_history[1].content == "Hello" url_after_no_tool = agent._page.url # pyright: ignore[reportPrivateUsage] # run again @@ -145,3 +147,38 @@ async def test_run_websurfer(monkeypatch: pytest.MonkeyPatch) -> None: ) # type: ignore url_after_sleep = agent._page.url # type: ignore assert url_after_no_tool == url_after_sleep + + +@pytest.mark.asyncio +async def test_run_websurfer_declarative(monkeypatch: pytest.MonkeyPatch) -> None: + model = "gpt-4o-2024-05-13" + chat_completions = [ + ChatCompletion( + id="id1", + choices=[ + Choice( + finish_reason="stop", + index=0, + message=ChatCompletionMessage(content="Response to message 3", role="assistant"), + ) + ], + created=0, + model=model, + object="chat.completion", + usage=CompletionUsage(prompt_tokens=10, completion_tokens=5, total_tokens=15), + ), + ] + mock = _MockChatCompletion(chat_completions) + monkeypatch.setattr(AsyncCompletions, "create", mock.mock_create) + + agent = MultimodalWebSurfer( + "WebSurfer", model_client=OpenAIChatCompletionClient(model=model, api_key=""), use_ocr=False + ) + + agent_config = agent.dump_component() + assert agent_config.provider == "autogen_ext.agents.web_surfer.MultimodalWebSurfer" + assert agent_config.config["name"] == "WebSurfer" + + loaded_agent = MultimodalWebSurfer.load_component(agent_config) + assert isinstance(loaded_agent, MultimodalWebSurfer) + assert loaded_agent.name == "WebSurfer" From 172a54d805d22f5c337507fc6358a3bc3896dbe7 Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Tue, 14 Jan 2025 23:38:47 -0800 Subject: [PATCH 06/10] update formatting --- .../agents/web_surfer/_multimodal_web_surfer.py | 15 ++++++++------- .../autogen-ext/tests/test_websurfer_agent.py | 6 ++---- 2 files changed, 
10 insertions(+), 11 deletions(-) diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py index 1517edfa95cc..f90dc01cdda2 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/web_surfer/_multimodal_web_surfer.py @@ -17,7 +17,6 @@ Sequence, cast, ) -from typing_extensions import Self from urllib.parse import quote_plus import aiofiles @@ -25,7 +24,7 @@ from autogen_agentchat.agents import BaseChatAgent from autogen_agentchat.base import Response from autogen_agentchat.messages import AgentEvent, ChatMessage, MultiModalMessage, TextMessage -from autogen_core import EVENT_LOGGER_NAME, CancellationToken, FunctionCall, Component, ComponentModel +from autogen_core import EVENT_LOGGER_NAME, CancellationToken, Component, ComponentModel, FunctionCall from autogen_core import Image as AGImage from autogen_core.models import ( AssistantMessage, @@ -38,6 +37,7 @@ from PIL import Image from playwright.async_api import BrowserContext, Download, Page, Playwright, async_playwright from pydantic import BaseModel +from typing_extensions import Self from ._events import WebSurferEvent from ._prompts import WEB_SURFER_OCR_PROMPT, WEB_SURFER_QA_PROMPT, WEB_SURFER_QA_SYSTEM_MESSAGE, WEB_SURFER_TOOL_PROMPT @@ -340,7 +340,8 @@ async def _set_debug_dir(self, debug_dir: str | None) -> None: if self.to_save_screenshots: current_timestamp = "_" + int(time.time()).__str__() screenshot_png_name = "screenshot" + current_timestamp + ".png" - await self._page.screenshot(path=os.path.join(self.debug_dir, screenshot_png_name)) + + await self._page.screenshot(path=os.path.join(self.debug_dir, screenshot_png_name)) # type: ignore self.logger.info( WebSurferEvent( source=self.name, @@ -369,8 +370,8 @@ async def on_reset(self, cancellation_token: CancellationToken) -> None: if self.to_save_screenshots: current_timestamp = "_" + int(time.time()).__str__() screenshot_png_name = "screenshot" + current_timestamp + ".png" - # type: ignore - await self._page.screenshot(path=os.path.join(self.debug_dir, screenshot_png_name)) + + await self._page.screenshot(path=os.path.join(self.debug_dir, screenshot_png_name)) # type: ignore self.logger.info( WebSurferEvent( source=self.name, @@ -728,8 +729,8 @@ async def _execute_tool( if self.to_save_screenshots: current_timestamp = "_" + int(time.time()).__str__() screenshot_png_name = "screenshot" + current_timestamp + ".png" - # type: ignore - async with aiofiles.open(os.path.join(self.debug_dir, screenshot_png_name), "wb") as file: + + async with aiofiles.open(os.path.join(self.debug_dir, screenshot_png_name), "wb") as file: # type: ignore await file.write(new_screenshot) # type: ignore self.logger.info( WebSurferEvent( diff --git a/python/packages/autogen-ext/tests/test_websurfer_agent.py b/python/packages/autogen-ext/tests/test_websurfer_agent.py index 0d18d3727f01..a2aa33a10931 100644 --- a/python/packages/autogen-ext/tests/test_websurfer_agent.py +++ b/python/packages/autogen-ext/tests/test_websurfer_agent.py @@ -128,10 +128,8 @@ async def test_run_websurfer(monkeypatch: pytest.MonkeyPatch) -> None: assert result.messages[2].content == "Hello" # check internal web surfer state assert len(agent._chat_history) == 2 # pyright: ignore[reportPrivateUsage] - # pyright: ignore[reportPrivateUsage] - assert agent._chat_history[0].content == "task" - # pyright: 
ignore[reportPrivateUsage]
-    assert agent._chat_history[1].content == "Hello"
+    assert agent._chat_history[0].content == "task"  # pyright: ignore[reportPrivateUsage]
+    assert agent._chat_history[1].content == "Hello"  # pyright: ignore[reportPrivateUsage]
     url_after_no_tool = agent._page.url  # pyright: ignore[reportPrivateUsage]
 
     # run again
From 7284d609e3c97d6d4be01c38be93be410f1e4a2a Mon Sep 17 00:00:00 2001
From: Victor Dibia
Date: Wed, 15 Jan 2025 20:52:06 -0800
Subject: [PATCH 07/10] move declarative docs to advanced section

---
 .../{tutorial => advanced}/memory.ipynb       |  0
 .../serialize-components.ipynb}               | 40 +++++++++----------
 .../user-guide/agentchat-user-guide/index.md  |  5 ++-
 3 files changed, 21 insertions(+), 24 deletions(-)
 rename python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/{tutorial => advanced}/memory.ipynb (100%)
 rename python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/{tutorial/declarative.ipynb => advanced/serialize-components.ipynb} (70%)

diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/memory.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/advanced/memory.ipynb
similarity index 100%
rename from python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/memory.ipynb
rename to python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/advanced/memory.ipynb
diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/advanced/serialize-components.ipynb
similarity index 70%
rename from python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb
rename to python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/advanced/serialize-components.ipynb
index 51e290e82826..5a3855f48080 100644
--- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/declarative.ipynb
+++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/advanced/serialize-components.ipynb
@@ -4,9 +4,9 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "# Declarative Components \n",
+    "# Serializing Components \n",
     "\n",
-    "AutoGen provides a declarative {py:class}`~autogen_core.Component` configuration class that defines behaviours for declarative import/export. This is useful for debugging, visualizing, and even for sharing your work with others. In this notebook, we will demonstrate how to export a declarative representation of a multiagent team in the form of a JSON file. \n",
+    "AutoGen provides a {py:class}`~autogen_core.Component` configuration class that defines behaviours for serializing and deserializing components into declarative specifications. This is useful for debugging, visualizing, and even for sharing your work with others. In this notebook, we will demonstrate how to serialize multiple components to a declarative specification such as a JSON file. 
\n", "\n", "\n", "```{note}\n", @@ -72,13 +72,6 @@ "from autogen_agentchat.agents import AssistantAgent, UserProxyAgent\n", "from autogen_ext.models.openai import OpenAIChatCompletionClient\n", "\n", - "\n", - "# Define a tool that searches the web for information.\n", - "async def web_search(query: str) -> str:\n", - " \"\"\"Find information on the web\"\"\"\n", - " return \"AutoGen is a programming framework for building multi-agent applications.\"\n", - "\n", - "\n", "# Create an agent that uses the OpenAI GPT-4o model.\n", "model_client = OpenAIChatCompletionClient(\n", " model=\"gpt-4o\",\n", @@ -88,7 +81,7 @@ " name=\"assistant\",\n", " model_client=model_client,\n", " handoffs=[\"flights_refunder\", \"user\"],\n", - " tools=[web_search],\n", + " # tools=[], # serializing tools is not yet supported\n", " system_message=\"Use tools to solve tasks.\",\n", ")\n", "user_proxy = UserProxyAgent(name=\"user\")" @@ -122,7 +115,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "{\"provider\":\"autogen_agentchat.agents.AssistantAgent\",\"component_type\":\"agent\",\"version\":1,\"component_version\":1,\"description\":null,\"config\":{\"name\":\"assistant\",\"model_client\":{\"provider\":\"autogen_ext.models.openai.OpenAIChatCompletionClient\",\"component_type\":\"model\",\"version\":1,\"component_version\":1,\"config\":{\"model\":\"gpt-4o\"}},\"tools\":[],\"handoffs\":[{\"target\":\"flights_refunder\",\"description\":\"Handoff to flights_refunder.\",\"name\":\"transfer_to_flights_refunder\",\"message\":\"Transferred to flights_refunder, adopting the role of flights_refunder immediately.\"},{\"target\":\"user\",\"description\":\"Handoff to user.\",\"name\":\"transfer_to_user\",\"message\":\"Transferred to user, adopting the role of user immediately.\"}],\"model_context\":{\"provider\":\"autogen_core.model_context.UnboundedChatCompletionContext\",\"component_type\":\"chat_completion_context\",\"version\":1,\"component_version\":1,\"config\":{}},\"description\":\"An agent that provides assistance with ability to use tools.\",\"system_message\":\"Use tools to solve tasks.\",\"reflect_on_tool_use\":false,\"tool_call_summary_format\":\"{result}\"}}\n" + "{\"provider\":\"autogen_agentchat.agents.AssistantAgent\",\"component_type\":\"agent\",\"version\":1,\"component_version\":1,\"description\":null,\"config\":{\"name\":\"assistant\",\"model_client\":{\"provider\":\"autogen_ext.models.openai.OpenAIChatCompletionClient\",\"component_type\":\"model\",\"version\":1,\"component_version\":1,\"config\":{\"model\":\"gpt-4o\"}},\"handoffs\":[{\"target\":\"flights_refunder\",\"description\":\"Handoff to flights_refunder.\",\"name\":\"transfer_to_flights_refunder\",\"message\":\"Transferred to flights_refunder, adopting the role of flights_refunder immediately.\"},{\"target\":\"user\",\"description\":\"Handoff to user.\",\"name\":\"transfer_to_user\",\"message\":\"Transferred to user, adopting the role of user immediately.\"}],\"model_context\":{\"provider\":\"autogen_core.model_context.UnboundedChatCompletionContext\",\"component_type\":\"chat_completion_context\",\"version\":1,\"component_version\":1,\"config\":{}},\"description\":\"An agent that provides assistance with ability to use tools.\",\"system_message\":\"Use tools to solve tasks.\",\"reflect_on_tool_use\":false,\"tool_call_summary_format\":\"{result}\"}}\n" ] } ], @@ -133,21 +126,24 @@ ] }, { - "cell_type": "code", - "execution_count": 5, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "# from 
autogen_ext.agents.web_surfer import MultimodalWebSurfer\n",
+    "A similar approach can be used to serialize the `MultimodalWebSurfer` agent.\n",
+    "\n",
+    "```python\n",
+    "from autogen_ext.agents.web_surfer import MultimodalWebSurfer\n",
+    "\n",
+    "agent = MultimodalWebSurfer(\n",
+    "    name=\"web_surfer\",\n",
+    "    model_client=model_client,\n",
+    "    headless=False,\n",
+    ")\n",
     "\n",
-    "# agent = MultimodalWebSurfer(\n",
-    "#     name=\"web_surfer\",\n",
-    "#     model_client=model_client,\n",
-    "#     headless=False,\n",
-    "# )\n",
+    "web_surfer_config = agent.dump_component()  # dump component\n",
+    "print(web_surfer_config.model_dump_json())\n",
     "\n",
-    "# web_surfer_config = agent.dump_component()  # dump component\n",
-    "# print(web_surfer_config.model_dump_json())"
+    "```"
    ]
   }
  ],
diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md
index 5546417eb6d2..fc0473d0a7a5 100644
--- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md
+++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md
@@ -91,8 +91,7 @@ tutorial/human-in-the-loop
 tutorial/termination
 tutorial/custom-agents
 tutorial/state
-tutorial/declarative
-tutorial/memory
+
 ```

 ```{toctree}
@@ -103,6 +102,8 @@ tutorial/memory
 selector-group-chat
 swarm
 magentic-one
+advanced/memory
+advanced/serialize-components
 ```

 ```{toctree}
From 974638be73d246eace06e2caa77e232b1e502ec3 Mon Sep 17 00:00:00 2001
From: Victor Dibia
Date: Wed, 15 Jan 2025 21:05:13 -0800
Subject: [PATCH 08/10] remove tools until implemented

---
 .../autogen_agentchat/agents/_assistant_agent.py    | 13 +++++++++----
 .../autogen-agentchat/tests/test_assistant_agent.py | 13 +++++++++++++
 2 files changed, 22 insertions(+), 4 deletions(-)

diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
index 6bb53cf09ad5..2c71a2558df0 100644
--- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
+++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py
@@ -56,7 +56,7 @@ class AssistantAgentConfig(BaseModel):
     name: str
     model_client: ComponentModel
-    tools: List[Any] | None = None
+    # tools: List[Any] | None = None  # TBD
     handoffs: List[HandoffBase | str] | None = None
     model_context: ComponentModel | None = None
     description: str
@@ -245,7 +245,6 @@ async def main() -> None:
     See `o1 beta limitations `_ for more details.
     """
-
     component_type = "agent"
     component_config_schema = AssistantAgentConfig
     component_provider_override = "autogen_agentchat.agents.AssistantAgent"
@@ -485,10 +484,16 @@ async def load_state(self, state: Mapping[str, Any]) -> None:

     def _to_config(self) -> AssistantAgentConfig:
         """Convert the assistant agent to a declarative config."""
+
+        # raise an error if tools is not empty until it is implemented
+        # TBD: implement serializing tools and remove this check.
+ if self._tools and len(self._tools) > 0: + raise NotImplementedError("Serializing tools is not implemented yet.") + return AssistantAgentConfig( name=self.name, model_client=self._model_client.dump_component(), - tools=[], + # tools=[], # TBD handoffs=list(self._handoffs.values()), model_context=self._model_context.dump_component(), description=self.description, @@ -505,7 +510,7 @@ def _from_config(cls, config: AssistantAgentConfig) -> Self: return cls( name=config.name, model_client=ChatCompletionClient.load_component(config.model_client), - tools=[], + # tools=[], # TBD handoffs=config.handoffs, model_context=None, description=config.description, diff --git a/python/packages/autogen-agentchat/tests/test_assistant_agent.py b/python/packages/autogen-agentchat/tests/test_assistant_agent.py index bf885af81734..e477ed3f1245 100644 --- a/python/packages/autogen-agentchat/tests/test_assistant_agent.py +++ b/python/packages/autogen-agentchat/tests/test_assistant_agent.py @@ -627,3 +627,16 @@ async def test_assistant_agent_declarative(monkeypatch: pytest.MonkeyPatch) -> N agent2 = AssistantAgent.load_component(agent_config) assert agent2.name == agent.name + + agent3 = AssistantAgent( + "test_agent", + model_client=OpenAIChatCompletionClient(model=model, api_key=""), + model_context=model_context, + tools=[ + _pass_function, + _fail_function, + FunctionTool(_echo_function, description="Echo"), + ], + ) + with pytest.raises(NotImplementedError): + agent3.dump_component() From e2335c37e903c0da776710ac48959c924ca2e373 Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Wed, 15 Jan 2025 21:05:38 -0800 Subject: [PATCH 09/10] minor updates to termination conditions --- .../src/autogen_agentchat/base/_termination.py | 1 - .../src/autogen_agentchat/conditions/_terminations.py | 8 -------- 2 files changed, 9 deletions(-) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py index dcefa5a04111..d8a3adb96818 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py @@ -48,7 +48,6 @@ async def main() -> None: """ component_type = "termination" - # component_config_schema = BaseModel # type: ignore @property @abstractmethod diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py index d824815aeb1f..e33f2fcb70a7 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/conditions/_terminations.py @@ -16,7 +16,6 @@ class StopMessageTerminationConfig(BaseModel): class StopMessageTermination(TerminationCondition, Component[StopMessageTerminationConfig]): """Terminate the conversation if a StopMessage is received.""" - component_type = "termination" component_config_schema = StopMessageTerminationConfig component_provider_override = "autogen_agentchat.conditions.StopMessageTermination" @@ -58,7 +57,6 @@ class MaxMessageTermination(TerminationCondition, Component[MaxMessageTerminatio max_messages: The maximum number of messages allowed in the conversation. 
""" - component_type = "termination" component_config_schema = MaxMessageTerminationConfig component_provider_override = "autogen_agentchat.conditions.MaxMessageTermination" @@ -104,7 +102,6 @@ class TextMentionTermination(TerminationCondition, Component[TextMentionTerminat text: The text to look for in the messages. """ - component_type = "termination" component_config_schema = TextMentionTerminationConfig component_provider_override = "autogen_agentchat.conditions.TextMentionTermination" @@ -159,7 +156,6 @@ class TokenUsageTermination(TerminationCondition, Component[TokenUsageTerminatio ValueError: If none of max_total_token, max_prompt_token, or max_completion_token is provided. """ - component_type = "termination" component_config_schema = TokenUsageTerminationConfig component_provider_override = "autogen_agentchat.conditions.TokenUsageTermination" @@ -234,7 +230,6 @@ class HandoffTermination(TerminationCondition, Component[HandoffTerminationConfi target (str): The target of the handoff message. """ - component_type = "termination" component_config_schema = HandoffTerminationConfig component_provider_override = "autogen_agentchat.conditions.HandoffTermination" @@ -279,7 +274,6 @@ class TimeoutTermination(TerminationCondition, Component[TimeoutTerminationConfi timeout_seconds: The maximum duration in seconds before terminating the conversation. """ - component_type = "termination" component_config_schema = TimeoutTerminationConfig component_provider_override = "autogen_agentchat.conditions.TimeoutTermination" @@ -339,7 +333,6 @@ class ExternalTermination(TerminationCondition, Component[ExternalTerminationCon """ - component_type = "termination" component_config_schema = ExternalTerminationConfig component_provider_override = "autogen_agentchat.conditions.ExternalTermination" @@ -389,7 +382,6 @@ class SourceMatchTermination(TerminationCondition, Component[SourceMatchTerminat TerminatedException: If the termination condition has already been reached. """ - component_type = "termination" component_config_schema = SourceMatchTerminationConfig component_provider_override = "autogen_agentchat.conditions.SourceMatchTermination" From 4db3d581acc604520484da55beca1ea0755244ab Mon Sep 17 00:00:00 2001 From: Victor Dibia Date: Thu, 16 Jan 2025 16:32:36 -0800 Subject: [PATCH 10/10] update docs --- .../src/user-guide/agentchat-user-guide/index.md | 16 ++++++++++++++-- .../{advanced => }/memory.ipynb | 0 .../{advanced => }/serialize-components.ipynb | 0 3 files changed, 14 insertions(+), 2 deletions(-) rename python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/{advanced => }/memory.ipynb (100%) rename python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/{advanced => }/serialize-components.ipynb (100%) diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md index fc0473d0a7a5..e83288338dc7 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/index.md @@ -66,6 +66,18 @@ Sample code and use cases How to migrate from AutoGen 0.2.x to 0.4.x. 
::: + +:::{grid-item-card} {fas}`save;pst-color-primary` Serialize Components +:link: ./serialize-components.html + +Serialize and deserialize components +::: + +:::{grid-item-card} {fas}`brain;pst-color-primary` Memory +:link: ./memory.html + +Add memory capabilities to your agents +::: :::: ```{toctree} @@ -102,8 +114,8 @@ tutorial/state selector-group-chat swarm magentic-one -advanced/memory -advanced/serialize-components +memory +serialize-components ``` ```{toctree} diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/advanced/memory.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/memory.ipynb similarity index 100% rename from python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/advanced/memory.ipynb rename to python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/memory.ipynb diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/advanced/serialize-components.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/serialize-components.ipynb similarity index 100% rename from python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/advanced/serialize-components.ipynb rename to python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/serialize-components.ipynb
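Taken together, these patches give agents, model contexts, and termination conditions one serialization contract: a pydantic config schema plus `_to_config`/`_from_config`, which enable `dump_component()`/`load_component()`. The sketch below illustrates that contract end to end on a hypothetical component — `SketchAgent`, its config fields, and the `__main__` provider path are stand-ins and not part of this PR; only the `Component`/`ComponentBase` API usage mirrors the diffs above.

```python
from pydantic import BaseModel
from typing_extensions import Self

from autogen_core import Component, ComponentBase


class SketchAgentConfig(BaseModel):
    """Declarative configuration for the hypothetical SketchAgent."""

    name: str
    description: str = "An illustrative agent."


class SketchAgent(ComponentBase[BaseModel], Component[SketchAgentConfig]):
    """Hypothetical component illustrating the serialization contract."""

    component_type = "agent"
    component_config_schema = SketchAgentConfig
    # Pinned to this script's module so load_component can re-import the class;
    # a real component pins its public import path instead.
    component_provider_override = "__main__.SketchAgent"

    def __init__(self, name: str, description: str = "An illustrative agent.") -> None:
        self._name = name
        self._description = description

    def _to_config(self) -> SketchAgentConfig:
        # Capture exactly the state needed to rebuild the instance.
        return SketchAgentConfig(name=self._name, description=self._description)

    @classmethod
    def _from_config(cls, config: SketchAgentConfig) -> Self:
        return cls(name=config.name, description=config.description)


# Round trip: instance -> ComponentModel -> JSON string -> instance.
agent = SketchAgent(name="sketch")
component_model = agent.dump_component()
print(component_model.model_dump_json())
restored = SketchAgent.load_component(component_model)
assert restored.dump_component().provider == "__main__.SketchAgent"
```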