diff --git a/samples/python/agents/ag2/Containerfile b/samples/python/agents/ag2/Containerfile deleted file mode 100644 index b1d3d2a3..00000000 --- a/samples/python/agents/ag2/Containerfile +++ /dev/null @@ -1,42 +0,0 @@ -FROM registry.access.redhat.com/ubi8/python-312 - -# Set work directory -WORKDIR /opt/app-root/agents/ag2 - -# Copy Python Project Files (Container context must be the `python` directory) -COPY ../.. /opt/app-root - -USER root - -# Install system build dependencies and UV package manager -RUN dnf -y update && dnf install -y gcc gcc-c++ \ - && pip install uv - -# Set environment variables for uv: -# UV_COMPILE_BYTECODE=1: Compiles Python files to .pyc for faster startup -# UV_LINK_MODE=copy: Ensures files are copied, not symlinked, which can avoid issues -ENV UV_COMPILE_BYTECODE=1 \ - UV_LINK_MODE=copy - -# Install dependencies using uv sync. -# --frozen: Ensures uv respects the uv.lock file -# --no-install-project: Prevents installing the project itself in this stage -# --no-dev: Excludes development dependencies -# --mount=type=cache: Leverages Docker's build cache for uv, speeding up repeated builds -RUN --mount=type=cache,target=/.cache/uv \ - uv sync --frozen --no-install-project --no-dev - -# Install the project -RUN --mount=type=cache,target=/.cache/uv \ - uv sync --frozen --no-dev - -# Allow non-root user to access the everything in app-root -RUN chgrp -R root /opt/app-root/ && chmod -R g+rwx /opt/app-root/ - -# Expose default port (change if needed) -EXPOSE 10010 - -USER 1001 - -# Run the agent -CMD uv run . 
--host 0.0.0.0 \ No newline at end of file diff --git a/samples/python/agents/ag2/README.md b/samples/python/agents/ag2/README.md index ee014eb0..f3257898 100644 --- a/samples/python/agents/ag2/README.md +++ b/samples/python/agents/ag2/README.md @@ -1,236 +1,106 @@ -# AG2 MCP Agent with A2A Protocol +# AG2 Mypy tool Agent with A2A Protocol -This sample demonstrates an MCP-enabled agent built with [AG2](https://github.com/ag2ai/ag2) that is exposed through the A2A protocol. It showcases how different agent frameworks (LangGraph, CrewAI, and now AG2) can communicate using A2A as a lingua franca. +This sample demonstrates an AG2 agent that is exposed through the A2A protocol. It shows how an AG2 can communicate +using A2A as a lingua franca, which is helpful for building a distributed agent system where different agents +can communicate with each other using the A2A protocol. The agent-as-a-service architecture allows you to encapsulate agent logic, +the local environment, tools, and other capabilities into a separate service that can be reused in different agent workflows. -## How It Works - -This agent uses AG2's `AssistantAgent` with MCP (Model Context Protocol) integration to access various tools and capabilities. The A2A protocol enables standardized interaction with the agent, allowing clients to discover and send requests to agents with tools exposed via MCP for complex tasks. 
+Here is a sequence diagram that shows how the current demo implementation of the A2A protocol works: ```mermaid sequenceDiagram participant Client as A2A Client participant Server as A2A Server participant Agent as AG2 (LLM + MCP Client) - participant MCP as MCP Server - participant Tools as MCP Tool Implementations + participant Tool as Mypy Tool Client->>Server: Send task with query Server->>Agent: Forward query to AG2 agent Note over Server,Agent: Real-time status updates (streaming) - - Agent->>MCP: Request available tools - MCP->>Agent: Return tool definitions - + Agent->>Agent: LLM decides to use tool - Agent->>MCP: Send tool execution request - MCP->>Tools: Call tool - Tools->>MCP: Return tool results - MCP->>Agent: Return tool results - + Agent->>Tool: Send tool execution request + Tool->>Agent: Return tool results + Agent->>Agent: LLM processes tool results Agent->>Server: Return completed response Server->>Client: Respond with task results ``` -## Key Features +## Possible use cases for Agent-as-a-Service architecture -- **Tool Access**: Leverage various MCP tools for complex tasks. +- **Tool Access**: Leverage various MCP or custom tools for complex tasks. - **Web Browsing**: Access to web browsing capabilities. - **Code Execution**: Run Python code for data analysis tasks. - **Image Generation**: Create images from text descriptions. - **Real-time Streaming**: Get status updates during processing. - **Cross-Framework Communication**: Demonstrates A2A's ability to connect different agent frameworks. +## The demo + +Here we have a simple demo that shows how to use the A2A protocol to communicate with an AG2 agent. 
We have +- one A2A-served remote agent `a2a_python_reviewer.py` +- two different A2A clients, which communicate with the remote agent using the A2A protocol: + CLI code generator `cli_codegen_a2a_client.py` and FastAPI code generator `fastapi_codegen_a2a_client.py` + ## Prerequisites - Python 3.12 or higher - UV package manager - OpenAI API Key (for default configuration) -- MCP YouTube server (see installation step below) ## Setup & Running -1. Install the MCP YouTube server: - - ```bash - uv tool install git+https://github.com/sparfenyuk/mcp-youtube - ``` - -2. Navigate to the samples directory: +1. Navigate to the samples directory: ```bash cd samples/python/agents/ag2 ``` -3. Create an environment file with your API key (uses `openai gpt-4o`): +2. Create an environment file with your API key (uses `openai gpt-4o`): ```bash echo "OPENAI_API_KEY=your_api_key_here" > .env ``` -4. Run the agent: - +3. Install the dependencies: ```bash - # Basic run on default port 10003 - uv run . + uv sync + ``` - # On a custom host/port - uv run . --host 0.0.0.0 --port 8080 +4. Run the remote agent: + ```bash + uv run a2a_python_reviewer.py ``` -5. In a new terminal, start an A2AClient interface to interact with the remote (ag2) agent. You can use one of the following methods: +5. In a new terminal, start an A2AClient interface to interact with the remote (ag2) agent. You can use one of the following clients: - **Method A: Run the CLI client** - From the `samples/python` directory: - ```bash - cd samples/python - uv run hosts/cli --agent http://localhost:10003 + uv run cli_codegen_a2a_client.py ``` - - **Method B: Use the demo web UI** - - This method uses the `google/gemini-2.0-flash-001` model. - - 1. Navigate to the demo directory and set up your environment: - - ```bash - cd demo/ui - echo "GOOGLE_API_KEY=your_api_key_here" > .env - ``` - - 2. Run the UI: - - ```bash - uv run main.py - ``` - - 3. 
Navigate to the web UI (typically `http://localhost:12000`) and follow these steps: - - Click the **Agents** tab. - - Add the Remote Agent. - - Enter the Agent URL: `localhost:10003` (or your custom host/port). - - Click the **Home** tab (Conversations). - - Create and start a new conversation (`+`) to test the interaction. - -## Build Container Image - -The agent can also be built and run using a container file. - -1. Navigate to the `samples/python` directory: - - ```bash - cd samples/python - ``` - -2. Build the container image: - - ```bash - podman build -f agents/ag2/Containerfile . -t ag2-a2a-server - ``` - - > [!TIP] - > `podman` is a drop-in replacement for `docker`, which can also be used in these commands. - -3. Run your container: + - **Method B: Run the FastAPI client** - ```bash - podman run -p 10010:10010 -e OPENAI_API_KEY=your_api_key_here ag2-a2a-server - ``` - -4. Run an A2A client (follow step 5 from the section above, pointing to the container's port). - -> [!IMPORTANT] -> -> - **Access URL:** You must access the A2A client through the URL `0.0.0.0:10010`. Using `localhost` will not work. -> - **Hostname Override:** If you're deploying to an environment where the hostname is defined differently outside the container, use the `HOST_OVERRIDE` environment variable to set the expected hostname on the Agent Card. This ensures proper communication with your client application. - -## Example Usage - -The MCP YouTube server enables the agent to download closed captions for YouTube videos (note: does not work for YouTube Shorts). Here's an example prompt you can try: - -```text -Summarize this video: https://www.youtube.com/watch?v=kQmXtrmQ5Zg (Building Agents with Model Context Protocol - Full Workshop with Mahesh Murag of Anthropic) -``` - -## Technical Implementation - -- **AG2 MCP Integration**: Integrates with the MCP toolkit for tool access. -- **Streaming Support**: Provides real-time updates during task processing. 
-- **A2A Protocol Integration**: Full compliance with A2A specifications. - -## Behind the Scenes: A2A Communication - -This demo provides two different interfaces to interact with the AG2 agent, both using the A2A protocol: - -### CLI Client (Direct Interaction) - -When using the CLI client, you interact directly with a simple A2A client that sends requests to the AG2 agent: - -```text -User → CLI (A2AClient) → AG2 Agent -``` - -### Web UI (Host Agent Delegation) - -When using the web UI, you interact with a Google ADK host agent, which acts as an A2A client to delegate tasks: - -```text -User → Web UI → ADK Host Agent (A2A Client) → AG2 Agent -``` - -In both cases, the underlying A2A protocol communication looks like this: - -```json -POST http://localhost:10003 -Content-Type: application/json - -{ - "jsonrpc": "2.0", - "id": 1, - "method": "message/stream", - "params": { - "id": "mcp-task-01", - "sessionId": "user-session-123", - "acceptedOutputModes": [ - "text" - ], - "message": { - "role": "user", - "parts": [ - { - "type": "text", - "text": "Summarize this video: https://www.youtube.com/watch?v=kQmXtrmQ5Zg" - } - ] - } - } -} -``` - -This standardized communication format is what enables different agent frameworks to interoperate seamlessly. - -If you want to test the API directly via `curl`: - -```bash -curl -X POST http://localhost:10003 \ --H "Content-Type: application/json" \ --d '{"jsonrpc": "2.0", "id": 1, "method": "message/stream", "params": {"id": "mcp-task-01", "sessionId": "user-session-123", "acceptedOutputModes": ["text"], "message": {"role": "user", "parts": [{"type": "text", "text": "Summarize this video: https://www.youtube.com/watch?v=kQmXtrmQ5Zg"}]}}}' -``` - -Note: This agent only supports the async streaming endpoint (`message/stream`). The synchronous endpoint (`message/send`) is not implemented. 
+ ```bash + uv run fastapi_codegen_a2a_client.py + ``` ## Learn More - [A2A Protocol Documentation](https://google.github.io/A2A/#/documentation) - [AG2 Documentation](https://docs.ag2.ai/) +- [AG2 A2A Documentation](https://docs.ag2.ai/latest/docs/user-guide/a2a/) - [MCP Documentation](https://modelcontextprotocol.io/introduction) ## Disclaimer > [!WARNING] -> **The sample code provided is for demonstration purposes only.** When building production applications, it is critical to treat any agent operating outside of your direct control as a potentially untrusted entity. +> **The sample code provided is for demonstration purposes only.** When building production applications, it is critical +> to treat any agent operating outside of your direct control as a potentially untrusted entity. > -> All data received from an external agent—including but not limited to its AgentCard, messages, artifacts, and task statuses—should be handled as untrusted input. For example, a malicious agent could provide an AgentCard containing crafted data in its fields (e.g., `description`, `name`, `skills.description`). If this data is used without sanitization to construct prompts for a Large Language Model (LLM), it could expose your application to prompt injection attacks. Failure to properly validate and sanitize this data before use can introduce security vulnerabilities into your application. +> All data received from an external agent—including but not limited to its AgentCard, messages, artifacts, and task statuses—should be handled as untrusted input. For example, a malicious agent could provide an AgentCard containing crafted data in its fields (e.g., `description`, `name`, `skills.description`). +> If this data is used without sanitization to construct prompts for a Large Language Model (LLM), it could expose your application to prompt injection attacks. Failure to properly validate and sanitize this data before use can introduce security vulnerabilities into your application. 
> > Developers are responsible for implementing appropriate security measures, such as input validation and secure handling of credentials, to protect their systems and users. diff --git a/samples/python/agents/ag2/__init__.py b/samples/python/agents/ag2/__init__.py index e4e442f2..e69de29b 100644 --- a/samples/python/agents/ag2/__init__.py +++ b/samples/python/agents/ag2/__init__.py @@ -1 +0,0 @@ -# AG2 MCP Youtube Agent for A2A Protocol diff --git a/samples/python/agents/ag2/__main__.py b/samples/python/agents/ag2/__main__.py deleted file mode 100644 index c2507f09..00000000 --- a/samples/python/agents/ag2/__main__.py +++ /dev/null @@ -1,67 +0,0 @@ -import logging -import os - -import click - -from a2a.server.apps import A2AStarletteApplication -from a2a.server.request_handlers import DefaultRequestHandler -from a2a.server.tasks import InMemoryTaskStore -from a2a.types import AgentCapabilities, AgentCard, AgentSkill -from agent import YoutubeMCPAgent # type: ignore[import-untyped] -from agent_executor import AG2AgentExecutor # type: ignore[import-untyped] -from dotenv import load_dotenv - - -load_dotenv() - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -@click.command() -@click.option('--host', 'host', default='localhost') -@click.option('--port', 'port', default=10010) -def main(host, port): - """Starts the AG2 MCP Agent server.""" - if not os.getenv('OPENAI_API_KEY'): - print('OPENAI_API_KEY environment variable not set.') - - request_handler = DefaultRequestHandler( - agent_executor=AG2AgentExecutor(), - task_store=InMemoryTaskStore(), - ) - server = A2AStarletteApplication( - agent_card=get_agent_card(host, port), http_handler=request_handler - ) - import uvicorn - - uvicorn.run(server.build(), host=host, port=port) - - -def get_agent_card(host: str, port: int): - """Returns the Agent Card for the AG2 Agent.""" - capabilities = AgentCapabilities(streaming=True) - skill = AgentSkill( - id='download_closed_captions', - 
name='Download YouTube Closed Captions', - description='Retrieve closed captions/transcripts from YouTube videos', - tags=['youtube', 'captions', 'transcription', 'video'], - examples=[ - 'Extract the transcript from this YouTube video: https://www.youtube.com/watch?v=dQw4w9WgXcQ', - 'Download the captions for this YouTube tutorial', - ], - ) - return AgentCard( - name='YouTube Captions Agent', - description='AI agent that can extract closed captions and transcripts from YouTube videos. This agent provides raw transcription data that can be used for further processing.', - url=f'http://{host}:{port}/', - version='1.0.0', - default_input_modes=YoutubeMCPAgent.SUPPORTED_CONTENT_TYPES, - default_output_modes=YoutubeMCPAgent.SUPPORTED_CONTENT_TYPES, - capabilities=capabilities, - skills=[skill], - ) - - -if __name__ == '__main__': - main() diff --git a/samples/python/agents/ag2/a2a_python_reviewer.py b/samples/python/agents/ag2/a2a_python_reviewer.py new file mode 100644 index 00000000..8053d367 --- /dev/null +++ b/samples/python/agents/ag2/a2a_python_reviewer.py @@ -0,0 +1,58 @@ +import os +import tempfile + +from typing import Annotated + +from autogen import ConversableAgent, LLMConfig +from autogen.a2a import A2aAgentServer +from mypy import api + + +# create regular AG2 agent +config = LLMConfig( + { + 'model': 'gpt-4o-mini', + 'api_key': os.getenv('OPENAI_API_KEY'), + } +) + +reviewer_agent = ConversableAgent( + name='ReviewerAgent', + description='An agent that reviews the code for the user', + system_message=( + 'You are an expert in code review pretty strict and focused on typing. ' + 'Please, use mypy tool to validate the code.' 
+        'If mypy has no issues with the code, return "No issues found."'
+    ),
+    llm_config=config,
+    human_input_mode='NEVER',
+)
+
+
+# Add mypy tool to validate the code
+@reviewer_agent.register_for_llm(
+    name='mypy-checker',
+    description='Check the code with mypy tool',
+)
+def review_code_with_mypy(
+    code: Annotated[
+        str,
+        'Raw code content to review. Code should be formatted as single file.',
+    ],
+) -> str:
+    with tempfile.NamedTemporaryFile('w', suffix='.py') as tmp:
+        tmp.write(code); tmp.flush()  # flush the buffer: mypy reads the file from disk
+        stdout, stderr, exit_status = api.run([tmp.name])
+        if stderr:  # exit status 2: fatal/usage error is reported on stderr
+            return stderr
+        return stdout or 'No issues found.'  # exit status 1 puts the type-error report on stdout
+
+
+# wrap agent to A2A server
+server = A2aAgentServer(reviewer_agent).build()
+
+if __name__ == '__main__':
+    # run server as regular ASGI application
+    import uvicorn
+
+    uvicorn.run(server, host='0.0.0.0', port=8000)
diff --git a/samples/python/agents/ag2/agent.py b/samples/python/agents/ag2/agent.py
deleted file mode 100644
index 1c27d2d9..00000000
--- a/samples/python/agents/ag2/agent.py
+++ /dev/null
@@ -1,196 +0,0 @@
-import json
-import logging
-import os
-import traceback
-
-from collections.abc import AsyncIterable
-from typing import Any, Literal
-
-from autogen import AssistantAgent, LLMConfig
-from autogen.mcp import create_toolkit
-from dotenv import load_dotenv
-from mcp import ClientSession, StdioServerParameters
-from mcp.client.stdio import stdio_client
-from pydantic import BaseModel
-
-
-logger = logging.getLogger(__name__)
-
-
-class ResponseModel(BaseModel):
-    """Response model for the YouTube MCP agent."""
-
-    text_reply: str
-    closed_captions: str | None
-    status: Literal['TERMINATE', '']
-
-    def format(self) -> str:
-        """Format the response as a string."""
-        if self.closed_captions is None:
-            return self.text_reply
-        return f'{self.text_reply}\n\nClosed Captions:\n{self.closed_captions}'
-
-
-def get_api_key() -> str:
-    """Helper method to handle API Key."""
-    load_dotenv()
-    return os.getenv('OPENAI_API_KEY')
-
-
-class
YoutubeMCPAgent: - """Agent to access a Youtube MCP Server to download closed captions.""" - - SUPPORTED_CONTENT_TYPES = ['text', 'text/plain'] - - def __init__(self): - # Import AG2 dependencies here to isolate requirements - try: - # Set up LLM configuration with response format - llm_config = LLMConfig( - model='gpt-4o', - api_key=get_api_key(), - response_format=ResponseModel, - ) - - # Create the assistant agent that will use MCP tools - self.agent = AssistantAgent( - name='YoutubeMCPAgent', - llm_config=llm_config, - system_message=( - 'You are a specialized assistant for processing YouTube videos. ' - 'You can use MCP tools to fetch captions and process YouTube content. ' - 'You can provide captions, summarize videos, or analyze content from YouTube. ' - "If the user asks about anything not related to YouTube videos or doesn't provide a YouTube URL, " - 'politely state that you can only help with tasks related to YouTube videos.\n\n' - 'IMPORTANT: Always respond using the ResponseModel format with these fields:\n' - '- text_reply: Your main response text\n' - '- closed_captions: YouTube captions if available, null if not relevant\n' - "- status: Always use 'TERMINATE' for all responses \n\n" - 'Example response:\n' - '{\n' - ' "text_reply": "Here\'s the information you requested...",\n' - ' "closed_captions": null,\n' - ' "status": "TERMINATE"\n' - '}' - ), - ) - - self.initialized = True - logger.info('MCP Agent initialized successfully') - except ImportError as e: - logger.error(f'Failed to import AG2 components: {e}') - self.initialized = False - - def get_agent_response(self, response: str) -> dict[str, Any]: - """Format agent response in a consistent structure.""" - try: - # Try to parse the response as a ResponseModel JSON - response_dict = json.loads(response) - model = ResponseModel(**response_dict) - - # All final responses should be treated as complete - return { - 'is_task_complete': True, - 'require_user_input': False, - 'content': 
model.format(), - } - except Exception as e: - # Log but continue with best-effort fallback - logger.error(f'Error parsing response: {e}, response: {response}') - - # Default to treating it as a completed response - return { - 'is_task_complete': True, - 'require_user_input': False, - 'content': response, - } - - async def stream( - self, query: str, session_id: str - ) -> AsyncIterable[dict[str, Any]]: - """Stream updates from the MCP agent.""" - if not self.initialized: - yield { - 'is_task_complete': False, - 'require_user_input': True, - 'content': 'Agent initialization failed. Please check the dependencies and logs.', - } - return - - try: - # Initial response to acknowledge the query - yield { - 'is_task_complete': False, - 'require_user_input': False, - 'content': 'Processing request...', - } - - logger.info(f'Processing query: {query[:50]}...') - - try: - # Create stdio server parameters for mcp-youtube - server_params = StdioServerParameters( - command='mcp-youtube', - ) - - # Connect to the MCP server using stdio client - async with ( - stdio_client(server_params) as (read, write), - ClientSession(read, write) as session, - ): - # Initialize the connection - await session.initialize() - - # Create toolkit and register tools - toolkit = await create_toolkit(session=session) - toolkit.register_for_llm(self.agent) - - result = await self.agent.a_run( - message=query, - tools=toolkit.tools, - max_turns=2, # Fixed at 2 turns to allow tool usage - user_input=False, - ) - - # Extract the content from the result - try: - # Process the result - await result.process() - - # Get the summary which contains the output - response = await result.summary - - except Exception as extraction_error: - logger.error( - f'Error extracting response: {extraction_error}' - ) - traceback.print_exc() - response = ( - f'Error processing request: {extraction_error!s}' - ) - - # Final response - yield self.get_agent_response(response) - - except Exception as e: - logger.error( - 
f'Error during processing: {traceback.format_exc()}' - ) - yield { - 'is_task_complete': False, - 'require_user_input': True, - 'content': f'Error processing request: {e!s}', - } - except Exception as e: - logger.error(f'Error in streaming agent: {traceback.format_exc()}') - yield { - 'is_task_complete': False, - 'require_user_input': True, - 'content': f'Error processing request: {e!s}', - } - - def invoke(self, query: str, session_id: str) -> dict[str, Any]: - """Synchronous invocation of the MCP agent.""" - raise NotImplementedError( - 'Synchronous invocation is not supported by this agent. Use the streaming endpoint (message/stream) instead.' - ) diff --git a/samples/python/agents/ag2/agent_executor.py b/samples/python/agents/ag2/agent_executor.py deleted file mode 100644 index bf89f062..00000000 --- a/samples/python/agents/ag2/agent_executor.py +++ /dev/null @@ -1,103 +0,0 @@ -import logging - -from a2a.server.agent_execution import AgentExecutor, RequestContext -from a2a.server.events.event_queue import EventQueue -from a2a.types import ( - TaskArtifactUpdateEvent, - TaskState, - TaskStatus, - TaskStatusUpdateEvent, -) -from a2a.utils import new_agent_text_message, new_task, new_text_artifact -from agent import YoutubeMCPAgent # type: ignore[import-untyped] - - -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -class AG2AgentExecutor(AgentExecutor): - """A YoutubeMCPAgent agent executor.""" - - def __init__(self): - self.agent = YoutubeMCPAgent() - - async def execute( - self, - context: RequestContext, - event_queue: EventQueue, - ) -> None: - query = context.get_user_input() - task = context.current_task - if not task: - task = new_task(context.message) - await event_queue.enqueue_event(task) - - async for item in self.agent.stream(query, task.context_id): - is_task_complete = item['is_task_complete'] - require_user_input = item['require_user_input'] - content = item['content'] - - logger.info( - f'Stream item received: 
complete={is_task_complete}, require_input={require_user_input}, content_len={len(content)}' - ) - - if not is_task_complete and not require_user_input: - await event_queue.enqueue_event( - TaskStatusUpdateEvent( - status=TaskStatus( - state=TaskState.working, - message=new_agent_text_message( - content, - task.context_id, - task.id, - ), - ), - final=False, - context_id=task.context_id, - task_id=task.id, - ) - ) - elif require_user_input: - await event_queue.enqueue_event( - TaskStatusUpdateEvent( - status=TaskStatus( - state=TaskState.input_required, - message=new_agent_text_message( - content, - task.context_id, - task.id, - ), - ), - final=True, - context_id=task.context_id, - task_id=task.id, - ) - ) - else: - await event_queue.enqueue_event( - TaskArtifactUpdateEvent( - append=False, - context_id=task.context_id, - task_id=task.id, - last_chunk=True, - artifact=new_text_artifact( - name='current_result', - description='Result of request to agent.', - text=content, - ), - ) - ) - await event_queue.enqueue_event( - TaskStatusUpdateEvent( - status=TaskStatus(state=TaskState.completed), - final=True, - context_id=task.context_id, - task_id=task.id, - ) - ) - - async def cancel( - self, context: RequestContext, event_queue: EventQueue - ) -> None: - raise Exception('cancel not supported') diff --git a/samples/python/agents/ag2/cli_codegen_a2a_client.py b/samples/python/agents/ag2/cli_codegen_a2a_client.py new file mode 100644 index 00000000..69d1c50b --- /dev/null +++ b/samples/python/agents/ag2/cli_codegen_a2a_client.py @@ -0,0 +1,52 @@ +import os + +from autogen import ConversableAgent, LLMConfig +from autogen.a2a import A2aRemoteAgent + + +config = LLMConfig( + { + 'model': 'gpt-4o-mini', + 'api_key': os.getenv('OPENAI_API_KEY'), + } +) + +codegen_agent = ConversableAgent( + name='CodeGenAgent', + description='A agent that generates code for the user', + system_message=( + 'You are specialist in Python with huge Clean Architecture experience. 
' + 'Also, you are an expert in argparse. ' + 'You should create a simple scripts based on user demands. ' + 'Generate code in a single file. Do not use any other files. ' + 'Do not use any external dependencies if it is possible. ' + 'Use [PEP 723](https://peps.python.org/pep-0723/) to specify script dependencies if it is required. ' + 'Generate just a code, no other text or comments. ' + 'Terminate conversation when reviewer agent has no issues with the code.' + ), + is_termination_msg=lambda msg: 'No issues found.' in msg.get('content', ''), + llm_config=config, +) + + +# create A2A remote agent +reviewer_agent = A2aRemoteAgent( + url='http://localhost:8000', + name='ReviewerAgent', +) + + +async def main() -> str: + # use A2A agent as regular one + result = await reviewer_agent.a_initiate_chat( + codegen_agent, + message='Please, generate a simple script, allows to transfer USD to EUR using any external API.', + ) + return result.chat_history[-2]['content'] + + +if __name__ == '__main__': + import asyncio + + code = asyncio.run(main()) + print(code) diff --git a/samples/python/agents/ag2/fastapi_codegen_a2a_client.py b/samples/python/agents/ag2/fastapi_codegen_a2a_client.py new file mode 100644 index 00000000..ed6c5ef6 --- /dev/null +++ b/samples/python/agents/ag2/fastapi_codegen_a2a_client.py @@ -0,0 +1,50 @@ +import os + +from autogen import ConversableAgent, LLMConfig +from autogen.a2a import A2aRemoteAgent + + +config = LLMConfig( + { + 'model': 'gpt-4o-mini', + 'api_key': os.getenv('OPENAI_API_KEY'), + } +) + +codegen_agent = ConversableAgent( + name='CodeGenAgent', + description='A agent that generates code for the user', + system_message=( + 'You are specialist in Python with huge Clean Architecture experience. ' + 'Also, you are an expert in FastAPI. ' + 'Please, focus on RESTful API principles while API design. ' + 'Generate code in a single file. Do not use any other files. ' + 'Generate just a code, no other text or comments. 
' + 'Terminate conversation when reviewer agent has no issues with the code.' + ), + is_termination_msg=lambda msg: 'No issues found.' in msg.get('content', ''), + llm_config=config, +) + + +# create A2A remote agent +reviewer_agent = A2aRemoteAgent( + url='http://localhost:8000', + name='ReviewerAgent', +) + + +async def main() -> str: + # use A2A agent as regular one + result = await reviewer_agent.a_initiate_chat( + codegen_agent, + message='Please, generate a simple FastAPI application that returns a list of users.', + ) + return result.chat_history[-2]['content'] + + +if __name__ == '__main__': + import asyncio + + code = asyncio.run(main()) + print(code) diff --git a/samples/python/agents/ag2/pyproject.toml b/samples/python/agents/ag2/pyproject.toml index 734e4b37..9d21f071 100644 --- a/samples/python/agents/ag2/pyproject.toml +++ b/samples/python/agents/ag2/pyproject.toml @@ -1,11 +1,10 @@ [project] -name = "a2a-samples-mcp" +name = "ag2-a2a" version = "0.1.0" -description = "MCP agent using A2A and AG2" +description = "MCP Mypy agent using A2A and AG2" readme = "README.md" requires-python = ">=3.12" dependencies = [ - "ag2[mcp, openai]>=0.9.6", - "google-genai>=1.26.0", - "a2a-sdk>=0.3.0", + "ag2[mcp,openai,a2a]>=0.10.0", + "mypy>=1.10.0", ]