diff --git a/py/plugins/anthropic/src/genkit/plugins/anthropic/models.py b/py/plugins/anthropic/src/genkit/plugins/anthropic/models.py
index e9c6185cba..f8e645944f 100644
--- a/py/plugins/anthropic/src/genkit/plugins/anthropic/models.py
+++ b/py/plugins/anthropic/src/genkit/plugins/anthropic/models.py
@@ -75,10 +75,10 @@ async def generate(self, request: GenerateRequest, ctx: ActionRunContext | None
 
         if streaming:
             response = await self._generate_streaming(params, ctx)
-            content = []
         else:
             response = await self.client.messages.create(**params)
-            content = self._to_genkit_content(response.content)
+
+        content = self._to_genkit_content(response.content)
 
         response_message = Message(role=Role.MODEL, content=content)
         basic_usage = get_basic_usage_stats(input_=request.messages, response=response_message)
@@ -155,7 +155,7 @@ async def _generate_streaming(self, params: dict[str, Any], ctx: ActionRunContex
                         GenerateResponseChunk(
                             role=Role.MODEL,
                             index=0,
-                            content=[TextPart(text=chunk.delta.text)],
+                            content=[Part(root=TextPart(text=chunk.delta.text))],
                         )
                     )
             return await stream.get_final_message()
@@ -223,15 +223,17 @@ def _to_genkit_content(self, content_blocks: list) -> list[Part]:
         parts = []
         for block in content_blocks:
             if block.type == 'text':
-                parts.append(TextPart(text=block.text))
+                parts.append(Part(root=TextPart(text=block.text)))
             elif block.type == 'tool_use':
                 parts.append(
-                    ToolRequestPart(
-                        tool_request={
-                            'ref': block.id,
-                            'name': block.name,
-                            'input': block.input,
-                        }
+                    Part(
+                        root=ToolRequestPart(
+                            tool_request={
+                                'ref': block.id,
+                                'name': block.name,
+                                'input': block.input,
+                            }
+                        )
                     )
                 )
         return parts
diff --git a/py/plugins/anthropic/tests/test_models.py b/py/plugins/anthropic/tests/test_models.py
index f346cdb336..e2a779bd06 100644
--- a/py/plugins/anthropic/tests/test_models.py
+++ b/py/plugins/anthropic/tests/test_models.py
@@ -180,9 +180,10 @@ def test_to_anthropic_messages():
 class MockStreamManager:
     """Mock stream manager for testing streaming."""
 
-    def __init__(self, chunks):
+    def __init__(self, chunks, final_content=None):
         self.chunks = chunks
         self.final_message = MagicMock()
+        self.final_message.content = final_content if final_content else []
         self.final_message.usage = MagicMock(input_tokens=10, output_tokens=20)
         self.final_message.stop_reason = 'end_turn'
 
@@ -217,7 +218,8 @@ async def test_streaming_generation():
         MagicMock(type='content_block_delta', delta=MagicMock(text='!')),
     ]
 
-    mock_stream = MockStreamManager(chunks)
+    final_content = [MagicMock(type='text', text='Hello world!')]
+    mock_stream = MockStreamManager(chunks, final_content=final_content)
     mock_client.messages.stream.return_value = mock_stream
 
     model = AnthropicModel(model_name='claude-sonnet-4', client=mock_client)
@@ -248,3 +250,10 @@ def send_chunk(chunk: GenerateResponseChunk):
 
     assert response.usage.input_tokens == 10
     assert response.usage.output_tokens == 20
+
+    # Verify final response content is populated
+    assert len(response.message.content) == 1
+    final_part = response.message.content[0]
+    assert isinstance(final_part, Part)
+    assert isinstance(final_part.root, TextPart)
+    assert final_part.root.text == 'Hello world!'