Skip to content

Commit d6f4ebe

Browse files
Fix responses API (#8880)
* fix responses API
* address review comments
1 parent 083d04d commit d6f4ebe

File tree

2 files changed

+60
-28
lines changed

2 files changed

+60
-28
lines changed

dspy/clients/base_lm.py

Lines changed: 27 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -217,26 +217,43 @@ def _extract_citations_from_response(self, choice):
217217

218218
def _process_response(self, response):
219219
"""Process the response of OpenAI Response API and extract outputs.
220-
220+
221221
Args:
222222
response: OpenAI Response API response
223223
https://platform.openai.com/docs/api-reference/responses/object
224-
224+
225225
Returns:
226-
List of processed outputs
226+
List of processed outputs, which is always of size 1 because the Response API only supports one output.
227227
"""
228-
outputs = []
228+
text_outputs = []
229229
tool_calls = []
230+
reasoning_contents = []
231+
230232
for output_item in response.output:
231-
if output_item.type == "message":
233+
output_item_type = output_item.type
234+
if output_item_type == "message":
232235
for content_item in output_item.content:
233-
outputs.append(content_item.text)
234-
elif output_item.type == "function_call":
236+
text_outputs.append(content_item.text)
237+
elif output_item_type == "function_call":
235238
tool_calls.append(output_item.model_dump())
239+
elif output_item_type == "reasoning":
240+
if getattr(output_item, "content", None) and len(output_item.content) > 0:
241+
for content_item in output_item.content:
242+
reasoning_contents.append(content_item.text)
243+
elif getattr(output_item, "summary", None) and len(output_item.summary) > 0:
244+
for summary_item in output_item.summary:
245+
reasoning_contents.append(summary_item.text)
246+
247+
result = {}
248+
if len(text_outputs) > 0:
249+
result["text"] = "".join(text_outputs)
250+
if len(tool_calls) > 0:
251+
result["tool_calls"] = tool_calls
252+
if len(reasoning_contents) > 0:
253+
result["reasoning_content"] = "".join(reasoning_contents)
254+
# All `response.output` items map to one answer, so we return a list of size 1.
255+
return [result]
236256

237-
if tool_calls:
238-
outputs.append({"tool_calls": tool_calls})
239-
return outputs
240257

241258

242259
def inspect_history(n: int = 1):

tests/clients/test_lm.py

Lines changed: 33 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,8 @@
1010
from litellm.types.llms.openai import ResponseAPIUsage, ResponsesAPIResponse
1111
from litellm.utils import Choices, Message, ModelResponse
1212
from openai import RateLimitError
13+
from openai.types.responses import ResponseOutputMessage, ResponseReasoningItem
14+
from openai.types.responses.response_reasoning_item import Summary
1315

1416
import dspy
1517
from dspy.utils.dummies import DummyLM
@@ -505,36 +507,49 @@ def test_disable_history():
505507
model="openai/gpt-4o-mini",
506508
)
507509

508-
def test_responses_api(litellm_test_server):
509-
api_base, _ = litellm_test_server
510-
expected_text = "This is a test answer from responses API."
511-
510+
def test_responses_api():
512511
api_response = make_response(
513512
output_blocks=[
514-
{
515-
"id": "msg_1",
516-
"type": "message",
517-
"role": "assistant",
518-
"status": "completed",
519-
"content": [
520-
{"type": "output_text", "text": expected_text, "annotations": []}
521-
],
522-
}
513+
ResponseOutputMessage(
514+
**{
515+
"id": "msg_1",
516+
"type": "message",
517+
"role": "assistant",
518+
"status": "completed",
519+
"content": [
520+
{"type": "output_text", "text": "This is a test answer from responses API.", "annotations": []}
521+
],
522+
},
523+
),
524+
ResponseReasoningItem(
525+
**{
526+
"id": "reasoning_1",
527+
"type": "reasoning",
528+
"summary": [Summary(**{"type": "summary_text", "text": "This is a dummy reasoning."})],
529+
},
530+
),
523531
]
524532
)
525533

526534
with mock.patch("litellm.responses", autospec=True, return_value=api_response) as dspy_responses:
527535
lm = dspy.LM(
528-
model="openai/dspy-test-model",
529-
api_base=api_base,
530-
api_key="fakekey",
536+
model="openai/gpt-5-mini",
531537
model_type="responses",
532538
cache=False,
539+
temperature=1.0,
540+
max_tokens=16000,
533541
)
534-
assert lm("openai query") == [expected_text]
542+
lm_result = lm("openai query")
543+
544+
assert lm_result == [
545+
{
546+
"text": "This is a test answer from responses API.",
547+
"reasoning_content": "This is a dummy reasoning.",
548+
}
549+
]
535550

536551
dspy_responses.assert_called_once()
537-
assert dspy_responses.call_args.kwargs["model"] == "openai/dspy-test-model"
552+
assert dspy_responses.call_args.kwargs["model"] == "openai/gpt-5-mini"
538553

539554

540555
def test_lm_replaces_system_with_developer_role():

0 commit comments

Comments (0)