From 5ce5b009d70f888faf7cabcb95a432856b61713a Mon Sep 17 00:00:00 2001
From: SuveenE
Date: Fri, 28 Mar 2025 20:52:40 -0700
Subject: [PATCH 1/6] Add reasoning parameter to ModelSettings

---
 src/agents/model_settings.py                | 6 ++++++
 src/agents/models/openai_chatcompletions.py | 2 ++
 src/agents/models/openai_responses.py       | 1 +
 3 files changed, 9 insertions(+)

diff --git a/src/agents/model_settings.py b/src/agents/model_settings.py
index 2b0885ab..9ff986b8 100644
--- a/src/agents/model_settings.py
+++ b/src/agents/model_settings.py
@@ -40,6 +40,12 @@ class ModelSettings:
     max_tokens: int | None = None
     """The maximum number of output tokens to generate."""
 
+    reasoning: dict[str, str] | None = None
+    """Controls reasoning behavior for reasoning-capable models.
+    For o-series models: Use 'effort' key with values 'low', 'medium', or 'high' to control
+    reasoning effort. For computer_use_preview: Use 'generate_summary' key with values
+    'concise' or 'detailed' to get reasoning summaries."""
+
     store: bool | None = None
     """Whether to store the generated model response for later retrieval.
     Defaults to True if not provided."""
diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index de7b1ae4..dbcd7798 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -536,6 +536,7 @@ async def _fetch_response(
             stream=stream,
             stream_options={"include_usage": True} if stream else NOT_GIVEN,
             store=store,
+            reasoning=self._non_null_or_not_given(model_settings.reasoning),
             extra_headers=_HEADERS,
         )
 
@@ -555,6 +556,7 @@ async def _fetch_response(
             temperature=model_settings.temperature,
             tools=[],
             parallel_tool_calls=parallel_tool_calls or False,
+            reasoning=model_settings.reasoning,
         )
 
         return response, ret
diff --git a/src/agents/models/openai_responses.py b/src/agents/models/openai_responses.py
index 17803fda..20cedc08 100644
--- a/src/agents/models/openai_responses.py
+++ b/src/agents/models/openai_responses.py
@@ -247,6 +247,7 @@ async def _fetch_response(
             extra_headers=_HEADERS,
             text=response_format,
             store=self._non_null_or_not_given(model_settings.store),
+            reasoning=self._non_null_or_not_given(model_settings.reasoning),
         )
 
     def _get_client(self) -> AsyncOpenAI:

From 2b5609b0f7208ccbe5e0a752e83b296810b7307e Mon Sep 17 00:00:00 2001
From: SuveenE
Date: Tue, 1 Apr 2025 23:28:24 -0700
Subject: [PATCH 2/6] Use Reasoning object from openai.types.shared_params

---
 src/agents/model_settings.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/agents/model_settings.py b/src/agents/model_settings.py
index 9ff986b8..13053e9c 100644
--- a/src/agents/model_settings.py
+++ b/src/agents/model_settings.py
@@ -3,6 +3,8 @@
 from dataclasses import dataclass, fields, replace
 from typing import Literal
 
+from openai.types.shared import Reasoning
+
 
 @dataclass
 class ModelSettings:
@@ -40,7 +42,7 @@ class ModelSettings:
     max_tokens: int | None = None
     """The maximum number of output tokens to generate."""
 
-    reasoning: dict[str, str] | None = None
+    reasoning: Reasoning | None = None
     """Controls reasoning behavior for reasoning-capable models.
     For o-series models: Use 'effort' key with values 'low', 'medium', or 'high' to control
     reasoning effort. For computer_use_preview: Use 'generate_summary' key with values

From deb478ad46f33ab0ed0da3177327295320fbdc21 Mon Sep 17 00:00:00 2001
From: Suveen Ellawela <75742713+SuveenE@users.noreply.github.com>
Date: Thu, 3 Apr 2025 10:43:28 -0700
Subject: [PATCH 3/6] Fix openai_chatcompletions.py typecheck error

---
 src/agents/models/openai_chatcompletions.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/src/agents/models/openai_chatcompletions.py b/src/agents/models/openai_chatcompletions.py
index dbcd7798..4e7c8bc6 100644
--- a/src/agents/models/openai_chatcompletions.py
+++ b/src/agents/models/openai_chatcompletions.py
@@ -521,6 +521,8 @@ async def _fetch_response(
         # Match the behavior of Responses where store is True when not given
         store = model_settings.store if model_settings.store is not None else True
 
+        reasoning_effort = model_settings.reasoning.effort if model_settings.reasoning else None
+
         ret = await self._get_client().chat.completions.create(
             model=self.model,
             messages=converted_messages,
@@ -536,7 +538,7 @@ async def _fetch_response(
             stream=stream,
             stream_options={"include_usage": True} if stream else NOT_GIVEN,
             store=store,
-            reasoning=self._non_null_or_not_given(model_settings.reasoning),
+            reasoning_effort=self._non_null_or_not_given(reasoning_effort),
             extra_headers=_HEADERS,
         )
 

From d71671f1a030e982f103426e35d47dfb794d5749 Mon Sep 17 00:00:00 2001
From: Suveen Ellawela <75742713+SuveenE@users.noreply.github.com>
Date: Thu, 3 Apr 2025 14:59:52 -0700
Subject: [PATCH 4/6] Remove additional new line in model_settings.py

---
 src/agents/model_settings.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/agents/model_settings.py b/src/agents/model_settings.py
index afa42100..dd7978e8 100644
--- a/src/agents/model_settings.py
+++ b/src/agents/model_settings.py
@@ -51,7 +51,6 @@ class ModelSettings:
     metadata: dict[str, str] | None = None
     """Metadata to include with the model response call."""
 
-
     store: bool | None = None
     """Whether to store the generated model response for later retrieval.
     Defaults to True if not provided."""

From efc28c7bfa49bca4fb440ec0201d32b33fc22180 Mon Sep 17 00:00:00 2001
From: Rohan Mehta
Date: Thu, 3 Apr 2025 19:32:19 -0400
Subject: [PATCH 5/6] Update model_settings.py

---
 src/agents/model_settings.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/src/agents/model_settings.py b/src/agents/model_settings.py
index dd7978e8..78d75d5c 100644
--- a/src/agents/model_settings.py
+++ b/src/agents/model_settings.py
@@ -43,10 +43,9 @@ class ModelSettings:
     """The maximum number of output tokens to generate."""
 
     reasoning: Reasoning | None = None
-    """Controls reasoning behavior for reasoning-capable models.
-    For o-series models: Use 'effort' key with values 'low', 'medium', or 'high' to control
-    reasoning effort. For computer_use_preview: Use 'generate_summary' key with values
-    'concise' or 'detailed' to get reasoning summaries."""
+    """Configuration options for 
+    [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+    """
 
     metadata: dict[str, str] | None = None
     """Metadata to include with the model response call."""

From 39df39b6b99cfaac88c77c8806503a5dbc96dafe Mon Sep 17 00:00:00 2001
From: Rohan Mehta
Date: Thu, 3 Apr 2025 19:33:47 -0400
Subject: [PATCH 6/6] Update model_settings.py

---
 src/agents/model_settings.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/agents/model_settings.py b/src/agents/model_settings.py
index 78d75d5c..bac71f58 100644
--- a/src/agents/model_settings.py
+++ b/src/agents/model_settings.py
@@ -43,7 +43,7 @@ class ModelSettings:
     """The maximum number of output tokens to generate."""
 
     reasoning: Reasoning | None = None
-    """Configuration options for 
+    """Configuration options for
     [reasoning models](https://platform.openai.com/docs/guides/reasoning).
     """
 