From d459943cc1c81cf9ce5c426edd3ef9112fdf6723 Mon Sep 17 00:00:00 2001 From: Shreehari Date: Wed, 4 Jun 2025 19:21:14 +0530 Subject: [PATCH 1/3] fix(responses): support raw responses for `parse()` --- src/openai/resources/responses/responses.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index c3bec87153..81ae4e5bd6 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -2483,6 +2483,9 @@ def __init__(self, responses: Responses) -> None: self.cancel = _legacy_response.to_raw_response_wrapper( responses.cancel, ) + self.parse = _legacy_response.to_raw_response_wrapper( + responses.parse, + ) @cached_property def input_items(self) -> InputItemsWithRawResponse: @@ -2505,6 +2508,9 @@ def __init__(self, responses: AsyncResponses) -> None: self.cancel = _legacy_response.async_to_raw_response_wrapper( responses.cancel, ) + self.parse = _legacy_response.async_to_raw_response_wrapper( + responses.parse, + ) @cached_property def input_items(self) -> AsyncInputItemsWithRawResponse: From 002cc7bb3c315d95b81c2e497f55d21be7fd26f8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 9 Jun 2025 15:47:27 +0000 Subject: [PATCH 2/3] feat(api): Add tools and structured outputs to evals --- .stats.yml | 4 +- src/openai/types/chat/__init__.py | 1 + src/openai/types/chat/chat_completion_tool.py | 15 ++++++ ...create_eval_completions_run_data_source.py | 28 +++++++++++ ..._eval_completions_run_data_source_param.py | 28 +++++++++++ src/openai/types/evals/run_cancel_response.py | 48 +++++++++++++++++++ src/openai/types/evals/run_create_params.py | 48 +++++++++++++++++++ src/openai/types/evals/run_create_response.py | 48 +++++++++++++++++++ src/openai/types/evals/run_list_response.py | 48 +++++++++++++++++++ .../types/evals/run_retrieve_response.py | 48 +++++++++++++++++++ 10 files changed, 314 insertions(+), 2 deletions(-) create mode 100644 src/openai/types/chat/chat_completion_tool.py diff --git a/.stats.yml b/.stats.yml index 035814ecaf..25b4500060 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 111 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-0205acb1015d29b2312a48526734c0399f93026d4fe2dff5c7768f566e333fd2.yml -openapi_spec_hash: 1772cc9056c2f6dfb2a4e9cb77ee6343 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-4865dda2b62927bd141cbc85f81be3d88602f103e2c581e15eb1caded3e3aaa2.yml +openapi_spec_hash: 7d14a9b23ef4ac93ea46d629601b6f6b config_hash: ed1e6b3c5f93d12b80d31167f55c557c diff --git a/src/openai/types/chat/__init__.py b/src/openai/types/chat/__init__.py index b4f43b298f..0945bcad11 100644 --- a/src/openai/types/chat/__init__.py +++ b/src/openai/types/chat/__init__.py @@ -4,6 +4,7 @@ from .chat_completion import ChatCompletion as ChatCompletion from .chat_completion_role import ChatCompletionRole as ChatCompletionRole +from .chat_completion_tool import ChatCompletionTool as ChatCompletionTool from .chat_completion_audio import ChatCompletionAudio as ChatCompletionAudio from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk from .completion_list_params import CompletionListParams as CompletionListParams diff --git a/src/openai/types/chat/chat_completion_tool.py b/src/openai/types/chat/chat_completion_tool.py new file mode 100644 index 
0000000000..ae9126f906 --- /dev/null +++ b/src/openai/types/chat/chat_completion_tool.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel +from ..shared.function_definition import FunctionDefinition + +__all__ = ["ChatCompletionTool"] + + +class ChatCompletionTool(BaseModel): + function: FunctionDefinition + + type: Literal["function"] + """The type of the tool. Currently, only `function` is supported.""" diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py index 064ef3a310..0a942cd200 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source.py @@ -6,8 +6,12 @@ from ..._utils import PropertyInfo from ..._models import BaseModel from ..shared.metadata import Metadata +from ..chat.chat_completion_tool import ChatCompletionTool +from ..shared.response_format_text import ResponseFormatText from ..responses.easy_input_message import EasyInputMessage from ..responses.response_input_text import ResponseInputText +from ..shared.response_format_json_object import ResponseFormatJSONObject +from ..shared.response_format_json_schema import ResponseFormatJSONSchema __all__ = [ "CreateEvalCompletionsRunDataSource", @@ -24,6 +28,7 @@ "InputMessagesTemplateTemplateMessageContentOutputText", "InputMessagesItemReference", "SamplingParams", + "SamplingParamsResponseFormat", ] @@ -136,17 +141,40 @@ class InputMessagesItemReference(BaseModel): Union[InputMessagesTemplate, InputMessagesItemReference], PropertyInfo(discriminator="type") ] +SamplingParamsResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject] + class SamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" + response_format: Optional[SamplingParamsResponseFormat] = None + """An object specifying the format that the model must output. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + seed: Optional[int] = None """A seed value to initialize the randomness, during sampling.""" temperature: Optional[float] = None """A higher temperature increases randomness in the outputs.""" + tools: Optional[List[ChatCompletionTool]] = None + """A list of tools the model may call. + + Currently, only functions are supported as a tool. Use this to provide a list of + functions the model may generate JSON inputs for. A max of 128 functions are + supported. 
+ """ + top_p: Optional[float] = None """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py index 3fa4c19ad2..84344fcd94 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py @@ -6,8 +6,12 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..shared_params.metadata import Metadata +from ..chat.chat_completion_tool_param import ChatCompletionToolParam from ..responses.easy_input_message_param import EasyInputMessageParam +from ..shared_params.response_format_text import ResponseFormatText from ..responses.response_input_text_param import ResponseInputTextParam +from ..shared_params.response_format_json_object import ResponseFormatJSONObject +from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema __all__ = [ "CreateEvalCompletionsRunDataSourceParam", @@ -24,6 +28,7 @@ "InputMessagesTemplateTemplateMessageContentOutputText", "InputMessagesItemReference", "SamplingParams", + "SamplingParamsResponseFormat", ] @@ -130,17 +135,40 @@ class InputMessagesItemReference(TypedDict, total=False): InputMessages: TypeAlias = Union[InputMessagesTemplate, InputMessagesItemReference] +SamplingParamsResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONSchema, ResponseFormatJSONObject] + class SamplingParams(TypedDict, total=False): max_completion_tokens: int """The maximum number of tokens in the generated output.""" + response_format: SamplingParamsResponseFormat + """An object specifying the format that the model must output. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + seed: int """A seed value to initialize the randomness, during sampling.""" temperature: float """A higher temperature increases randomness in the outputs.""" + tools: Iterable[ChatCompletionToolParam] + """A list of tools the model may call. + + Currently, only functions are supported as a tool. Use this to provide a list of + functions the model may generate JSON inputs for. A max of 128 functions are + supported. 
+ """ + top_p: float """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" diff --git a/src/openai/types/evals/run_cancel_response.py b/src/openai/types/evals/run_cancel_response.py index d3416129af..12cc868045 100644 --- a/src/openai/types/evals/run_cancel_response.py +++ b/src/openai/types/evals/run_cancel_response.py @@ -8,10 +8,12 @@ from ..._utils import PropertyInfo from ..._models import BaseModel from .eval_api_error import EvalAPIError +from ..responses.tool import Tool from ..shared.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource +from ..responses.response_format_text_config import ResponseFormatTextConfig from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource __all__ = [ @@ -32,6 +34,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", + "DataSourceResponsesSamplingParamsText", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts", @@ -185,6 +188,24 @@ class DataSourceResponsesInputMessagesItemReference(BaseModel): ] +class DataSourceResponsesSamplingParamsText(BaseModel): + format: Optional[ResponseFormatTextConfig] = None + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + + class DataSourceResponsesSamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" @@ -195,6 +216,33 @@ class DataSourceResponsesSamplingParams(BaseModel): temperature: Optional[float] = None """A higher temperature increases randomness in the outputs.""" + text: Optional[DataSourceResponsesSamplingParamsText] = None + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + + tools: Optional[List[Tool]] = None + """An array of tools the model may call while generating a response. + + You can specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). 
+ """ + top_p: Optional[float] = None """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" diff --git a/src/openai/types/evals/run_create_params.py b/src/openai/types/evals/run_create_params.py index 5aa2398f36..354a81132e 100644 --- a/src/openai/types/evals/run_create_params.py +++ b/src/openai/types/evals/run_create_params.py @@ -5,10 +5,12 @@ from typing import Dict, List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict +from ..responses.tool_param import ToolParam from ..shared_params.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text_param import ResponseInputTextParam from .create_eval_jsonl_run_data_source_param import CreateEvalJSONLRunDataSourceParam +from ..responses.response_format_text_config_param import ResponseFormatTextConfigParam from .create_eval_completions_run_data_source_param import CreateEvalCompletionsRunDataSourceParam __all__ = [ @@ -29,6 +31,7 @@ "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText", "DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference", "DataSourceCreateEvalResponsesRunDataSourceSamplingParams", + "DataSourceCreateEvalResponsesRunDataSourceSamplingParamsText", ] @@ -202,6 +205,24 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference(Typed ] +class DataSourceCreateEvalResponsesRunDataSourceSamplingParamsText(TypedDict, total=False): + format: ResponseFormatTextConfigParam + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + + class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total=False): max_completion_tokens: int """The maximum number of tokens in the generated output.""" @@ -212,6 +233,33 @@ class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total= temperature: float """A higher temperature increases randomness in the outputs.""" + text: DataSourceCreateEvalResponsesRunDataSourceSamplingParamsText + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + + tools: Iterable[ToolParam] + """An array of tools the model may call while generating a response. + + You can specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). 
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + """ + top_p: float """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" diff --git a/src/openai/types/evals/run_create_response.py b/src/openai/types/evals/run_create_response.py index 51aed2080f..776ebb413f 100644 --- a/src/openai/types/evals/run_create_response.py +++ b/src/openai/types/evals/run_create_response.py @@ -8,10 +8,12 @@ from ..._utils import PropertyInfo from ..._models import BaseModel from .eval_api_error import EvalAPIError +from ..responses.tool import Tool from ..shared.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource +from ..responses.response_format_text_config import ResponseFormatTextConfig from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource __all__ = [ @@ -32,6 +34,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", + "DataSourceResponsesSamplingParamsText", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts", @@ -185,6 +188,24 @@ class DataSourceResponsesInputMessagesItemReference(BaseModel): ] +class DataSourceResponsesSamplingParamsText(BaseModel): + format: Optional[ResponseFormatTextConfig] = None + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + + class DataSourceResponsesSamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" @@ -195,6 +216,33 @@ class DataSourceResponsesSamplingParams(BaseModel): temperature: Optional[float] = None """A higher temperature increases randomness in the outputs.""" + text: Optional[DataSourceResponsesSamplingParamsText] = None + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + + tools: Optional[List[Tool]] = None + """An array of tools the model may call while generating a response. + + You can specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). 
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + """ + top_p: Optional[float] = None """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" diff --git a/src/openai/types/evals/run_list_response.py b/src/openai/types/evals/run_list_response.py index f1d0b01da9..9e2374f93c 100644 --- a/src/openai/types/evals/run_list_response.py +++ b/src/openai/types/evals/run_list_response.py @@ -8,10 +8,12 @@ from ..._utils import PropertyInfo from ..._models import BaseModel from .eval_api_error import EvalAPIError +from ..responses.tool import Tool from ..shared.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource +from ..responses.response_format_text_config import ResponseFormatTextConfig from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource __all__ = [ @@ -32,6 +34,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", + "DataSourceResponsesSamplingParamsText", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts", @@ -185,6 +188,24 @@ class DataSourceResponsesInputMessagesItemReference(BaseModel): ] +class DataSourceResponsesSamplingParamsText(BaseModel): + format: Optional[ResponseFormatTextConfig] = None + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + + class DataSourceResponsesSamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" @@ -195,6 +216,33 @@ class DataSourceResponsesSamplingParams(BaseModel): temperature: Optional[float] = None """A higher temperature increases randomness in the outputs.""" + text: Optional[DataSourceResponsesSamplingParamsText] = None + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + + tools: Optional[List[Tool]] = None + """An array of tools the model may call while generating a response. + + You can specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). + Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). 
+ - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + """ + top_p: Optional[float] = None """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" diff --git a/src/openai/types/evals/run_retrieve_response.py b/src/openai/types/evals/run_retrieve_response.py index 6c5951b4eb..a4f43ce3f9 100644 --- a/src/openai/types/evals/run_retrieve_response.py +++ b/src/openai/types/evals/run_retrieve_response.py @@ -8,10 +8,12 @@ from ..._utils import PropertyInfo from ..._models import BaseModel from .eval_api_error import EvalAPIError +from ..responses.tool import Tool from ..shared.metadata import Metadata from ..shared.reasoning_effort import ReasoningEffort from ..responses.response_input_text import ResponseInputText from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource +from ..responses.response_format_text_config import ResponseFormatTextConfig from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource __all__ = [ @@ -32,6 +34,7 @@ "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", "DataSourceResponsesInputMessagesItemReference", "DataSourceResponsesSamplingParams", + "DataSourceResponsesSamplingParamsText", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts", @@ -185,6 +188,24 @@ class DataSourceResponsesInputMessagesItemReference(BaseModel): ] +class DataSourceResponsesSamplingParamsText(BaseModel): + format: Optional[ResponseFormatTextConfig] = None + """An object specifying the format that the model must output. + + Configuring `{ "type": "json_schema" }` enables Structured Outputs, which + ensures the model will match your supplied JSON schema. Learn more in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + The default format is `{ "type": "text" }` with no additional options. + + **Not recommended for gpt-4o and newer models:** + + Setting to `{ "type": "json_object" }` enables the older JSON mode, which + ensures the message the model generates is valid JSON. Using `json_schema` is + preferred for models that support it. + """ + + class DataSourceResponsesSamplingParams(BaseModel): max_completion_tokens: Optional[int] = None """The maximum number of tokens in the generated output.""" @@ -195,6 +216,33 @@ class DataSourceResponsesSamplingParams(BaseModel): temperature: Optional[float] = None """A higher temperature increases randomness in the outputs.""" + text: Optional[DataSourceResponsesSamplingParamsText] = None + """Configuration options for a text response from the model. + + Can be plain text or structured JSON data. Learn more: + + - [Text inputs and outputs](https://platform.openai.com/docs/guides/text) + - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs) + """ + + tools: Optional[List[Tool]] = None + """An array of tools the model may call while generating a response. + + You can specify which tool to use by setting the `tool_choice` parameter. + + The two categories of tools you can provide the model are: + + - **Built-in tools**: Tools that are provided by OpenAI that extend the model's + capabilities, like + [web search](https://platform.openai.com/docs/guides/tools-web-search) or + [file search](https://platform.openai.com/docs/guides/tools-file-search). 
+ Learn more about + [built-in tools](https://platform.openai.com/docs/guides/tools). + - **Function calls (custom tools)**: Functions that are defined by you, enabling + the model to call your own code. Learn more about + [function calling](https://platform.openai.com/docs/guides/function-calling). + """ + top_p: Optional[float] = None """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" From b8509a21a35fce313079b7c90dd8afeecaedd611 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Mon, 9 Jun 2025 15:48:23 +0000 Subject: [PATCH 3/3] release: 1.85.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 13 +++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 16 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 67871342a5..c3ef6db435 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.84.0" + ".": "1.85.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index e148567c89..412b520d51 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## 1.85.0 (2025-06-09) + +Full Changelog: [v1.84.0...v1.85.0](https://github.com/openai/openai-python/compare/v1.84.0...v1.85.0) + +### Features + +* **api:** Add tools and structured outputs to evals ([002cc7b](https://github.com/openai/openai-python/commit/002cc7bb3c315d95b81c2e497f55d21be7fd26f8)) + + +### Bug Fixes + +* **responses:** support raw responses for `parse()` ([d459943](https://github.com/openai/openai-python/commit/d459943cc1c81cf9ce5c426edd3ef9112fdf6723)) + ## 1.84.0 (2025-06-03) Full Changelog: [v1.83.0...v1.84.0](https://github.com/openai/openai-python/compare/v1.83.0...v1.84.0) diff --git a/pyproject.toml b/pyproject.toml index 224d6dce0f..7add11521c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.84.0" +version = "1.85.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 332096f987..0b85832b85 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.84.0" # x-release-please-version +__version__ = "1.85.0" # x-release-please-version
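Usage sketch for PATCH 1/3: before this fix, `ResponsesWithRawResponse` and `AsyncResponsesWithRawResponse` wrapped `create`/`retrieve`/`delete`/`cancel` but never set `.parse`, so reaching `parse()` through `.with_raw_response` raised `AttributeError`. A minimal sketch of what the patch enables — the model name and schema below are placeholders, not part of the patch, and an `OPENAI_API_KEY` is assumed to be set in the environment:

```python
from pydantic import BaseModel

from openai import OpenAI


class CalendarEvent(BaseModel):
    name: str
    date: str


client = OpenAI()  # assumes OPENAI_API_KEY in the environment

# With the new wrappers registered, the raw-response variant of `parse()`
# returns the HTTP response alongside the typed body.
raw = client.responses.with_raw_response.parse(
    model="gpt-4o-2024-08-06",  # placeholder model name
    input="Alice and Bob are going to a science fair on Friday.",
    text_format=CalendarEvent,
)

print(raw.headers.get("x-request-id"))  # raw HTTP metadata from the wrapper
response = raw.parse()                  # the typed, parsed response body
print(response.output_parsed)           # -> CalendarEvent instance
```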
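Usage sketch for PATCH 2/3, completions data source: the patch adds `response_format` and `tools` to `SamplingParams` on `CreateEvalCompletionsRunDataSource`. The sketch below shows the two new keys in a run-creation call; it is trimmed to the relevant fields, the eval and file IDs are hypothetical, and a real run will typically also need `input_messages`:

```python
from openai import OpenAI

client = OpenAI()

run = client.evals.runs.create(
    eval_id="eval_abc123",  # hypothetical eval, assumed to already exist
    name="structured-output-run",
    data_source={
        "type": "completions",
        "model": "gpt-4o-mini",  # placeholder model
        "source": {"type": "file_id", "id": "file-abc123"},  # hypothetical file
        "sampling_params": {
            "temperature": 0.2,
            # New in this patch: force the sampled model to emit JSON
            # matching a supplied schema (Structured Outputs).
            "response_format": {
                "type": "json_schema",
                "json_schema": {
                    "name": "verdict",
                    "strict": True,
                    "schema": {
                        "type": "object",
                        "properties": {"passed": {"type": "boolean"}},
                        "required": ["passed"],
                        "additionalProperties": False,
                    },
                },
            },
            # New in this patch: function tools the sampled model may call
            # (per the docstring, up to 128 are supported).
            "tools": [
                {
                    "type": "function",
                    "function": {
                        "name": "lookup_fact",  # illustrative tool only
                        "description": "Look up a supporting fact.",
                        "parameters": {
                            "type": "object",
                            "properties": {"query": {"type": "string"}},
                        },
                    },
                }
            ],
        },
    },
)
print(run.id, run.status)
```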
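The responses-based run data source gains the analogous knobs in Responses-API shape: a `text.format` object rather than `response_format`, and flat function tools (name and parameters at the top level instead of nested under `"function"`). Continuing with the client from the previous sketch, again with placeholder IDs and an assumed `file_id` source:

```python
run = client.evals.runs.create(
    eval_id="eval_abc123",  # hypothetical
    name="responses-run-with-tools",
    data_source={
        "type": "responses",
        "model": "gpt-4o-mini",  # placeholder model
        "source": {"type": "file_id", "id": "file-abc123"},  # hypothetical
        "sampling_params": {
            # New in this patch: Responses-style structured output config.
            "text": {
                "format": {
                    "type": "json_schema",
                    "name": "verdict",
                    "strict": True,
                    "schema": {
                        "type": "object",
                        "properties": {"passed": {"type": "boolean"}},
                        "required": ["passed"],
                        "additionalProperties": False,
                    },
                }
            },
            # New in this patch: Responses-style tools; function tools are
            # flat here, unlike the chat-completions shape above.
            "tools": [
                {
                    "type": "function",
                    "name": "lookup_fact",  # illustrative tool only
                    "description": "Look up a supporting fact.",
                    "parameters": {
                        "type": "object",
                        "properties": {"query": {"type": "string"}},
                    },
                    "strict": True,
                }
            ],
        },
    },
)
print(run.id, run.status)
```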