From 870ad4ed3a284d75f44b825503750129284c7906 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 18 Apr 2025 10:17:31 +0000
Subject: [PATCH 01/11] chore(internal): update models test

---
 tests/test_models.py | 3 +++
 1 file changed, 3 insertions(+)
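
A minimal sketch of the behaviour the new assertions pin down, written
against plain Pydantic v2 for illustration (this SDK ships its own
`construct`, but `model_fields_set` draws the same distinction between
"never set", "explicitly set to None", and "set to a value"):

    from typing import Optional

    from pydantic import BaseModel

    class Model(BaseModel):
        resource_id: Optional[str] = None

    # construct() skips validation; omitted fields fall back to their
    # defaults and are *not* recorded in model_fields_set
    m = Model.model_construct()
    assert m.resource_id is None
    assert "resource_id" not in m.model_fields_set

    # passing None explicitly still records the field as set
    m = Model.model_construct(resource_id=None)
    assert m.resource_id is None
    assert "resource_id" in m.model_fields_set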

diff --git a/tests/test_models.py b/tests/test_models.py
index 4b18940b49..440e17a08c 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -492,12 +492,15 @@ class Model(BaseModel):
         resource_id: Optional[str] = None
 
     m = Model.construct()
+    assert m.resource_id is None
     assert "resource_id" not in m.model_fields_set
 
     m = Model.construct(resource_id=None)
+    assert m.resource_id is None
     assert "resource_id" in m.model_fields_set
 
     m = Model.construct(resource_id="foo")
+    assert m.resource_id == "foo"
     assert "resource_id" in m.model_fields_set
 
 

From a44016c64cdefe404e97592808ed3c25411ab27b Mon Sep 17 00:00:00 2001
From: dogisgreat <git@bigly.dog>
Date: Mon, 21 Apr 2025 14:23:33 -0400
Subject: [PATCH 02/11] chore: update completion parse signature

---
 src/openai/resources/beta/chat/completions.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
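
What the widened literal allows at call sites, as a quick sketch; the
client setup and model name here are illustrative assumptions, not part
of this patch:

    from openai import OpenAI

    client = OpenAI()

    completion = client.beta.chat.completions.parse(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Say hello"}],
        # "flex" is now accepted alongside "auto" and "default"
        service_tier="flex",
    )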

diff --git a/src/openai/resources/beta/chat/completions.py b/src/openai/resources/beta/chat/completions.py
index 545a3f4087..80e015615f 100644
--- a/src/openai/resources/beta/chat/completions.py
+++ b/src/openai/resources/beta/chat/completions.py
@@ -81,7 +81,7 @@ def parse(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -228,7 +228,7 @@ def stream(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -360,7 +360,7 @@ async def parse(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
@@ -507,7 +507,7 @@ def stream(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_effort: Optional[ReasoningEffort] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
-        service_tier: Optional[Literal["auto", "default"]] | NotGiven = NOT_GIVEN,
+        service_tier: Optional[Literal["auto", "default", "flex"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
         store: Optional[bool] | NotGiven = NOT_GIVEN,
         stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,

From 09972119df5dd4c7c8db137c721364787e22d4c6 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 22 Apr 2025 20:12:49 +0000
Subject: [PATCH 03/11] chore(ci): add timeout thresholds for CI jobs

---
 .github/workflows/ci.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 6f9cf84bb4..d148b34a9e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,6 +10,7 @@ on:
 
 jobs:
   lint:
+    timeout-minutes: 10
     name: lint
     runs-on: ubuntu-latest
     steps:
@@ -30,6 +31,7 @@ jobs:
         run: ./scripts/lint
 
   test:
+    timeout-minutes: 10
     name: test
     runs-on: ubuntu-latest
     steps:
@@ -50,6 +52,7 @@ jobs:
         run: ./scripts/test
 
   examples:
+    timeout-minutes: 10
     name: examples
     runs-on: ubuntu-latest
     if: github.repository == 'openai/openai-python'

From b425fb906f62550c3669b09b9d8575f3d4d8496b Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 22 Apr 2025 20:37:29 +0000
Subject: [PATCH 04/11] chore(internal): import reformatting

---
 src/openai/resources/audio/speech.py                      | 5 +----
 src/openai/resources/audio/transcriptions.py              | 8 +-------
 src/openai/resources/audio/translations.py                | 7 +------
 src/openai/resources/batches.py                           | 5 +----
 src/openai/resources/beta/assistants.py                   | 5 +----
 src/openai/resources/beta/realtime/sessions.py            | 5 +----
 .../resources/beta/realtime/transcription_sessions.py     | 5 +----
 src/openai/resources/beta/threads/messages.py             | 5 +----
 src/openai/resources/beta/threads/runs/steps.py           | 5 +----
 src/openai/resources/beta/threads/threads.py              | 6 +-----
 src/openai/resources/chat/completions/completions.py      | 6 +-----
 src/openai/resources/completions.py                       | 6 +-----
 src/openai/resources/evals/evals.py                       | 5 +----
 src/openai/resources/evals/runs/runs.py                   | 5 +----
 src/openai/resources/files.py                             | 7 +------
 .../resources/fine_tuning/checkpoints/permissions.py      | 5 +----
 src/openai/resources/fine_tuning/jobs/jobs.py             | 5 +----
 src/openai/resources/images.py                            | 7 +------
 src/openai/resources/moderations.py                       | 5 +----
 src/openai/resources/responses/responses.py               | 7 +------
 src/openai/resources/uploads/parts.py                     | 7 +------
 src/openai/resources/uploads/uploads.py                   | 5 +----
 src/openai/resources/vector_stores/file_batches.py        | 6 +-----
 src/openai/resources/vector_stores/files.py               | 6 +-----
 src/openai/resources/vector_stores/vector_stores.py       | 5 +----
 25 files changed, 25 insertions(+), 118 deletions(-)

diff --git a/src/openai/resources/audio/speech.py b/src/openai/resources/audio/speech.py
index 1ee53db9d5..fad18dcdf5 100644
--- a/src/openai/resources/audio/speech.py
+++ b/src/openai/resources/audio/speech.py
@@ -9,10 +9,7 @@
 
 from ... import _legacy_response
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from ..._utils import maybe_transform, async_maybe_transform
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import (
diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py
index 7e62f70f60..0c7ebca7a6 100644
--- a/src/openai/resources/audio/transcriptions.py
+++ b/src/openai/resources/audio/transcriptions.py
@@ -11,13 +11,7 @@
 from ... import _legacy_response
 from ...types import AudioResponseFormat
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from ..._utils import (
-    extract_files,
-    required_args,
-    maybe_transform,
-    deepcopy_minimal,
-    async_maybe_transform,
-)
+from ..._utils import extract_files, required_args, maybe_transform, deepcopy_minimal, async_maybe_transform
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/audio/translations.py b/src/openai/resources/audio/translations.py
index f55dbd0ee5..28b577ce2e 100644
--- a/src/openai/resources/audio/translations.py
+++ b/src/openai/resources/audio/translations.py
@@ -10,12 +10,7 @@
 
 from ... import _legacy_response
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from ..._utils import (
-    extract_files,
-    maybe_transform,
-    deepcopy_minimal,
-    async_maybe_transform,
-)
+from ..._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/batches.py b/src/openai/resources/batches.py
index b7a299be12..26ea498b31 100644
--- a/src/openai/resources/batches.py
+++ b/src/openai/resources/batches.py
@@ -10,10 +10,7 @@
 from .. import _legacy_response
 from ..types import batch_list_params, batch_create_params
 from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from .._utils import maybe_transform, async_maybe_transform
 from .._compat import cached_property
 from .._resource import SyncAPIResource, AsyncAPIResource
 from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/beta/assistants.py b/src/openai/resources/beta/assistants.py
index 43f6a7f135..9059d93616 100644
--- a/src/openai/resources/beta/assistants.py
+++ b/src/openai/resources/beta/assistants.py
@@ -9,10 +9,7 @@
 
 from ... import _legacy_response
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from ..._utils import maybe_transform, async_maybe_transform
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/beta/realtime/sessions.py b/src/openai/resources/beta/realtime/sessions.py
index 3e1c956fe4..3c0d4d47c1 100644
--- a/src/openai/resources/beta/realtime/sessions.py
+++ b/src/openai/resources/beta/realtime/sessions.py
@@ -9,10 +9,7 @@
 
 from .... import _legacy_response
 from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from ...._utils import maybe_transform, async_maybe_transform
 from ...._compat import cached_property
 from ...._resource import SyncAPIResource, AsyncAPIResource
 from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/beta/realtime/transcription_sessions.py b/src/openai/resources/beta/realtime/transcription_sessions.py
index 0917da71fa..dbcb1bb33b 100644
--- a/src/openai/resources/beta/realtime/transcription_sessions.py
+++ b/src/openai/resources/beta/realtime/transcription_sessions.py
@@ -9,10 +9,7 @@
 
 from .... import _legacy_response
 from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from ...._utils import maybe_transform, async_maybe_transform
 from ...._compat import cached_property
 from ...._resource import SyncAPIResource, AsyncAPIResource
 from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/beta/threads/messages.py b/src/openai/resources/beta/threads/messages.py
index e3374aba37..3a8913ef16 100644
--- a/src/openai/resources/beta/threads/messages.py
+++ b/src/openai/resources/beta/threads/messages.py
@@ -9,10 +9,7 @@
 
 from .... import _legacy_response
 from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from ...._utils import maybe_transform, async_maybe_transform
 from ...._compat import cached_property
 from ...._resource import SyncAPIResource, AsyncAPIResource
 from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/beta/threads/runs/steps.py b/src/openai/resources/beta/threads/runs/steps.py
index 709c729d45..3d2148687b 100644
--- a/src/openai/resources/beta/threads/runs/steps.py
+++ b/src/openai/resources/beta/threads/runs/steps.py
@@ -9,10 +9,7 @@
 
 from ..... import _legacy_response
 from ....._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ....._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from ....._utils import maybe_transform, async_maybe_transform
 from ....._compat import cached_property
 from ....._resource import SyncAPIResource, AsyncAPIResource
 from ....._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py
index c697be416d..9c6954a9b3 100644
--- a/src/openai/resources/beta/threads/threads.py
+++ b/src/openai/resources/beta/threads/threads.py
@@ -18,11 +18,7 @@
     AsyncMessagesWithStreamingResponse,
 )
 from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import (
-    required_args,
-    maybe_transform,
-    async_maybe_transform,
-)
+from ...._utils import required_args, maybe_transform, async_maybe_transform
 from .runs.runs import (
     Runs,
     AsyncRuns,
diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py
index d6214225d8..0ab105a389 100644
--- a/src/openai/resources/chat/completions/completions.py
+++ b/src/openai/resources/chat/completions/completions.py
@@ -19,11 +19,7 @@
     AsyncMessagesWithStreamingResponse,
 )
 from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import (
-    required_args,
-    maybe_transform,
-    async_maybe_transform,
-)
+from ...._utils import required_args, maybe_transform, async_maybe_transform
 from ...._compat import cached_property
 from ...._resource import SyncAPIResource, AsyncAPIResource
 from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/completions.py b/src/openai/resources/completions.py
index aebf35d1f1..43b923b9b9 100644
--- a/src/openai/resources/completions.py
+++ b/src/openai/resources/completions.py
@@ -10,11 +10,7 @@
 from .. import _legacy_response
 from ..types import completion_create_params
 from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import (
-    required_args,
-    maybe_transform,
-    async_maybe_transform,
-)
+from .._utils import required_args, maybe_transform, async_maybe_transform
 from .._compat import cached_property
 from .._resource import SyncAPIResource, AsyncAPIResource
 from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/evals/evals.py b/src/openai/resources/evals/evals.py
index 24a0350cfb..30ac4bdf32 100644
--- a/src/openai/resources/evals/evals.py
+++ b/src/openai/resources/evals/evals.py
@@ -10,10 +10,7 @@
 from ... import _legacy_response
 from ...types import eval_list_params, eval_create_params, eval_update_params
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from ..._utils import maybe_transform, async_maybe_transform
 from ..._compat import cached_property
 from .runs.runs import (
     Runs,
diff --git a/src/openai/resources/evals/runs/runs.py b/src/openai/resources/evals/runs/runs.py
index 6df0b6d121..9c626d0903 100644
--- a/src/openai/resources/evals/runs/runs.py
+++ b/src/openai/resources/evals/runs/runs.py
@@ -9,10 +9,7 @@
 
 from .... import _legacy_response
 from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from ...._utils import maybe_transform, async_maybe_transform
 from ...._compat import cached_property
 from ...._resource import SyncAPIResource, AsyncAPIResource
 from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/files.py b/src/openai/resources/files.py
index 2eaa4a6401..179af870ba 100644
--- a/src/openai/resources/files.py
+++ b/src/openai/resources/files.py
@@ -12,12 +12,7 @@
 from .. import _legacy_response
 from ..types import FilePurpose, file_list_params, file_create_params
 from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from .._utils import (
-    extract_files,
-    maybe_transform,
-    deepcopy_minimal,
-    async_maybe_transform,
-)
+from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
 from .._compat import cached_property
 from .._resource import SyncAPIResource, AsyncAPIResource
 from .._response import (
diff --git a/src/openai/resources/fine_tuning/checkpoints/permissions.py b/src/openai/resources/fine_tuning/checkpoints/permissions.py
index beb7b099d3..b2bcb33020 100644
--- a/src/openai/resources/fine_tuning/checkpoints/permissions.py
+++ b/src/openai/resources/fine_tuning/checkpoints/permissions.py
@@ -9,10 +9,7 @@
 
 from .... import _legacy_response
 from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from ...._utils import maybe_transform, async_maybe_transform
 from ...._compat import cached_property
 from ...._resource import SyncAPIResource, AsyncAPIResource
 from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/fine_tuning/jobs/jobs.py b/src/openai/resources/fine_tuning/jobs/jobs.py
index bbeff60bc6..90619c8609 100644
--- a/src/openai/resources/fine_tuning/jobs/jobs.py
+++ b/src/openai/resources/fine_tuning/jobs/jobs.py
@@ -9,10 +9,7 @@
 
 from .... import _legacy_response
 from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ...._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from ...._utils import maybe_transform, async_maybe_transform
 from ...._compat import cached_property
 from .checkpoints import (
     Checkpoints,
diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py
index 30473c14f7..e3398930e9 100644
--- a/src/openai/resources/images.py
+++ b/src/openai/resources/images.py
@@ -10,12 +10,7 @@
 from .. import _legacy_response
 from ..types import image_edit_params, image_generate_params, image_create_variation_params
 from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from .._utils import (
-    extract_files,
-    maybe_transform,
-    deepcopy_minimal,
-    async_maybe_transform,
-)
+from .._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
 from .._compat import cached_property
 from .._resource import SyncAPIResource, AsyncAPIResource
 from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/moderations.py b/src/openai/resources/moderations.py
index a8f03142bc..f7a8b52c23 100644
--- a/src/openai/resources/moderations.py
+++ b/src/openai/resources/moderations.py
@@ -9,10 +9,7 @@
 from .. import _legacy_response
 from ..types import moderation_create_params
 from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from .._utils import maybe_transform, async_maybe_transform
 from .._compat import cached_property
 from .._resource import SyncAPIResource, AsyncAPIResource
 from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py
index f07b4d8c4a..4a0687f9f3 100644
--- a/src/openai/resources/responses/responses.py
+++ b/src/openai/resources/responses/responses.py
@@ -10,12 +10,7 @@
 
 from ... import _legacy_response
 from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from ..._utils import (
-    is_given,
-    required_args,
-    maybe_transform,
-    async_maybe_transform,
-)
+from ..._utils import is_given, required_args, maybe_transform, async_maybe_transform
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/uploads/parts.py b/src/openai/resources/uploads/parts.py
index 777469ac8e..a32f4eb1d2 100644
--- a/src/openai/resources/uploads/parts.py
+++ b/src/openai/resources/uploads/parts.py
@@ -8,12 +8,7 @@
 
 from ... import _legacy_response
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from ..._utils import (
-    extract_files,
-    maybe_transform,
-    deepcopy_minimal,
-    async_maybe_transform,
-)
+from ..._utils import extract_files, maybe_transform, deepcopy_minimal, async_maybe_transform
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/uploads/uploads.py b/src/openai/resources/uploads/uploads.py
index 9297dbc2c3..ecfcee4800 100644
--- a/src/openai/resources/uploads/uploads.py
+++ b/src/openai/resources/uploads/uploads.py
@@ -23,10 +23,7 @@
 )
 from ...types import FilePurpose, upload_create_params, upload_complete_params
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from ..._utils import maybe_transform, async_maybe_transform
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/vector_stores/file_batches.py b/src/openai/resources/vector_stores/file_batches.py
index 9b4b64d35e..4dd4430b71 100644
--- a/src/openai/resources/vector_stores/file_batches.py
+++ b/src/openai/resources/vector_stores/file_batches.py
@@ -13,11 +13,7 @@
 from ... import _legacy_response
 from ...types import FileChunkingStrategyParam
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from ..._utils import (
-    is_given,
-    maybe_transform,
-    async_maybe_transform,
-)
+from ..._utils import is_given, maybe_transform, async_maybe_transform
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/vector_stores/files.py b/src/openai/resources/vector_stores/files.py
index 7d93798adf..f860384629 100644
--- a/src/openai/resources/vector_stores/files.py
+++ b/src/openai/resources/vector_stores/files.py
@@ -10,11 +10,7 @@
 from ... import _legacy_response
 from ...types import FileChunkingStrategyParam
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, FileTypes
-from ..._utils import (
-    is_given,
-    maybe_transform,
-    async_maybe_transform,
-)
+from ..._utils import is_given, maybe_transform, async_maybe_transform
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
diff --git a/src/openai/resources/vector_stores/vector_stores.py b/src/openai/resources/vector_stores/vector_stores.py
index aaa6ed2757..9fc17b183b 100644
--- a/src/openai/resources/vector_stores/vector_stores.py
+++ b/src/openai/resources/vector_stores/vector_stores.py
@@ -24,10 +24,7 @@
     vector_store_update_params,
 )
 from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import (
-    maybe_transform,
-    async_maybe_transform,
-)
+from ..._utils import maybe_transform, async_maybe_transform
 from ..._compat import cached_property
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper

From da2113c60b50b4438459325fcd38d55df3f63d8e Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 22 Apr 2025 22:01:33 +0000
Subject: [PATCH 05/11] chore(internal): fix list file params

---
 src/openai/_utils/_utils.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
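
A sketch of the behaviour change using the repo's internal helper that
wraps `_extract_items` (the params and byte payloads here are made up
for illustration):

    from openai._utils import extract_files

    params = {"prompt": "a cat", "image": [b"png-bytes-1", b"png-bytes-2"]}

    # A list-valued file param no longer trips assert_is_file_content;
    # each entry is emitted as its own multipart item under "<key>[]".
    files = extract_files(params, paths=[["image"]])
    assert files == [("image[]", b"png-bytes-1"), ("image[]", b"png-bytes-2")]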

diff --git a/src/openai/_utils/_utils.py b/src/openai/_utils/_utils.py
index d6734e6b8f..1e7d013b51 100644
--- a/src/openai/_utils/_utils.py
+++ b/src/openai/_utils/_utils.py
@@ -76,8 +76,16 @@ def _extract_items(
         from .._files import assert_is_file_content
 
         # We have exhausted the path, return the entry we found.
-        assert_is_file_content(obj, key=flattened_key)
         assert flattened_key is not None
+
+        if is_list(obj):
+            files: list[tuple[str, FileTypes]] = []
+            for entry in obj:
+                assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "")
+                files.append((flattened_key + "[]", cast(FileTypes, entry)))
+            return files
+
+        assert_is_file_content(obj, key=flattened_key)
         return [(flattened_key, cast(FileTypes, obj))]
 
     index += 1

From 8cb8cfab48a4fed70a756ce50036e7e56e1f9f87 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 22 Apr 2025 22:53:21 +0000
Subject: [PATCH 06/11] chore(internal): refactor retries to not use recursion

---
 src/openai/_base_client.py | 417 ++++++++++++++++---------------------
 1 file changed, 177 insertions(+), 240 deletions(-)
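
The shape of the rewrite, reduced to a standalone sketch: a single flat
loop owns the retry budget and sleeps between attempts instead of
re-entering `_request` recursively, and the idempotency key is now fixed
on `input_options` once, before the loop, so every attempt reuses it.
The names and backoff numbers below are illustrative, not the SDK's:

    import random
    import time

    def send_with_retries(send, max_retries: int = 2):
        response = None
        for retries_taken in range(max_retries + 1):
            remaining = max_retries - retries_taken
            try:
                response = send()  # stand-in for httpx.Client.send(...)
            except TimeoutError:
                if remaining > 0:
                    # exponential backoff with jitter, then loop again
                    backoff = min(8.0, 0.5 * 2**retries_taken)
                    time.sleep(backoff * (0.75 + random.random() / 2))
                    continue
                raise
            break  # success: leave the loop with `response` bound
        assert response is not None, "could not resolve response"
        return response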

diff --git a/src/openai/_base_client.py b/src/openai/_base_client.py
index 8b43a20699..a0f9cce7d8 100644
--- a/src/openai/_base_client.py
+++ b/src/openai/_base_client.py
@@ -439,8 +439,7 @@ def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0
         headers = httpx.Headers(headers_dict)
 
         idempotency_header = self._idempotency_header
-        if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers:
-            options.idempotency_key = options.idempotency_key or self._idempotency_key()
+        if idempotency_header and options.idempotency_key and idempotency_header not in headers:
             headers[idempotency_header] = options.idempotency_key
 
         # Don't set these headers if they were already set or removed by the caller. We check
@@ -905,7 +904,6 @@ def request(
         self,
         cast_to: Type[ResponseT],
         options: FinalRequestOptions,
-        remaining_retries: Optional[int] = None,
         *,
         stream: Literal[True],
         stream_cls: Type[_StreamT],
@@ -916,7 +914,6 @@ def request(
         self,
         cast_to: Type[ResponseT],
         options: FinalRequestOptions,
-        remaining_retries: Optional[int] = None,
         *,
         stream: Literal[False] = False,
     ) -> ResponseT: ...
@@ -926,7 +923,6 @@ def request(
         self,
         cast_to: Type[ResponseT],
         options: FinalRequestOptions,
-        remaining_retries: Optional[int] = None,
         *,
         stream: bool = False,
         stream_cls: Type[_StreamT] | None = None,
@@ -936,126 +932,110 @@ def request(
         self,
         cast_to: Type[ResponseT],
         options: FinalRequestOptions,
-        remaining_retries: Optional[int] = None,
         *,
         stream: bool = False,
         stream_cls: type[_StreamT] | None = None,
     ) -> ResponseT | _StreamT:
-        if remaining_retries is not None:
-            retries_taken = options.get_max_retries(self.max_retries) - remaining_retries
-        else:
-            retries_taken = 0
-
-        return self._request(
-            cast_to=cast_to,
-            options=options,
-            stream=stream,
-            stream_cls=stream_cls,
-            retries_taken=retries_taken,
-        )
+        cast_to = self._maybe_override_cast_to(cast_to, options)
 
-    def _request(
-        self,
-        *,
-        cast_to: Type[ResponseT],
-        options: FinalRequestOptions,
-        retries_taken: int,
-        stream: bool,
-        stream_cls: type[_StreamT] | None,
-    ) -> ResponseT | _StreamT:
         # create a copy of the options we were given so that if the
         # options are mutated later & we then retry, the retries are
         # given the original options
         input_options = model_copy(options)
-
-        cast_to = self._maybe_override_cast_to(cast_to, options)
-        options = self._prepare_options(options)
-
-        remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
-        request = self._build_request(options, retries_taken=retries_taken)
-        self._prepare_request(request)
-
-        if options.idempotency_key:
+        if input_options.idempotency_key is None and input_options.method.lower() != "get":
             # ensure the idempotency key is reused between requests
-            input_options.idempotency_key = options.idempotency_key
+            input_options.idempotency_key = self._idempotency_key()
 
-        kwargs: HttpxSendArgs = {}
-        if self.custom_auth is not None:
-            kwargs["auth"] = self.custom_auth
+        response: httpx.Response | None = None
+        max_retries = input_options.get_max_retries(self.max_retries)
 
-        log.debug("Sending HTTP Request: %s %s", request.method, request.url)
+        retries_taken = 0
+        for retries_taken in range(max_retries + 1):
+            options = model_copy(input_options)
+            options = self._prepare_options(options)
 
-        try:
-            response = self._client.send(
-                request,
-                stream=stream or self._should_stream_response_body(request=request),
-                **kwargs,
-            )
-        except httpx.TimeoutException as err:
-            log.debug("Encountered httpx.TimeoutException", exc_info=True)
+            remaining_retries = max_retries - retries_taken
+            request = self._build_request(options, retries_taken=retries_taken)
+            self._prepare_request(request)
 
-            if remaining_retries > 0:
-                return self._retry_request(
-                    input_options,
-                    cast_to,
-                    retries_taken=retries_taken,
-                    stream=stream,
-                    stream_cls=stream_cls,
-                    response_headers=None,
-                )
+            kwargs: HttpxSendArgs = {}
+            if self.custom_auth is not None:
+                kwargs["auth"] = self.custom_auth
 
-            log.debug("Raising timeout error")
-            raise APITimeoutError(request=request) from err
-        except Exception as err:
-            log.debug("Encountered Exception", exc_info=True)
+            log.debug("Sending HTTP Request: %s %s", request.method, request.url)
 
-            if remaining_retries > 0:
-                return self._retry_request(
-                    input_options,
-                    cast_to,
-                    retries_taken=retries_taken,
-                    stream=stream,
-                    stream_cls=stream_cls,
-                    response_headers=None,
+            response = None
+            try:
+                response = self._client.send(
+                    request,
+                    stream=stream or self._should_stream_response_body(request=request),
+                    **kwargs,
                 )
+            except httpx.TimeoutException as err:
+                log.debug("Encountered httpx.TimeoutException", exc_info=True)
+
+                if remaining_retries > 0:
+                    self._sleep_for_retry(
+                        retries_taken=retries_taken,
+                        max_retries=max_retries,
+                        options=input_options,
+                        response=None,
+                    )
+                    continue
+
+                log.debug("Raising timeout error")
+                raise APITimeoutError(request=request) from err
+            except Exception as err:
+                log.debug("Encountered Exception", exc_info=True)
+
+                if remaining_retries > 0:
+                    self._sleep_for_retry(
+                        retries_taken=retries_taken,
+                        max_retries=max_retries,
+                        options=input_options,
+                        response=None,
+                    )
+                    continue
+
+                log.debug("Raising connection error")
+                raise APIConnectionError(request=request) from err
+
+            log.debug(
+                'HTTP Response: %s %s "%i %s" %s',
+                request.method,
+                request.url,
+                response.status_code,
+                response.reason_phrase,
+                response.headers,
+            )
+            log.debug("request_id: %s", response.headers.get("x-request-id"))
 
-            log.debug("Raising connection error")
-            raise APIConnectionError(request=request) from err
-
-        log.debug(
-            'HTTP Response: %s %s "%i %s" %s',
-            request.method,
-            request.url,
-            response.status_code,
-            response.reason_phrase,
-            response.headers,
-        )
-        log.debug("request_id: %s", response.headers.get("x-request-id"))
+            try:
+                response.raise_for_status()
+            except httpx.HTTPStatusError as err:  # thrown on 4xx and 5xx status code
+                log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
+
+                if remaining_retries > 0 and self._should_retry(err.response):
+                    err.response.close()
+                    self._sleep_for_retry(
+                        retries_taken=retries_taken,
+                        max_retries=max_retries,
+                        options=input_options,
+                        response=response,
+                    )
+                    continue
 
-        try:
-            response.raise_for_status()
-        except httpx.HTTPStatusError as err:  # thrown on 4xx and 5xx status code
-            log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
-
-            if remaining_retries > 0 and self._should_retry(err.response):
-                err.response.close()
-                return self._retry_request(
-                    input_options,
-                    cast_to,
-                    retries_taken=retries_taken,
-                    response_headers=err.response.headers,
-                    stream=stream,
-                    stream_cls=stream_cls,
-                )
+                # If the response is streamed then we need to explicitly read the response
+                # to completion before attempting to access the response text.
+                if not err.response.is_closed:
+                    err.response.read()
 
-            # If the response is streamed then we need to explicitly read the response
-            # to completion before attempting to access the response text.
-            if not err.response.is_closed:
-                err.response.read()
+                log.debug("Re-raising status error")
+                raise self._make_status_error_from_response(err.response) from None
 
-            log.debug("Re-raising status error")
-            raise self._make_status_error_from_response(err.response) from None
+            break
 
+        assert response is not None, "could not resolve response (should never happen)"
         return self._process_response(
             cast_to=cast_to,
             options=options,
@@ -1065,37 +1045,20 @@ def _request(
             retries_taken=retries_taken,
         )
 
-    def _retry_request(
-        self,
-        options: FinalRequestOptions,
-        cast_to: Type[ResponseT],
-        *,
-        retries_taken: int,
-        response_headers: httpx.Headers | None,
-        stream: bool,
-        stream_cls: type[_StreamT] | None,
-    ) -> ResponseT | _StreamT:
-        remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
+    def _sleep_for_retry(
+        self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None
+    ) -> None:
+        remaining_retries = max_retries - retries_taken
         if remaining_retries == 1:
             log.debug("1 retry left")
         else:
             log.debug("%i retries left", remaining_retries)
 
-        timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers)
+        timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None)
         log.info("Retrying request to %s in %f seconds", options.url, timeout)
 
-        # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
-        # different thread if necessary.
         time.sleep(timeout)
 
-        return self._request(
-            options=options,
-            cast_to=cast_to,
-            retries_taken=retries_taken + 1,
-            stream=stream,
-            stream_cls=stream_cls,
-        )
-
     def _process_response(
         self,
         *,
@@ -1453,7 +1416,6 @@ async def request(
         options: FinalRequestOptions,
         *,
         stream: Literal[False] = False,
-        remaining_retries: Optional[int] = None,
     ) -> ResponseT: ...
 
     @overload
@@ -1464,7 +1426,6 @@ async def request(
         *,
         stream: Literal[True],
         stream_cls: type[_AsyncStreamT],
-        remaining_retries: Optional[int] = None,
     ) -> _AsyncStreamT: ...
 
     @overload
@@ -1475,7 +1436,6 @@ async def request(
         *,
         stream: bool,
         stream_cls: type[_AsyncStreamT] | None = None,
-        remaining_retries: Optional[int] = None,
     ) -> ResponseT | _AsyncStreamT: ...
 
     async def request(
@@ -1485,120 +1445,112 @@ async def request(
         *,
         stream: bool = False,
         stream_cls: type[_AsyncStreamT] | None = None,
-        remaining_retries: Optional[int] = None,
-    ) -> ResponseT | _AsyncStreamT:
-        if remaining_retries is not None:
-            retries_taken = options.get_max_retries(self.max_retries) - remaining_retries
-        else:
-            retries_taken = 0
-
-        return await self._request(
-            cast_to=cast_to,
-            options=options,
-            stream=stream,
-            stream_cls=stream_cls,
-            retries_taken=retries_taken,
-        )
-
-    async def _request(
-        self,
-        cast_to: Type[ResponseT],
-        options: FinalRequestOptions,
-        *,
-        stream: bool,
-        stream_cls: type[_AsyncStreamT] | None,
-        retries_taken: int,
     ) -> ResponseT | _AsyncStreamT:
         if self._platform is None:
             # `get_platform` can make blocking IO calls so we
             # execute it earlier while we are in an async context
             self._platform = await asyncify(get_platform)()
 
+        cast_to = self._maybe_override_cast_to(cast_to, options)
+
         # create a copy of the options we were given so that if the
         # options are mutated later & we then retry, the retries are
         # given the original options
         input_options = model_copy(options)
-
-        cast_to = self._maybe_override_cast_to(cast_to, options)
-        options = await self._prepare_options(options)
-
-        remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
-        request = self._build_request(options, retries_taken=retries_taken)
-        await self._prepare_request(request)
-
-        if options.idempotency_key:
+        if input_options.idempotency_key is None and input_options.method.lower() != "get":
             # ensure the idempotency key is reused between requests
-            input_options.idempotency_key = options.idempotency_key
+            input_options.idempotency_key = self._idempotency_key()
 
-        kwargs: HttpxSendArgs = {}
-        if self.custom_auth is not None:
-            kwargs["auth"] = self.custom_auth
+        response: httpx.Response | None = None
+        max_retries = input_options.get_max_retries(self.max_retries)
 
-        try:
-            response = await self._client.send(
-                request,
-                stream=stream or self._should_stream_response_body(request=request),
-                **kwargs,
-            )
-        except httpx.TimeoutException as err:
-            log.debug("Encountered httpx.TimeoutException", exc_info=True)
+        retries_taken = 0
+        for retries_taken in range(max_retries + 1):
+            options = model_copy(input_options)
+            options = await self._prepare_options(options)
 
-            if remaining_retries > 0:
-                return await self._retry_request(
-                    input_options,
-                    cast_to,
-                    retries_taken=retries_taken,
-                    stream=stream,
-                    stream_cls=stream_cls,
-                    response_headers=None,
-                )
+            remaining_retries = max_retries - retries_taken
+            request = self._build_request(options, retries_taken=retries_taken)
+            await self._prepare_request(request)
 
-            log.debug("Raising timeout error")
-            raise APITimeoutError(request=request) from err
-        except Exception as err:
-            log.debug("Encountered Exception", exc_info=True)
+            kwargs: HttpxSendArgs = {}
+            if self.custom_auth is not None:
+                kwargs["auth"] = self.custom_auth
 
-            if remaining_retries > 0:
-                return await self._retry_request(
-                    input_options,
-                    cast_to,
-                    retries_taken=retries_taken,
-                    stream=stream,
-                    stream_cls=stream_cls,
-                    response_headers=None,
-                )
+            log.debug("Sending HTTP Request: %s %s", request.method, request.url)
 
-            log.debug("Raising connection error")
-            raise APIConnectionError(request=request) from err
+            response = None
+            try:
+                response = await self._client.send(
+                    request,
+                    stream=stream or self._should_stream_response_body(request=request),
+                    **kwargs,
+                )
+            except httpx.TimeoutException as err:
+                log.debug("Encountered httpx.TimeoutException", exc_info=True)
+
+                if remaining_retries > 0:
+                    await self._sleep_for_retry(
+                        retries_taken=retries_taken,
+                        max_retries=max_retries,
+                        options=input_options,
+                        response=None,
+                    )
+                    continue
+
+                log.debug("Raising timeout error")
+                raise APITimeoutError(request=request) from err
+            except Exception as err:
+                log.debug("Encountered Exception", exc_info=True)
+
+                if remaining_retries > 0:
+                    await self._sleep_for_retry(
+                        retries_taken=retries_taken,
+                        max_retries=max_retries,
+                        options=input_options,
+                        response=None,
+                    )
+                    continue
+
+                log.debug("Raising connection error")
+                raise APIConnectionError(request=request) from err
+
+            log.debug(
+                'HTTP Response: %s %s "%i %s" %s',
+                request.method,
+                request.url,
+                response.status_code,
+                response.reason_phrase,
+                response.headers,
+            )
+            log.debug("request_id: %s", response.headers.get("x-request-id"))
 
-        log.debug(
-            'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase
-        )
+            try:
+                response.raise_for_status()
+            except httpx.HTTPStatusError as err:  # thrown on 4xx and 5xx status code
+                log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
+
+                if remaining_retries > 0 and self._should_retry(err.response):
+                    await err.response.aclose()
+                    await self._sleep_for_retry(
+                        retries_taken=retries_taken,
+                        max_retries=max_retries,
+                        options=input_options,
+                        response=response,
+                    )
+                    continue
 
-        try:
-            response.raise_for_status()
-        except httpx.HTTPStatusError as err:  # thrown on 4xx and 5xx status code
-            log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
-
-            if remaining_retries > 0 and self._should_retry(err.response):
-                await err.response.aclose()
-                return await self._retry_request(
-                    input_options,
-                    cast_to,
-                    retries_taken=retries_taken,
-                    response_headers=err.response.headers,
-                    stream=stream,
-                    stream_cls=stream_cls,
-                )
+                # If the response is streamed then we need to explicitly read the response
+                # to completion before attempting to access the response text.
+                if not err.response.is_closed:
+                    await err.response.aread()
 
-            # If the response is streamed then we need to explicitly read the response
-            # to completion before attempting to access the response text.
-            if not err.response.is_closed:
-                await err.response.aread()
+                log.debug("Re-raising status error")
+                raise self._make_status_error_from_response(err.response) from None
 
-            log.debug("Re-raising status error")
-            raise self._make_status_error_from_response(err.response) from None
+            break
 
+        assert response is not None, "could not resolve response (should never happen)"
         return await self._process_response(
             cast_to=cast_to,
             options=options,
@@ -1608,35 +1560,20 @@ async def _request(
             retries_taken=retries_taken,
         )
 
-    async def _retry_request(
-        self,
-        options: FinalRequestOptions,
-        cast_to: Type[ResponseT],
-        *,
-        retries_taken: int,
-        response_headers: httpx.Headers | None,
-        stream: bool,
-        stream_cls: type[_AsyncStreamT] | None,
-    ) -> ResponseT | _AsyncStreamT:
-        remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
+    async def _sleep_for_retry(
+        self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None
+    ) -> None:
+        remaining_retries = max_retries - retries_taken
         if remaining_retries == 1:
             log.debug("1 retry left")
         else:
             log.debug("%i retries left", remaining_retries)
 
-        timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers)
+        timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None)
         log.info("Retrying request to %s in %f seconds", options.url, timeout)
 
         await anyio.sleep(timeout)
 
-        return await self._request(
-            options=options,
-            cast_to=cast_to,
-            retries_taken=retries_taken + 1,
-            stream=stream,
-            stream_cls=stream_cls,
-        )
-
     async def _process_response(
         self,
         *,

From 7351b12bc981f56632b92342d9ef26f6fb28d540 Mon Sep 17 00:00:00 2001
From: Konnor-Young <97478325+Konnor-Young@users.noreply.github.com>
Date: Tue, 22 Apr 2025 17:24:20 -0600
Subject: [PATCH 07/11] fix(pydantic v1): more robust `ModelField.annotation`
 check (#2163)

---------

Co-authored-by: Konnor Young <konnoryoung@ip-192-168-1-39.us-west-1.compute.internal>
Co-authored-by: Robert Craigie <robert@craigie.dev>
---
 src/openai/_models.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
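
The defensive pattern in isolation: pydantic v1's `ModelField` does not
reliably expose an `annotation` attribute the way v2's `FieldInfo` does,
so the lookup is probed with `getattr` before the `Literal` check. A
self-contained sketch (the helper name is hypothetical):

    from typing import Any, Literal, get_args, get_origin

    def literal_entries(field_info: Any) -> "list[str]":
        # tolerate field objects that have no `annotation` attribute
        annotation = getattr(field_info, "annotation", None)
        if annotation is None or get_origin(annotation) is not Literal:
            return []
        return [entry for entry in get_args(annotation) if isinstance(entry, str)]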

diff --git a/src/openai/_models.py b/src/openai/_models.py
index 9b1aeb30bf..6b6f8e9294 100644
--- a/src/openai/_models.py
+++ b/src/openai/_models.py
@@ -651,8 +651,8 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,
                 # Note: if one variant defines an alias then they all should
                 discriminator_alias = field_info.alias
 
-                if field_info.annotation and is_literal_type(field_info.annotation):
-                    for entry in get_args(field_info.annotation):
+                if (annotation := getattr(field_info, 'annotation', None)) and is_literal_type(annotation):
+                    for entry in get_args(annotation):
                         if isinstance(entry, str):
                             mapping[entry] = variant
 

From eba7856db55afb8cb44376a0248587549f7bc65f Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 23 Apr 2025 12:00:43 +0000
Subject: [PATCH 08/11] fix(pydantic v1): more robust ModelField.annotation
 check

---
 src/openai/_models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/openai/_models.py b/src/openai/_models.py
index 6b6f8e9294..e2fce49250 100644
--- a/src/openai/_models.py
+++ b/src/openai/_models.py
@@ -651,7 +651,7 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,
                 # Note: if one variant defines an alias then they all should
                 discriminator_alias = field_info.alias
 
-                if (annotation := getattr(field_info, 'annotation', None)) and is_literal_type(annotation):
+                if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
                     for entry in get_args(annotation):
                         if isinstance(entry, str):
                             mapping[entry] = variant

From aed1d767898324cf90328db329e04e89a77579c3 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 23 Apr 2025 16:20:27 +0000
Subject: [PATCH 09/11] chore(internal): minor formatting changes

---
 src/openai/types/audio/transcription_word.py                   | 1 -
 src/openai/types/audio/translation.py                          | 1 -
 src/openai/types/batch_request_counts.py                       | 1 -
 src/openai/types/beta/assistant_tool_choice_function.py        | 1 -
 src/openai/types/chat/chat_completion_audio.py                 | 1 -
 src/openai/types/chat/chat_completion_reasoning_effort.py      | 1 -
 src/openai/types/chat/chat_completion_store_message.py         | 1 -
 src/openai/types/chat_model.py                                 | 1 -
 src/openai/types/eval_delete_response.py                       | 1 -
 src/openai/types/evals/eval_api_error.py                       | 1 -
 src/openai/types/fine_tuning/fine_tuning_job_integration.py    | 1 -
 src/openai/types/model_deleted.py                              | 1 -
 src/openai/types/responses/response_function_tool_call_item.py | 1 -
 src/openai/types/responses/response_usage.py                   | 1 -
 src/openai/types/static_file_chunking_strategy.py              | 1 -
 15 files changed, 15 deletions(-)

diff --git a/src/openai/types/audio/transcription_word.py b/src/openai/types/audio/transcription_word.py
index 969da32509..2ce682f957 100644
--- a/src/openai/types/audio/transcription_word.py
+++ b/src/openai/types/audio/transcription_word.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from ..._models import BaseModel
 
 __all__ = ["TranscriptionWord"]
diff --git a/src/openai/types/audio/translation.py b/src/openai/types/audio/translation.py
index 7c0e905189..efc56f7f9b 100644
--- a/src/openai/types/audio/translation.py
+++ b/src/openai/types/audio/translation.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from ..._models import BaseModel
 
 __all__ = ["Translation"]
diff --git a/src/openai/types/batch_request_counts.py b/src/openai/types/batch_request_counts.py
index 7e1d49fb88..068b071af1 100644
--- a/src/openai/types/batch_request_counts.py
+++ b/src/openai/types/batch_request_counts.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from .._models import BaseModel
 
 __all__ = ["BatchRequestCounts"]
diff --git a/src/openai/types/beta/assistant_tool_choice_function.py b/src/openai/types/beta/assistant_tool_choice_function.py
index 0c896d8087..87f38310ca 100644
--- a/src/openai/types/beta/assistant_tool_choice_function.py
+++ b/src/openai/types/beta/assistant_tool_choice_function.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from ..._models import BaseModel
 
 __all__ = ["AssistantToolChoiceFunction"]
diff --git a/src/openai/types/chat/chat_completion_audio.py b/src/openai/types/chat/chat_completion_audio.py
index dd15508ebb..232d60563d 100644
--- a/src/openai/types/chat/chat_completion_audio.py
+++ b/src/openai/types/chat/chat_completion_audio.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from ..._models import BaseModel
 
 __all__ = ["ChatCompletionAudio"]
diff --git a/src/openai/types/chat/chat_completion_reasoning_effort.py b/src/openai/types/chat/chat_completion_reasoning_effort.py
index e4785c90bf..42a980c5b8 100644
--- a/src/openai/types/chat/chat_completion_reasoning_effort.py
+++ b/src/openai/types/chat/chat_completion_reasoning_effort.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from ..shared.reasoning_effort import ReasoningEffort
 
 __all__ = ["ChatCompletionReasoningEffort"]
diff --git a/src/openai/types/chat/chat_completion_store_message.py b/src/openai/types/chat/chat_completion_store_message.py
index 95adc08af8..8dc093f7b8 100644
--- a/src/openai/types/chat/chat_completion_store_message.py
+++ b/src/openai/types/chat/chat_completion_store_message.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from .chat_completion_message import ChatCompletionMessage
 
 __all__ = ["ChatCompletionStoreMessage"]
diff --git a/src/openai/types/chat_model.py b/src/openai/types/chat_model.py
index 9304d195d6..f3b0e310cc 100644
--- a/src/openai/types/chat_model.py
+++ b/src/openai/types/chat_model.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from .shared import chat_model
 
 __all__ = ["ChatModel"]
diff --git a/src/openai/types/eval_delete_response.py b/src/openai/types/eval_delete_response.py
index adb460ddbb..a27261e242 100644
--- a/src/openai/types/eval_delete_response.py
+++ b/src/openai/types/eval_delete_response.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from .._models import BaseModel
 
 __all__ = ["EvalDeleteResponse"]
diff --git a/src/openai/types/evals/eval_api_error.py b/src/openai/types/evals/eval_api_error.py
index d67185e981..fe76871024 100644
--- a/src/openai/types/evals/eval_api_error.py
+++ b/src/openai/types/evals/eval_api_error.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from ..._models import BaseModel
 
 __all__ = ["EvalAPIError"]
diff --git a/src/openai/types/fine_tuning/fine_tuning_job_integration.py b/src/openai/types/fine_tuning/fine_tuning_job_integration.py
index 9a66aa4f17..2af73fbffb 100644
--- a/src/openai/types/fine_tuning/fine_tuning_job_integration.py
+++ b/src/openai/types/fine_tuning/fine_tuning_job_integration.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from .fine_tuning_job_wandb_integration_object import FineTuningJobWandbIntegrationObject
 
 FineTuningJobIntegration = FineTuningJobWandbIntegrationObject
diff --git a/src/openai/types/model_deleted.py b/src/openai/types/model_deleted.py
index 7f81e1b380..e7601f74e4 100644
--- a/src/openai/types/model_deleted.py
+++ b/src/openai/types/model_deleted.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from .._models import BaseModel
 
 __all__ = ["ModelDeleted"]
diff --git a/src/openai/types/responses/response_function_tool_call_item.py b/src/openai/types/responses/response_function_tool_call_item.py
index 25984f9451..762015a4b1 100644
--- a/src/openai/types/responses/response_function_tool_call_item.py
+++ b/src/openai/types/responses/response_function_tool_call_item.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from .response_function_tool_call import ResponseFunctionToolCall
 
 __all__ = ["ResponseFunctionToolCallItem"]
diff --git a/src/openai/types/responses/response_usage.py b/src/openai/types/responses/response_usage.py
index 9ad36bd326..52b93ac578 100644
--- a/src/openai/types/responses/response_usage.py
+++ b/src/openai/types/responses/response_usage.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from ..._models import BaseModel
 
 __all__ = ["ResponseUsage", "InputTokensDetails", "OutputTokensDetails"]
diff --git a/src/openai/types/static_file_chunking_strategy.py b/src/openai/types/static_file_chunking_strategy.py
index 2813bc6630..cb842442c1 100644
--- a/src/openai/types/static_file_chunking_strategy.py
+++ b/src/openai/types/static_file_chunking_strategy.py
@@ -1,6 +1,5 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-
 from .._models import BaseModel
 
 __all__ = ["StaticFileChunkingStrategy"]

From 74d7692e94c9dca96db8793809d75631c22dbb87 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 23 Apr 2025 16:29:55 +0000
Subject: [PATCH 10/11] feat(api): adding new image model support

---
 .stats.yml                                    |   6 +-
 api.md                                        |   6 +-
 .../resources/beta/realtime/realtime.py       |  34 +++
 src/openai/resources/beta/threads/threads.py  |  17 +-
 src/openai/resources/evals/evals.py           |   8 -
 src/openai/resources/evals/runs/runs.py       |   8 +-
 .../fine_tuning/checkpoints/permissions.py    |  14 +-
 src/openai/resources/images.py                | 248 +++++++++++++-----
 .../beta/realtime/realtime_client_event.py    |  17 +-
 .../realtime/realtime_client_event_param.py   |  14 +-
 .../beta/realtime/realtime_server_event.py    |  44 +++-
 .../beta/thread_create_and_run_params.py      |   9 +-
 src/openai/types/eval_create_params.py        | 166 ++++++++----
 src/openai/types/eval_create_response.py      |  96 ++++++-
 src/openai/types/eval_label_model_grader.py   |  57 ++--
 src/openai/types/eval_list_response.py        |  96 ++++++-
 src/openai/types/eval_retrieve_response.py    |  96 ++++++-
 .../types/eval_text_similarity_grader.py      |  16 +-
 .../eval_text_similarity_grader_param.py      |  16 +-
 src/openai/types/eval_update_response.py      |  96 ++++++-
 ...create_eval_completions_run_data_source.py | 165 ++++++------
 ..._eval_completions_run_data_source_param.py | 169 ++++++------
 src/openai/types/evals/run_cancel_response.py | 218 ++++++++++++++-
 src/openai/types/evals/run_create_params.py   | 222 +++++++++++++++-
 src/openai/types/evals/run_create_response.py | 218 ++++++++++++++-
 src/openai/types/evals/run_list_params.py     |   2 +-
 src/openai/types/evals/run_list_response.py   | 218 ++++++++++++++-
 .../types/evals/run_retrieve_response.py      | 218 ++++++++++++++-
 src/openai/types/image.py                     |  18 +-
 .../types/image_create_variation_params.py    |   5 +-
 src/openai/types/image_edit_params.py         |  37 ++-
 src/openai/types/image_generate_params.py     |  74 ++++--
 src/openai/types/image_model.py               |   2 +-
 src/openai/types/images_response.py           |  33 ++-
 src/openai/types/responses/__init__.py        |  13 +
 .../types/responses/easy_input_message.py     |  26 ++
 ...onse_reasoning_summary_part_added_event.py |  32 +++
 ...ponse_reasoning_summary_part_done_event.py |  32 +++
 ...onse_reasoning_summary_text_delta_event.py |  24 ++
 ...ponse_reasoning_summary_text_done_event.py |  24 ++
 .../types/responses/response_stream_event.py  |   8 +
 .../checkpoints/test_permissions.py           |  44 ++--
 tests/api_resources/test_evals.py             |   2 -
 tests/api_resources/test_images.py            |  14 +-
 44 files changed, 2367 insertions(+), 515 deletions(-)
 create mode 100644 src/openai/types/responses/easy_input_message.py
 create mode 100644 src/openai/types/responses/response_reasoning_summary_part_added_event.py
 create mode 100644 src/openai/types/responses/response_reasoning_summary_part_done_event.py
 create mode 100644 src/openai/types/responses/response_reasoning_summary_text_delta_event.py
 create mode 100644 src/openai/types/responses/response_reasoning_summary_text_done_event.py

diff --git a/.stats.yml b/.stats.yml
index 848c5b5adb..d92408173b 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 97
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5633633cc38734869cf7d993f7b549bb8e4d10e0ec45381ec2cd91507cd8eb8f.yml
-openapi_spec_hash: c855121b2b2324b99499c9244c21d24d
-config_hash: d20837393b73efdb19cd08e04c1cc9a1
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-8b68ae6b807dca92e914da1dd9e835a20f69b075e79102a264367fd7fddddb33.yml
+openapi_spec_hash: b6ade5b1a6327339e6669e1134de2d03
+config_hash: b597cd9a31e9e5ec709e2eefb4c54122
diff --git a/api.md b/api.md
index e06f55c2cc..d04c76960e 100644
--- a/api.md
+++ b/api.md
@@ -277,7 +277,7 @@ Methods:
 
 - <code title="post /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">create</a>(fine_tuned_model_checkpoint, \*\*<a href="src/openai/types/fine_tuning/checkpoints/permission_create_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_create_response.py">SyncPage[PermissionCreateResponse]</a></code>
 - <code title="get /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">retrieve</a>(fine_tuned_model_checkpoint, \*\*<a href="src/openai/types/fine_tuning/checkpoints/permission_retrieve_params.py">params</a>) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_retrieve_response.py">PermissionRetrieveResponse</a></code>
-- <code title="delete /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">delete</a>(fine_tuned_model_checkpoint) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_delete_response.py">PermissionDeleteResponse</a></code>
+- <code title="delete /fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}">client.fine_tuning.checkpoints.permissions.<a href="./src/openai/resources/fine_tuning/checkpoints/permissions.py">delete</a>(permission_id, \*, fine_tuned_model_checkpoint) -> <a href="./src/openai/types/fine_tuning/checkpoints/permission_delete_response.py">PermissionDeleteResponse</a></code>
 
 # VectorStores
 
@@ -689,6 +689,10 @@ from openai.types.responses import (
     ResponseOutputRefusal,
     ResponseOutputText,
     ResponseReasoningItem,
+    ResponseReasoningSummaryPartAddedEvent,
+    ResponseReasoningSummaryPartDoneEvent,
+    ResponseReasoningSummaryTextDeltaEvent,
+    ResponseReasoningSummaryTextDoneEvent,
     ResponseRefusalDeltaEvent,
     ResponseRefusalDoneEvent,
     ResponseStatus,
diff --git a/src/openai/resources/beta/realtime/realtime.py b/src/openai/resources/beta/realtime/realtime.py
index 5cafce1322..d39db48e05 100644
--- a/src/openai/resources/beta/realtime/realtime.py
+++ b/src/openai/resources/beta/realtime/realtime.py
@@ -233,6 +233,7 @@ class AsyncRealtimeConnection:
     response: AsyncRealtimeResponseResource
     input_audio_buffer: AsyncRealtimeInputAudioBufferResource
     conversation: AsyncRealtimeConversationResource
+    output_audio_buffer: AsyncRealtimeOutputAudioBufferResource
     transcription_session: AsyncRealtimeTranscriptionSessionResource
 
     _connection: AsyncWebsocketConnection
@@ -244,6 +245,7 @@ def __init__(self, connection: AsyncWebsocketConnection) -> None:
         self.response = AsyncRealtimeResponseResource(self)
         self.input_audio_buffer = AsyncRealtimeInputAudioBufferResource(self)
         self.conversation = AsyncRealtimeConversationResource(self)
+        self.output_audio_buffer = AsyncRealtimeOutputAudioBufferResource(self)
         self.transcription_session = AsyncRealtimeTranscriptionSessionResource(self)
 
     async def __aiter__(self) -> AsyncIterator[RealtimeServerEvent]:
@@ -413,6 +415,7 @@ class RealtimeConnection:
     response: RealtimeResponseResource
     input_audio_buffer: RealtimeInputAudioBufferResource
     conversation: RealtimeConversationResource
+    output_audio_buffer: RealtimeOutputAudioBufferResource
     transcription_session: RealtimeTranscriptionSessionResource
 
     _connection: WebsocketConnection
@@ -424,6 +427,7 @@ def __init__(self, connection: WebsocketConnection) -> None:
         self.response = RealtimeResponseResource(self)
         self.input_audio_buffer = RealtimeInputAudioBufferResource(self)
         self.conversation = RealtimeConversationResource(self)
+        self.output_audio_buffer = RealtimeOutputAudioBufferResource(self)
         self.transcription_session = RealtimeTranscriptionSessionResource(self)
 
     def __iter__(self) -> Iterator[RealtimeServerEvent]:
@@ -808,6 +812,21 @@ def retrieve(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN) -> Non
         )
 
 
+class RealtimeOutputAudioBufferResource(BaseRealtimeConnectionResource):
+    def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """**WebRTC Only:** Emit to cut off the current audio response.
+
+        This will trigger the server to
+    stop generating audio and emit an `output_audio_buffer.cleared` event. This
+        event should be preceded by a `response.cancel` client event to stop the
+        generation of the current response.
+        [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc).
+        """
+        self._connection.send(
+            cast(RealtimeClientEventParam, strip_not_given({"type": "output_audio_buffer.clear", "event_id": event_id}))
+        )
+
+
 class RealtimeTranscriptionSessionResource(BaseRealtimeConnectionResource):
     def update(
         self, *, session: transcription_session_update_param.Session, event_id: str | NotGiven = NOT_GIVEN
@@ -1045,6 +1064,21 @@ async def retrieve(self, *, item_id: str, event_id: str | NotGiven = NOT_GIVEN)
         )
 
 
+class AsyncRealtimeOutputAudioBufferResource(BaseAsyncRealtimeConnectionResource):
+    async def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
+        """**WebRTC Only:** Emit to cut off the current audio response.
+
+        This will trigger the server to
+    stop generating audio and emit an `output_audio_buffer.cleared` event. This
+        event should be preceded by a `response.cancel` client event to stop the
+        generation of the current response.
+        [Learn more](https://platform.openai.com/docs/guides/realtime-model-capabilities#client-and-server-events-for-audio-in-webrtc).
+        """
+        await self._connection.send(
+            cast(RealtimeClientEventParam, strip_not_given({"type": "output_audio_buffer.clear", "event_id": event_id}))
+        )
+
+
 class AsyncRealtimeTranscriptionSessionResource(BaseAsyncRealtimeConnectionResource):
     async def update(
         self, *, session: transcription_session_update_param.Session, event_id: str | NotGiven = NOT_GIVEN
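
For reviewers, a minimal sketch of how the new `output_audio_buffer` resource added above is reached. The event is documented as WebRTC-only; it is shown here over the SDK's websocket connection purely to illustrate the new resource, and the model name is a placeholder:

from openai import OpenAI

client = OpenAI()

with client.beta.realtime.connect(model="gpt-4o-realtime-preview") as connection:
    connection.response.create()
    # Per the docstring, a `response.cancel` should precede the buffer clear.
    connection.response.cancel()
    connection.output_audio_buffer.clear()
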
diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py
index 9c6954a9b3..22dc5fe0ea 100644
--- a/src/openai/resources/beta/threads/threads.py
+++ b/src/openai/resources/beta/threads/threads.py
@@ -50,6 +50,7 @@
 from ....types.shared.chat_model import ChatModel
 from ....types.beta.thread_deleted import ThreadDeleted
 from ....types.shared_params.metadata import Metadata
+from ....types.beta.assistant_tool_param import AssistantToolParam
 from ....types.beta.assistant_stream_event import AssistantStreamEvent
 from ....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
 from ....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam
@@ -282,7 +283,7 @@ def create_and_run(
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
-        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -415,7 +416,7 @@ def create_and_run(
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
-        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -548,7 +549,7 @@ def create_and_run(
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
-        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -681,7 +682,7 @@ def create_and_run(
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
-        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1131,7 +1132,7 @@ async def create_and_run(
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
-        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1264,7 +1265,7 @@ async def create_and_run(
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
-        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1397,7 +1398,7 @@ async def create_and_run(
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
-        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -1530,7 +1531,7 @@ async def create_and_run(
         thread: thread_create_and_run_params.Thread | NotGiven = NOT_GIVEN,
         tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN,
         tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN,
-        tools: Optional[Iterable[thread_create_and_run_params.Tool]] | NotGiven = NOT_GIVEN,
+        tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN,
         top_p: Optional[float] | NotGiven = NOT_GIVEN,
         truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
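
The `tools` override on `create_and_run` now reuses the shared `AssistantToolParam` alias instead of the module-local `Tool` alias, so call sites are unchanged. A sketch, with a placeholder assistant ID:

from openai import OpenAI

client = OpenAI()

run = client.beta.threads.create_and_run(
    assistant_id="asst_123",  # placeholder
    tools=[{"type": "code_interpreter"}],  # any AssistantToolParam shape
)
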
diff --git a/src/openai/resources/evals/evals.py b/src/openai/resources/evals/evals.py
index 30ac4bdf32..c12562a86d 100644
--- a/src/openai/resources/evals/evals.py
+++ b/src/openai/resources/evals/evals.py
@@ -65,7 +65,6 @@ def create(
         testing_criteria: Iterable[eval_create_params.TestingCriterion],
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         name: str | NotGiven = NOT_GIVEN,
-        share_with_openai: bool | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -94,8 +93,6 @@ def create(
 
           name: The name of the evaluation.
 
-          share_with_openai: Indicates whether the evaluation is shared with OpenAI.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -112,7 +109,6 @@ def create(
                     "testing_criteria": testing_criteria,
                     "metadata": metadata,
                     "name": name,
-                    "share_with_openai": share_with_openai,
                 },
                 eval_create_params.EvalCreateParams,
             ),
@@ -328,7 +324,6 @@ async def create(
         testing_criteria: Iterable[eval_create_params.TestingCriterion],
         metadata: Optional[Metadata] | NotGiven = NOT_GIVEN,
         name: str | NotGiven = NOT_GIVEN,
-        share_with_openai: bool | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -357,8 +352,6 @@ async def create(
 
           name: The name of the evaluation.
 
-          share_with_openai: Indicates whether the evaluation is shared with OpenAI.
-
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -375,7 +368,6 @@ async def create(
                     "testing_criteria": testing_criteria,
                     "metadata": metadata,
                     "name": name,
-                    "share_with_openai": share_with_openai,
                 },
                 eval_create_params.EvalCreateParams,
             ),
diff --git a/src/openai/resources/evals/runs/runs.py b/src/openai/resources/evals/runs/runs.py
index 9c626d0903..d74c91e3c4 100644
--- a/src/openai/resources/evals/runs/runs.py
+++ b/src/openai/resources/evals/runs/runs.py
@@ -176,8 +176,8 @@ def list(
           order: Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for
               descending order. Defaults to `asc`.
 
-          status: Filter runs by status. Use "queued" | "in_progress" | "failed" | "completed" |
-              "canceled".
+          status: Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`
+              | `canceled`.
 
           extra_headers: Send extra headers
 
@@ -425,8 +425,8 @@ def list(
           order: Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for
               descending order. Defaults to `asc`.
 
-          status: Filter runs by status. Use "queued" | "in_progress" | "failed" | "completed" |
-              "canceled".
+          status: Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`
+              | `canceled`.
 
           extra_headers: Send extra headers
 
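
The docstring change above only reformats the list of allowed `status` values; the parameter itself is unchanged. A sketch with a placeholder eval ID:

from openai import OpenAI

client = OpenAI()

# One of: queued, in_progress, failed, completed, canceled.
runs = client.evals.runs.list("eval_123", status="completed")
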
diff --git a/src/openai/resources/fine_tuning/checkpoints/permissions.py b/src/openai/resources/fine_tuning/checkpoints/permissions.py
index b2bcb33020..547e42ecac 100644
--- a/src/openai/resources/fine_tuning/checkpoints/permissions.py
+++ b/src/openai/resources/fine_tuning/checkpoints/permissions.py
@@ -151,8 +151,9 @@ def retrieve(
 
     def delete(
         self,
-        fine_tuned_model_checkpoint: str,
+        permission_id: str,
         *,
+        fine_tuned_model_checkpoint: str,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -179,8 +180,10 @@ def delete(
             raise ValueError(
                 f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
             )
+        if not permission_id:
+            raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
         return self._delete(
-            f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+            f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
@@ -316,8 +319,9 @@ async def retrieve(
 
     async def delete(
         self,
-        fine_tuned_model_checkpoint: str,
+        permission_id: str,
         *,
+        fine_tuned_model_checkpoint: str,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -344,8 +348,10 @@ async def delete(
             raise ValueError(
                 f"Expected a non-empty value for `fine_tuned_model_checkpoint` but received {fine_tuned_model_checkpoint!r}"
             )
+        if not permission_id:
+            raise ValueError(f"Expected a non-empty value for `permission_id` but received {permission_id!r}")
         return await self._delete(
-            f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions",
+            f"/fine_tuning/checkpoints/{fine_tuned_model_checkpoint}/permissions/{permission_id}",
             options=make_request_options(
                 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
             ),
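
Since the permission ID is now its own path segment, `delete` takes it as the positional argument and the checkpoint moves to a keyword. A sketch with placeholder IDs:

from openai import OpenAI

client = OpenAI()

client.fine_tuning.checkpoints.permissions.delete(
    "cp_perm_123",  # placeholder permission ID
    fine_tuned_model_checkpoint="ft:gpt-4o-mini:org::ckpt",  # placeholder
)
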
diff --git a/src/openai/resources/images.py b/src/openai/resources/images.py
index e3398930e9..e59d0ce35c 100644
--- a/src/openai/resources/images.py
+++ b/src/openai/resources/images.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Union, Mapping, Optional, cast
+from typing import List, Union, Mapping, Optional, cast
 from typing_extensions import Literal
 
 import httpx
@@ -57,8 +57,9 @@ def create_variation(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ImagesResponse:
-        """
-        Creates a variation of a given image.
+        """Creates a variation of a given image.
+
+        This endpoint only supports `dall-e-2`.
 
         Args:
           image: The image to use as the basis for the variation(s). Must be a valid PNG file,
@@ -67,8 +68,7 @@ def create_variation(
           model: The model to use for image generation. Only `dall-e-2` is supported at this
               time.
 
-          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
-              `n=1` is supported.
+          n: The number of images to generate. Must be between 1 and 10.
 
           response_format: The format in which the generated images are returned. Must be one of `url` or
               `b64_json`. URLs are only valid for 60 minutes after the image has been
@@ -117,11 +117,12 @@ def create_variation(
     def edit(
         self,
         *,
-        image: FileTypes,
+        image: Union[FileTypes, List[FileTypes]],
         prompt: str,
         mask: FileTypes | NotGiven = NOT_GIVEN,
         model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
+        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
         size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -132,31 +133,43 @@ def edit(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ImagesResponse:
-        """
-        Creates an edited or extended image given an original image and a prompt.
+        """Creates an edited or extended image given one or more source images and a
+        prompt.
+
+        This endpoint only supports `gpt-image-1` and `dall-e-2`.
 
         Args:
-          image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask
-              is not provided, image must have transparency, which will be used as the mask.
+          image: The image(s) to edit. Must be a supported image file or an array of images. For
+              `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+              25MB. For `dall-e-2`, you can only provide one image, and it should be a square
+              `png` file less than 4MB.
 
           prompt: A text description of the desired image(s). The maximum length is 1000
-              characters.
+              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
 
           mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
-              indicate where `image` should be edited. Must be a valid PNG file, less than
+              indicate where `image` should be edited. If there are multiple images provided,
+              the mask will be applied on the first image. Must be a valid PNG file, less than
               4MB, and have the same dimensions as `image`.
 
-          model: The model to use for image generation. Only `dall-e-2` is supported at this
-              time.
+          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
+              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
+              is used.
 
           n: The number of images to generate. Must be between 1 and 10.
 
+          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
+              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
+              Defaults to `auto`.
+
           response_format: The format in which the generated images are returned. Must be one of `url` or
               `b64_json`. URLs are only valid for 60 minutes after the image has been
-              generated.
+              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
+              will always return base64-encoded images.
 
-          size: The size of the generated images. Must be one of `256x256`, `512x512`, or
-              `1024x1024`.
+          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+              (landscape), `1024x1536` (portrait), or `auto` (default value) for
+              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
 
           user: A unique identifier representing your end-user, which can help OpenAI to monitor
               and detect abuse.
@@ -177,12 +190,13 @@ def edit(
                 "mask": mask,
                 "model": model,
                 "n": n,
+                "quality": quality,
                 "response_format": response_format,
                 "size": size,
                 "user": user,
             }
         )
-        files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]])
+        files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
         # It should be noted that the actual Content-Type header that will be
         # sent to the server will contain a `boundary` parameter, e.g.
         # multipart/form-data; boundary=---abc--
@@ -201,11 +215,18 @@ def generate(
         self,
         *,
         prompt: str,
+        background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
         model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+        moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
-        quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN,
+        output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+        output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
-        size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN,
+        size: Optional[
+            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
+        ]
+        | NotGiven = NOT_GIVEN,
         style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -217,32 +238,60 @@ def generate(
     ) -> ImagesResponse:
         """
         Creates an image given a prompt.
+        [Learn more](https://platform.openai.com/docs/guides/images).
 
         Args:
-          prompt: A text description of the desired image(s). The maximum length is 1000
-              characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+          prompt: A text description of the desired image(s). The maximum length is 32000
+              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
+              for `dall-e-3`.
+
+          background: Allows you to set transparency for the background of the generated image(s). This
+              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+              `opaque` or `auto` (default value). When `auto` is used, the model will
+              automatically determine the best background for the image.
+
+              If `transparent`, the output format needs to support transparency, so it should
+              be set to either `png` (default value) or `webp`.
 
-          model: The model to use for image generation.
+          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+              `gpt-image-1` is used.
+
+          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
+              be either `low` for less restrictive filtering or `auto` (default value).
 
           n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
               `n=1` is supported.
 
-          quality: The quality of the image that will be generated. `hd` creates images with finer
-              details and greater consistency across the image. This param is only supported
-              for `dall-e-3`.
+          output_compression: The compression level (0-100%) for the generated images. This parameter is only
+              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+              defaults to 100.
 
-          response_format: The format in which the generated images are returned. Must be one of `url` or
-              `b64_json`. URLs are only valid for 60 minutes after the image has been
-              generated.
+          output_format: The format in which the generated images are returned. This parameter is only
+              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
 
-          size: The size of the generated images. Must be one of `256x256`, `512x512`, or
-              `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
-              `1024x1792` for `dall-e-3` models.
+          quality: The quality of the image that will be generated.
+
+              - `auto` (default value) will automatically select the best quality for the
+                given model.
+              - `high`, `medium` and `low` are supported for `gpt-image-1`.
+              - `hd` and `standard` are supported for `dall-e-3`.
+              - `standard` is the only option for `dall-e-2`.
+
+          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
+              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
+              after the image has been generated. This parameter isn't supported for
+              `gpt-image-1`, which will always return base64-encoded images.
+
+          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+              (landscape), `1024x1536` (portrait), or `auto` (default value) for
+              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
+              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
 
-          style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid
-              causes the model to lean towards generating hyper-real and dramatic images.
-              Natural causes the model to produce more natural, less hyper-real looking
-              images. This param is only supported for `dall-e-3`.
+          style: The style of the generated images. This parameter is only supported for
+              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
+              towards generating hyper-real and dramatic images. Natural causes the model to
+              produce more natural, less hyper-real looking images.
 
           user: A unique identifier representing your end-user, which can help OpenAI to monitor
               and detect abuse.
@@ -261,8 +310,12 @@ def generate(
             body=maybe_transform(
                 {
                     "prompt": prompt,
+                    "background": background,
                     "model": model,
+                    "moderation": moderation,
                     "n": n,
+                    "output_compression": output_compression,
+                    "output_format": output_format,
                     "quality": quality,
                     "response_format": response_format,
                     "size": size,
@@ -314,8 +367,9 @@ async def create_variation(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ImagesResponse:
-        """
-        Creates a variation of a given image.
+        """Creates a variation of a given image.
+
+        This endpoint only supports `dall-e-2`.
 
         Args:
           image: The image to use as the basis for the variation(s). Must be a valid PNG file,
@@ -324,8 +378,7 @@ async def create_variation(
           model: The model to use for image generation. Only `dall-e-2` is supported at this
               time.
 
-          n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
-              `n=1` is supported.
+          n: The number of images to generate. Must be between 1 and 10.
 
           response_format: The format in which the generated images are returned. Must be one of `url` or
               `b64_json`. URLs are only valid for 60 minutes after the image has been
@@ -374,11 +427,12 @@ async def create_variation(
     async def edit(
         self,
         *,
-        image: FileTypes,
+        image: Union[FileTypes, List[FileTypes]],
         prompt: str,
         mask: FileTypes | NotGiven = NOT_GIVEN,
         model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
+        quality: Optional[Literal["standard", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
         size: Optional[Literal["256x256", "512x512", "1024x1024"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
@@ -389,31 +443,43 @@ async def edit(
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
     ) -> ImagesResponse:
-        """
-        Creates an edited or extended image given an original image and a prompt.
+        """Creates an edited or extended image given one or more source images and a
+        prompt.
+
+        This endpoint only supports `gpt-image-1` and `dall-e-2`.
 
         Args:
-          image: The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask
-              is not provided, image must have transparency, which will be used as the mask.
+          image: The image(s) to edit. Must be a supported image file or an array of images. For
+              `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
+              25MB. For `dall-e-2`, you can only provide one image, and it should be a square
+              `png` file less than 4MB.
 
           prompt: A text description of the desired image(s). The maximum length is 1000
-              characters.
+              characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
 
           mask: An additional image whose fully transparent areas (e.g. where alpha is zero)
-              indicate where `image` should be edited. Must be a valid PNG file, less than
+              indicate where `image` should be edited. If there are multiple images provided,
+              the mask will be applied on the first image. Must be a valid PNG file, less than
               4MB, and have the same dimensions as `image`.
 
-          model: The model to use for image generation. Only `dall-e-2` is supported at this
-              time.
+          model: The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
+              supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
+              is used.
 
           n: The number of images to generate. Must be between 1 and 10.
 
+          quality: The quality of the image that will be generated. `high`, `medium` and `low` are
+              only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
+              Defaults to `auto`.
+
           response_format: The format in which the generated images are returned. Must be one of `url` or
               `b64_json`. URLs are only valid for 60 minutes after the image has been
-              generated.
+              generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
+              will always return base64-encoded images.
 
-          size: The size of the generated images. Must be one of `256x256`, `512x512`, or
-              `1024x1024`.
+          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+              (landscape), `1024x1536` (portrait), or `auto` (default value) for
+              `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
 
           user: A unique identifier representing your end-user, which can help OpenAI to monitor
               and detect abuse.
@@ -434,12 +500,13 @@ async def edit(
                 "mask": mask,
                 "model": model,
                 "n": n,
+                "quality": quality,
                 "response_format": response_format,
                 "size": size,
                 "user": user,
             }
         )
-        files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["mask"]])
+        files = extract_files(cast(Mapping[str, object], body), paths=[["image"], ["image", "<array>"], ["mask"]])
         # It should be noted that the actual Content-Type header that will be
         # sent to the server will contain a `boundary` parameter, e.g.
         # multipart/form-data; boundary=---abc--
@@ -458,11 +525,18 @@ async def generate(
         self,
         *,
         prompt: str,
+        background: Optional[Literal["transparent", "opaque", "auto"]] | NotGiven = NOT_GIVEN,
         model: Union[str, ImageModel, None] | NotGiven = NOT_GIVEN,
+        moderation: Optional[Literal["low", "auto"]] | NotGiven = NOT_GIVEN,
         n: Optional[int] | NotGiven = NOT_GIVEN,
-        quality: Literal["standard", "hd"] | NotGiven = NOT_GIVEN,
+        output_compression: Optional[int] | NotGiven = NOT_GIVEN,
+        output_format: Optional[Literal["png", "jpeg", "webp"]] | NotGiven = NOT_GIVEN,
+        quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[Literal["url", "b64_json"]] | NotGiven = NOT_GIVEN,
-        size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]] | NotGiven = NOT_GIVEN,
+        size: Optional[
+            Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
+        ]
+        | NotGiven = NOT_GIVEN,
         style: Optional[Literal["vivid", "natural"]] | NotGiven = NOT_GIVEN,
         user: str | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -474,32 +548,60 @@ async def generate(
     ) -> ImagesResponse:
         """
         Creates an image given a prompt.
+        [Learn more](https://platform.openai.com/docs/guides/images).
 
         Args:
-          prompt: A text description of the desired image(s). The maximum length is 1000
-              characters for `dall-e-2` and 4000 characters for `dall-e-3`.
+          prompt: A text description of the desired image(s). The maximum length is 32000
+              characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
+              for `dall-e-3`.
+
+          background: Allows you to set transparency for the background of the generated image(s). This
+              parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
+              `opaque` or `auto` (default value). When `auto` is used, the model will
+              automatically determine the best background for the image.
+
+              If `transparent`, the output format needs to support transparency, so it should
+              be set to either `png` (default value) or `webp`.
 
-          model: The model to use for image generation.
+          model: The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
+              `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
+              `gpt-image-1` is used.
+
+          moderation: Control the content-moderation level for images generated by `gpt-image-1`. Must
+              be either `low` for less restrictive filtering or `auto` (default value).
 
           n: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
               `n=1` is supported.
 
-          quality: The quality of the image that will be generated. `hd` creates images with finer
-              details and greater consistency across the image. This param is only supported
-              for `dall-e-3`.
+          output_compression: The compression level (0-100%) for the generated images. This parameter is only
+              supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
+              defaults to 100.
 
-          response_format: The format in which the generated images are returned. Must be one of `url` or
-              `b64_json`. URLs are only valid for 60 minutes after the image has been
-              generated.
+          output_format: The format in which the generated images are returned. This parameter is only
+              supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
 
-          size: The size of the generated images. Must be one of `256x256`, `512x512`, or
-              `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or
-              `1024x1792` for `dall-e-3` models.
+          quality: The quality of the image that will be generated.
+
+              - `auto` (default value) will automatically select the best quality for the
+                given model.
+              - `high`, `medium` and `low` are supported for `gpt-image-1`.
+              - `hd` and `standard` are supported for `dall-e-3`.
+              - `standard` is the only option for `dall-e-2`.
+
+          response_format: The format in which generated images with `dall-e-2` and `dall-e-3` are
+              returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
+              after the image has been generated. This parameter isn't supported for
+              `gpt-image-1`, which will always return base64-encoded images.
+
+          size: The size of the generated images. Must be one of `1024x1024`, `1536x1024`
+              (landscape), `1024x1536` (portrait), or `auto` (default value) for
+              `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
+              one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
 
-          style: The style of the generated images. Must be one of `vivid` or `natural`. Vivid
-              causes the model to lean towards generating hyper-real and dramatic images.
-              Natural causes the model to produce more natural, less hyper-real looking
-              images. This param is only supported for `dall-e-3`.
+          style: The style of the generated images. This parameter is only supported for
+              `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
+              towards generating hyper-real and dramatic images. Natural causes the model to
+              produce more natural, less hyper-real looking images.
 
           user: A unique identifier representing your end-user, which can help OpenAI to monitor
               and detect abuse.
@@ -518,8 +620,12 @@ async def generate(
             body=await async_maybe_transform(
                 {
                     "prompt": prompt,
+                    "background": background,
                     "model": model,
+                    "moderation": moderation,
                     "n": n,
+                    "output_compression": output_compression,
+                    "output_format": output_format,
                     "quality": quality,
                     "response_format": response_format,
                     "size": size,
diff --git a/src/openai/types/beta/realtime/realtime_client_event.py b/src/openai/types/beta/realtime/realtime_client_event.py
index f962a505cd..5f4858d688 100644
--- a/src/openai/types/beta/realtime/realtime_client_event.py
+++ b/src/openai/types/beta/realtime/realtime_client_event.py
@@ -1,9 +1,10 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Union
-from typing_extensions import Annotated, TypeAlias
+from typing import Union, Optional
+from typing_extensions import Literal, Annotated, TypeAlias
 
 from ...._utils import PropertyInfo
+from ...._models import BaseModel
 from .session_update_event import SessionUpdateEvent
 from .response_cancel_event import ResponseCancelEvent
 from .response_create_event import ResponseCreateEvent
@@ -16,7 +17,16 @@
 from .conversation_item_retrieve_event import ConversationItemRetrieveEvent
 from .conversation_item_truncate_event import ConversationItemTruncateEvent
 
-__all__ = ["RealtimeClientEvent"]
+__all__ = ["RealtimeClientEvent", "OutputAudioBufferClear"]
+
+
+class OutputAudioBufferClear(BaseModel):
+    type: Literal["output_audio_buffer.clear"]
+    """The event type, must be `output_audio_buffer.clear`."""
+
+    event_id: Optional[str] = None
+    """The unique ID of the client event used for error handling."""
+
 
 RealtimeClientEvent: TypeAlias = Annotated[
     Union[
@@ -26,6 +36,7 @@
         ConversationItemTruncateEvent,
         InputAudioBufferAppendEvent,
         InputAudioBufferClearEvent,
+        OutputAudioBufferClear,
         InputAudioBufferCommitEvent,
         ResponseCancelEvent,
         ResponseCreateEvent,
diff --git a/src/openai/types/beta/realtime/realtime_client_event_param.py b/src/openai/types/beta/realtime/realtime_client_event_param.py
index 6fdba4b87c..e7dfba241e 100644
--- a/src/openai/types/beta/realtime/realtime_client_event_param.py
+++ b/src/openai/types/beta/realtime/realtime_client_event_param.py
@@ -3,7 +3,7 @@
 from __future__ import annotations
 
 from typing import Union
-from typing_extensions import TypeAlias
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from .session_update_event_param import SessionUpdateEventParam
 from .response_cancel_event_param import ResponseCancelEventParam
@@ -17,7 +17,16 @@
 from .conversation_item_retrieve_event_param import ConversationItemRetrieveEventParam
 from .conversation_item_truncate_event_param import ConversationItemTruncateEventParam
 
-__all__ = ["RealtimeClientEventParam"]
+__all__ = ["RealtimeClientEventParam", "OutputAudioBufferClear"]
+
+
+class OutputAudioBufferClear(TypedDict, total=False):
+    type: Required[Literal["output_audio_buffer.clear"]]
+    """The event type, must be `output_audio_buffer.clear`."""
+
+    event_id: str
+    """The unique ID of the client event used for error handling."""
+
 
 RealtimeClientEventParam: TypeAlias = Union[
     ConversationItemCreateEventParam,
@@ -26,6 +35,7 @@
     ConversationItemTruncateEventParam,
     InputAudioBufferAppendEventParam,
     InputAudioBufferClearEventParam,
+    OutputAudioBufferClear,
     InputAudioBufferCommitEventParam,
     ResponseCancelEventParam,
     ResponseCreateEventParam,
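
The new param type is a plain `TypedDict`, so the raw event can be written inline; the event ID below is a placeholder:

from openai.types.beta.realtime.realtime_client_event_param import RealtimeClientEventParam

event: RealtimeClientEventParam = {
    "type": "output_audio_buffer.clear",
    "event_id": "evt_123",  # optional; placeholder
}
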
diff --git a/src/openai/types/beta/realtime/realtime_server_event.py b/src/openai/types/beta/realtime/realtime_server_event.py
index ba1d324445..c12f5df977 100644
--- a/src/openai/types/beta/realtime/realtime_server_event.py
+++ b/src/openai/types/beta/realtime/realtime_server_event.py
@@ -39,7 +39,13 @@
     ConversationItemInputAudioTranscriptionCompletedEvent,
 )
 
-__all__ = ["RealtimeServerEvent", "ConversationItemRetrieved"]
+__all__ = [
+    "RealtimeServerEvent",
+    "ConversationItemRetrieved",
+    "OutputAudioBufferStarted",
+    "OutputAudioBufferStopped",
+    "OutputAudioBufferCleared",
+]
 
 
 class ConversationItemRetrieved(BaseModel):
@@ -53,6 +59,39 @@ class ConversationItemRetrieved(BaseModel):
     """The event type, must be `conversation.item.retrieved`."""
 
 
+class OutputAudioBufferStarted(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    response_id: str
+    """The unique ID of the response that produced the audio."""
+
+    type: Literal["output_audio_buffer.started"]
+    """The event type, must be `output_audio_buffer.started`."""
+
+
+class OutputAudioBufferStopped(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    response_id: str
+    """The unique ID of the response that produced the audio."""
+
+    type: Literal["output_audio_buffer.stopped"]
+    """The event type, must be `output_audio_buffer.stopped`."""
+
+
+class OutputAudioBufferCleared(BaseModel):
+    event_id: str
+    """The unique ID of the server event."""
+
+    response_id: str
+    """The unique ID of the response that produced the audio."""
+
+    type: Literal["output_audio_buffer.cleared"]
+    """The event type, must be `output_audio_buffer.cleared`."""
+
+
 RealtimeServerEvent: TypeAlias = Annotated[
     Union[
         ConversationCreatedEvent,
@@ -86,6 +125,9 @@ class ConversationItemRetrieved(BaseModel):
         SessionCreatedEvent,
         SessionUpdatedEvent,
         TranscriptionSessionUpdatedEvent,
+        OutputAudioBufferStarted,
+        OutputAudioBufferStopped,
+        OutputAudioBufferCleared,
     ],
     PropertyInfo(discriminator="type"),
 ]
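
On the receiving side, the three new server events join the `type`-discriminated
union, so a receive loop can narrow on `event.type`. A sketch, reusing the
connection from the previous example:

    # Inside the same `with` block as above.
    for event in connection:
        if event.type == "output_audio_buffer.started":
            print(f"audio playback started for response {event.response_id}")
        elif event.type == "output_audio_buffer.stopped":
            print(f"audio playback stopped for response {event.response_id}")
        elif event.type == "output_audio_buffer.cleared":
            print(f"audio buffer cleared for response {event.response_id}")
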
diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py
index 065c390f4e..d813710579 100644
--- a/src/openai/types/beta/thread_create_and_run_params.py
+++ b/src/openai/types/beta/thread_create_and_run_params.py
@@ -6,8 +6,7 @@
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from ..shared.chat_model import ChatModel
-from .function_tool_param import FunctionToolParam
-from .file_search_tool_param import FileSearchToolParam
+from .assistant_tool_param import AssistantToolParam
 from ..shared_params.metadata import Metadata
 from .code_interpreter_tool_param import CodeInterpreterToolParam
 from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam
@@ -32,7 +31,6 @@
     "ToolResources",
     "ToolResourcesCodeInterpreter",
     "ToolResourcesFileSearch",
-    "Tool",
     "TruncationStrategy",
     "ThreadCreateAndRunParamsNonStreaming",
     "ThreadCreateAndRunParamsStreaming",
@@ -153,7 +151,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False):
     tool requires a list of vector store IDs.
     """
 
-    tools: Optional[Iterable[Tool]]
+    tools: Optional[Iterable[AssistantToolParam]]
     """Override the tools the assistant can use for this run.
 
     This is useful for modifying the behavior on a per-run basis.
@@ -360,9 +358,6 @@ class ToolResources(TypedDict, total=False):
     file_search: ToolResourcesFileSearch
 
 
-Tool: TypeAlias = Union[CodeInterpreterToolParam, FileSearchToolParam, FunctionToolParam]
-
-
 class TruncationStrategy(TypedDict, total=False):
     type: Required[Literal["auto", "last_messages"]]
     """The truncation strategy to use for the thread.
diff --git a/src/openai/types/eval_create_params.py b/src/openai/types/eval_create_params.py
index 8b28e51a6b..03f44f2c8c 100644
--- a/src/openai/types/eval_create_params.py
+++ b/src/openai/types/eval_create_params.py
@@ -8,20 +8,25 @@
 from .shared_params.metadata import Metadata
 from .eval_string_check_grader_param import EvalStringCheckGraderParam
 from .eval_text_similarity_grader_param import EvalTextSimilarityGraderParam
+from .responses.response_input_text_param import ResponseInputTextParam
 
 __all__ = [
     "EvalCreateParams",
     "DataSourceConfig",
     "DataSourceConfigCustom",
-    "DataSourceConfigStoredCompletions",
+    "DataSourceConfigLogs",
     "TestingCriterion",
     "TestingCriterionLabelModel",
     "TestingCriterionLabelModelInput",
     "TestingCriterionLabelModelInputSimpleInputMessage",
-    "TestingCriterionLabelModelInputInputMessage",
-    "TestingCriterionLabelModelInputInputMessageContent",
-    "TestingCriterionLabelModelInputOutputMessage",
-    "TestingCriterionLabelModelInputOutputMessageContent",
+    "TestingCriterionLabelModelInputEvalItem",
+    "TestingCriterionLabelModelInputEvalItemContent",
+    "TestingCriterionLabelModelInputEvalItemContentOutputText",
+    "TestingCriterionPython",
+    "TestingCriterionScoreModel",
+    "TestingCriterionScoreModelInput",
+    "TestingCriterionScoreModelInputContent",
+    "TestingCriterionScoreModelInputContentOutputText",
 ]
 
 
@@ -45,37 +50,30 @@ class EvalCreateParams(TypedDict, total=False):
     name: str
     """The name of the evaluation."""
 
-    share_with_openai: bool
-    """Indicates whether the evaluation is shared with OpenAI."""
-
 
 class DataSourceConfigCustom(TypedDict, total=False):
     item_schema: Required[Dict[str, object]]
-    """The json schema for the run data source items."""
+    """The json schema for each row in the data source."""
 
     type: Required[Literal["custom"]]
     """The type of data source. Always `custom`."""
 
     include_sample_schema: bool
-    """Whether to include the sample schema in the data source."""
-
-
-class DataSourceConfigStoredCompletions(TypedDict, total=False):
-    type: Required[Literal["stored_completions"]]
-    """The type of data source. Always `stored_completions`."""
+    """
+    Whether the eval should expect you to populate the sample namespace (ie, by
+    generating responses off of your data source)
+    """
 
-    metadata: Optional[Metadata]
-    """Set of 16 key-value pairs that can be attached to an object.
 
-    This can be useful for storing additional information about the object in a
-    structured format, and querying for objects via API or the dashboard.
+class DataSourceConfigLogs(TypedDict, total=False):
+    type: Required[Literal["logs"]]
+    """The type of data source. Always `logs`."""
 
-    Keys are strings with a maximum length of 64 characters. Values are strings with
-    a maximum length of 512 characters.
-    """
+    metadata: Dict[str, object]
+    """Metadata filters for the logs data source."""
 
 
-DataSourceConfig: TypeAlias = Union[DataSourceConfigCustom, DataSourceConfigStoredCompletions]
+DataSourceConfig: TypeAlias = Union[DataSourceConfigCustom, DataSourceConfigLogs]
 
 
 class TestingCriterionLabelModelInputSimpleInputMessage(TypedDict, total=False):
@@ -86,51 +84,44 @@ class TestingCriterionLabelModelInputSimpleInputMessage(TypedDict, total=False):
     """The role of the message (e.g. "system", "assistant", "user")."""
 
 
-class TestingCriterionLabelModelInputInputMessageContent(TypedDict, total=False):
+class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total=False):
     text: Required[str]
-    """The text content."""
-
-    type: Required[Literal["input_text"]]
-    """The type of content, which is always `input_text`."""
-
+    """The text output from the model."""
 
-class TestingCriterionLabelModelInputInputMessage(TypedDict, total=False):
-    content: Required[TestingCriterionLabelModelInputInputMessageContent]
-
-    role: Required[Literal["user", "system", "developer"]]
-    """The role of the message. One of `user`, `system`, or `developer`."""
-
-    type: Required[Literal["message"]]
-    """The type of item, which is always `message`."""
+    type: Required[Literal["output_text"]]
+    """The type of the output text. Always `output_text`."""
 
 
-class TestingCriterionLabelModelInputOutputMessageContent(TypedDict, total=False):
-    text: Required[str]
-    """The text content."""
+TestingCriterionLabelModelInputEvalItemContent: TypeAlias = Union[
+    str, ResponseInputTextParam, TestingCriterionLabelModelInputEvalItemContentOutputText
+]
 
-    type: Required[Literal["output_text"]]
-    """The type of content, which is always `output_text`."""
 
+class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False):
+    content: Required[TestingCriterionLabelModelInputEvalItemContent]
+    """Text inputs to the model - can contain template strings."""
 
-class TestingCriterionLabelModelInputOutputMessage(TypedDict, total=False):
-    content: Required[TestingCriterionLabelModelInputOutputMessageContent]
+    role: Required[Literal["user", "assistant", "system", "developer"]]
+    """The role of the message input.
 
-    role: Required[Literal["assistant"]]
-    """The role of the message. Must be `assistant` for output."""
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
 
-    type: Required[Literal["message"]]
-    """The type of item, which is always `message`."""
+    type: Literal["message"]
+    """The type of the message input. Always `message`."""
 
 
 TestingCriterionLabelModelInput: TypeAlias = Union[
-    TestingCriterionLabelModelInputSimpleInputMessage,
-    TestingCriterionLabelModelInputInputMessage,
-    TestingCriterionLabelModelInputOutputMessage,
+    TestingCriterionLabelModelInputSimpleInputMessage, TestingCriterionLabelModelInputEvalItem
 ]
 
 
 class TestingCriterionLabelModel(TypedDict, total=False):
     input: Required[Iterable[TestingCriterionLabelModelInput]]
+    """A list of chat messages forming the prompt or context.
+
+    May include variable references to the "item" namespace, e.g. {{item.name}}.
+    """
 
     labels: Required[List[str]]
     """The labels to classify to each item in the evaluation."""
@@ -148,6 +139,77 @@ class TestingCriterionLabelModel(TypedDict, total=False):
     """The object type, which is always `label_model`."""
 
 
+class TestingCriterionPython(TypedDict, total=False):
+    name: Required[str]
+    """The name of the grader."""
+
+    source: Required[str]
+    """The source code of the python script."""
+
+    type: Required[Literal["python"]]
+    """The object type, which is always `python`."""
+
+    image_tag: str
+    """The image tag to use for the python script."""
+
+    pass_threshold: float
+    """The threshold for the score."""
+
+
+class TestingCriterionScoreModelInputContentOutputText(TypedDict, total=False):
+    text: Required[str]
+    """The text output from the model."""
+
+    type: Required[Literal["output_text"]]
+    """The type of the output text. Always `output_text`."""
+
+
+TestingCriterionScoreModelInputContent: TypeAlias = Union[
+    str, ResponseInputTextParam, TestingCriterionScoreModelInputContentOutputText
+]
+
+
+class TestingCriterionScoreModelInput(TypedDict, total=False):
+    content: Required[TestingCriterionScoreModelInputContent]
+    """Text inputs to the model - can contain template strings."""
+
+    role: Required[Literal["user", "assistant", "system", "developer"]]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Literal["message"]
+    """The type of the message input. Always `message`."""
+
+
+class TestingCriterionScoreModel(TypedDict, total=False):
+    input: Required[Iterable[TestingCriterionScoreModelInput]]
+    """The input text. This may include template strings."""
+
+    model: Required[str]
+    """The model to use for the evaluation."""
+
+    name: Required[str]
+    """The name of the grader."""
+
+    type: Required[Literal["score_model"]]
+    """The object type, which is always `score_model`."""
+
+    pass_threshold: float
+    """The threshold for the score."""
+
+    range: Iterable[float]
+    """The range of the score. Defaults to `[0, 1]`."""
+
+    sampling_params: object
+    """The sampling parameters for the model."""
+
+
 TestingCriterion: TypeAlias = Union[
-    TestingCriterionLabelModel, EvalStringCheckGraderParam, EvalTextSimilarityGraderParam
+    TestingCriterionLabelModel,
+    EvalStringCheckGraderParam,
+    EvalTextSimilarityGraderParam,
+    TestingCriterionPython,
+    TestingCriterionScoreModel,
 ]
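
Taken together, an eval can now target a `logs` data source and use the new
`python` grader. A hedged sketch (the eval name, metadata keys, and the grading
function shape are illustrative assumptions, not prescribed by these types):

    ev = client.evals.create(
        name="sentiment-accuracy",
        data_source_config={"type": "logs", "metadata": {"project": "demo"}},
        testing_criteria=[
            {
                "type": "python",
                "name": "exact_match",
                "source": (
                    "def grade(sample, item):\n"
                    "    return 1.0 if sample['output_text'] == item['label'] else 0.0"
                ),
                "pass_threshold": 0.5,
            }
        ],
    )
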
diff --git a/src/openai/types/eval_create_response.py b/src/openai/types/eval_create_response.py
index a1c2853a2a..6d77a81870 100644
--- a/src/openai/types/eval_create_response.py
+++ b/src/openai/types/eval_create_response.py
@@ -9,17 +9,106 @@
 from .eval_label_model_grader import EvalLabelModelGrader
 from .eval_string_check_grader import EvalStringCheckGrader
 from .eval_text_similarity_grader import EvalTextSimilarityGrader
+from .responses.response_input_text import ResponseInputText
 from .eval_custom_data_source_config import EvalCustomDataSourceConfig
 from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
 
-__all__ = ["EvalCreateResponse", "DataSourceConfig", "TestingCriterion"]
+__all__ = [
+    "EvalCreateResponse",
+    "DataSourceConfig",
+    "TestingCriterion",
+    "TestingCriterionPython",
+    "TestingCriterionScoreModel",
+    "TestingCriterionScoreModelInput",
+    "TestingCriterionScoreModelInputContent",
+    "TestingCriterionScoreModelInputContentOutputText",
+]
 
 DataSourceConfig: TypeAlias = Annotated[
     Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type")
 ]
 
+
+class TestingCriterionPython(BaseModel):
+    __test__ = False
+    name: str
+    """The name of the grader."""
+
+    source: str
+    """The source code of the python script."""
+
+    type: Literal["python"]
+    """The object type, which is always `python`."""
+
+    image_tag: Optional[str] = None
+    """The image tag to use for the python script."""
+
+    pass_threshold: Optional[float] = None
+    """The threshold for the score."""
+
+
+class TestingCriterionScoreModelInputContentOutputText(BaseModel):
+    __test__ = False
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
+
+
+TestingCriterionScoreModelInputContent: TypeAlias = Union[
+    str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
+]
+
+
+class TestingCriterionScoreModelInput(BaseModel):
+    __test__ = False
+    content: TestingCriterionScoreModelInputContent
+    """Text inputs to the model - can contain template strings."""
+
+    role: Literal["user", "assistant", "system", "developer"]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always `message`."""
+
+
+class TestingCriterionScoreModel(BaseModel):
+    __test__ = False
+    input: List[TestingCriterionScoreModelInput]
+    """The input text. This may include template strings."""
+
+    model: str
+    """The model to use for the evaluation."""
+
+    name: str
+    """The name of the grader."""
+
+    type: Literal["score_model"]
+    """The object type, which is always `score_model`."""
+
+    pass_threshold: Optional[float] = None
+    """The threshold for the score."""
+
+    range: Optional[List[float]] = None
+    """The range of the score. Defaults to `[0, 1]`."""
+
+    sampling_params: Optional[object] = None
+    """The sampling parameters for the model."""
+
+
 TestingCriterion: TypeAlias = Annotated[
-    Union[EvalLabelModelGrader, EvalStringCheckGrader, EvalTextSimilarityGrader], PropertyInfo(discriminator="type")
+    Union[
+        EvalLabelModelGrader,
+        EvalStringCheckGrader,
+        EvalTextSimilarityGrader,
+        TestingCriterionPython,
+        TestingCriterionScoreModel,
+    ],
+    PropertyInfo(discriminator="type"),
 ]
 
 
@@ -49,8 +138,5 @@ class EvalCreateResponse(BaseModel):
     object: Literal["eval"]
     """The object type."""
 
-    share_with_openai: bool
-    """Indicates whether the evaluation is shared with OpenAI."""
-
     testing_criteria: List[TestingCriterion]
     """A list of testing criteria."""
diff --git a/src/openai/types/eval_label_model_grader.py b/src/openai/types/eval_label_model_grader.py
index 826b116287..40e6bda140 100644
--- a/src/openai/types/eval_label_model_grader.py
+++ b/src/openai/types/eval_label_model_grader.py
@@ -1,58 +1,37 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Union
-from typing_extensions import Literal, Annotated, TypeAlias
+from typing import List, Union, Optional
+from typing_extensions import Literal, TypeAlias
 
-from .._utils import PropertyInfo
 from .._models import BaseModel
+from .responses.response_input_text import ResponseInputText
 
-__all__ = [
-    "EvalLabelModelGrader",
-    "Input",
-    "InputInputMessage",
-    "InputInputMessageContent",
-    "InputAssistant",
-    "InputAssistantContent",
-]
+__all__ = ["EvalLabelModelGrader", "Input", "InputContent", "InputContentOutputText"]
 
 
-class InputInputMessageContent(BaseModel):
+class InputContentOutputText(BaseModel):
     text: str
-    """The text content."""
-
-    type: Literal["input_text"]
-    """The type of content, which is always `input_text`."""
-
-
-class InputInputMessage(BaseModel):
-    content: InputInputMessageContent
-
-    role: Literal["user", "system", "developer"]
-    """The role of the message. One of `user`, `system`, or `developer`."""
-
-    type: Literal["message"]
-    """The type of item, which is always `message`."""
-
-
-class InputAssistantContent(BaseModel):
-    text: str
-    """The text content."""
+    """The text output from the model."""
 
     type: Literal["output_text"]
-    """The type of content, which is always `output_text`."""
+    """The type of the output text. Always `output_text`."""
+
 
+InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText]
 
-class InputAssistant(BaseModel):
-    content: InputAssistantContent
 
-    role: Literal["assistant"]
-    """The role of the message. Must be `assistant` for output."""
+class Input(BaseModel):
+    content: InputContent
+    """Text inputs to the model - can contain template strings."""
 
-    type: Literal["message"]
-    """The type of item, which is always `message`."""
+    role: Literal["user", "assistant", "system", "developer"]
+    """The role of the message input.
 
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
 
-Input: TypeAlias = Annotated[Union[InputInputMessage, InputAssistant], PropertyInfo(discriminator="role")]
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always `message`."""
 
 
 class EvalLabelModelGrader(BaseModel):
diff --git a/src/openai/types/eval_list_response.py b/src/openai/types/eval_list_response.py
index eb54569011..8c7e9c5588 100644
--- a/src/openai/types/eval_list_response.py
+++ b/src/openai/types/eval_list_response.py
@@ -9,17 +9,106 @@
 from .eval_label_model_grader import EvalLabelModelGrader
 from .eval_string_check_grader import EvalStringCheckGrader
 from .eval_text_similarity_grader import EvalTextSimilarityGrader
+from .responses.response_input_text import ResponseInputText
 from .eval_custom_data_source_config import EvalCustomDataSourceConfig
 from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
 
-__all__ = ["EvalListResponse", "DataSourceConfig", "TestingCriterion"]
+__all__ = [
+    "EvalListResponse",
+    "DataSourceConfig",
+    "TestingCriterion",
+    "TestingCriterionPython",
+    "TestingCriterionScoreModel",
+    "TestingCriterionScoreModelInput",
+    "TestingCriterionScoreModelInputContent",
+    "TestingCriterionScoreModelInputContentOutputText",
+]
 
 DataSourceConfig: TypeAlias = Annotated[
     Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type")
 ]
 
+
+class TestingCriterionPython(BaseModel):
+    __test__ = False
+    name: str
+    """The name of the grader."""
+
+    source: str
+    """The source code of the python script."""
+
+    type: Literal["python"]
+    """The object type, which is always `python`."""
+
+    image_tag: Optional[str] = None
+    """The image tag to use for the python script."""
+
+    pass_threshold: Optional[float] = None
+    """The threshold for the score."""
+
+
+class TestingCriterionScoreModelInputContentOutputText(BaseModel):
+    __test__ = False
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
+
+
+TestingCriterionScoreModelInputContent: TypeAlias = Union[
+    str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
+]
+
+
+class TestingCriterionScoreModelInput(BaseModel):
+    __test__ = False
+    content: TestingCriterionScoreModelInputContent
+    """Text inputs to the model - can contain template strings."""
+
+    role: Literal["user", "assistant", "system", "developer"]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always `message`."""
+
+
+class TestingCriterionScoreModel(BaseModel):
+    __test__ = False
+    input: List[TestingCriterionScoreModelInput]
+    """The input text. This may include template strings."""
+
+    model: str
+    """The model to use for the evaluation."""
+
+    name: str
+    """The name of the grader."""
+
+    type: Literal["score_model"]
+    """The object type, which is always `score_model`."""
+
+    pass_threshold: Optional[float] = None
+    """The threshold for the score."""
+
+    range: Optional[List[float]] = None
+    """The range of the score. Defaults to `[0, 1]`."""
+
+    sampling_params: Optional[object] = None
+    """The sampling parameters for the model."""
+
+
 TestingCriterion: TypeAlias = Annotated[
-    Union[EvalLabelModelGrader, EvalStringCheckGrader, EvalTextSimilarityGrader], PropertyInfo(discriminator="type")
+    Union[
+        EvalLabelModelGrader,
+        EvalStringCheckGrader,
+        EvalTextSimilarityGrader,
+        TestingCriterionPython,
+        TestingCriterionScoreModel,
+    ],
+    PropertyInfo(discriminator="type"),
 ]
 
 
@@ -49,8 +138,5 @@ class EvalListResponse(BaseModel):
     object: Literal["eval"]
     """The object type."""
 
-    share_with_openai: bool
-    """Indicates whether the evaluation is shared with OpenAI."""
-
     testing_criteria: List[TestingCriterion]
     """A list of testing criteria."""
diff --git a/src/openai/types/eval_retrieve_response.py b/src/openai/types/eval_retrieve_response.py
index 8f3bfdf902..625bae80f4 100644
--- a/src/openai/types/eval_retrieve_response.py
+++ b/src/openai/types/eval_retrieve_response.py
@@ -9,17 +9,106 @@
 from .eval_label_model_grader import EvalLabelModelGrader
 from .eval_string_check_grader import EvalStringCheckGrader
 from .eval_text_similarity_grader import EvalTextSimilarityGrader
+from .responses.response_input_text import ResponseInputText
 from .eval_custom_data_source_config import EvalCustomDataSourceConfig
 from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
 
-__all__ = ["EvalRetrieveResponse", "DataSourceConfig", "TestingCriterion"]
+__all__ = [
+    "EvalRetrieveResponse",
+    "DataSourceConfig",
+    "TestingCriterion",
+    "TestingCriterionPython",
+    "TestingCriterionScoreModel",
+    "TestingCriterionScoreModelInput",
+    "TestingCriterionScoreModelInputContent",
+    "TestingCriterionScoreModelInputContentOutputText",
+]
 
 DataSourceConfig: TypeAlias = Annotated[
     Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type")
 ]
 
+
+class TestingCriterionPython(BaseModel):
+    __test__ = False
+    name: str
+    """The name of the grader."""
+
+    source: str
+    """The source code of the python script."""
+
+    type: Literal["python"]
+    """The object type, which is always `python`."""
+
+    image_tag: Optional[str] = None
+    """The image tag to use for the python script."""
+
+    pass_threshold: Optional[float] = None
+    """The threshold for the score."""
+
+
+class TestingCriterionScoreModelInputContentOutputText(BaseModel):
+    __test__ = False
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
+
+
+TestingCriterionScoreModelInputContent: TypeAlias = Union[
+    str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
+]
+
+
+class TestingCriterionScoreModelInput(BaseModel):
+    __test__ = False
+    content: TestingCriterionScoreModelInputContent
+    """Text inputs to the model - can contain template strings."""
+
+    role: Literal["user", "assistant", "system", "developer"]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always `message`."""
+
+
+class TestingCriterionScoreModel(BaseModel):
+    __test__ = False
+    input: List[TestingCriterionScoreModelInput]
+    """The input text. This may include template strings."""
+
+    model: str
+    """The model to use for the evaluation."""
+
+    name: str
+    """The name of the grader."""
+
+    type: Literal["score_model"]
+    """The object type, which is always `score_model`."""
+
+    pass_threshold: Optional[float] = None
+    """The threshold for the score."""
+
+    range: Optional[List[float]] = None
+    """The range of the score. Defaults to `[0, 1]`."""
+
+    sampling_params: Optional[object] = None
+    """The sampling parameters for the model."""
+
+
 TestingCriterion: TypeAlias = Annotated[
-    Union[EvalLabelModelGrader, EvalStringCheckGrader, EvalTextSimilarityGrader], PropertyInfo(discriminator="type")
+    Union[
+        EvalLabelModelGrader,
+        EvalStringCheckGrader,
+        EvalTextSimilarityGrader,
+        TestingCriterionPython,
+        TestingCriterionScoreModel,
+    ],
+    PropertyInfo(discriminator="type"),
 ]
 
 
@@ -49,8 +138,5 @@ class EvalRetrieveResponse(BaseModel):
     object: Literal["eval"]
     """The object type."""
 
-    share_with_openai: bool
-    """Indicates whether the evaluation is shared with OpenAI."""
-
     testing_criteria: List[TestingCriterion]
     """A list of testing criteria."""
diff --git a/src/openai/types/eval_text_similarity_grader.py b/src/openai/types/eval_text_similarity_grader.py
index 7c6897a4a7..853c6d4fbf 100644
--- a/src/openai/types/eval_text_similarity_grader.py
+++ b/src/openai/types/eval_text_similarity_grader.py
@@ -10,22 +10,12 @@
 
 class EvalTextSimilarityGrader(BaseModel):
     evaluation_metric: Literal[
-        "fuzzy_match",
-        "bleu",
-        "gleu",
-        "meteor",
-        "rouge_1",
-        "rouge_2",
-        "rouge_3",
-        "rouge_4",
-        "rouge_5",
-        "rouge_l",
-        "cosine",
+        "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l"
     ]
     """The evaluation metric to use.
 
-    One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`,
-    `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
+    One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`,
+    `rouge_4`, `rouge_5`, or `rouge_l`.
     """
 
     input: str
diff --git a/src/openai/types/eval_text_similarity_grader_param.py b/src/openai/types/eval_text_similarity_grader_param.py
index 4bf5d586f3..f07cc29178 100644
--- a/src/openai/types/eval_text_similarity_grader_param.py
+++ b/src/openai/types/eval_text_similarity_grader_param.py
@@ -10,23 +10,13 @@
 class EvalTextSimilarityGraderParam(TypedDict, total=False):
     evaluation_metric: Required[
         Literal[
-            "fuzzy_match",
-            "bleu",
-            "gleu",
-            "meteor",
-            "rouge_1",
-            "rouge_2",
-            "rouge_3",
-            "rouge_4",
-            "rouge_5",
-            "rouge_l",
-            "cosine",
+            "fuzzy_match", "bleu", "gleu", "meteor", "rouge_1", "rouge_2", "rouge_3", "rouge_4", "rouge_5", "rouge_l"
         ]
     ]
     """The evaluation metric to use.
 
-    One of `cosine`, `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`,
-    `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
+    One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `rouge_1`, `rouge_2`, `rouge_3`,
+    `rouge_4`, `rouge_5`, or `rouge_l`.
     """
 
     input: Required[str]
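
With `cosine` dropped from both the model and the param, a text-similarity
grader built against the new literal set might look like the sketch below
(fields beyond `evaluation_metric` and `input` are assumed from the full type,
which this hunk truncates):

    grader = {
        "type": "text_similarity",
        "name": "fuzzy",
        "evaluation_metric": "fuzzy_match",  # "cosine" would now be rejected
        "input": "{{sample.output_text}}",
        "reference": "{{item.expected}}",
        "pass_threshold": 0.8,
    }
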
diff --git a/src/openai/types/eval_update_response.py b/src/openai/types/eval_update_response.py
index 728a291736..2c280977a1 100644
--- a/src/openai/types/eval_update_response.py
+++ b/src/openai/types/eval_update_response.py
@@ -9,17 +9,106 @@
 from .eval_label_model_grader import EvalLabelModelGrader
 from .eval_string_check_grader import EvalStringCheckGrader
 from .eval_text_similarity_grader import EvalTextSimilarityGrader
+from .responses.response_input_text import ResponseInputText
 from .eval_custom_data_source_config import EvalCustomDataSourceConfig
 from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig
 
-__all__ = ["EvalUpdateResponse", "DataSourceConfig", "TestingCriterion"]
+__all__ = [
+    "EvalUpdateResponse",
+    "DataSourceConfig",
+    "TestingCriterion",
+    "TestingCriterionPython",
+    "TestingCriterionScoreModel",
+    "TestingCriterionScoreModelInput",
+    "TestingCriterionScoreModelInputContent",
+    "TestingCriterionScoreModelInputContentOutputText",
+]
 
 DataSourceConfig: TypeAlias = Annotated[
     Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type")
 ]
 
+
+class TestingCriterionPython(BaseModel):
+    __test__ = False
+    name: str
+    """The name of the grader."""
+
+    source: str
+    """The source code of the python script."""
+
+    type: Literal["python"]
+    """The object type, which is always `python`."""
+
+    image_tag: Optional[str] = None
+    """The image tag to use for the python script."""
+
+    pass_threshold: Optional[float] = None
+    """The threshold for the score."""
+
+
+class TestingCriterionScoreModelInputContentOutputText(BaseModel):
+    __test__ = False
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
+
+
+TestingCriterionScoreModelInputContent: TypeAlias = Union[
+    str, ResponseInputText, TestingCriterionScoreModelInputContentOutputText
+]
+
+
+class TestingCriterionScoreModelInput(BaseModel):
+    __test__ = False
+    content: TestingCriterionScoreModelInputContent
+    """Text inputs to the model - can contain template strings."""
+
+    role: Literal["user", "assistant", "system", "developer"]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always `message`."""
+
+
+class TestingCriterionScoreModel(BaseModel):
+    __test__ = False
+    input: List[TestingCriterionScoreModelInput]
+    """The input text. This may include template strings."""
+
+    model: str
+    """The model to use for the evaluation."""
+
+    name: str
+    """The name of the grader."""
+
+    type: Literal["score_model"]
+    """The object type, which is always `score_model`."""
+
+    pass_threshold: Optional[float] = None
+    """The threshold for the score."""
+
+    range: Optional[List[float]] = None
+    """The range of the score. Defaults to `[0, 1]`."""
+
+    sampling_params: Optional[object] = None
+    """The sampling parameters for the model."""
+
+
 TestingCriterion: TypeAlias = Annotated[
-    Union[EvalLabelModelGrader, EvalStringCheckGrader, EvalTextSimilarityGrader], PropertyInfo(discriminator="type")
+    Union[
+        EvalLabelModelGrader,
+        EvalStringCheckGrader,
+        EvalTextSimilarityGrader,
+        TestingCriterionPython,
+        TestingCriterionScoreModel,
+    ],
+    PropertyInfo(discriminator="type"),
 ]
 
 
@@ -49,8 +138,5 @@ class EvalUpdateResponse(BaseModel):
     object: Literal["eval"]
     """The object type."""
 
-    share_with_openai: bool
-    """Indicates whether the evaluation is shared with OpenAI."""
-
     testing_criteria: List[TestingCriterion]
     """A list of testing criteria."""
diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py
index 07b88129e2..29c687b542 100644
--- a/src/openai/types/evals/create_eval_completions_run_data_source.py
+++ b/src/openai/types/evals/create_eval_completions_run_data_source.py
@@ -6,102 +6,27 @@
 from ..._utils import PropertyInfo
 from ..._models import BaseModel
 from ..shared.metadata import Metadata
+from ..responses.easy_input_message import EasyInputMessage
+from ..responses.response_input_text import ResponseInputText
 
 __all__ = [
     "CreateEvalCompletionsRunDataSource",
-    "InputMessages",
-    "InputMessagesTemplate",
-    "InputMessagesTemplateTemplate",
-    "InputMessagesTemplateTemplateChatMessage",
-    "InputMessagesTemplateTemplateInputMessage",
-    "InputMessagesTemplateTemplateInputMessageContent",
-    "InputMessagesTemplateTemplateOutputMessage",
-    "InputMessagesTemplateTemplateOutputMessageContent",
-    "InputMessagesItemReference",
     "Source",
     "SourceFileContent",
     "SourceFileContentContent",
     "SourceFileID",
     "SourceStoredCompletions",
+    "InputMessages",
+    "InputMessagesTemplate",
+    "InputMessagesTemplateTemplate",
+    "InputMessagesTemplateTemplateMessage",
+    "InputMessagesTemplateTemplateMessageContent",
+    "InputMessagesTemplateTemplateMessageContentOutputText",
+    "InputMessagesItemReference",
     "SamplingParams",
 ]
 
 
-class InputMessagesTemplateTemplateChatMessage(BaseModel):
-    content: str
-    """The content of the message."""
-
-    role: str
-    """The role of the message (e.g. "system", "assistant", "user")."""
-
-
-class InputMessagesTemplateTemplateInputMessageContent(BaseModel):
-    text: str
-    """The text content."""
-
-    type: Literal["input_text"]
-    """The type of content, which is always `input_text`."""
-
-
-class InputMessagesTemplateTemplateInputMessage(BaseModel):
-    content: InputMessagesTemplateTemplateInputMessageContent
-
-    role: Literal["user", "system", "developer"]
-    """The role of the message. One of `user`, `system`, or `developer`."""
-
-    type: Literal["message"]
-    """The type of item, which is always `message`."""
-
-
-class InputMessagesTemplateTemplateOutputMessageContent(BaseModel):
-    text: str
-    """The text content."""
-
-    type: Literal["output_text"]
-    """The type of content, which is always `output_text`."""
-
-
-class InputMessagesTemplateTemplateOutputMessage(BaseModel):
-    content: InputMessagesTemplateTemplateOutputMessageContent
-
-    role: Literal["assistant"]
-    """The role of the message. Must be `assistant` for output."""
-
-    type: Literal["message"]
-    """The type of item, which is always `message`."""
-
-
-InputMessagesTemplateTemplate: TypeAlias = Union[
-    InputMessagesTemplateTemplateChatMessage,
-    InputMessagesTemplateTemplateInputMessage,
-    InputMessagesTemplateTemplateOutputMessage,
-]
-
-
-class InputMessagesTemplate(BaseModel):
-    template: List[InputMessagesTemplateTemplate]
-    """A list of chat messages forming the prompt or context.
-
-    May include variable references to the "item" namespace, ie {{item.name}}.
-    """
-
-    type: Literal["template"]
-    """The type of input messages. Always `template`."""
-
-
-class InputMessagesItemReference(BaseModel):
-    item_reference: str
-    """A reference to a variable in the "item" namespace. Ie, "item.name" """
-
-    type: Literal["item_reference"]
-    """The type of input messages. Always `item_reference`."""
-
-
-InputMessages: TypeAlias = Annotated[
-    Union[InputMessagesTemplate, InputMessagesItemReference], PropertyInfo(discriminator="type")
-]
-
-
 class SourceFileContentContent(BaseModel):
     item: Dict[str, object]
 
@@ -125,6 +50,9 @@ class SourceFileID(BaseModel):
 
 
 class SourceStoredCompletions(BaseModel):
+    type: Literal["stored_completions"]
+    """The type of source. Always `stored_completions`."""
+
     created_after: Optional[int] = None
     """An optional Unix timestamp to filter items created after this time."""
 
@@ -147,15 +75,68 @@ class SourceStoredCompletions(BaseModel):
     model: Optional[str] = None
     """An optional model to filter by (e.g., 'gpt-4o')."""
 
-    type: Literal["stored_completions"]
-    """The type of source. Always `stored_completions`."""
-
 
 Source: TypeAlias = Annotated[
     Union[SourceFileContent, SourceFileID, SourceStoredCompletions], PropertyInfo(discriminator="type")
 ]
 
 
+class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel):
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
+
+
+InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[
+    str, ResponseInputText, InputMessagesTemplateTemplateMessageContentOutputText
+]
+
+
+class InputMessagesTemplateTemplateMessage(BaseModel):
+    content: InputMessagesTemplateTemplateMessageContent
+    """Text inputs to the model - can contain template strings."""
+
+    role: Literal["user", "assistant", "system", "developer"]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always `message`."""
+
+
+InputMessagesTemplateTemplate: TypeAlias = Annotated[
+    Union[EasyInputMessage, InputMessagesTemplateTemplateMessage], PropertyInfo(discriminator="type")
+]
+
+
+class InputMessagesTemplate(BaseModel):
+    template: List[InputMessagesTemplateTemplate]
+    """A list of chat messages forming the prompt or context.
+
+    May include variable references to the "item" namespace, e.g. {{item.name}}.
+    """
+
+    type: Literal["template"]
+    """The type of input messages. Always `template`."""
+
+
+class InputMessagesItemReference(BaseModel):
+    item_reference: str
+    """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+    type: Literal["item_reference"]
+    """The type of input messages. Always `item_reference`."""
+
+
+InputMessages: TypeAlias = Annotated[
+    Union[InputMessagesTemplate, InputMessagesItemReference], PropertyInfo(discriminator="type")
+]
+
+
 class SamplingParams(BaseModel):
     max_completion_tokens: Optional[int] = None
     """The maximum number of tokens in the generated output."""
@@ -171,15 +152,15 @@ class SamplingParams(BaseModel):
 
 
 class CreateEvalCompletionsRunDataSource(BaseModel):
-    input_messages: InputMessages
-
-    model: str
-    """The name of the model to use for generating completions (e.g. "o3-mini")."""
-
     source: Source
     """A StoredCompletionsRunDataSource configuration describing a set of filters"""
 
     type: Literal["completions"]
     """The type of run data source. Always `completions`."""
 
+    input_messages: Optional[InputMessages] = None
+
+    model: Optional[str] = None
+    """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
     sampling_params: Optional[SamplingParams] = None
diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py
index be4a6f1ec6..c53064ee27 100644
--- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py
+++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py
@@ -6,100 +6,27 @@
 from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from ..shared_params.metadata import Metadata
+from ..responses.easy_input_message_param import EasyInputMessageParam
+from ..responses.response_input_text_param import ResponseInputTextParam
 
 __all__ = [
     "CreateEvalCompletionsRunDataSourceParam",
-    "InputMessages",
-    "InputMessagesTemplate",
-    "InputMessagesTemplateTemplate",
-    "InputMessagesTemplateTemplateChatMessage",
-    "InputMessagesTemplateTemplateInputMessage",
-    "InputMessagesTemplateTemplateInputMessageContent",
-    "InputMessagesTemplateTemplateOutputMessage",
-    "InputMessagesTemplateTemplateOutputMessageContent",
-    "InputMessagesItemReference",
     "Source",
     "SourceFileContent",
     "SourceFileContentContent",
     "SourceFileID",
     "SourceStoredCompletions",
+    "InputMessages",
+    "InputMessagesTemplate",
+    "InputMessagesTemplateTemplate",
+    "InputMessagesTemplateTemplateMessage",
+    "InputMessagesTemplateTemplateMessageContent",
+    "InputMessagesTemplateTemplateMessageContentOutputText",
+    "InputMessagesItemReference",
     "SamplingParams",
 ]
 
 
-class InputMessagesTemplateTemplateChatMessage(TypedDict, total=False):
-    content: Required[str]
-    """The content of the message."""
-
-    role: Required[str]
-    """The role of the message (e.g. "system", "assistant", "user")."""
-
-
-class InputMessagesTemplateTemplateInputMessageContent(TypedDict, total=False):
-    text: Required[str]
-    """The text content."""
-
-    type: Required[Literal["input_text"]]
-    """The type of content, which is always `input_text`."""
-
-
-class InputMessagesTemplateTemplateInputMessage(TypedDict, total=False):
-    content: Required[InputMessagesTemplateTemplateInputMessageContent]
-
-    role: Required[Literal["user", "system", "developer"]]
-    """The role of the message. One of `user`, `system`, or `developer`."""
-
-    type: Required[Literal["message"]]
-    """The type of item, which is always `message`."""
-
-
-class InputMessagesTemplateTemplateOutputMessageContent(TypedDict, total=False):
-    text: Required[str]
-    """The text content."""
-
-    type: Required[Literal["output_text"]]
-    """The type of content, which is always `output_text`."""
-
-
-class InputMessagesTemplateTemplateOutputMessage(TypedDict, total=False):
-    content: Required[InputMessagesTemplateTemplateOutputMessageContent]
-
-    role: Required[Literal["assistant"]]
-    """The role of the message. Must be `assistant` for output."""
-
-    type: Required[Literal["message"]]
-    """The type of item, which is always `message`."""
-
-
-InputMessagesTemplateTemplate: TypeAlias = Union[
-    InputMessagesTemplateTemplateChatMessage,
-    InputMessagesTemplateTemplateInputMessage,
-    InputMessagesTemplateTemplateOutputMessage,
-]
-
-
-class InputMessagesTemplate(TypedDict, total=False):
-    template: Required[Iterable[InputMessagesTemplateTemplate]]
-    """A list of chat messages forming the prompt or context.
-
-    May include variable references to the "item" namespace, ie {{item.name}}.
-    """
-
-    type: Required[Literal["template"]]
-    """The type of input messages. Always `template`."""
-
-
-class InputMessagesItemReference(TypedDict, total=False):
-    item_reference: Required[str]
-    """A reference to a variable in the "item" namespace. Ie, "item.name" """
-
-    type: Required[Literal["item_reference"]]
-    """The type of input messages. Always `item_reference`."""
-
-
-InputMessages: TypeAlias = Union[InputMessagesTemplate, InputMessagesItemReference]
-
-
 class SourceFileContentContent(TypedDict, total=False):
     item: Required[Dict[str, object]]
 
@@ -123,16 +50,19 @@ class SourceFileID(TypedDict, total=False):
 
 
 class SourceStoredCompletions(TypedDict, total=False):
-    created_after: Required[Optional[int]]
+    type: Required[Literal["stored_completions"]]
+    """The type of source. Always `stored_completions`."""
+
+    created_after: Optional[int]
     """An optional Unix timestamp to filter items created after this time."""
 
-    created_before: Required[Optional[int]]
+    created_before: Optional[int]
     """An optional Unix timestamp to filter items created before this time."""
 
-    limit: Required[Optional[int]]
+    limit: Optional[int]
     """An optional maximum number of items to return."""
 
-    metadata: Required[Optional[Metadata]]
+    metadata: Optional[Metadata]
     """Set of 16 key-value pairs that can be attached to an object.
 
     This can be useful for storing additional information about the object in a
@@ -142,16 +72,65 @@ class SourceStoredCompletions(TypedDict, total=False):
     a maximum length of 512 characters.
     """
 
-    model: Required[Optional[str]]
+    model: Optional[str]
     """An optional model to filter by (e.g., 'gpt-4o')."""
 
-    type: Required[Literal["stored_completions"]]
-    """The type of source. Always `stored_completions`."""
-
 
 Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceStoredCompletions]
 
 
+class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=False):
+    text: Required[str]
+    """The text output from the model."""
+
+    type: Required[Literal["output_text"]]
+    """The type of the output text. Always `output_text`."""
+
+
+InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[
+    str, ResponseInputTextParam, InputMessagesTemplateTemplateMessageContentOutputText
+]
+
+
+class InputMessagesTemplateTemplateMessage(TypedDict, total=False):
+    content: Required[InputMessagesTemplateTemplateMessageContent]
+    """Text inputs to the model - can contain template strings."""
+
+    role: Required[Literal["user", "assistant", "system", "developer"]]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Literal["message"]
+    """The type of the message input. Always `message`."""
+
+
+InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateMessage]
+
+
+class InputMessagesTemplate(TypedDict, total=False):
+    template: Required[Iterable[InputMessagesTemplateTemplate]]
+    """A list of chat messages forming the prompt or context.
+
+    May include variable references to the "item" namespace, e.g. {{item.name}}.
+    """
+
+    type: Required[Literal["template"]]
+    """The type of input messages. Always `template`."""
+
+
+class InputMessagesItemReference(TypedDict, total=False):
+    item_reference: Required[str]
+    """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+    type: Required[Literal["item_reference"]]
+    """The type of input messages. Always `item_reference`."""
+
+
+InputMessages: TypeAlias = Union[InputMessagesTemplate, InputMessagesItemReference]
+
+
 class SamplingParams(TypedDict, total=False):
     max_completion_tokens: int
     """The maximum number of tokens in the generated output."""
@@ -167,15 +146,15 @@ class SamplingParams(TypedDict, total=False):
 
 
 class CreateEvalCompletionsRunDataSourceParam(TypedDict, total=False):
-    input_messages: Required[InputMessages]
-
-    model: Required[str]
-    """The name of the model to use for generating completions (e.g. "o3-mini")."""
-
     source: Required[Source]
     """A StoredCompletionsRunDataSource configuration describing a set of filters"""
 
     type: Required[Literal["completions"]]
     """The type of run data source. Always `completions`."""
 
+    input_messages: InputMessages
+
+    model: str
+    """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
     sampling_params: SamplingParams
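
With `input_messages` and `model` demoted to optional, the smallest valid
completions data source is just a source plus its type. A sketch (the eval ID
is hypothetical):

    run = client.evals.runs.create(
        eval_id="eval_abc123",
        name="stored-completions-run",
        data_source={
            "type": "completions",
            # The stored_completions filters are all optional now as well.
            "source": {"type": "stored_completions", "model": "gpt-4o"},
        },
    )
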
diff --git a/src/openai/types/evals/run_cancel_response.py b/src/openai/types/evals/run_cancel_response.py
index 90e52241a6..eb6d689fc3 100644
--- a/src/openai/types/evals/run_cancel_response.py
+++ b/src/openai/types/evals/run_cancel_response.py
@@ -1,6 +1,6 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
 from typing_extensions import Literal, Annotated, TypeAlias
 
 from pydantic import Field as FieldInfo
@@ -9,13 +9,225 @@
 from ..._models import BaseModel
 from .eval_api_error import EvalAPIError
 from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text import ResponseInputText
 from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
 from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
 
-__all__ = ["RunCancelResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"]
+__all__ = [
+    "RunCancelResponse",
+    "DataSource",
+    "DataSourceCompletions",
+    "DataSourceCompletionsSource",
+    "DataSourceCompletionsSourceFileContent",
+    "DataSourceCompletionsSourceFileContentContent",
+    "DataSourceCompletionsSourceFileID",
+    "DataSourceCompletionsSourceResponses",
+    "DataSourceCompletionsInputMessages",
+    "DataSourceCompletionsInputMessagesTemplate",
+    "DataSourceCompletionsInputMessagesTemplateTemplate",
+    "DataSourceCompletionsInputMessagesTemplateTemplateChatMessage",
+    "DataSourceCompletionsInputMessagesTemplateTemplateEvalItem",
+    "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent",
+    "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText",
+    "DataSourceCompletionsInputMessagesItemReference",
+    "DataSourceCompletionsSamplingParams",
+    "PerModelUsage",
+    "PerTestingCriteriaResult",
+    "ResultCounts",
+]
+
+
+class DataSourceCompletionsSourceFileContentContent(BaseModel):
+    item: Dict[str, object]
+
+    sample: Optional[Dict[str, object]] = None
+
+
+class DataSourceCompletionsSourceFileContent(BaseModel):
+    content: List[DataSourceCompletionsSourceFileContentContent]
+    """The content of the jsonl file."""
+
+    type: Literal["file_content"]
+    """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceCompletionsSourceFileID(BaseModel):
+    id: str
+    """The identifier of the file."""
+
+    type: Literal["file_id"]
+    """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceCompletionsSourceResponses(BaseModel):
+    type: Literal["responses"]
+    """The type of run data source. Always `responses`."""
+
+    allow_parallel_tool_calls: Optional[bool] = None
+    """Whether to allow parallel tool calls.
+
+    This is a query parameter used to select responses.
+    """
+
+    created_after: Optional[int] = None
+    """Only include items created after this timestamp (inclusive).
+
+    This is a query parameter used to select responses.
+    """
+
+    created_before: Optional[int] = None
+    """Only include items created before this timestamp (inclusive).
+
+    This is a query parameter used to select responses.
+    """
+
+    has_tool_calls: Optional[bool] = None
+    """Whether the response has tool calls.
+
+    This is a query parameter used to select responses.
+    """
+
+    instructions_search: Optional[str] = None
+    """Optional search string for instructions.
+
+    This is a query parameter used to select responses.
+    """
+
+    metadata: Optional[object] = None
+    """Metadata filter for the responses.
+
+    This is a query parameter used to select responses.
+    """
+
+    model: Optional[str] = None
+    """The name of the model to find responses for.
+
+    This is a query parameter used to select responses.
+    """
+
+    reasoning_effort: Optional[ReasoningEffort] = None
+    """Optional reasoning effort parameter.
+
+    This is a query parameter used to select responses.
+    """
+
+    temperature: Optional[float] = None
+    """Sampling temperature. This is a query parameter used to select responses."""
+
+    top_p: Optional[float] = None
+    """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+    users: Optional[List[str]] = None
+    """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceCompletionsSource: TypeAlias = Annotated[
+    Union[
+        DataSourceCompletionsSourceFileContent, DataSourceCompletionsSourceFileID, DataSourceCompletionsSourceResponses
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateChatMessage(BaseModel):
+    content: str
+    """The content of the message."""
+
+    role: str
+    """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+    str, ResponseInputText, DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItem(BaseModel):
+    content: DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent
+    """Text inputs to the model - can contain template strings."""
+
+    role: Literal["user", "assistant", "system", "developer"]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always `message`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplate: TypeAlias = Union[
+    DataSourceCompletionsInputMessagesTemplateTemplateChatMessage,
+    DataSourceCompletionsInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceCompletionsInputMessagesTemplate(BaseModel):
+    template: List[DataSourceCompletionsInputMessagesTemplateTemplate]
+    """A list of chat messages forming the prompt or context.
+
+    May include variable references to the "item" namespace, e.g. {{item.name}}.
+    """
+
+    type: Literal["template"]
+    """The type of input messages. Always `template`."""
+
+
+class DataSourceCompletionsInputMessagesItemReference(BaseModel):
+    item_reference: str
+    """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+    type: Literal["item_reference"]
+    """The type of input messages. Always `item_reference`."""
+
+
+DataSourceCompletionsInputMessages: TypeAlias = Annotated[
+    Union[DataSourceCompletionsInputMessagesTemplate, DataSourceCompletionsInputMessagesItemReference],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsSamplingParams(BaseModel):
+    max_completion_tokens: Optional[int] = None
+    """The maximum number of tokens in the generated output."""
+
+    seed: Optional[int] = None
+    """A seed value to initialize the randomness, during sampling."""
+
+    temperature: Optional[float] = None
+    """A higher temperature increases randomness in the outputs."""
+
+    top_p: Optional[float] = None
+    """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceCompletions(BaseModel):
+    source: DataSourceCompletionsSource
+    """A EvalResponsesSource object describing a run data source configuration."""
+
+    type: Literal["completions"]
+    """The type of run data source. Always `completions`."""
+
+    input_messages: Optional[DataSourceCompletionsInputMessages] = None
+
+    model: Optional[str] = None
+    """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+    sampling_params: Optional[DataSourceCompletionsSamplingParams] = None
+
 
 DataSource: TypeAlias = Annotated[
-    Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource], PropertyInfo(discriminator="type")
+    Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceCompletions],
+    PropertyInfo(discriminator="type"),
 ]
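
The new `responses` source in this union selects stored Responses API items by
query filters rather than by file. A sketch of a run over recent `gpt-4o`
responses (the eval ID, timestamp, and filters are illustrative):

    run = client.evals.runs.create(
        eval_id="eval_abc123",
        data_source={
            "type": "completions",
            "source": {
                "type": "responses",
                "model": "gpt-4o",
                "created_after": 1_700_000_000,
                "has_tool_calls": False,
            },
        },
    )
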
 
 
diff --git a/src/openai/types/evals/run_create_params.py b/src/openai/types/evals/run_create_params.py
index acf7b1b126..0c9720ea7a 100644
--- a/src/openai/types/evals/run_create_params.py
+++ b/src/openai/types/evals/run_create_params.py
@@ -2,14 +2,34 @@
 
 from __future__ import annotations
 
-from typing import Union, Optional
-from typing_extensions import Required, TypeAlias, TypedDict
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
 
 from ..shared_params.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text_param import ResponseInputTextParam
 from .create_eval_jsonl_run_data_source_param import CreateEvalJSONLRunDataSourceParam
 from .create_eval_completions_run_data_source_param import CreateEvalCompletionsRunDataSourceParam
 
-__all__ = ["RunCreateParams", "DataSource"]
+__all__ = [
+    "RunCreateParams",
+    "DataSource",
+    "DataSourceCreateEvalResponsesRunDataSource",
+    "DataSourceCreateEvalResponsesRunDataSourceSource",
+    "DataSourceCreateEvalResponsesRunDataSourceSourceFileContent",
+    "DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent",
+    "DataSourceCreateEvalResponsesRunDataSourceSourceFileID",
+    "DataSourceCreateEvalResponsesRunDataSourceSourceResponses",
+    "DataSourceCreateEvalResponsesRunDataSourceInputMessages",
+    "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate",
+    "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate",
+    "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage",
+    "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem",
+    "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent",
+    "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText",
+    "DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference",
+    "DataSourceCreateEvalResponsesRunDataSourceSamplingParams",
+]
 
 
 class RunCreateParams(TypedDict, total=False):
@@ -30,4 +50,198 @@ class RunCreateParams(TypedDict, total=False):
     """The name of the run."""
 
 
-DataSource: TypeAlias = Union[CreateEvalJSONLRunDataSourceParam, CreateEvalCompletionsRunDataSourceParam]
+class DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent(TypedDict, total=False):
+    item: Required[Dict[str, object]]
+
+    sample: Dict[str, object]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceSourceFileContent(TypedDict, total=False):
+    content: Required[Iterable[DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent]]
+    """The content of the jsonl file."""
+
+    type: Required[Literal["file_content"]]
+    """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceCreateEvalResponsesRunDataSourceSourceFileID(TypedDict, total=False):
+    id: Required[str]
+    """The identifier of the file."""
+
+    type: Required[Literal["file_id"]]
+    """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceCreateEvalResponsesRunDataSourceSourceResponses(TypedDict, total=False):
+    type: Required[Literal["responses"]]
+    """The type of run data source. Always `responses`."""
+
+    allow_parallel_tool_calls: Optional[bool]
+    """Whether to allow parallel tool calls.
+
+    This is a query parameter used to select responses.
+    """
+
+    created_after: Optional[int]
+    """Only include items created after this timestamp (inclusive).
+
+    This is a query parameter used to select responses.
+    """
+
+    created_before: Optional[int]
+    """Only include items created before this timestamp (inclusive).
+
+    This is a query parameter used to select responses.
+    """
+
+    has_tool_calls: Optional[bool]
+    """Whether the response has tool calls.
+
+    This is a query parameter used to select responses.
+    """
+
+    instructions_search: Optional[str]
+    """Optional search string for instructions.
+
+    This is a query parameter used to select responses.
+    """
+
+    metadata: Optional[object]
+    """Metadata filter for the responses.
+
+    This is a query parameter used to select responses.
+    """
+
+    model: Optional[str]
+    """The name of the model to find responses for.
+
+    This is a query parameter used to select responses.
+    """
+
+    reasoning_effort: Optional[ReasoningEffort]
+    """Optional reasoning effort parameter.
+
+    This is a query parameter used to select responses.
+    """
+
+    temperature: Optional[float]
+    """Sampling temperature. This is a query parameter used to select responses."""
+
+    top_p: Optional[float]
+    """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+    users: Optional[List[str]]
+    """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceCreateEvalResponsesRunDataSourceSource: TypeAlias = Union[
+    DataSourceCreateEvalResponsesRunDataSourceSourceFileContent,
+    DataSourceCreateEvalResponsesRunDataSourceSourceFileID,
+    DataSourceCreateEvalResponsesRunDataSourceSourceResponses,
+]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage(TypedDict, total=False):
+    content: Required[str]
+    """The content of the message."""
+
+    role: Required[str]
+    """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText(
+    TypedDict, total=False
+):
+    text: Required[str]
+    """The text output from the model."""
+
+    type: Required[Literal["output_text"]]
+    """The type of the output text. Always `output_text`."""
+
+
+DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+    str,
+    ResponseInputTextParam,
+    DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText,
+]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem(TypedDict, total=False):
+    content: Required[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent]
+    """Text inputs to the model - can contain template strings."""
+
+    role: Required[Literal["user", "assistant", "system", "developer"]]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Literal["message"]
+    """The type of the message input. Always `message`."""
+
+
+DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate: TypeAlias = Union[
+    DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage,
+    DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate(TypedDict, total=False):
+    template: Required[Iterable[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate]]
+    """A list of chat messages forming the prompt or context.
+
+    May include variable references to the "item" namespace, e.g. {{item.name}}.
+    """
+
+    type: Required[Literal["template"]]
+    """The type of input messages. Always `template`."""
+
+
+class DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference(TypedDict, total=False):
+    item_reference: Required[str]
+    """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+    type: Required[Literal["item_reference"]]
+    """The type of input messages. Always `item_reference`."""
+
+
+DataSourceCreateEvalResponsesRunDataSourceInputMessages: TypeAlias = Union[
+    DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate,
+    DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference,
+]
+
+
+class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total=False):
+    max_completion_tokens: int
+    """The maximum number of tokens in the generated output."""
+
+    seed: int
+    """A seed value to initialize the randomness, during sampling."""
+
+    temperature: float
+    """A higher temperature increases randomness in the outputs."""
+
+    top_p: float
+    """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceCreateEvalResponsesRunDataSource(TypedDict, total=False):
+    source: Required[DataSourceCreateEvalResponsesRunDataSourceSource]
+    """A EvalResponsesSource object describing a run data source configuration."""
+
+    type: Required[Literal["completions"]]
+    """The type of run data source. Always `completions`."""
+
+    input_messages: DataSourceCreateEvalResponsesRunDataSourceInputMessages
+
+    model: str
+    """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+    sampling_params: DataSourceCreateEvalResponsesRunDataSourceSamplingParams
+
+
+DataSource: TypeAlias = Union[
+    CreateEvalJSONLRunDataSourceParam,
+    CreateEvalCompletionsRunDataSourceParam,
+    DataSourceCreateEvalResponsesRunDataSource,
+]
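The params side is plain TypedDicts, so a run over stored responses can be created by passing a nested dict. A minimal sketch, assuming an existing eval and an instantiated client; the eval ID, filter values, and item field are placeholders:

    from openai import OpenAI

    client = OpenAI()

    run = client.evals.runs.create(
        eval_id="eval_123",  # placeholder eval ID
        name="responses-run",
        data_source={
            # Per the Literal above, the responses-backed data source still
            # uses type "completions" in this revision.
            "type": "completions",
            "source": {
                "type": "responses",
                "model": "gpt-4o-mini",       # only select responses from this model
                "created_after": 1700000000,  # placeholder Unix timestamp
            },
            "input_messages": {
                "type": "item_reference",
                "item_reference": "item.input",  # placeholder item field
            },
            "sampling_params": {"temperature": 0.2, "seed": 42},
        },
    )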
diff --git a/src/openai/types/evals/run_create_response.py b/src/openai/types/evals/run_create_response.py
index 14ca426427..459399511c 100644
--- a/src/openai/types/evals/run_create_response.py
+++ b/src/openai/types/evals/run_create_response.py
@@ -1,6 +1,6 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
 from typing_extensions import Literal, Annotated, TypeAlias
 
 from pydantic import Field as FieldInfo
@@ -9,13 +9,225 @@
 from ..._models import BaseModel
 from .eval_api_error import EvalAPIError
 from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text import ResponseInputText
 from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
 from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
 
-__all__ = ["RunCreateResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"]
+__all__ = [
+    "RunCreateResponse",
+    "DataSource",
+    "DataSourceCompletions",
+    "DataSourceCompletionsSource",
+    "DataSourceCompletionsSourceFileContent",
+    "DataSourceCompletionsSourceFileContentContent",
+    "DataSourceCompletionsSourceFileID",
+    "DataSourceCompletionsSourceResponses",
+    "DataSourceCompletionsInputMessages",
+    "DataSourceCompletionsInputMessagesTemplate",
+    "DataSourceCompletionsInputMessagesTemplateTemplate",
+    "DataSourceCompletionsInputMessagesTemplateTemplateChatMessage",
+    "DataSourceCompletionsInputMessagesTemplateTemplateEvalItem",
+    "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent",
+    "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText",
+    "DataSourceCompletionsInputMessagesItemReference",
+    "DataSourceCompletionsSamplingParams",
+    "PerModelUsage",
+    "PerTestingCriteriaResult",
+    "ResultCounts",
+]
+
+
+class DataSourceCompletionsSourceFileContentContent(BaseModel):
+    item: Dict[str, object]
+
+    sample: Optional[Dict[str, object]] = None
+
+
+class DataSourceCompletionsSourceFileContent(BaseModel):
+    content: List[DataSourceCompletionsSourceFileContentContent]
+    """The content of the jsonl file."""
+
+    type: Literal["file_content"]
+    """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceCompletionsSourceFileID(BaseModel):
+    id: str
+    """The identifier of the file."""
+
+    type: Literal["file_id"]
+    """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceCompletionsSourceResponses(BaseModel):
+    type: Literal["responses"]
+    """The type of run data source. Always `responses`."""
+
+    allow_parallel_tool_calls: Optional[bool] = None
+    """Whether to allow parallel tool calls.
+
+    This is a query parameter used to select responses.
+    """
+
+    created_after: Optional[int] = None
+    """Only include items created after this timestamp (inclusive).
+
+    This is a query parameter used to select responses.
+    """
+
+    created_before: Optional[int] = None
+    """Only include items created before this timestamp (inclusive).
+
+    This is a query parameter used to select responses.
+    """
+
+    has_tool_calls: Optional[bool] = None
+    """Whether the response has tool calls.
+
+    This is a query parameter used to select responses.
+    """
+
+    instructions_search: Optional[str] = None
+    """Optional search string for instructions.
+
+    This is a query parameter used to select responses.
+    """
+
+    metadata: Optional[object] = None
+    """Metadata filter for the responses.
+
+    This is a query parameter used to select responses.
+    """
+
+    model: Optional[str] = None
+    """The name of the model to find responses for.
+
+    This is a query parameter used to select responses.
+    """
+
+    reasoning_effort: Optional[ReasoningEffort] = None
+    """Optional reasoning effort parameter.
+
+    This is a query parameter used to select responses.
+    """
+
+    temperature: Optional[float] = None
+    """Sampling temperature. This is a query parameter used to select responses."""
+
+    top_p: Optional[float] = None
+    """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+    users: Optional[List[str]] = None
+    """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceCompletionsSource: TypeAlias = Annotated[
+    Union[
+        DataSourceCompletionsSourceFileContent, DataSourceCompletionsSourceFileID, DataSourceCompletionsSourceResponses
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateChatMessage(BaseModel):
+    content: str
+    """The content of the message."""
+
+    role: str
+    """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+    str, ResponseInputText, DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItem(BaseModel):
+    content: DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent
+    """Text inputs to the model - can contain template strings."""
+
+    role: Literal["user", "assistant", "system", "developer"]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always `message`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplate: TypeAlias = Union[
+    DataSourceCompletionsInputMessagesTemplateTemplateChatMessage,
+    DataSourceCompletionsInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceCompletionsInputMessagesTemplate(BaseModel):
+    template: List[DataSourceCompletionsInputMessagesTemplateTemplate]
+    """A list of chat messages forming the prompt or context.
+
+    May include variable references to the "item" namespace, e.g. {{item.name}}.
+    """
+
+    type: Literal["template"]
+    """The type of input messages. Always `template`."""
+
+
+class DataSourceCompletionsInputMessagesItemReference(BaseModel):
+    item_reference: str
+    """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+    type: Literal["item_reference"]
+    """The type of input messages. Always `item_reference`."""
+
+
+DataSourceCompletionsInputMessages: TypeAlias = Annotated[
+    Union[DataSourceCompletionsInputMessagesTemplate, DataSourceCompletionsInputMessagesItemReference],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsSamplingParams(BaseModel):
+    max_completion_tokens: Optional[int] = None
+    """The maximum number of tokens in the generated output."""
+
+    seed: Optional[int] = None
+    """A seed value to initialize the randomness, during sampling."""
+
+    temperature: Optional[float] = None
+    """A higher temperature increases randomness in the outputs."""
+
+    top_p: Optional[float] = None
+    """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceCompletions(BaseModel):
+    source: DataSourceCompletionsSource
+    """A EvalResponsesSource object describing a run data source configuration."""
+
+    type: Literal["completions"]
+    """The type of run data source. Always `completions`."""
+
+    input_messages: Optional[DataSourceCompletionsInputMessages] = None
+
+    model: Optional[str] = None
+    """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+    sampling_params: Optional[DataSourceCompletionsSamplingParams] = None
+
 
 DataSource: TypeAlias = Annotated[
-    Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource], PropertyInfo(discriminator="type")
+    Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceCompletions],
+    PropertyInfo(discriminator="type"),
 ]
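Note that both CreateEvalCompletionsRunDataSource and the new DataSourceCompletions declare `type: Literal["completions"]`, so the `type` discriminator alone cannot distinguish them; checking the nested `source.type` is a more reliable way to spot a responses-backed run. A sketch, reusing the `run` returned by the create() example earlier:

    ds = run.data_source
    if ds.type == "jsonl":
        print("jsonl data source")
    elif ds.source.type == "responses":
        # Only the responses-backed variant carries a "responses" source.
        print("responses-backed run; model filter:", ds.source.model)
    else:
        print("completions data source")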
 
 
diff --git a/src/openai/types/evals/run_list_params.py b/src/openai/types/evals/run_list_params.py
index 6060eafb97..383b89d85c 100644
--- a/src/openai/types/evals/run_list_params.py
+++ b/src/openai/types/evals/run_list_params.py
@@ -23,5 +23,5 @@ class RunListParams(TypedDict, total=False):
     status: Literal["queued", "in_progress", "completed", "canceled", "failed"]
     """Filter runs by status.
 
-    Use "queued" | "in_progress" | "failed" | "completed" | "canceled".
+    One of `queued` | `in_progress` | `failed` | `completed` | `canceled`.
     """
diff --git a/src/openai/types/evals/run_list_response.py b/src/openai/types/evals/run_list_response.py
index a1022f542f..278ceeabed 100644
--- a/src/openai/types/evals/run_list_response.py
+++ b/src/openai/types/evals/run_list_response.py
@@ -1,6 +1,6 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
 from typing_extensions import Literal, Annotated, TypeAlias
 
 from pydantic import Field as FieldInfo
@@ -9,13 +9,225 @@
 from ..._models import BaseModel
 from .eval_api_error import EvalAPIError
 from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text import ResponseInputText
 from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
 from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
 
-__all__ = ["RunListResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"]
+__all__ = [
+    "RunListResponse",
+    "DataSource",
+    "DataSourceCompletions",
+    "DataSourceCompletionsSource",
+    "DataSourceCompletionsSourceFileContent",
+    "DataSourceCompletionsSourceFileContentContent",
+    "DataSourceCompletionsSourceFileID",
+    "DataSourceCompletionsSourceResponses",
+    "DataSourceCompletionsInputMessages",
+    "DataSourceCompletionsInputMessagesTemplate",
+    "DataSourceCompletionsInputMessagesTemplateTemplate",
+    "DataSourceCompletionsInputMessagesTemplateTemplateChatMessage",
+    "DataSourceCompletionsInputMessagesTemplateTemplateEvalItem",
+    "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent",
+    "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText",
+    "DataSourceCompletionsInputMessagesItemReference",
+    "DataSourceCompletionsSamplingParams",
+    "PerModelUsage",
+    "PerTestingCriteriaResult",
+    "ResultCounts",
+]
+
+
+class DataSourceCompletionsSourceFileContentContent(BaseModel):
+    item: Dict[str, object]
+
+    sample: Optional[Dict[str, object]] = None
+
+
+class DataSourceCompletionsSourceFileContent(BaseModel):
+    content: List[DataSourceCompletionsSourceFileContentContent]
+    """The content of the jsonl file."""
+
+    type: Literal["file_content"]
+    """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceCompletionsSourceFileID(BaseModel):
+    id: str
+    """The identifier of the file."""
+
+    type: Literal["file_id"]
+    """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceCompletionsSourceResponses(BaseModel):
+    type: Literal["responses"]
+    """The type of run data source. Always `responses`."""
+
+    allow_parallel_tool_calls: Optional[bool] = None
+    """Whether to allow parallel tool calls.
+
+    This is a query parameter used to select responses.
+    """
+
+    created_after: Optional[int] = None
+    """Only include items created after this timestamp (inclusive).
+
+    This is a query parameter used to select responses.
+    """
+
+    created_before: Optional[int] = None
+    """Only include items created before this timestamp (inclusive).
+
+    This is a query parameter used to select responses.
+    """
+
+    has_tool_calls: Optional[bool] = None
+    """Whether the response has tool calls.
+
+    This is a query parameter used to select responses.
+    """
+
+    instructions_search: Optional[str] = None
+    """Optional search string for instructions.
+
+    This is a query parameter used to select responses.
+    """
+
+    metadata: Optional[object] = None
+    """Metadata filter for the responses.
+
+    This is a query parameter used to select responses.
+    """
+
+    model: Optional[str] = None
+    """The name of the model to find responses for.
+
+    This is a query parameter used to select responses.
+    """
+
+    reasoning_effort: Optional[ReasoningEffort] = None
+    """Optional reasoning effort parameter.
+
+    This is a query parameter used to select responses.
+    """
+
+    temperature: Optional[float] = None
+    """Sampling temperature. This is a query parameter used to select responses."""
+
+    top_p: Optional[float] = None
+    """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+    users: Optional[List[str]] = None
+    """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceCompletionsSource: TypeAlias = Annotated[
+    Union[
+        DataSourceCompletionsSourceFileContent, DataSourceCompletionsSourceFileID, DataSourceCompletionsSourceResponses
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateChatMessage(BaseModel):
+    content: str
+    """The content of the message."""
+
+    role: str
+    """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+    str, ResponseInputText, DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItem(BaseModel):
+    content: DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent
+    """Text inputs to the model - can contain template strings."""
+
+    role: Literal["user", "assistant", "system", "developer"]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always `message`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplate: TypeAlias = Union[
+    DataSourceCompletionsInputMessagesTemplateTemplateChatMessage,
+    DataSourceCompletionsInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceCompletionsInputMessagesTemplate(BaseModel):
+    template: List[DataSourceCompletionsInputMessagesTemplateTemplate]
+    """A list of chat messages forming the prompt or context.
+
+    May include variable references to the "item" namespace, e.g. {{item.name}}.
+    """
+
+    type: Literal["template"]
+    """The type of input messages. Always `template`."""
+
+
+class DataSourceCompletionsInputMessagesItemReference(BaseModel):
+    item_reference: str
+    """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+    type: Literal["item_reference"]
+    """The type of input messages. Always `item_reference`."""
+
+
+DataSourceCompletionsInputMessages: TypeAlias = Annotated[
+    Union[DataSourceCompletionsInputMessagesTemplate, DataSourceCompletionsInputMessagesItemReference],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsSamplingParams(BaseModel):
+    max_completion_tokens: Optional[int] = None
+    """The maximum number of tokens in the generated output."""
+
+    seed: Optional[int] = None
+    """A seed value to initialize the randomness, during sampling."""
+
+    temperature: Optional[float] = None
+    """A higher temperature increases randomness in the outputs."""
+
+    top_p: Optional[float] = None
+    """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceCompletions(BaseModel):
+    source: DataSourceCompletionsSource
+    """A EvalResponsesSource object describing a run data source configuration."""
+
+    type: Literal["completions"]
+    """The type of run data source. Always `completions`."""
+
+    input_messages: Optional[DataSourceCompletionsInputMessages] = None
+
+    model: Optional[str] = None
+    """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+    sampling_params: Optional[DataSourceCompletionsSamplingParams] = None
+
 
 DataSource: TypeAlias = Annotated[
-    Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource], PropertyInfo(discriminator="type")
+    Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceCompletions],
+    PropertyInfo(discriminator="type"),
 ]
 
 
diff --git a/src/openai/types/evals/run_retrieve_response.py b/src/openai/types/evals/run_retrieve_response.py
index 461ed43dda..e142f31b14 100644
--- a/src/openai/types/evals/run_retrieve_response.py
+++ b/src/openai/types/evals/run_retrieve_response.py
@@ -1,6 +1,6 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List, Union, Optional
+from typing import Dict, List, Union, Optional
 from typing_extensions import Literal, Annotated, TypeAlias
 
 from pydantic import Field as FieldInfo
@@ -9,13 +9,225 @@
 from ..._models import BaseModel
 from .eval_api_error import EvalAPIError
 from ..shared.metadata import Metadata
+from ..shared.reasoning_effort import ReasoningEffort
+from ..responses.response_input_text import ResponseInputText
 from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource
 from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource
 
-__all__ = ["RunRetrieveResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"]
+__all__ = [
+    "RunRetrieveResponse",
+    "DataSource",
+    "DataSourceCompletions",
+    "DataSourceCompletionsSource",
+    "DataSourceCompletionsSourceFileContent",
+    "DataSourceCompletionsSourceFileContentContent",
+    "DataSourceCompletionsSourceFileID",
+    "DataSourceCompletionsSourceResponses",
+    "DataSourceCompletionsInputMessages",
+    "DataSourceCompletionsInputMessagesTemplate",
+    "DataSourceCompletionsInputMessagesTemplateTemplate",
+    "DataSourceCompletionsInputMessagesTemplateTemplateChatMessage",
+    "DataSourceCompletionsInputMessagesTemplateTemplateEvalItem",
+    "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent",
+    "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText",
+    "DataSourceCompletionsInputMessagesItemReference",
+    "DataSourceCompletionsSamplingParams",
+    "PerModelUsage",
+    "PerTestingCriteriaResult",
+    "ResultCounts",
+]
+
+
+class DataSourceCompletionsSourceFileContentContent(BaseModel):
+    item: Dict[str, object]
+
+    sample: Optional[Dict[str, object]] = None
+
+
+class DataSourceCompletionsSourceFileContent(BaseModel):
+    content: List[DataSourceCompletionsSourceFileContentContent]
+    """The content of the jsonl file."""
+
+    type: Literal["file_content"]
+    """The type of jsonl source. Always `file_content`."""
+
+
+class DataSourceCompletionsSourceFileID(BaseModel):
+    id: str
+    """The identifier of the file."""
+
+    type: Literal["file_id"]
+    """The type of jsonl source. Always `file_id`."""
+
+
+class DataSourceCompletionsSourceResponses(BaseModel):
+    type: Literal["responses"]
+    """The type of run data source. Always `responses`."""
+
+    allow_parallel_tool_calls: Optional[bool] = None
+    """Whether to allow parallel tool calls.
+
+    This is a query parameter used to select responses.
+    """
+
+    created_after: Optional[int] = None
+    """Only include items created after this timestamp (inclusive).
+
+    This is a query parameter used to select responses.
+    """
+
+    created_before: Optional[int] = None
+    """Only include items created before this timestamp (inclusive).
+
+    This is a query parameter used to select responses.
+    """
+
+    has_tool_calls: Optional[bool] = None
+    """Whether the response has tool calls.
+
+    This is a query parameter used to select responses.
+    """
+
+    instructions_search: Optional[str] = None
+    """Optional search string for instructions.
+
+    This is a query parameter used to select responses.
+    """
+
+    metadata: Optional[object] = None
+    """Metadata filter for the responses.
+
+    This is a query parameter used to select responses.
+    """
+
+    model: Optional[str] = None
+    """The name of the model to find responses for.
+
+    This is a query parameter used to select responses.
+    """
+
+    reasoning_effort: Optional[ReasoningEffort] = None
+    """Optional reasoning effort parameter.
+
+    This is a query parameter used to select responses.
+    """
+
+    temperature: Optional[float] = None
+    """Sampling temperature. This is a query parameter used to select responses."""
+
+    top_p: Optional[float] = None
+    """Nucleus sampling parameter. This is a query parameter used to select responses."""
+
+    users: Optional[List[str]] = None
+    """List of user identifiers. This is a query parameter used to select responses."""
+
+
+DataSourceCompletionsSource: TypeAlias = Annotated[
+    Union[
+        DataSourceCompletionsSourceFileContent, DataSourceCompletionsSourceFileID, DataSourceCompletionsSourceResponses
+    ],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateChatMessage(BaseModel):
+    content: str
+    """The content of the message."""
+
+    role: str
+    """The role of the message (e.g. "system", "assistant", "user")."""
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel):
+    text: str
+    """The text output from the model."""
+
+    type: Literal["output_text"]
+    """The type of the output text. Always `output_text`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[
+    str, ResponseInputText, DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText
+]
+
+
+class DataSourceCompletionsInputMessagesTemplateTemplateEvalItem(BaseModel):
+    content: DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent
+    """Text inputs to the model - can contain template strings."""
+
+    role: Literal["user", "assistant", "system", "developer"]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always `message`."""
+
+
+DataSourceCompletionsInputMessagesTemplateTemplate: TypeAlias = Union[
+    DataSourceCompletionsInputMessagesTemplateTemplateChatMessage,
+    DataSourceCompletionsInputMessagesTemplateTemplateEvalItem,
+]
+
+
+class DataSourceCompletionsInputMessagesTemplate(BaseModel):
+    template: List[DataSourceCompletionsInputMessagesTemplateTemplate]
+    """A list of chat messages forming the prompt or context.
+
+    May include variable references to the "item" namespace, e.g. {{item.name}}.
+    """
+
+    type: Literal["template"]
+    """The type of input messages. Always `template`."""
+
+
+class DataSourceCompletionsInputMessagesItemReference(BaseModel):
+    item_reference: str
+    """A reference to a variable in the "item" namespace. Ie, "item.name" """
+
+    type: Literal["item_reference"]
+    """The type of input messages. Always `item_reference`."""
+
+
+DataSourceCompletionsInputMessages: TypeAlias = Annotated[
+    Union[DataSourceCompletionsInputMessagesTemplate, DataSourceCompletionsInputMessagesItemReference],
+    PropertyInfo(discriminator="type"),
+]
+
+
+class DataSourceCompletionsSamplingParams(BaseModel):
+    max_completion_tokens: Optional[int] = None
+    """The maximum number of tokens in the generated output."""
+
+    seed: Optional[int] = None
+    """A seed value to initialize the randomness, during sampling."""
+
+    temperature: Optional[float] = None
+    """A higher temperature increases randomness in the outputs."""
+
+    top_p: Optional[float] = None
+    """An alternative to temperature for nucleus sampling; 1.0 includes all tokens."""
+
+
+class DataSourceCompletions(BaseModel):
+    source: DataSourceCompletionsSource
+    """A EvalResponsesSource object describing a run data source configuration."""
+
+    type: Literal["completions"]
+    """The type of run data source. Always `completions`."""
+
+    input_messages: Optional[DataSourceCompletionsInputMessages] = None
+
+    model: Optional[str] = None
+    """The name of the model to use for generating completions (e.g. "o3-mini")."""
+
+    sampling_params: Optional[DataSourceCompletionsSamplingParams] = None
+
 
 DataSource: TypeAlias = Annotated[
-    Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource], PropertyInfo(discriminator="type")
+    Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceCompletions],
+    PropertyInfo(discriminator="type"),
 ]
 
 
diff --git a/src/openai/types/image.py b/src/openai/types/image.py
index f48aa2c702..ecaef3fd58 100644
--- a/src/openai/types/image.py
+++ b/src/openai/types/image.py
@@ -9,16 +9,18 @@
 
 class Image(BaseModel):
     b64_json: Optional[str] = None
-    """
-    The base64-encoded JSON of the generated image, if `response_format` is
-    `b64_json`.
+    """The base64-encoded JSON of the generated image.
+
+    Returned by default for `gpt-image-1`; for `dall-e-2` and `dall-e-3`, only
+    present if `response_format` is set to `b64_json`.
     """
 
     revised_prompt: Optional[str] = None
-    """
-    The prompt that was used to generate the image, if there was any revision to the
-    prompt.
-    """
+    """For `dall-e-3` only, the revised prompt that was used to generate the image."""
 
     url: Optional[str] = None
-    """The URL of the generated image, if `response_format` is `url` (default)."""
+    """
+    When using `dall-e-2` or `dall-e-3`, the URL of the generated image if
+    `response_format` is set to `url` (default value). Unsupported for
+    `gpt-image-1`.
+    """
diff --git a/src/openai/types/image_create_variation_params.py b/src/openai/types/image_create_variation_params.py
index d20f672912..d10b74b2c2 100644
--- a/src/openai/types/image_create_variation_params.py
+++ b/src/openai/types/image_create_variation_params.py
@@ -25,10 +25,7 @@ class ImageCreateVariationParams(TypedDict, total=False):
     """
 
     n: Optional[int]
-    """The number of images to generate.
-
-    Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
-    """
+    """The number of images to generate. Must be between 1 and 10."""
 
     response_format: Optional[Literal["url", "b64_json"]]
     """The format in which the generated images are returned.
diff --git a/src/openai/types/image_edit_params.py b/src/openai/types/image_edit_params.py
index 1cb10611f3..f01a12c1b0 100644
--- a/src/openai/types/image_edit_params.py
+++ b/src/openai/types/image_edit_params.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Union, Optional
+from typing import List, Union, Optional
 from typing_extensions import Literal, Required, TypedDict
 
 from .._types import FileTypes
@@ -12,46 +12,61 @@
 
 
 class ImageEditParams(TypedDict, total=False):
-    image: Required[FileTypes]
-    """The image to edit.
+    image: Required[Union[FileTypes, List[FileTypes]]]
+    """The image(s) to edit.
 
-    Must be a valid PNG file, less than 4MB, and square. If mask is not provided,
-    image must have transparency, which will be used as the mask.
+    Must be a supported image file or an array of images. For `gpt-image-1`, each
+    image should be a `png`, `webp`, or `jpg` file less than 25MB. For `dall-e-2`,
+    you can only provide one image, and it should be a square `png` file less than
+    4MB.
     """
 
     prompt: Required[str]
     """A text description of the desired image(s).
 
-    The maximum length is 1000 characters.
+    The maximum length is 1000 characters for `dall-e-2`, and 32000 characters for
+    `gpt-image-1`.
     """
 
     mask: FileTypes
     """An additional image whose fully transparent areas (e.g.
 
-    where alpha is zero) indicate where `image` should be edited. Must be a valid
-    PNG file, less than 4MB, and have the same dimensions as `image`.
+    where alpha is zero) indicate where `image` should be edited. If multiple
+    images are provided, the mask will be applied to the first image. Must be a
+    valid PNG file, less than 4MB, and have the same dimensions as `image`.
     """
 
     model: Union[str, ImageModel, None]
     """The model to use for image generation.
 
-    Only `dall-e-2` is supported at this time.
+    Only `dall-e-2` and `gpt-image-1` are supported. Defaults to `dall-e-2` unless a
+    parameter specific to `gpt-image-1` is used.
     """
 
     n: Optional[int]
     """The number of images to generate. Must be between 1 and 10."""
 
+    quality: Optional[Literal["standard", "low", "medium", "high", "auto"]]
+    """The quality of the image that will be generated.
+
+    `high`, `medium` and `low` are only supported for `gpt-image-1`. `dall-e-2` only
+    supports `standard` quality. Defaults to `auto`.
+    """
+
     response_format: Optional[Literal["url", "b64_json"]]
     """The format in which the generated images are returned.
 
     Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
-    image has been generated.
+    image has been generated. This parameter is only supported for `dall-e-2`, as
+    `gpt-image-1` will always return base64-encoded images.
     """
 
     size: Optional[Literal["256x256", "512x512", "1024x1024"]]
     """The size of the generated images.
 
-    Must be one of `256x256`, `512x512`, or `1024x1024`.
+    Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or
+    `auto` (default value) for `gpt-image-1`, and one of `256x256`, `512x512`, or
+    `1024x1024` for `dall-e-2`.
     """
 
     user: str
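Since `image` now accepts a list, a `gpt-image-1` edit can combine several inputs; per the mask docstring, a mask would apply to the first image only. A sketch (file names and prompt are placeholders):

    with open("scene.png", "rb") as scene, open("logo.png", "rb") as logo:
        result = client.images.edit(
            model="gpt-image-1",
            image=[scene, logo],
            prompt="Place the logo naturally into the scene",
            quality="high",
        )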
diff --git a/src/openai/types/image_generate_params.py b/src/openai/types/image_generate_params.py
index c88c45f518..8fc10220dc 100644
--- a/src/openai/types/image_generate_params.py
+++ b/src/openai/types/image_generate_params.py
@@ -14,12 +14,33 @@ class ImageGenerateParams(TypedDict, total=False):
     prompt: Required[str]
     """A text description of the desired image(s).
 
-    The maximum length is 1000 characters for `dall-e-2` and 4000 characters for
-    `dall-e-3`.
+    The maximum length is 32000 characters for `gpt-image-1`, 1000 characters for
+    `dall-e-2` and 4000 characters for `dall-e-3`.
+    """
+
+    background: Optional[Literal["transparent", "opaque", "auto"]]
+    """Allows to set transparency for the background of the generated image(s).
+
+    This parameter is only supported for `gpt-image-1`. Must be one of
+    `transparent`, `opaque` or `auto` (default value). When `auto` is used, the
+    model will automatically determine the best background for the image.
+
+    If `transparent`, the output format needs to support transparency, so it should
+    be set to either `png` (default value) or `webp`.
     """
 
     model: Union[str, ImageModel, None]
-    """The model to use for image generation."""
+    """The model to use for image generation.
+
+    One of `dall-e-2`, `dall-e-3`, or `gpt-image-1`. Defaults to `dall-e-2` unless a
+    parameter specific to `gpt-image-1` is used.
+    """
+
+    moderation: Optional[Literal["low", "auto"]]
+    """Control the content-moderation level for images generated by `gpt-image-1`.
+
+    Must be either `low` for less restrictive filtering or `auto` (default value).
+    """
 
     n: Optional[int]
     """The number of images to generate.
@@ -27,34 +48,57 @@ class ImageGenerateParams(TypedDict, total=False):
     Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported.
     """
 
-    quality: Literal["standard", "hd"]
+    output_compression: Optional[int]
+    """The compression level (0-100%) for the generated images.
+
+    This parameter is only supported for `gpt-image-1` with the `webp` or `jpeg`
+    output formats, and defaults to 100.
+    """
+
+    output_format: Optional[Literal["png", "jpeg", "webp"]]
+    """The format in which the generated images are returned.
+
+    This parameter is only supported for `gpt-image-1`. Must be one of `png`,
+    `jpeg`, or `webp`.
+    """
+
+    quality: Optional[Literal["standard", "hd", "low", "medium", "high", "auto"]]
     """The quality of the image that will be generated.
 
-    `hd` creates images with finer details and greater consistency across the image.
-    This param is only supported for `dall-e-3`.
+    - `auto` (default value) will automatically select the best quality for the
+      given model.
+    - `high`, `medium` and `low` are supported for `gpt-image-1`.
+    - `hd` and `standard` are supported for `dall-e-3`.
+    - `standard` is the only option for `dall-e-2`.
     """
 
     response_format: Optional[Literal["url", "b64_json"]]
-    """The format in which the generated images are returned.
+    """The format in which generated images with `dall-e-2` and `dall-e-3` are
+    returned.
 
     Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the
-    image has been generated.
+    image has been generated. This parameter isn't supported for `gpt-image-1`, which
+    will always return base64-encoded images.
     """
 
-    size: Optional[Literal["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"]]
+    size: Optional[
+        Literal["auto", "1024x1024", "1536x1024", "1024x1536", "256x256", "512x512", "1792x1024", "1024x1792"]
+    ]
     """The size of the generated images.
 
-    Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one
-    of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models.
+    Must be one of `1024x1024`, `1536x1024` (landscape), `1024x1536` (portrait), or
+    `auto` (default value) for `gpt-image-1`, one of `256x256`, `512x512`, or
+    `1024x1024` for `dall-e-2`, and one of `1024x1024`, `1792x1024`, or `1024x1792`
+    for `dall-e-3`.
     """
 
     style: Optional[Literal["vivid", "natural"]]
     """The style of the generated images.
 
-    Must be one of `vivid` or `natural`. Vivid causes the model to lean towards
-    generating hyper-real and dramatic images. Natural causes the model to produce
-    more natural, less hyper-real looking images. This param is only supported for
-    `dall-e-3`.
+    This parameter is only supported for `dall-e-3`. Must be one of `vivid` or
+    `natural`. Vivid causes the model to lean towards generating hyper-real and
+    dramatic images. Natural causes the model to produce more natural, less
+    hyper-real looking images.
     """
 
     user: str
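Apart from `size` and `quality`, which gain model-dependent values, the new parameters are all `gpt-image-1`-specific. A generation sketch exercising them (the prompt is a placeholder):

    result = client.images.generate(
        model="gpt-image-1",
        prompt="A line-art icon of a paper crane",
        background="transparent",  # per the docstring, requires png or webp output
        output_format="png",
        quality="medium",
        size="1024x1024",
    )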
diff --git a/src/openai/types/image_model.py b/src/openai/types/image_model.py
index 1672369bea..7fed69ed82 100644
--- a/src/openai/types/image_model.py
+++ b/src/openai/types/image_model.py
@@ -4,4 +4,4 @@
 
 __all__ = ["ImageModel"]
 
-ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3"]
+ImageModel: TypeAlias = Literal["dall-e-2", "dall-e-3", "gpt-image-1"]
diff --git a/src/openai/types/images_response.py b/src/openai/types/images_response.py
index 7cee813184..df454afa4d 100644
--- a/src/openai/types/images_response.py
+++ b/src/openai/types/images_response.py
@@ -1,14 +1,41 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import List
+from typing import List, Optional
 
 from .image import Image
 from .._models import BaseModel
 
-__all__ = ["ImagesResponse"]
+__all__ = ["ImagesResponse", "Usage", "UsageInputTokensDetails"]
+
+
+class UsageInputTokensDetails(BaseModel):
+    image_tokens: int
+    """The number of image tokens in the input prompt."""
+
+    text_tokens: int
+    """The number of text tokens in the input prompt."""
+
+
+class Usage(BaseModel):
+    input_tokens: int
+    """The number of tokens (images and text) in the input prompt."""
+
+    input_tokens_details: UsageInputTokensDetails
+    """The input tokens detailed information for the image generation."""
+
+    output_tokens: int
+    """The number of image tokens in the output image."""
+
+    total_tokens: int
+    """The total number of tokens (images and text) used for the image generation."""
 
 
 class ImagesResponse(BaseModel):
     created: int
+    """The Unix timestamp (in seconds) of when the image was created."""
+
+    data: Optional[List[Image]] = None
+    """The list of generated images."""
 
-    data: List[Image]
+    usage: Optional[Usage] = None
+    """For `gpt-image-1` only, the token usage information for the image generation."""
diff --git a/src/openai/types/responses/__init__.py b/src/openai/types/responses/__init__.py
index 4f07a3d097..22fd2a0802 100644
--- a/src/openai/types/responses/__init__.py
+++ b/src/openai/types/responses/__init__.py
@@ -22,6 +22,7 @@
 from .web_search_tool import WebSearchTool as WebSearchTool
 from .file_search_tool import FileSearchTool as FileSearchTool
 from .tool_choice_types import ToolChoiceTypes as ToolChoiceTypes
+from .easy_input_message import EasyInputMessage as EasyInputMessage
 from .response_item_list import ResponseItemList as ResponseItemList
 from .computer_tool_param import ComputerToolParam as ComputerToolParam
 from .function_tool_param import FunctionToolParam as FunctionToolParam
@@ -117,6 +118,12 @@
 from .response_input_message_content_list_param import (
     ResponseInputMessageContentListParam as ResponseInputMessageContentListParam,
 )
+from .response_reasoning_summary_part_done_event import (
+    ResponseReasoningSummaryPartDoneEvent as ResponseReasoningSummaryPartDoneEvent,
+)
+from .response_reasoning_summary_text_done_event import (
+    ResponseReasoningSummaryTextDoneEvent as ResponseReasoningSummaryTextDoneEvent,
+)
 from .response_web_search_call_in_progress_event import (
     ResponseWebSearchCallInProgressEvent as ResponseWebSearchCallInProgressEvent,
 )
@@ -126,6 +133,12 @@
 from .response_function_call_arguments_done_event import (
     ResponseFunctionCallArgumentsDoneEvent as ResponseFunctionCallArgumentsDoneEvent,
 )
+from .response_reasoning_summary_part_added_event import (
+    ResponseReasoningSummaryPartAddedEvent as ResponseReasoningSummaryPartAddedEvent,
+)
+from .response_reasoning_summary_text_delta_event import (
+    ResponseReasoningSummaryTextDeltaEvent as ResponseReasoningSummaryTextDeltaEvent,
+)
 from .response_function_call_arguments_delta_event import (
     ResponseFunctionCallArgumentsDeltaEvent as ResponseFunctionCallArgumentsDeltaEvent,
 )
diff --git a/src/openai/types/responses/easy_input_message.py b/src/openai/types/responses/easy_input_message.py
new file mode 100644
index 0000000000..4ed0194f9f
--- /dev/null
+++ b/src/openai/types/responses/easy_input_message.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Union, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .response_input_message_content_list import ResponseInputMessageContentList
+
+__all__ = ["EasyInputMessage"]
+
+
+class EasyInputMessage(BaseModel):
+    content: Union[str, ResponseInputMessageContentList]
+    """
+    Text, image, or audio input to the model, used to generate a response. Can also
+    contain previous assistant responses.
+    """
+
+    role: Literal["user", "assistant", "system", "developer"]
+    """The role of the message input.
+
+    One of `user`, `assistant`, `system`, or `developer`.
+    """
+
+    type: Optional[Literal["message"]] = None
+    """The type of the message input. Always `message`."""
diff --git a/src/openai/types/responses/response_reasoning_summary_part_added_event.py b/src/openai/types/responses/response_reasoning_summary_part_added_event.py
new file mode 100644
index 0000000000..fd11520170
--- /dev/null
+++ b/src/openai/types/responses/response_reasoning_summary_part_added_event.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningSummaryPartAddedEvent", "Part"]
+
+
+class Part(BaseModel):
+    text: str
+    """The text of the summary part."""
+
+    type: Literal["summary_text"]
+    """The type of the summary part. Always `summary_text`."""
+
+
+class ResponseReasoningSummaryPartAddedEvent(BaseModel):
+    item_id: str
+    """The ID of the item this summary part is associated with."""
+
+    output_index: int
+    """The index of the output item this summary part is associated with."""
+
+    part: Part
+    """The summary part that was added."""
+
+    summary_index: int
+    """The index of the summary part within the reasoning summary."""
+
+    type: Literal["response.reasoning_summary_part.added"]
+    """The type of the event. Always `response.reasoning_summary_part.added`."""
diff --git a/src/openai/types/responses/response_reasoning_summary_part_done_event.py b/src/openai/types/responses/response_reasoning_summary_part_done_event.py
new file mode 100644
index 0000000000..7f30189a49
--- /dev/null
+++ b/src/openai/types/responses/response_reasoning_summary_part_done_event.py
@@ -0,0 +1,32 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningSummaryPartDoneEvent", "Part"]
+
+
+class Part(BaseModel):
+    text: str
+    """The text of the summary part."""
+
+    type: Literal["summary_text"]
+    """The type of the summary part. Always `summary_text`."""
+
+
+class ResponseReasoningSummaryPartDoneEvent(BaseModel):
+    item_id: str
+    """The ID of the item this summary part is associated with."""
+
+    output_index: int
+    """The index of the output item this summary part is associated with."""
+
+    part: Part
+    """The completed summary part."""
+
+    summary_index: int
+    """The index of the summary part within the reasoning summary."""
+
+    type: Literal["response.reasoning_summary_part.done"]
+    """The type of the event. Always `response.reasoning_summary_part.done`."""
diff --git a/src/openai/types/responses/response_reasoning_summary_text_delta_event.py b/src/openai/types/responses/response_reasoning_summary_text_delta_event.py
new file mode 100644
index 0000000000..6d0cbd8265
--- /dev/null
+++ b/src/openai/types/responses/response_reasoning_summary_text_delta_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningSummaryTextDeltaEvent"]
+
+
+class ResponseReasoningSummaryTextDeltaEvent(BaseModel):
+    delta: str
+    """The text delta that was added to the summary."""
+
+    item_id: str
+    """The ID of the item this summary text delta is associated with."""
+
+    output_index: int
+    """The index of the output item this summary text delta is associated with."""
+
+    summary_index: int
+    """The index of the summary part within the reasoning summary."""
+
+    type: Literal["response.reasoning_summary_text.delta"]
+    """The type of the event. Always `response.reasoning_summary_text.delta`."""
diff --git a/src/openai/types/responses/response_reasoning_summary_text_done_event.py b/src/openai/types/responses/response_reasoning_summary_text_done_event.py
new file mode 100644
index 0000000000..15b894c75b
--- /dev/null
+++ b/src/openai/types/responses/response_reasoning_summary_text_done_event.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ResponseReasoningSummaryTextDoneEvent"]
+
+
+class ResponseReasoningSummaryTextDoneEvent(BaseModel):
+    item_id: str
+    """The ID of the item this summary text is associated with."""
+
+    output_index: int
+    """The index of the output item this summary text is associated with."""
+
+    summary_index: int
+    """The index of the summary part within the reasoning summary."""
+
+    text: str
+    """The full text of the completed reasoning summary."""
+
+    type: Literal["response.reasoning_summary_text.done"]
+    """The type of the event. Always `response.reasoning_summary_text.done`."""
diff --git a/src/openai/types/responses/response_stream_event.py b/src/openai/types/responses/response_stream_event.py
index 446863b175..07c18bd217 100644
--- a/src/openai/types/responses/response_stream_event.py
+++ b/src/openai/types/responses/response_stream_event.py
@@ -27,9 +27,13 @@
 from .response_web_search_call_searching_event import ResponseWebSearchCallSearchingEvent
 from .response_file_search_call_completed_event import ResponseFileSearchCallCompletedEvent
 from .response_file_search_call_searching_event import ResponseFileSearchCallSearchingEvent
+from .response_reasoning_summary_part_done_event import ResponseReasoningSummaryPartDoneEvent
+from .response_reasoning_summary_text_done_event import ResponseReasoningSummaryTextDoneEvent
 from .response_web_search_call_in_progress_event import ResponseWebSearchCallInProgressEvent
 from .response_file_search_call_in_progress_event import ResponseFileSearchCallInProgressEvent
 from .response_function_call_arguments_done_event import ResponseFunctionCallArgumentsDoneEvent
+from .response_reasoning_summary_part_added_event import ResponseReasoningSummaryPartAddedEvent
+from .response_reasoning_summary_text_delta_event import ResponseReasoningSummaryTextDeltaEvent
 from .response_function_call_arguments_delta_event import ResponseFunctionCallArgumentsDeltaEvent
 from .response_code_interpreter_call_code_done_event import ResponseCodeInterpreterCallCodeDoneEvent
 from .response_code_interpreter_call_completed_event import ResponseCodeInterpreterCallCompletedEvent
@@ -65,6 +69,10 @@
         ResponseIncompleteEvent,
         ResponseOutputItemAddedEvent,
         ResponseOutputItemDoneEvent,
+        ResponseReasoningSummaryPartAddedEvent,
+        ResponseReasoningSummaryPartDoneEvent,
+        ResponseReasoningSummaryTextDeltaEvent,
+        ResponseReasoningSummaryTextDoneEvent,
         ResponseRefusalDeltaEvent,
         ResponseRefusalDoneEvent,
         ResponseTextAnnotationDeltaEvent,
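The four new reasoning-summary events slot into the same stream union, so a raw streaming loop can surface summary text incrementally. A sketch, assuming a reasoning-capable model; the `reasoning` argument shape used to opt into summaries is an assumption here:

    stream = client.responses.create(
        model="o4-mini",  # example reasoning model
        input="Why is the sky blue?",
        reasoning={"summary": "auto"},  # assumed opt-in for reasoning summaries
        stream=True,
    )
    for event in stream:
        if event.type == "response.reasoning_summary_text.delta":
            print(event.delta, end="", flush=True)
        elif event.type == "response.reasoning_summary_text.done":
            print()  # summary complete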
diff --git a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
index d40466919a..6aa0b867d9 100644
--- a/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
+++ b/tests/api_resources/fine_tuning/checkpoints/test_permissions.py
@@ -117,19 +117,19 @@ def test_path_params_retrieve(self, client: OpenAI) -> None:
                 fine_tuned_model_checkpoint="",
             )
 
-    @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
     @parametrize
     def test_method_delete(self, client: OpenAI) -> None:
         permission = client.fine_tuning.checkpoints.permissions.delete(
-            "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+            permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+            fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
         )
         assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
 
-    @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
     @parametrize
     def test_raw_response_delete(self, client: OpenAI) -> None:
         response = client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
-            "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+            permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+            fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
         )
 
         assert response.is_closed is True
@@ -137,11 +137,11 @@ def test_raw_response_delete(self, client: OpenAI) -> None:
         permission = response.parse()
         assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
 
-    @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
     @parametrize
     def test_streaming_response_delete(self, client: OpenAI) -> None:
         with client.fine_tuning.checkpoints.permissions.with_streaming_response.delete(
-            "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+            permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+            fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -151,14 +151,20 @@ def test_streaming_response_delete(self, client: OpenAI) -> None:
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
     @parametrize
     def test_path_params_delete(self, client: OpenAI) -> None:
         with pytest.raises(
             ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
         ):
             client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
-                "",
+                permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+                fine_tuned_model_checkpoint="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
+            client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
+                permission_id="",
+                fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
             )
 
 
@@ -260,19 +266,19 @@ async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
                 fine_tuned_model_checkpoint="",
             )
 
-    @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
     @parametrize
     async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
         permission = await async_client.fine_tuning.checkpoints.permissions.delete(
-            "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+            permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+            fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
         )
         assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
 
-    @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
     @parametrize
     async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
         response = await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
-            "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+            permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+            fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
         )
 
         assert response.is_closed is True
@@ -280,11 +286,11 @@ async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
         permission = response.parse()
         assert_matches_type(PermissionDeleteResponse, permission, path=["response"])
 
-    @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
     @parametrize
     async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
         async with async_client.fine_tuning.checkpoints.permissions.with_streaming_response.delete(
-            "ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
+            permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+            fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -294,12 +300,18 @@ async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> Non
 
         assert cast(Any, response.is_closed) is True
 
-    @pytest.mark.skip(reason="OpenAPI spec is slightly incorrect")
     @parametrize
     async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
         with pytest.raises(
             ValueError, match=r"Expected a non-empty value for `fine_tuned_model_checkpoint` but received ''"
         ):
             await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
-                "",
+                permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
+                fine_tuned_model_checkpoint="",
+            )
+
+        with pytest.raises(ValueError, match=r"Expected a non-empty value for `permission_id` but received ''"):
+            await async_client.fine_tuning.checkpoints.permissions.with_raw_response.delete(
+                permission_id="",
+                fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
             )
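
The hunks above move the permissions delete endpoint from a single positional path parameter to explicit keyword arguments. A minimal sketch of the new call shape, using the fixture IDs from the tests rather than live resources:

    from openai import OpenAI

    client = OpenAI()

    # Both arguments are required; an empty string for either now raises
    # ValueError, as the path-param tests above assert.
    deleted = client.fine_tuning.checkpoints.permissions.delete(
        permission_id="cp_zc4Q7MP6XxulcVzj4MZdwsAB",
        fine_tuned_model_checkpoint="ft:gpt-4o-mini-2024-07-18:org:weather:B7R9VjQd",
    )
    print(deleted)
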
diff --git a/tests/api_resources/test_evals.py b/tests/api_resources/test_evals.py
index 8d03513b32..4ae2c597dd 100644
--- a/tests/api_resources/test_evals.py
+++ b/tests/api_resources/test_evals.py
@@ -74,7 +74,6 @@ def test_method_create_with_all_params(self, client: OpenAI) -> None:
             ],
             metadata={"foo": "string"},
             name="name",
-            share_with_openai=True,
         )
         assert_matches_type(EvalCreateResponse, eval, path=["response"])
 
@@ -350,7 +349,6 @@ async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) ->
             ],
             metadata={"foo": "string"},
             name="name",
-            share_with_openai=True,
         )
         assert_matches_type(EvalCreateResponse, eval, path=["response"])
 
diff --git a/tests/api_resources/test_images.py b/tests/api_resources/test_images.py
index 0a88f2ebcf..7997e9f5a1 100644
--- a/tests/api_resources/test_images.py
+++ b/tests/api_resources/test_images.py
@@ -76,6 +76,7 @@ def test_method_edit_with_all_params(self, client: OpenAI) -> None:
             mask=b"raw file contents",
             model="string",
             n=1,
+            quality="high",
             response_format="url",
             size="1024x1024",
             user="user-1234",
@@ -119,9 +120,13 @@ def test_method_generate(self, client: OpenAI) -> None:
     def test_method_generate_with_all_params(self, client: OpenAI) -> None:
         image = client.images.generate(
             prompt="A cute baby sea otter",
+            background="transparent",
             model="string",
+            moderation="low",
             n=1,
-            quality="standard",
+            output_compression=100,
+            output_format="png",
+            quality="medium",
             response_format="url",
             size="1024x1024",
             style="vivid",
@@ -216,6 +221,7 @@ async def test_method_edit_with_all_params(self, async_client: AsyncOpenAI) -> N
             mask=b"raw file contents",
             model="string",
             n=1,
+            quality="high",
             response_format="url",
             size="1024x1024",
             user="user-1234",
@@ -259,9 +265,13 @@ async def test_method_generate(self, async_client: AsyncOpenAI) -> None:
     async def test_method_generate_with_all_params(self, async_client: AsyncOpenAI) -> None:
         image = await async_client.images.generate(
             prompt="A cute baby sea otter",
+            background="transparent",
             model="string",
+            moderation="low",
             n=1,
-            quality="standard",
+            output_compression=100,
+            output_format="png",
+            quality="medium",
             response_format="url",
             size="1024x1024",
             style="vivid",

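The image tests above pick up the new edit/generate parameters (`background`, `moderation`, `output_compression`, `output_format`, and the revised `quality` values). A minimal generate sketch using them; the choice of `gpt-image-1` as the model these options target is an assumption (the tests pass the placeholder `model="string"`), and parameter availability varies by model:

    from openai import OpenAI

    client = OpenAI()

    image = client.images.generate(
        model="gpt-image-1",  # assumed target model for these options
        prompt="A cute baby sea otter",
        background="transparent",
        moderation="low",
        n=1,
        output_compression=100,
        output_format="jpeg",  # compression applies to jpeg/webp output
        quality="medium",
        size="1024x1024",
    )
    # Models that return base64 payloads populate `b64_json` on each image.
    print(image.data[0].b64_json is not None)
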
From c3617c9e4b6b07ea284440b1b303aba0ad983811 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
 <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 23 Apr 2025 16:31:09 +0000
Subject: [PATCH 11/11] release: 1.76.0

---
 .release-please-manifest.json |  2 +-
 CHANGELOG.md                  | 25 +++++++++++++++++++++++++
 pyproject.toml                |  2 +-
 src/openai/_version.py        |  2 +-
 4 files changed, 28 insertions(+), 3 deletions(-)

diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index cb464946f0..df3aaa16a7 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "1.75.0"
+  ".": "1.76.0"
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fb077b91c3..73d8f2bf6e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,30 @@
 # Changelog
 
+## 1.76.0 (2025-04-23)
+
+Full Changelog: [v1.75.0...v1.76.0](https://github.com/openai/openai-python/compare/v1.75.0...v1.76.0)
+
+### Features
+
+* **api:** adding new image model support ([74d7692](https://github.com/openai/openai-python/commit/74d7692e94c9dca96db8793809d75631c22dbb87))
+
+
+### Bug Fixes
+
+* **pydantic v1:** more robust `ModelField.annotation` check ([#2163](https://github.com/openai/openai-python/issues/2163)) ([7351b12](https://github.com/openai/openai-python/commit/7351b12bc981f56632b92342d9ef26f6fb28d540))
+* **pydantic v1:** more robust `ModelField.annotation` check ([eba7856](https://github.com/openai/openai-python/commit/eba7856db55afb8cb44376a0248587549f7bc65f))
+
+
+### Chores
+
+* **ci:** add timeout thresholds for CI jobs ([0997211](https://github.com/openai/openai-python/commit/09972119df5dd4c7c8db137c721364787e22d4c6))
+* **internal:** fix list file params ([da2113c](https://github.com/openai/openai-python/commit/da2113c60b50b4438459325fcd38d55df3f63d8e))
+* **internal:** import reformatting ([b425fb9](https://github.com/openai/openai-python/commit/b425fb906f62550c3669b09b9d8575f3d4d8496b))
+* **internal:** minor formatting changes ([aed1d76](https://github.com/openai/openai-python/commit/aed1d767898324cf90328db329e04e89a77579c3))
+* **internal:** refactor retries to not use recursion ([8cb8cfa](https://github.com/openai/openai-python/commit/8cb8cfab48a4fed70a756ce50036e7e56e1f9f87))
+* **internal:** update models test ([870ad4e](https://github.com/openai/openai-python/commit/870ad4ed3a284d75f44b825503750129284c7906))
+* update completion parse signature ([a44016c](https://github.com/openai/openai-python/commit/a44016c64cdefe404e97592808ed3c25411ab27b))
+
 ## 1.75.0 (2025-04-16)
 
 Full Changelog: [v1.74.1...v1.75.0](https://github.com/openai/openai-python/compare/v1.74.1...v1.75.0)
diff --git a/pyproject.toml b/pyproject.toml
index b5648e9e51..947e082f78 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "openai"
-version = "1.75.0"
+version = "1.76.0"
 description = "The official Python library for the openai API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/openai/_version.py b/src/openai/_version.py
index 8eab2d7416..ea6b974272 100644
--- a/src/openai/_version.py
+++ b/src/openai/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "openai"
-__version__ = "1.75.0"  # x-release-please-version
+__version__ = "1.76.0"  # x-release-please-version