From c289fc9ba9038ed54d8060a88f0681a25db64c52 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Fri, 21 Feb 2025 15:24:49 -0500 Subject: [PATCH 01/15] update base class --- .../langchain_openai/chat_models/base.py | 145 ++++++++++++------ 1 file changed, 101 insertions(+), 44 deletions(-) diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 2d580d08ccd98..1a82d3d98938f 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -8,10 +8,12 @@ import os import sys import warnings +from functools import cached_property from io import BytesIO from math import ceil from operator import itemgetter from typing import ( + TYPE_CHECKING, Any, AsyncIterator, Callable, @@ -91,10 +93,20 @@ is_basemodel_subclass, ) from langchain_core.utils.utils import _build_model_kwargs, from_env, secret_from_env -from pydantic import BaseModel, ConfigDict, Field, SecretStr, model_validator +from pydantic import ( + BaseModel, + ConfigDict, + Field, + PrivateAttr, + SecretStr, + model_validator, +) from pydantic.v1 import BaseModel as BaseModelV1 from typing_extensions import Self +if TYPE_CHECKING: + import httpx + logger = logging.getLogger(__name__) @@ -491,6 +503,7 @@ class BaseChatOpenAI(BaseChatModel): However this does not prevent a user from directly passed in the parameter during invocation. """ + _client_params: Dict[str, Any] = PrivateAttr(default_factory=dict) model_config = ConfigDict(populate_by_name=True) @@ -526,7 +539,7 @@ def validate_environment(self) -> Self: or os.getenv("OPENAI_ORGANIZATION") ) self.openai_api_base = self.openai_api_base or os.getenv("OPENAI_API_BASE") - client_params: dict = { + self._client_params: dict = { "api_key": ( self.openai_api_key.get_secret_value() if self.openai_api_key else None ), @@ -537,7 +550,7 @@ def validate_environment(self) -> Self: "default_query": self.default_query, } if self.max_retries is not None: - client_params["max_retries"] = self.max_retries + self._client_params["max_retries"] = self.max_retries if self.openai_proxy and (self.http_client or self.http_async_client): openai_proxy = self.openai_proxy @@ -548,37 +561,81 @@ def validate_environment(self) -> Self: "'http_client'/'http_async_client' is already specified. Received:\n" f"{openai_proxy=}\n{http_client=}\n{http_async_client=}" ) - if not self.client: - if self.openai_proxy and not self.http_client: - try: - import httpx - except ImportError as e: - raise ImportError( - "Could not import httpx python package. " - "Please install it with `pip install httpx`." - ) from e - self.http_client = httpx.Client(proxy=self.openai_proxy) - sync_specific = {"http_client": self.http_client} - self.root_client = openai.OpenAI(**client_params, **sync_specific) # type: ignore[arg-type] - self.client = self.root_client.chat.completions - if not self.async_client: - if self.openai_proxy and not self.http_async_client: - try: - import httpx - except ImportError as e: - raise ImportError( - "Could not import httpx python package. " - "Please install it with `pip install httpx`." 
- ) from e - self.http_async_client = httpx.AsyncClient(proxy=self.openai_proxy) - async_specific = {"http_client": self.http_async_client} - self.root_async_client = openai.AsyncOpenAI( - **client_params, - **async_specific, # type: ignore[arg-type] - ) - self.async_client = self.root_async_client.chat.completions + return self + @cached_property + def _http_client(self) -> Optional[httpx.Client]: + """Optional httpx.Client. Only used for sync invocations. + + Must specify http_async_client as well if you'd like a custom client for + async invocations. + """ + # Configure a custom httpx client. See the + # [httpx documentation](https://www.python-httpx.org/api/#client) for more + # details. + if self.http_client is not None: + return self.http_client + if not self.openai_proxy: + return None + try: + import httpx + except ImportError as e: + raise ImportError( + "Could not import httpx python package. " + "Please install it with `pip install httpx`." + ) from e + return httpx.Client(proxy=self.openai_proxy) + + @cached_property + def _http_async_client(self) -> Optional[httpx.AsyncClient]: + """Optional httpx.AsyncClient. Only used for async invocations. + + Must specify http_client as well if you'd like a custom client for sync + invocations. + """ + if self.http_async_client is not None: + return self.http_async_client + if not self.openai_proxy: + return None + try: + import httpx + except ImportError as e: + raise ImportError( + "Could not import httpx python package. " + "Please install it with `pip install httpx`." + ) from e + return httpx.AsyncClient(proxy=self.openai_proxy) + + @cached_property + def _root_client(self) -> openai.OpenAI: + if self.root_client is not None: + return self.root_client + sync_specific = {"http_client": self._http_client} + return openai.OpenAI(**self._client_params, **sync_specific) # type: ignore[arg-type] + + @cached_property + def _root_async_client(self) -> openai.AsyncOpenAI: + if self.root_async_client is not None: + return self.root_async_client + async_specific = {"http_client": self._http_async_client} + return openai.AsyncOpenAI( + **self._client_params, + **async_specific, # type: ignore[arg-type] + ) + + @cached_property + def _client(self) -> Any: + if self.client is not None: + return self.client + return self._root_client.chat.completions + + @cached_property + def _async_client(self) -> Any: + if self.async_client is not None: + return self.async_client + return self._root_async_client.chat.completions + @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" @@ -704,15 +761,15 @@ def _stream( "specified." 
) payload.pop("stream") - response_stream = self.root_client.beta.chat.completions.stream(**payload) + response_stream = self._root_client.beta.chat.completions.stream(**payload) context_manager = response_stream else: if self.include_response_headers: - raw_response = self.client.with_raw_response.create(**payload) + raw_response = self._client.with_raw_response.create(**payload) response = raw_response.parse() base_generation_info = {"headers": dict(raw_response.headers)} else: - response = self.client.create(**payload) + response = self._client.create(**payload) context_manager = response try: with context_manager as response: @@ -772,15 +829,15 @@ def _generate( ) payload.pop("stream") try: - response = self.root_client.beta.chat.completions.parse(**payload) + response = self._root_client.beta.chat.completions.parse(**payload) except openai.BadRequestError as e: _handle_openai_bad_request(e) elif self.include_response_headers: - raw_response = self.client.with_raw_response.create(**payload) + raw_response = self._client.with_raw_response.create(**payload) response = raw_response.parse() generation_info = {"headers": dict(raw_response.headers)} else: - response = self.client.create(**payload) + response = self._client.create(**payload) return self._create_chat_result(response, generation_info) def _get_request_payload( @@ -868,19 +925,19 @@ async def _astream( "specified." ) payload.pop("stream") - response_stream = self.root_async_client.beta.chat.completions.stream( + response_stream = self._root_async_client.beta.chat.completions.stream( **payload ) context_manager = response_stream else: if self.include_response_headers: - raw_response = await self.async_client.with_raw_response.create( + raw_response = await self._async_client.with_raw_response.create( **payload ) response = raw_response.parse() base_generation_info = {"headers": dict(raw_response.headers)} else: - response = await self.async_client.create(**payload) + response = await self._async_client.create(**payload) context_manager = response try: async with context_manager as response: @@ -940,17 +997,17 @@ async def _agenerate( ) payload.pop("stream") try: - response = await self.root_async_client.beta.chat.completions.parse( + response = await self._root_async_client.beta.chat.completions.parse( **payload ) except openai.BadRequestError as e: _handle_openai_bad_request(e) elif self.include_response_headers: - raw_response = await self.async_client.with_raw_response.create(**payload) + raw_response = await self._async_client.with_raw_response.create(**payload) response = raw_response.parse() generation_info = {"headers": dict(raw_response.headers)} else: - response = await self.async_client.create(**payload) + response = await self._async_client.create(**payload) return await run_in_executor( None, self._create_chat_result, response, generation_info ) From 6c2474b22057156dbb5b299373a3f90089bac0ff Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Fri, 21 Feb 2025 15:25:37 -0500 Subject: [PATCH 02/15] update azure --- .../langchain_openai/chat_models/azure.py | 55 +++++++++++++------ 1 file changed, 37 insertions(+), 18 deletions(-) diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py index 04c5e4e5c1db2..1c5653ac3a280 100644 --- a/libs/partners/openai/langchain_openai/chat_models/azure.py +++ b/libs/partners/openai/langchain_openai/chat_models/azure.py @@ -4,6 +4,7 @@ import logging import os +from functools import cached_property from typing 
import ( Any, Awaitable, @@ -629,7 +630,7 @@ def validate_environment(self) -> Self: "Or you can equivalently specify:\n\n" 'base_url="https://xxx.openai.azure.com/openai/deployments/my-deployment"' ) - client_params: dict = { + self._client_params: dict = { "api_version": self.openai_api_version, "azure_endpoint": self.azure_endpoint, "azure_deployment": self.deployment_name, @@ -650,27 +651,45 @@ def validate_environment(self) -> Self: "default_query": self.default_query, } if self.max_retries is not None: - client_params["max_retries"] = self.max_retries - - if not self.client: - sync_specific = {"http_client": self.http_client} - self.root_client = openai.AzureOpenAI(**client_params, **sync_specific) # type: ignore[arg-type] - self.client = self.root_client.chat.completions - if not self.async_client: - async_specific = {"http_client": self.http_async_client} - - if self.azure_ad_async_token_provider: - client_params["azure_ad_token_provider"] = ( - self.azure_ad_async_token_provider - ) + self._client_params["max_retries"] = self.max_retries - self.root_async_client = openai.AsyncAzureOpenAI( - **client_params, - **async_specific, # type: ignore[arg-type] + if self.azure_ad_async_token_provider: + self._client_params["azure_ad_token_provider"] = ( + self.azure_ad_async_token_provider ) - self.async_client = self.root_async_client.chat.completions + return self + @cached_property + def _root_client(self) -> openai.AzureOpenAI: + if self.root_client is not None: + return self.root_client + sync_specific = {"http_client": self._http_client} + return openai.AzureOpenAI(**self._client_params, **sync_specific) # type: ignore[call-overload] + + @cached_property + def _root_async_client(self) -> openai.AsyncAzureOpenAI: + if self.root_async_client is not None: + return self.root_async_client + async_specific = {"http_client": self._http_async_client} + + return openai.AsyncAzureOpenAI( + **self._client_params, + **async_specific, # type: ignore[call-overload] + ) + + @cached_property + def _client(self) -> Any: + if self.client is not None: + return self.client + return self._root_client.chat.completions + + @cached_property + def _async_client(self) -> Any: + if self.async_client is not None: + return self.async_client + return self._root_async_client.chat.completions + @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" From 619a885263d0705d8d23fc704202885ad9f97780 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Fri, 21 Feb 2025 15:47:37 -0500 Subject: [PATCH 03/15] add test --- libs/partners/openai/tests/unit_tests/chat_models/test_base.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py index a7b3e5f102ee8..dd5b8291e97f3 100644 --- a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py +++ b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py @@ -541,6 +541,9 @@ def test_openai_invoke(mock_client: MagicMock) -> None: assert "headers" not in res.response_metadata assert mock_client.create.called + assert llm.root_client is None + assert llm.root_async_client is None + async def test_openai_ainvoke(mock_async_client: AsyncMock) -> None: llm = ChatOpenAI() From adcb5396d9b200015521c719e56935987df070e2 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Fri, 21 Feb 2025 16:54:54 -0500 Subject: [PATCH 04/15] update --- .../langchain_openai/chat_models/azure.py | 22 +++++++------ 
.../langchain_openai/chat_models/base.py | 31 +++++++++++-------- .../chat_models/test_base.py | 6 ++++ .../tests/unit_tests/chat_models/test_base.py | 3 +- 4 files changed, 37 insertions(+), 25 deletions(-) diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py index 1c5653ac3a280..2b50512953038 100644 --- a/libs/partners/openai/langchain_openai/chat_models/azure.py +++ b/libs/partners/openai/langchain_openai/chat_models/azure.py @@ -4,7 +4,6 @@ import logging import os -from functools import cached_property from typing import ( Any, Awaitable, @@ -660,35 +659,38 @@ def validate_environment(self) -> Self: return self - @cached_property + @property def _root_client(self) -> openai.AzureOpenAI: if self.root_client is not None: return self.root_client sync_specific = {"http_client": self._http_client} - return openai.AzureOpenAI(**self._client_params, **sync_specific) # type: ignore[call-overload] + self.root_client = openai.AzureOpenAI(**self._client_params, **sync_specific) # type: ignore[call-overload] + return self.root_client - @cached_property + @property def _root_async_client(self) -> openai.AsyncAzureOpenAI: if self.root_async_client is not None: return self.root_async_client async_specific = {"http_client": self._http_async_client} - - return openai.AsyncAzureOpenAI( + self.root_async_client = openai.AsyncAzureOpenAI( **self._client_params, **async_specific, # type: ignore[call-overload] ) + return self._root_async_client - @cached_property + @property def _client(self) -> Any: if self.client is not None: return self.client - return self._root_client.chat.completions + self.client = self._root_client.chat.completions + return self.client - @cached_property + @property def _async_client(self) -> Any: if self.async_client is not None: return self.async_client - return self._root_async_client.chat.completions + self.async_client = self._root_async_client.chat.completions + return self.async_client @property def _identifying_params(self) -> Dict[str, Any]: diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 1a82d3d98938f..73cb63a5b8786 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -8,7 +8,6 @@ import os import sys import warnings -from functools import cached_property from io import BytesIO from math import ceil from operator import itemgetter @@ -564,7 +563,7 @@ def validate_environment(self) -> Self: return self - @cached_property + @property def _http_client(self) -> Optional[httpx.Client]: """Optional httpx.Client. Only used for sync invocations. @@ -585,9 +584,10 @@ def _http_client(self) -> Optional[httpx.Client]: "Could not import httpx python package. " "Please install it with `pip install httpx`." ) from e - return httpx.Client(proxy=self.openai_proxy) + self.http_client = httpx.Client(proxy=self.openai_proxy) + return self.http_client - @cached_property + @property def _http_async_client(self) -> Optional[httpx.AsyncClient]: """Optional httpx.AsyncClient. Only used for async invocations. @@ -605,36 +605,41 @@ def _http_async_client(self) -> Optional[httpx.AsyncClient]: "Could not import httpx python package. " "Please install it with `pip install httpx`." 
) from e - return httpx.AsyncClient(proxy=self.openai_proxy) + self.http_async_client = httpx.AsyncClient(proxy=self.openai_proxy) + return self.http_async_client - @cached_property + @property def _root_client(self) -> openai.OpenAI: if self.root_client is not None: return self.root_client sync_specific = {"http_client": self._http_client} - return openai.OpenAI(**self._client_params, **sync_specific) # type: ignore[arg-type] + self.root_client = openai.OpenAI(**self._client_params, **sync_specific) # type: ignore[arg-type] + return self.root_client - @cached_property + @property def _root_async_client(self) -> openai.AsyncOpenAI: if self.root_async_client is not None: return self.root_async_client async_specific = {"http_client": self._http_async_client} - return openai.AsyncOpenAI( + self.root_async_client = openai.AsyncOpenAI( **self._client_params, **async_specific, # type: ignore[arg-type] ) + return self.root_async_client - @cached_property + @property def _client(self) -> Any: if self.client is not None: return self.client - return self._root_client.chat.completions + self.client = self._root_client.chat.completions + return self.client - @cached_property + @property def _async_client(self) -> Any: if self.async_client is not None: return self.async_client - return self._root_async_client.chat.completions + self.async_client = self._root_async_client.chat.completions + return self.async_client @property def _default_params(self) -> Dict[str, Any]: diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py index 09cae79520bf1..5a7dfabaed083 100644 --- a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py +++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py @@ -660,6 +660,9 @@ class MyModel(BaseModel): def test_openai_proxy() -> None: """Test ChatOpenAI with proxy.""" chat_openai = ChatOpenAI(openai_proxy="http://localhost:8080") + assert chat_openai.client is None + _ = chat_openai._client # force client to instantiate + assert chat_openai.client is not None mounts = chat_openai.client._client._client._mounts assert len(mounts) == 1 for key, value in mounts.items(): @@ -668,6 +671,9 @@ def test_openai_proxy() -> None: assert proxy.host == b"localhost" assert proxy.port == 8080 + assert chat_openai.async_client is None + _ = chat_openai._async_client # force client to instantiate + assert chat_openai.async_client is not None async_client_mounts = chat_openai.async_client._client._client._mounts assert len(async_client_mounts) == 1 for key, value in async_client_mounts.items(): diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py index dd5b8291e97f3..08d2ce395ebec 100644 --- a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py +++ b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py @@ -541,8 +541,7 @@ def test_openai_invoke(mock_client: MagicMock) -> None: assert "headers" not in res.response_metadata assert mock_client.create.called - assert llm.root_client is None - assert llm.root_async_client is None + assert llm.async_client is None async def test_openai_ainvoke(mock_async_client: AsyncMock) -> None: From 4588e0679483d31375fbeb348caae769d38e03c2 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Fri, 21 Feb 2025 19:08:41 -0500 Subject: [PATCH 05/15] clients -> private attributes --- .../langchain_openai/chat_models/azure.py | 42 
+++--- .../langchain_openai/chat_models/base.py | 127 ++++++++++-------- .../chat_models/test_base.py | 6 - .../unit_tests/chat_models/test_azure.py | 1 + .../tests/unit_tests/chat_models/test_base.py | 20 +-- 5 files changed, 106 insertions(+), 90 deletions(-) diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py index 2b50512953038..f4fe272c46df7 100644 --- a/libs/partners/openai/langchain_openai/chat_models/azure.py +++ b/libs/partners/openai/langchain_openai/chat_models/azure.py @@ -660,37 +660,37 @@ def validate_environment(self) -> Self: return self @property - def _root_client(self) -> openai.AzureOpenAI: - if self.root_client is not None: - return self.root_client - sync_specific = {"http_client": self._http_client} - self.root_client = openai.AzureOpenAI(**self._client_params, **sync_specific) # type: ignore[call-overload] - return self.root_client + def root_client(self) -> openai.AzureOpenAI: + if self._root_client is not None: + return self._root_client + sync_specific = {"http_client": self.http_client} + self._root_client = openai.AzureOpenAI(**self._client_params, **sync_specific) # type: ignore[call-overload] + return self._root_client @property - def _root_async_client(self) -> openai.AsyncAzureOpenAI: - if self.root_async_client is not None: - return self.root_async_client - async_specific = {"http_client": self._http_async_client} - self.root_async_client = openai.AsyncAzureOpenAI( + def root_async_client(self) -> openai.AsyncAzureOpenAI: + if self._root_async_client is not None: + return self._root_async_client + async_specific = {"http_client": self.http_async_client} + self._root_async_client = openai.AsyncAzureOpenAI( **self._client_params, **async_specific, # type: ignore[call-overload] ) return self._root_async_client @property - def _client(self) -> Any: - if self.client is not None: - return self.client - self.client = self._root_client.chat.completions - return self.client + def client(self) -> Any: + if self._client is not None: + return self._client + self._client = self.root_client.chat.completions + return self._client @property - def _async_client(self) -> Any: - if self.async_client is not None: - return self.async_client - self.async_client = self._root_async_client.chat.completions - return self.async_client + def async_client(self) -> Any: + if self._async_client is not None: + return self._async_client + self._async_client = self.root_async_client.chat.completions + return self._async_client @property def _identifying_params(self) -> Dict[str, Any]: diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 73cb63a5b8786..7fe69cc712add 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -396,10 +396,10 @@ class _AllReturnType(TypedDict): class BaseChatOpenAI(BaseChatModel): - client: Any = Field(default=None, exclude=True) #: :meta private: - async_client: Any = Field(default=None, exclude=True) #: :meta private: - root_client: Any = Field(default=None, exclude=True) #: :meta private: - root_async_client: Any = Field(default=None, exclude=True) #: :meta private: + _client: Any = PrivateAttr(default=None) #: :meta private: + _async_client: Any = PrivateAttr(default=None) #: :meta private: + _root_client: Any = PrivateAttr(default=None) #: :meta private: + _root_async_client: Any = PrivateAttr(default=None) #: :meta 
private: model_name: str = Field(default="gpt-3.5-turbo", alias="model") """Model name to use.""" temperature: Optional[float] = None @@ -471,11 +471,11 @@ class BaseChatOpenAI(BaseChatModel): default_query: Union[Mapping[str, object], None] = None # Configure a custom httpx client. See the # [httpx documentation](https://www.python-httpx.org/api/#client) for more details. - http_client: Union[Any, None] = Field(default=None, exclude=True) + _http_client: Union[Any, None] = PrivateAttr(default=None) """Optional httpx.Client. Only used for sync invocations. Must specify http_async_client as well if you'd like a custom client for async invocations. """ - http_async_client: Union[Any, None] = Field(default=None, exclude=True) + _http_async_client: Union[Any, None] = PrivateAttr(default=None) """Optional httpx.AsyncClient. Only used for async invocations. Must specify http_client as well if you'd like a custom client for sync invocations.""" stop: Optional[Union[List[str], str]] = Field(default=None, alias="stop_sequences") @@ -523,6 +523,24 @@ def validate_temperature(cls, values: Dict[str, Any]) -> Any: values["temperature"] = 1 return values + def __init__( + self, + client: Optional[Any] = None, + async_client: Optional[Any] = None, + root_client: Optional[Any] = None, + async_root_client: Optional[Any] = None, + http_client: Optional[Any] = None, + http_async_client: Optional[Any] = None, + **kwargs: Any, + ) -> None: + super().__init__(**kwargs) + self._client = client + self._async_client = async_client + self._root_client = root_client + self._async_root_client = async_root_client + self._http_client = http_client + self._http_async_client = http_async_client + @model_validator(mode="after") def validate_environment(self) -> Self: """Validate that api key and python package exists in environment.""" @@ -551,10 +569,10 @@ def validate_environment(self) -> Self: if self.max_retries is not None: self._client_params["max_retries"] = self.max_retries - if self.openai_proxy and (self.http_client or self.http_async_client): + if self.openai_proxy and (self._http_client or self._http_async_client): openai_proxy = self.openai_proxy - http_client = self.http_client - http_async_client = self.http_async_client + http_client = self._http_client + http_async_client = self._http_async_client raise ValueError( "Cannot specify 'openai_proxy' if one of " "'http_client'/'http_async_client' is already specified. Received:\n" @@ -564,7 +582,7 @@ def validate_environment(self) -> Self: return self @property - def _http_client(self) -> Optional[httpx.Client]: + def http_client(self) -> Optional[httpx.Client]: """Optional httpx.Client. Only used for sync invocations. Must specify http_async_client as well if you'd like a custom client for @@ -573,8 +591,8 @@ def _http_client(self) -> Optional[httpx.Client]: # Configure a custom httpx client. See the # [httpx documentation](https://www.python-httpx.org/api/#client) for more # details. - if self.http_client is not None: - return self.http_client + if self._http_client is not None: + return self._http_client if not self.openai_proxy: return None try: @@ -584,18 +602,18 @@ def _http_client(self) -> Optional[httpx.Client]: "Could not import httpx python package. " "Please install it with `pip install httpx`." 
) from e - self.http_client = httpx.Client(proxy=self.openai_proxy) - return self.http_client + self._http_client = httpx.Client(proxy=self.openai_proxy) + return self._http_client @property - def _http_async_client(self) -> Optional[httpx.AsyncClient]: + def http_async_client(self) -> Optional[httpx.AsyncClient]: """Optional httpx.AsyncClient. Only used for async invocations. Must specify http_client as well if you'd like a custom client for sync invocations. """ - if self.http_async_client is not None: - return self.http_async_client + if self._http_async_client is not None: + return self._http_async_client if not self.openai_proxy: return None try: @@ -605,41 +623,41 @@ def _http_async_client(self) -> Optional[httpx.AsyncClient]: "Could not import httpx python package. " "Please install it with `pip install httpx`." ) from e - self.http_async_client = httpx.AsyncClient(proxy=self.openai_proxy) - return self.http_async_client + self._http_async_client = httpx.AsyncClient(proxy=self.openai_proxy) + return self._http_async_client @property - def _root_client(self) -> openai.OpenAI: - if self.root_client is not None: - return self.root_client - sync_specific = {"http_client": self._http_client} - self.root_client = openai.OpenAI(**self._client_params, **sync_specific) # type: ignore[arg-type] - return self.root_client + def root_client(self) -> openai.OpenAI: + if self._root_client is not None: + return self._root_client + sync_specific = {"http_client": self.http_client} + self._root_client = openai.OpenAI(**self._client_params, **sync_specific) # type: ignore[arg-type] + return self._root_client @property - def _root_async_client(self) -> openai.AsyncOpenAI: - if self.root_async_client is not None: - return self.root_async_client - async_specific = {"http_client": self._http_async_client} - self.root_async_client = openai.AsyncOpenAI( + def root_async_client(self) -> openai.AsyncOpenAI: + if self._root_async_client is not None: + return self._root_async_client + async_specific = {"http_client": self.http_async_client} + self._root_async_client = openai.AsyncOpenAI( **self._client_params, **async_specific, # type: ignore[arg-type] ) - return self.root_async_client + return self._root_async_client @property - def _client(self) -> Any: - if self.client is not None: - return self.client - self.client = self._root_client.chat.completions - return self.client + def client(self) -> Any: + if self._client is not None: + return self._client + self._client = self.root_client.chat.completions + return self._client @property - def _async_client(self) -> Any: - if self.async_client is not None: - return self.async_client - self.async_client = self._root_async_client.chat.completions - return self.async_client + def async_client(self) -> Any: + if self._async_client is not None: + return self._async_client + self._async_client = self.root_async_client.chat.completions + return self._async_client @property def _default_params(self) -> Dict[str, Any]: @@ -766,15 +784,15 @@ def _stream( "specified." 
) payload.pop("stream") - response_stream = self._root_client.beta.chat.completions.stream(**payload) + response_stream = self.root_client.beta.chat.completions.stream(**payload) context_manager = response_stream else: if self.include_response_headers: - raw_response = self._client.with_raw_response.create(**payload) + raw_response = self.client.with_raw_response.create(**payload) response = raw_response.parse() base_generation_info = {"headers": dict(raw_response.headers)} else: - response = self._client.create(**payload) + response = self.client.create(**payload) context_manager = response try: with context_manager as response: @@ -834,15 +852,15 @@ def _generate( ) payload.pop("stream") try: - response = self._root_client.beta.chat.completions.parse(**payload) + response = self.root_client.beta.chat.completions.parse(**payload) except openai.BadRequestError as e: _handle_openai_bad_request(e) elif self.include_response_headers: - raw_response = self._client.with_raw_response.create(**payload) + raw_response = self.client.with_raw_response.create(**payload) response = raw_response.parse() generation_info = {"headers": dict(raw_response.headers)} else: - response = self._client.create(**payload) + response = self.client.create(**payload) return self._create_chat_result(response, generation_info) def _get_request_payload( @@ -930,19 +948,19 @@ async def _astream( "specified." ) payload.pop("stream") - response_stream = self._root_async_client.beta.chat.completions.stream( + response_stream = self.root_async_client.beta.chat.completions.stream( **payload ) context_manager = response_stream else: if self.include_response_headers: - raw_response = await self._async_client.with_raw_response.create( + raw_response = await self.async_client.with_raw_response.create( **payload ) response = raw_response.parse() base_generation_info = {"headers": dict(raw_response.headers)} else: - response = await self._async_client.create(**payload) + response = await self.async_client.create(**payload) context_manager = response try: async with context_manager as response: @@ -1002,17 +1020,17 @@ async def _agenerate( ) payload.pop("stream") try: - response = await self._root_async_client.beta.chat.completions.parse( + response = await self.root_async_client.beta.chat.completions.parse( **payload ) except openai.BadRequestError as e: _handle_openai_bad_request(e) elif self.include_response_headers: - raw_response = await self._async_client.with_raw_response.create(**payload) + raw_response = await self.async_client.with_raw_response.create(**payload) response = raw_response.parse() generation_info = {"headers": dict(raw_response.headers)} else: - response = await self._async_client.create(**payload) + response = await self.async_client.create(**payload) return await run_in_executor( None, self._create_chat_result, response, generation_info ) @@ -2023,6 +2041,9 @@ class Joke(BaseModel): max_tokens: Optional[int] = Field(default=None, alias="max_completion_tokens") """Maximum number of tokens to generate.""" + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + @property def lc_secrets(self) -> Dict[str, str]: return {"openai_api_key": "OPENAI_API_KEY"} diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py index 5a7dfabaed083..09cae79520bf1 100644 --- a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py +++ 
b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py @@ -660,9 +660,6 @@ class MyModel(BaseModel): def test_openai_proxy() -> None: """Test ChatOpenAI with proxy.""" chat_openai = ChatOpenAI(openai_proxy="http://localhost:8080") - assert chat_openai.client is None - _ = chat_openai._client # force client to instantiate - assert chat_openai.client is not None mounts = chat_openai.client._client._client._mounts assert len(mounts) == 1 for key, value in mounts.items(): @@ -671,9 +668,6 @@ def test_openai_proxy() -> None: assert proxy.host == b"localhost" assert proxy.port == 8080 - assert chat_openai.async_client is None - _ = chat_openai._async_client # force client to instantiate - assert chat_openai.async_client is not None async_client_mounts = chat_openai.async_client._client._client._mounts assert len(async_client_mounts) == 1 for key, value in async_client_mounts.items(): diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_azure.py b/libs/partners/openai/tests/unit_tests/chat_models/test_azure.py index bee3f742a5efe..64b2de8ba22d9 100644 --- a/libs/partners/openai/tests/unit_tests/chat_models/test_azure.py +++ b/libs/partners/openai/tests/unit_tests/chat_models/test_azure.py @@ -14,6 +14,7 @@ def test_initialize_azure_openai() -> None: azure_deployment="35-turbo-dev", openai_api_version="2023-05-15", azure_endpoint="my-base-url", + http_client=None, ) assert llm.deployment_name == "35-turbo-dev" assert llm.openai_api_version == "2023-05-15" diff --git a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py index 08d2ce395ebec..4954cf8398cc5 100644 --- a/libs/partners/openai/tests/unit_tests/chat_models/test_base.py +++ b/libs/partners/openai/tests/unit_tests/chat_models/test_base.py @@ -298,7 +298,7 @@ async def mock_create(*args: Any, **kwargs: Any) -> MockAsyncContextManager: usage_chunk = mock_glm4_completion[-1] usage_metadata: Optional[UsageMetadata] = None - with patch.object(llm, "async_client", mock_client): + with patch.object(llm, "_async_client", mock_client): async for chunk in llm.astream("你的名字叫什么?只回答名字"): assert isinstance(chunk, AIMessageChunk) if chunk.usage_metadata is not None: @@ -323,7 +323,7 @@ def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager: usage_chunk = mock_glm4_completion[-1] usage_metadata: Optional[UsageMetadata] = None - with patch.object(llm, "client", mock_client): + with patch.object(llm, "_client", mock_client): for chunk in llm.stream("你的名字叫什么?只回答名字"): assert isinstance(chunk, AIMessageChunk) if chunk.usage_metadata is not None: @@ -378,7 +378,7 @@ async def mock_create(*args: Any, **kwargs: Any) -> MockAsyncContextManager: mock_client.create = mock_create usage_chunk = mock_deepseek_completion[-1] usage_metadata: Optional[UsageMetadata] = None - with patch.object(llm, "async_client", mock_client): + with patch.object(llm, "_async_client", mock_client): async for chunk in llm.astream("你的名字叫什么?只回答名字"): assert isinstance(chunk, AIMessageChunk) if chunk.usage_metadata is not None: @@ -402,7 +402,7 @@ def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager: mock_client.create = mock_create usage_chunk = mock_deepseek_completion[-1] usage_metadata: Optional[UsageMetadata] = None - with patch.object(llm, "client", mock_client): + with patch.object(llm, "_client", mock_client): for chunk in llm.stream("你的名字叫什么?只回答名字"): assert isinstance(chunk, AIMessageChunk) if chunk.usage_metadata is not None: @@ -446,7 +446,7 @@ 
async def mock_create(*args: Any, **kwargs: Any) -> MockAsyncContextManager: mock_client.create = mock_create usage_chunk = mock_openai_completion[-1] usage_metadata: Optional[UsageMetadata] = None - with patch.object(llm, "async_client", mock_client): + with patch.object(llm, "_async_client", mock_client): async for chunk in llm.astream("你的名字叫什么?只回答名字"): assert isinstance(chunk, AIMessageChunk) if chunk.usage_metadata is not None: @@ -470,7 +470,7 @@ def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager: mock_client.create = mock_create usage_chunk = mock_openai_completion[-1] usage_metadata: Optional[UsageMetadata] = None - with patch.object(llm, "client", mock_client): + with patch.object(llm, "_client", mock_client): for chunk in llm.stream("你的名字叫什么?只回答名字"): assert isinstance(chunk, AIMessageChunk) if chunk.usage_metadata is not None: @@ -533,7 +533,7 @@ def mock_async_client(mock_completion: dict) -> AsyncMock: def test_openai_invoke(mock_client: MagicMock) -> None: llm = ChatOpenAI() - with patch.object(llm, "client", mock_client): + with patch.object(llm, "_client", mock_client): res = llm.invoke("bar") assert res.content == "Bar Baz" @@ -541,13 +541,13 @@ def test_openai_invoke(mock_client: MagicMock) -> None: assert "headers" not in res.response_metadata assert mock_client.create.called - assert llm.async_client is None + assert llm._async_client is None async def test_openai_ainvoke(mock_async_client: AsyncMock) -> None: llm = ChatOpenAI() - with patch.object(llm, "async_client", mock_async_client): + with patch.object(llm, "_async_client", mock_async_client): res = await llm.ainvoke("bar") assert res.content == "Bar Baz" @@ -575,7 +575,7 @@ def test__get_encoding_model(model: str) -> None: def test_openai_invoke_name(mock_client: MagicMock) -> None: llm = ChatOpenAI() - with patch.object(llm, "client", mock_client): + with patch.object(llm, "_client", mock_client): messages = [HumanMessage(content="Foo", name="Katie")] res = llm.invoke(messages) call_args, call_kwargs = mock_client.create.call_args From 69400b8704a5df6986cdae8319f57b5a92a62c0b Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Fri, 21 Feb 2025 19:32:09 -0500 Subject: [PATCH 06/15] refactor --- .../langchain_openai/chat_models/azure.py | 33 ++++---- .../langchain_openai/chat_models/base.py | 79 +++++++++---------- 2 files changed, 54 insertions(+), 58 deletions(-) diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py index f4fe272c46df7..83b484b3c948e 100644 --- a/libs/partners/openai/langchain_openai/chat_models/azure.py +++ b/libs/partners/openai/langchain_openai/chat_models/azure.py @@ -661,35 +661,34 @@ def validate_environment(self) -> Self: @property def root_client(self) -> openai.AzureOpenAI: - if self._root_client is not None: - return self._root_client - sync_specific = {"http_client": self.http_client} - self._root_client = openai.AzureOpenAI(**self._client_params, **sync_specific) # type: ignore[call-overload] + if self._root_client is None: + sync_specific = {"http_client": self.http_client} + self._root_client = openai.AzureOpenAI( + **self._client_params, + **sync_specific, # type: ignore[call-overload] + ) return self._root_client @property def root_async_client(self) -> openai.AsyncAzureOpenAI: - if self._root_async_client is not None: - return self._root_async_client - async_specific = {"http_client": self.http_async_client} - self._root_async_client = openai.AsyncAzureOpenAI( - **self._client_params, - 
**async_specific, # type: ignore[call-overload] - ) + if self._root_async_client is None: + async_specific = {"http_client": self.http_async_client} + self._root_async_client = openai.AsyncAzureOpenAI( + **self._client_params, + **async_specific, # type: ignore[call-overload] + ) return self._root_async_client @property def client(self) -> Any: - if self._client is not None: - return self._client - self._client = self.root_client.chat.completions + if self._client is None: + self._client = self.root_client.chat.completions return self._client @property def async_client(self) -> Any: - if self._async_client is not None: - return self._async_client - self._async_client = self.root_async_client.chat.completions + if self._async_client is None: + self._async_client = self.root_async_client.chat.completions return self._async_client @property diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 7fe69cc712add..ae80d38abcf98 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -591,18 +591,17 @@ def http_client(self) -> Optional[httpx.Client]: # Configure a custom httpx client. See the # [httpx documentation](https://www.python-httpx.org/api/#client) for more # details. - if self._http_client is not None: - return self._http_client - if not self.openai_proxy: - return None - try: - import httpx - except ImportError as e: - raise ImportError( - "Could not import httpx python package. " - "Please install it with `pip install httpx`." - ) from e - self._http_client = httpx.Client(proxy=self.openai_proxy) + if self._http_client is None: + if not self.openai_proxy: + return None + try: + import httpx + except ImportError as e: + raise ImportError( + "Could not import httpx python package. " + "Please install it with `pip install httpx`." + ) from e + self._http_client = httpx.Client(proxy=self.openai_proxy) return self._http_client @property @@ -612,51 +611,49 @@ def http_async_client(self) -> Optional[httpx.AsyncClient]: Must specify http_client as well if you'd like a custom client for sync invocations. """ - if self._http_async_client is not None: - return self._http_async_client - if not self.openai_proxy: - return None - try: - import httpx - except ImportError as e: - raise ImportError( - "Could not import httpx python package. " - "Please install it with `pip install httpx`." - ) from e - self._http_async_client = httpx.AsyncClient(proxy=self.openai_proxy) + if self._http_async_client is None: + if not self.openai_proxy: + return None + try: + import httpx + except ImportError as e: + raise ImportError( + "Could not import httpx python package. " + "Please install it with `pip install httpx`." 
+ ) from e + self._http_async_client = httpx.AsyncClient(proxy=self.openai_proxy) return self._http_async_client @property def root_client(self) -> openai.OpenAI: - if self._root_client is not None: - return self._root_client - sync_specific = {"http_client": self.http_client} - self._root_client = openai.OpenAI(**self._client_params, **sync_specific) # type: ignore[arg-type] + if self._root_client is None: + sync_specific = {"http_client": self.http_client} + self._root_client = openai.OpenAI( + **self._client_params, + **sync_specific, # type: ignore[arg-type] + ) return self._root_client @property def root_async_client(self) -> openai.AsyncOpenAI: - if self._root_async_client is not None: - return self._root_async_client - async_specific = {"http_client": self.http_async_client} - self._root_async_client = openai.AsyncOpenAI( - **self._client_params, - **async_specific, # type: ignore[arg-type] - ) + if self._root_async_client is None: + async_specific = {"http_client": self.http_async_client} + self._root_async_client = openai.AsyncOpenAI( + **self._client_params, + **async_specific, # type: ignore[arg-type] + ) return self._root_async_client @property def client(self) -> Any: - if self._client is not None: - return self._client - self._client = self.root_client.chat.completions + if self._client is None: + self._client = self.root_client.chat.completions return self._client @property def async_client(self) -> Any: - if self._async_client is not None: - return self._async_client - self._async_client = self.root_async_client.chat.completions + if self._async_client is None: + self._async_client = self.root_async_client.chat.completions return self._async_client @property From 24765e49fd640db098ca8d8fa1ef141a997686ae Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Fri, 21 Feb 2025 19:45:43 -0500 Subject: [PATCH 07/15] set global ssl context --- .../openai/langchain_openai/chat_models/base.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index ae80d38abcf98..6468ecf9fdf34 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -6,6 +6,7 @@ import json import logging import os +import ssl import sys import warnings from io import BytesIO @@ -108,6 +109,8 @@ logger = logging.getLogger(__name__) +global_ssl_context = ssl.create_default_context() + def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: """Convert a dictionary to a LangChain message. @@ -601,7 +604,9 @@ def http_client(self) -> Optional[httpx.Client]: "Could not import httpx python package. " "Please install it with `pip install httpx`." ) from e - self._http_client = httpx.Client(proxy=self.openai_proxy) + self._http_client = httpx.Client( + proxy=self.openai_proxy, verify=global_ssl_context + ) return self._http_client @property @@ -621,7 +626,9 @@ def http_async_client(self) -> Optional[httpx.AsyncClient]: "Could not import httpx python package. " "Please install it with `pip install httpx`." 
) from e - self._http_async_client = httpx.AsyncClient(proxy=self.openai_proxy) + self._http_async_client = httpx.AsyncClient( + proxy=self.openai_proxy, verify=global_ssl_context + ) return self._http_async_client @property From 7506c723463043b08785b5b7b69c6f8b028edac1 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Fri, 21 Feb 2025 21:11:49 -0500 Subject: [PATCH 08/15] remove redundant properties on azure --- .../openai/langchain_openai/chat_models/azure.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py index 83b484b3c948e..f73f65458c62e 100644 --- a/libs/partners/openai/langchain_openai/chat_models/azure.py +++ b/libs/partners/openai/langchain_openai/chat_models/azure.py @@ -679,18 +679,6 @@ def root_async_client(self) -> openai.AsyncAzureOpenAI: ) return self._root_async_client - @property - def client(self) -> Any: - if self._client is None: - self._client = self.root_client.chat.completions - return self._client - - @property - def async_client(self) -> Any: - if self._async_client is None: - self._async_client = self.root_async_client.chat.completions - return self._async_client - @property def _identifying_params(self) -> Dict[str, Any]: """Get the identifying parameters.""" From 230f2e030af68c8d30cdd7e72c06cfc5d57780a9 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Fri, 21 Feb 2025 21:12:12 -0500 Subject: [PATCH 09/15] add setters --- .../openai/langchain_openai/chat_models/base.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 6468ecf9fdf34..2b99c34b18d7c 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -641,6 +641,10 @@ def root_client(self) -> openai.OpenAI: ) return self._root_client + @root_client.setter + def root_client(self, value: openai.OpenAI) -> None: + self._root_client = value + @property def root_async_client(self) -> openai.AsyncOpenAI: if self._root_async_client is None: @@ -651,18 +655,30 @@ def root_async_client(self) -> openai.AsyncOpenAI: ) return self._root_async_client + @root_async_client.setter + def root_async_client(self, value: openai.AsyncOpenAI) -> None: + self._root_async_client = value + @property def client(self) -> Any: if self._client is None: self._client = self.root_client.chat.completions return self._client + @client.setter + def client(self, value: Any) -> None: + self._client = value + @property def async_client(self) -> Any: if self._async_client is None: self._async_client = self.root_async_client.chat.completions return self._async_client + @async_client.setter + def async_client(self, value: Any) -> None: + self._async_client = value + @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" From be587d1640ebc3869c4accce6e37eae092367099 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Fri, 21 Feb 2025 21:13:30 -0500 Subject: [PATCH 10/15] update xai --- .../partners/xai/langchain_xai/chat_models.py | 22 +++---------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/libs/partners/xai/langchain_xai/chat_models.py b/libs/partners/xai/langchain_xai/chat_models.py index 0081a734cf3cb..1e4d308f9afa3 100644 --- a/libs/partners/xai/langchain_xai/chat_models.py +++ 
b/libs/partners/xai/langchain_xai/chat_models.py @@ -7,7 +7,6 @@ Optional, ) -import openai from langchain_core.language_models.chat_models import LangSmithParams from langchain_core.utils import secret_from_env from langchain_openai.chat_models.base import BaseChatOpenAI @@ -325,7 +324,7 @@ def validate_environment(self) -> Self: if self.n is not None and self.n > 1 and self.streaming: raise ValueError("n must be 1 when streaming.") - client_params: dict = { + self._client_params: dict = { "api_key": ( self.xai_api_key.get_secret_value() if self.xai_api_key else None ), @@ -335,27 +334,12 @@ def validate_environment(self) -> Self: "default_query": self.default_query, } if self.max_retries is not None: - client_params["max_retries"] = self.max_retries + self._client_params["max_retries"] = self.max_retries - if client_params["api_key"] is None: + if self._client_params["api_key"] is None: raise ValueError( "xAI API key is not set. Please set it in the `xai_api_key` field or " "in the `XAI_API_KEY` environment variable." ) - if not (self.client or None): - sync_specific: dict = {"http_client": self.http_client} - self.client = openai.OpenAI( - **client_params, **sync_specific - ).chat.completions - self.root_client = openai.OpenAI(**client_params, **sync_specific) - if not (self.async_client or None): - async_specific: dict = {"http_client": self.http_async_client} - self.async_client = openai.AsyncOpenAI( - **client_params, **async_specific - ).chat.completions - self.root_async_client = openai.AsyncOpenAI( - **client_params, - **async_specific, - ) return self From 0c4fdeae67baafcaa028d77eb9c6ce1a68793240 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Fri, 21 Feb 2025 21:14:19 -0500 Subject: [PATCH 11/15] update deepseek --- .../deepseek/langchain_deepseek/chat_models.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/libs/partners/deepseek/langchain_deepseek/chat_models.py b/libs/partners/deepseek/langchain_deepseek/chat_models.py index f3bcc2d158354..f53d9b3fd8914 100644 --- a/libs/partners/deepseek/langchain_deepseek/chat_models.py +++ b/libs/partners/deepseek/langchain_deepseek/chat_models.py @@ -178,7 +178,7 @@ def validate_environment(self) -> Self: self.api_key and self.api_key.get_secret_value() ): raise ValueError("If using default api base, DEEPSEEK_API_KEY must be set.") - client_params: dict = { + self._client_params: dict = { k: v for k, v in { "api_key": self.api_key.get_secret_value() if self.api_key else None, @@ -191,16 +191,6 @@ def validate_environment(self) -> Self: if v is not None } - if not (self.client or None): - sync_specific: dict = {"http_client": self.http_client} - self.client = openai.OpenAI( - **client_params, **sync_specific - ).chat.completions - if not (self.async_client or None): - async_specific: dict = {"http_client": self.http_async_client} - self.async_client = openai.AsyncOpenAI( - **client_params, **async_specific - ).chat.completions return self def _create_chat_result( From c23d7f1c2a65838a496a61964f227e25068c72e0 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Fri, 21 Feb 2025 21:22:45 -0500 Subject: [PATCH 12/15] cr --- libs/partners/openai/langchain_openai/chat_models/base.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py index 2b99c34b18d7c..d700bbda07001 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ 
b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -33,6 +33,7 @@
 )
 from urllib.parse import urlparse
 
+import certifi
 import openai
 import tiktoken
 from langchain_core._api.deprecation import deprecated
@@ -109,7 +110,8 @@
 
 logger = logging.getLogger(__name__)
 
-global_ssl_context = ssl.create_default_context()
+# This SSL context is equivalent to the default `verify=True`.
+global_ssl_context = ssl.create_default_context(cafile=certifi.where())
 
 
 def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:

From e19fed1fcc65a06bad65eaf132a922fb28761707 Mon Sep 17 00:00:00 2001
From: Chester Curme
Date: Fri, 21 Feb 2025 21:23:07 -0500
Subject: [PATCH 13/15] lint

---
 .../openai/langchain_openai/chat_models/azure.py | 12 ++++++++++--
 .../openai/langchain_openai/chat_models/base.py  |  4 ++--
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/libs/partners/openai/langchain_openai/chat_models/azure.py b/libs/partners/openai/langchain_openai/chat_models/azure.py
index f73f65458c62e..a6ee5d584fc65 100644
--- a/libs/partners/openai/langchain_openai/chat_models/azure.py
+++ b/libs/partners/openai/langchain_openai/chat_models/azure.py
@@ -660,7 +660,7 @@ def validate_environment(self) -> Self:
         return self
 
     @property
-    def root_client(self) -> openai.AzureOpenAI:
+    def root_client(self) -> Any:
         if self._root_client is None:
             sync_specific = {"http_client": self.http_client}
             self._root_client = openai.AzureOpenAI(
@@ -669,8 +669,12 @@ def root_client(self) -> openai.AzureOpenAI:
             )
         return self._root_client
 
+    @root_client.setter
+    def root_client(self, value: openai.AzureOpenAI) -> None:
+        self._root_client = value
+
     @property
-    def root_async_client(self) -> openai.AsyncAzureOpenAI:
+    def root_async_client(self) -> Any:
         if self._root_async_client is None:
             async_specific = {"http_client": self.http_async_client}
             self._root_async_client = openai.AsyncAzureOpenAI(
@@ -679,6 +683,10 @@ def root_async_client(self) -> openai.AsyncAzureOpenAI:
             )
         return self._root_async_client
 
+    @root_async_client.setter
+    def root_async_client(self, value: openai.AsyncAzureOpenAI) -> None:
+        self._root_async_client = value
+
     @property
     def _identifying_params(self) -> Dict[str, Any]:
         """Get the identifying parameters."""
diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py b/libs/partners/openai/langchain_openai/chat_models/base.py
index d700bbda07001..efa21aa40049b 100644
--- a/libs/partners/openai/langchain_openai/chat_models/base.py
+++ b/libs/partners/openai/langchain_openai/chat_models/base.py
@@ -634,7 +634,7 @@ def http_async_client(self) -> Optional[httpx.AsyncClient]:
         return self._http_async_client
 
     @property
-    def root_client(self) -> openai.OpenAI:
+    def root_client(self) -> Any:
         if self._root_client is None:
             sync_specific = {"http_client": self.http_client}
             self._root_client = openai.OpenAI(
@@ -648,7 +648,7 @@ def root_client(self, value: openai.OpenAI) -> None:
         self._root_client = value
 
     @property
-    def root_async_client(self) -> openai.AsyncOpenAI:
+    def root_async_client(self) -> Any:
         if self._root_async_client is None:
             async_specific = {"http_client": self.http_async_client}
             self._root_async_client = openai.AsyncOpenAI(
From b07bb53f5bae970856bf6fc42a90851b5514f63f Mon Sep 17 00:00:00 2001
From: Chester Curme
Date: Fri, 21 Feb 2025 23:32:31 -0500
Subject: [PATCH 14/15] setters for http clients

---
 libs/partners/openai/langchain_openai/chat_models/base.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/libs/partners/openai/langchain_openai/chat_models/base.py 
b/libs/partners/openai/langchain_openai/chat_models/base.py index efa21aa40049b..1af857f91d10f 100644 --- a/libs/partners/openai/langchain_openai/chat_models/base.py +++ b/libs/partners/openai/langchain_openai/chat_models/base.py @@ -611,6 +611,10 @@ def http_client(self) -> Optional[httpx.Client]: ) return self._http_client + @http_client.setter + def http_client(self, value: Optional[httpx.Client]) -> None: + self._http_client = value + @property def http_async_client(self) -> Optional[httpx.AsyncClient]: """Optional httpx.AsyncClient. Only used for async invocations. @@ -633,6 +637,10 @@ def http_async_client(self) -> Optional[httpx.AsyncClient]: ) return self._http_async_client + @http_async_client.setter + def http_async_client(self, value: Optional[httpx.AsyncClient]) -> None: + self._http_async_client = value + @property def root_client(self) -> Any: if self._root_client is None: From 6f67aacab372100cd0017a66446f77cd8db8b643 Mon Sep 17 00:00:00 2001 From: Chester Curme Date: Sat, 22 Feb 2025 09:28:45 -0500 Subject: [PATCH 15/15] add test --- .../tests/integration_tests/chat_models/test_base.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py index 09cae79520bf1..2568938fec5e0 100644 --- a/libs/partners/openai/tests/integration_tests/chat_models/test_base.py +++ b/libs/partners/openai/tests/integration_tests/chat_models/test_base.py @@ -676,6 +676,16 @@ def test_openai_proxy() -> None: assert proxy.host == b"localhost" assert proxy.port == 8080 + http_async_client = httpx.AsyncClient(proxy="http://localhost:8081") + chat_openai = ChatOpenAI(http_async_client=http_async_client) + mounts = chat_openai.async_client._client._client._mounts + assert len(mounts) == 1 + for key, value in mounts.items(): + proxy = value._pool._proxy_url.origin + assert proxy.scheme == b"http" + assert proxy.host == b"localhost" + assert proxy.port == 8081 + def test_openai_response_headers() -> None: """Test ChatOpenAI response headers."""
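
The series above converges on a single pattern: SDK clients move from pydantic
fields to private attributes, public properties construct them lazily on first
access, and property setters preserve plain assignment for existing callers and
tests. The following minimal sketch illustrates that pattern in isolation; the
`LazyClientModel` class and its stand-in client object are hypothetical (they
are not part of langchain-openai), and it assumes pydantic v2 and
typing_extensions are installed.

from typing import Any, Optional

from pydantic import BaseModel, PrivateAttr, model_validator
from typing_extensions import Self


class LazyClientModel(BaseModel):
    openai_proxy: Optional[str] = None
    _root_client: Any = PrivateAttr(default=None)
    _client_params: dict = PrivateAttr(default_factory=dict)

    def __init__(self, root_client: Optional[Any] = None, **kwargs: Any) -> None:
        # Accept a pre-built client without declaring it as a pydantic field.
        super().__init__(**kwargs)
        self._root_client = root_client

    @model_validator(mode="after")
    def validate_environment(self) -> Self:
        # Collect constructor parameters only; no client is built here.
        self._client_params = {"api_key": "sk-placeholder", "base_url": None}
        return self

    @property
    def root_client(self) -> Any:
        # Built lazily on first access, then cached in the private attribute.
        if self._root_client is None:
            # Stand-in for openai.OpenAI(**self._client_params).
            self._root_client = object()
        return self._root_client

    @root_client.setter
    def root_client(self, value: Any) -> None:
        # Keeps plain assignment (and tests that patch the client) working.
        self._root_client = value


model = LazyClientModel()
assert model._root_client is None  # nothing is built at construction time
_ = model.root_client              # first access triggers construction
assert model._root_client is not None

Because nothing is constructed until a property is read, this pattern removes
the eager httpx/openai client construction from `validate_environment`; that is
why the updated unit tests patch the private `_client`/`_async_client`
attributes and can assert `llm._async_client is None` after a sync-only invoke.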