Merged
32 commits
11b1b20
add profile module to langchain-core
ccurme Nov 19, 2025
0ce2d59
add tomli to core deps
ccurme Nov 19, 2025
771d1e6
turn langchain-model-profiles into cli tool for refreshing data
ccurme Nov 19, 2025
2f96ba6
add data to anthropic
ccurme Nov 19, 2025
d6f9ef9
move loading to provider packages
ccurme Nov 19, 2025
4c60b4c
move to attribute in core
ccurme Nov 19, 2025
829eed4
support on anthropic
ccurme Nov 19, 2025
55c0a4d
allow setting profile fields without mutation
ccurme Nov 19, 2025
09b0e9a
nit
ccurme Nov 20, 2025
97a6015
Merge branch 'master' into cc/model_profiles_distributed
ccurme Nov 20, 2025
42a39d1
cr
ccurme Nov 20, 2025
ed461f4
move data loader tests
ccurme Nov 20, 2025
fc4666c
restructure augmentations toml
ccurme Nov 20, 2025
14f6770
update langchain-model-profiles tests
ccurme Nov 20, 2025
b5bee8d
update langchain-model-profiles readme
ccurme Nov 20, 2025
3f14327
fixes
ccurme Nov 20, 2025
55f00a7
implement _get_default_model_profile
ccurme Nov 20, 2025
3012d7b
update openai
ccurme Nov 20, 2025
a0beff2
update langchain_v1
ccurme Nov 20, 2025
4c5aa25
override for azure
ccurme Nov 20, 2025
18cc6c4
deepseek
ccurme Nov 20, 2025
2bb9315
fireworks
ccurme Nov 20, 2025
c4514ea
groq
ccurme Nov 20, 2025
9622375
huggingface
ccurme Nov 20, 2025
e3c2631
mistral
ccurme Nov 20, 2025
d05641b
perplexity
ccurme Nov 20, 2025
a86c3f2
xai
ccurme Nov 20, 2025
4fd9c77
resolve todo
ccurme Nov 20, 2025
62eb6e8
feat(model-profiles): convert to auto-generated .py files (#34062)
ccurme Nov 21, 2025
639cdaf
document beta status
ccurme Nov 21, 2025
76ffca9
Merge branch 'master' into cc/model_profiles_distributed
ccurme Nov 21, 2025
b7ec873
make module private
ccurme Nov 21, 2025
8 changes: 8 additions & 0 deletions libs/core/langchain_core/language_models/__init__.py
@@ -53,6 +53,10 @@
ParrotFakeChatModel,
)
from langchain_core.language_models.llms import LLM, BaseLLM
from langchain_core.language_models.model_profile import (
ModelProfile,
ModelProfileRegistry,
)

__all__ = (
"LLM",
@@ -68,6 +72,8 @@
"LanguageModelInput",
"LanguageModelLike",
"LanguageModelOutput",
"ModelProfile",
"ModelProfileRegistry",
"ParrotFakeChatModel",
"SimpleChatModel",
"get_tokenizer",
@@ -90,6 +96,8 @@
"GenericFakeChatModel": "fake_chat_models",
"ParrotFakeChatModel": "fake_chat_models",
"LLM": "llms",
"ModelProfile": "model_profile",
"ModelProfileRegistry": "model_profile",
"BaseLLM": "llms",
"is_openai_data_block": "_utils",
}
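The upshot of this change is that the new profile types are importable straight from `langchain_core.language_models`. A minimal sketch (the version pin is an assumption based on the `1.1` note in the docstring below; `"my-model"` is a hypothetical identifier):

```python
# Assumes langchain-core >= 1.1 with this PR applied.
from langchain_core.language_models import ModelProfile, ModelProfileRegistry

# ModelProfile is a TypedDict with total=False, so any subset of keys is valid.
profile: ModelProfile = {"max_input_tokens": 200_000, "tool_calling": True}

# A registry maps model identifiers or names to their profiles.
registry: ModelProfileRegistry = {"my-model": profile}
```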
53 changes: 16 additions & 37 deletions libs/core/langchain_core/language_models/chat_models.py
@@ -15,7 +15,6 @@
from pydantic import BaseModel, ConfigDict, Field
from typing_extensions import override

from langchain_core._api.beta_decorator import beta
from langchain_core.caches import BaseCache
from langchain_core.callbacks import (
AsyncCallbackManager,
@@ -34,6 +33,7 @@
LangSmithParams,
LanguageModelInput,
)
from langchain_core.language_models.model_profile import ModelProfile
from langchain_core.load import dumpd, dumps
from langchain_core.messages import (
AIMessage,
@@ -76,8 +76,6 @@
if TYPE_CHECKING:
import uuid

from langchain_model_profiles import ModelProfile # type: ignore[import-untyped]

from langchain_core.output_parsers.base import OutputParserLike
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.tools import BaseTool
@@ -339,6 +337,21 @@ class BaseChatModel(BaseLanguageModel[AIMessage], ABC):
"""

profile: ModelProfile | None = Field(default=None, exclude=True)
"""Profile detailing model capabilities.
!!! warning "Beta feature"
This is a beta feature. The format of model profiles is subject to change.
If not specified, automatically loaded from the provider package on initialization
if data is available.
Example profile data includes context window sizes, supported modalities, or support
for tool calling, structured output, and other features.
!!! version-added "Added in `langchain-core` 1.1"
"""

model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@@ -1688,40 +1701,6 @@ class AnswerWithJustification(BaseModel):
return RunnableMap(raw=llm) | parser_with_fallback
return llm | output_parser

@property
@beta()
def profile(self) -> ModelProfile:
"""Return profiling information for the model.
This property relies on the `langchain-model-profiles` package to retrieve chat
model capabilities, such as context window sizes and supported features.
Raises:
ImportError: If `langchain-model-profiles` is not installed.
Returns:
A `ModelProfile` object containing profiling information for the model.
"""
try:
from langchain_model_profiles import get_model_profile # noqa: PLC0415
except ImportError as err:
informative_error_message = (
"To access model profiling information, please install the "
"`langchain-model-profiles` package: "
"`pip install langchain-model-profiles`."
)
raise ImportError(informative_error_message) from err

provider_id = self._llm_type
model_name = (
# Model name is not standardized across integrations. New integrations
# should prefer `model`.
getattr(self, "model", None)
or getattr(self, "model_name", None)
or getattr(self, "model_id", "")
)
return get_model_profile(provider_id, model_name) or {}


class SimpleChatModel(BaseChatModel):
"""Simplified implementation for a chat model to inherit from.
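To illustrate the behavioral change (`profile` moves from a beta property that required `langchain-model-profiles` at call time to a plain field populated at construction), here is a sketch using the in-repo fake model, mirroring the updated unit test further down:

```python
from langchain_core.language_models import GenericFakeChatModel

# Without provider data, the field defaults to None; it no longer raises
# ImportError or returns {} the way the old property did.
model = GenericFakeChatModel(messages=iter([]))
assert model.profile is None

# A profile can be supplied directly at init, without mutating shared data.
model = GenericFakeChatModel(messages=iter([]), profile={"max_input_tokens": 100})
assert model.profile == {"max_input_tokens": 100}
```

This also removes the optional runtime dependency from the hot path: provider packages ship their own profile data and the base class simply carries it.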
84 changes: 84 additions & 0 deletions libs/core/langchain_core/language_models/model_profile.py
@@ -0,0 +1,84 @@
"""Model profile types and utilities."""

from typing_extensions import TypedDict


class ModelProfile(TypedDict, total=False):
"""Model profile.
!!! warning "Beta feature"
This is a beta feature. The format of model profiles is subject to change.
Provides information about chat model capabilities, such as context window sizes
and supported features.
"""

# --- Input constraints ---

max_input_tokens: int
"""Maximum context window (tokens)"""

image_inputs: bool
"""Whether image inputs are supported."""
# TODO: add more detail about formats?

image_url_inputs: bool
"""Whether [image URL inputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""

pdf_inputs: bool
"""Whether [PDF inputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""
# TODO: add more detail about formats? e.g. bytes or base64

audio_inputs: bool
"""Whether [audio inputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""
# TODO: add more detail about formats? e.g. bytes or base64

video_inputs: bool
"""Whether [video inputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""
# TODO: add more detail about formats? e.g. bytes or base64

image_tool_message: bool
"""Whether images can be included in tool messages."""

pdf_tool_message: bool
"""Whether PDFs can be included in tool messages."""

# --- Output constraints ---

max_output_tokens: int
"""Maximum output tokens"""

reasoning_output: bool
"""Whether the model supports [reasoning / chain-of-thought](https://docs.langchain.com/oss/python/langchain/models#reasoning)"""

image_outputs: bool
"""Whether [image outputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""

audio_outputs: bool
"""Whether [audio outputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""

video_outputs: bool
"""Whether [video outputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""

# --- Tool calling ---
tool_calling: bool
"""Whether the model supports [tool calling](https://docs.langchain.com/oss/python/langchain/models#tool-calling)"""

tool_choice: bool
"""Whether the model supports [tool choice](https://docs.langchain.com/oss/python/langchain/models#forcing-tool-calls)"""

# --- Structured output ---
structured_output: bool
"""Whether the model supports a native [structured output](https://docs.langchain.com/oss/python/langchain/models#structured-outputs)
feature"""


ModelProfileRegistry = dict[str, ModelProfile]
"""Registry mapping model identifiers or names to their ModelProfile."""
3 changes: 0 additions & 3 deletions libs/core/pyproject.toml
@@ -36,7 +36,6 @@ typing = [
"mypy>=1.18.1,<1.19.0",
"types-pyyaml>=6.0.12.2,<7.0.0.0",
"types-requests>=2.28.11.5,<3.0.0.0",
"langchain-model-profiles",
"langchain-text-splitters",
]
dev = [
@@ -58,15 +57,13 @@ test = [
"blockbuster>=1.5.18,<1.6.0",
"numpy>=1.26.4; python_version<'3.13'",
"numpy>=2.1.0; python_version>='3.13'",
"langchain-model-profiles",
"langchain-tests",
"pytest-benchmark",
"pytest-codspeed",
]
test_integration = []

[tool.uv.sources]
langchain-model-profiles = { path = "../model-profiles" }
langchain-tests = { path = "../standard-tests" }
langchain-text-splitters = { path = "../text-splitters" }

@@ -1222,19 +1222,12 @@ def _llm_type(self) -> str:

def test_model_profiles() -> None:
model = GenericFakeChatModel(messages=iter([]))
profile = model.profile
assert profile == {}
assert model.profile is None

class MyModel(GenericFakeChatModel):
model: str = "gpt-5"

@property
def _llm_type(self) -> str:
return "openai-chat"

model = MyModel(messages=iter([]))
profile = model.profile
assert profile
model_with_profile = GenericFakeChatModel(
messages=iter([]), profile={"max_input_tokens": 100}
)
assert model_with_profile.profile == {"max_input_tokens": 100}


class MockResponse:
2 changes: 2 additions & 0 deletions libs/core/tests/unit_tests/language_models/test_imports.py
@@ -18,6 +18,8 @@
"FakeStreamingListLLM",
"FakeListLLM",
"ParrotFakeChatModel",
"ModelProfile",
"ModelProfileRegistry",
"is_openai_data_block",
]

44 changes: 0 additions & 44 deletions libs/core/uv.lock


23 changes: 10 additions & 13 deletions libs/langchain_v1/langchain/agents/factory.py
@@ -64,7 +64,7 @@
STRUCTURED_OUTPUT_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."

FALLBACK_MODELS_WITH_STRUCTURED_OUTPUT = [
# if langchain-model-profiles is not installed, these models are assumed to support
# if model profile data is not available, these models are assumed to support
# structured output
"grok",
"gpt-5",
@@ -381,18 +381,15 @@ def _supports_provider_strategy(model: str | BaseChatModel, tools: list | None =
or getattr(model, "model", None)
or getattr(model, "model_id", "")
)
try:
model_profile = model.profile
except ImportError:
pass
else:
if (
model_profile.get("structured_output")
# We make an exception for Gemini models, which currently do not support
# simultaneous tool use with structured output
and not (tools and isinstance(model_name, str) and "gemini" in model_name.lower())
):
return True
model_profile = model.profile
if (
model_profile is not None
and model_profile.get("structured_output")
# We make an exception for Gemini models, which currently do not support
# simultaneous tool use with structured output
and not (tools and isinstance(model_name, str) and "gemini" in model_name.lower())
):
return True

return (
any(part in model_name.lower() for part in FALLBACK_MODELS_WITH_STRUCTURED_OUTPUT)
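A self-contained paraphrase of the updated check may help; this is abridged (the diff's trailing `return` includes further conditions cut off by the fold, and the allowlist is shortened), and the standalone function is ours rather than PR code:

```python
from langchain_core.language_models import ModelProfile

# Abridged from the diff; the real list contains more entries.
FALLBACK_MODELS_WITH_STRUCTURED_OUTPUT = ["grok", "gpt-5"]


def supports_provider_strategy(
    model_name: str,
    profile: ModelProfile | None,
    tools: list | None = None,
) -> bool:
    # Profile data, when present, is authoritative -- except for Gemini
    # models, which currently do not support simultaneous tool use with
    # structured output.
    if (
        profile is not None
        and profile.get("structured_output")
        and not (tools and "gemini" in model_name.lower())
    ):
        return True
    # Without profile data, fall back to the name-based allowlist.
    return any(part in model_name.lower() for part in FALLBACK_MODELS_WITH_STRUCTURED_OUTPUT)
```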
10 changes: 6 additions & 4 deletions libs/langchain_v1/langchain/agents/middleware/summarization.py
@@ -159,9 +159,11 @@ def __init__(
requires_profile = True
if requires_profile and self._get_profile_limits() is None:
msg = (
"Model profile information is required to use fractional token limits. "
'pip install "langchain[model-profiles]" or use absolute token counts '
"instead."
"Model profile information is required to use fractional token limits, "
"and is unavailable for the specified model. Please use absolute token "
"counts instead, or pass "
'`ChatModel(..., profile={"max_input_tokens": ...})` '
"with a desired integer value of the model's maximum input tokens."
)
raise ValueError(msg)

@@ -308,7 +310,7 @@ def _get_profile_limits(self) -> int | None:
"""Retrieve max input token limit from the model profile."""
try:
profile = self.model.profile
except (AttributeError, ImportError):
except AttributeError:
return None

if not isinstance(profile, Mapping):
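As the rewritten error message indicates, a user hitting this `ValueError` can either switch to absolute token counts or attach the limit to the model up front. A hedged sketch (`ChatAnthropic` and the model name are examples only; any `BaseChatModel` subclass gains the `profile` field from this PR):

```python
from langchain_anthropic import ChatAnthropic  # example provider package

# Supplying max_input_tokens up front lets _get_profile_limits() resolve a
# limit even when no bundled profile data matches the model name.
model = ChatAnthropic(
    model="claude-sonnet-4-5",  # example model name
    profile={"max_input_tokens": 200_000},
)
```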