2 changes: 2 additions & 0 deletions CHANGELOG.md

@@ -9,7 +9,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [UNRELEASED]
 
 ### New features
 
+* Added new functions: `parallel_chat()`, `parallel_chat_text()`, and `parallel_chat_structured()`. These functions make it easy to submit multiple prompts at once with some basic rate limiting toggles. (#188)
+
 ## [0.13.2] - 2025-10-02
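For context, here is a minimal sketch of how the new parallel helpers might be called. It is based only on the changelog entry above: the exact signature, the `max_active` rate-limiting toggle, and the return shape are assumptions, not confirmed by this diff.

```python
# Sketch only -- assumes parallel_chat_text(chat, prompts, ...) returns one
# text response per prompt; `max_active` is a hypothetical throttling knob.
from chatlas import ChatOpenAI, parallel_chat_text

chat = ChatOpenAI()
prompts = [
    "What is the capital of France?",
    "What is the capital of Japan?",
]

# Submit all prompts at once, with basic rate limiting.
answers = parallel_chat_text(chat, prompts, max_active=5)
for prompt, answer in zip(prompts, answers):
    print(f"{prompt} -> {answer}")
```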
4 changes: 4 additions & 0 deletions chatlas/__init__.py

@@ -16,6 +16,7 @@
 from ._content_image import content_image_file, content_image_plot, content_image_url
 from ._content_pdf import content_pdf_file, content_pdf_url
 from ._interpolate import interpolate, interpolate_file
+from ._parallel import parallel_chat, parallel_chat_structured, parallel_chat_text
 from ._provider import Provider
 from ._provider_anthropic import ChatAnthropic, ChatBedrockAnthropic
 from ._provider_cloudflare import ChatCloudflare
@@ -46,6 +47,9 @@
     "batch_chat_completed",
     "batch_chat_structured",
     "batch_chat_text",
+    "parallel_chat",
+    "parallel_chat_structured",
+    "parallel_chat_text",
     "ChatAnthropic",
     "ChatAuto",
     "ChatBedrockAnthropic",
16 changes: 16 additions & 0 deletions chatlas/_chat.py

@@ -1061,6 +1061,7 @@ def chat_structured(
         data_model: type[BaseModelT],
         echo: EchoOptions = "none",
         stream: bool = False,
+        kwargs: Optional[SubmitInputArgsT] = None,
     ) -> BaseModelT:
         """
         Extract structured data.
@@ -1081,6 +1082,9 @@
         - `"none"`: Do not echo any content.
         stream
             Whether to stream the response (i.e., have the response appear in chunks).
+        kwargs
+            Additional keyword arguments to pass to the method used for requesting
+            the response.
 
         Returns
         -------
@@ -1092,6 +1096,7 @@
             data_model=data_model,
             echo=echo,
             stream=stream,
+            kwargs=kwargs,
         )
         return data_model.model_validate(dat)
 
@@ -1124,6 +1129,7 @@ def _submit_and_extract_data(
         data_model: type[BaseModel],
         echo: EchoOptions = "none",
         stream: bool = False,
+        kwargs: Optional[SubmitInputArgsT] = None,
     ) -> dict[str, Any]:
         display = self._markdown_display(echo=echo)
 
@@ -1133,6 +1139,7 @@
                 data_model=data_model,
                 echo=echo,
                 stream=stream,
+                kwargs=kwargs,
             )
         )
 
@@ -1151,6 +1158,7 @@ async def chat_structured_async(
         data_model: type[BaseModelT],
         echo: EchoOptions = "none",
         stream: bool = False,
+        kwargs: Optional[SubmitInputArgsT] = None,
     ) -> BaseModelT:
         """
         Extract structured data from the given input asynchronously.
@@ -1172,6 +1180,9 @@
         stream
             Whether to stream the response (i.e., have the response appear in chunks).
             Defaults to `True` if `echo` is not "none".
+        kwargs
+            Additional keyword arguments to pass to the method used for requesting
+            the response.
 
         Returns
         -------
@@ -1183,6 +1194,7 @@
             data_model=data_model,
             echo=echo,
             stream=stream,
+            kwargs=kwargs,
         )
         return data_model.model_validate(dat)
 
@@ -1192,6 +1204,7 @@ async def extract_data_async(
         data_model: type[BaseModel],
         echo: EchoOptions = "none",
         stream: bool = False,
+        kwargs: Optional[SubmitInputArgsT] = None,
     ) -> dict[str, Any]:
         """
         Deprecated: use `.chat_structured_async()` instead.
@@ -1207,6 +1220,7 @@
             data_model=data_model,
             echo=echo,
             stream=stream,
+            kwargs=kwargs,
         )
 
     async def _submit_and_extract_data_async(
@@ -1215,6 +1229,7 @@
         data_model: type[BaseModel],
         echo: EchoOptions = "none",
         stream: bool = False,
+        kwargs: Optional[SubmitInputArgsT] = None,
     ) -> dict[str, Any]:
         display = self._markdown_display(echo=echo)
 
@@ -1224,6 +1239,7 @@
                 data_model=data_model,
                 echo=echo,
                 stream=stream,
+                kwargs=kwargs,
             )
         )