Commit 8ce69f2

feat(api): manual updates
add /uploads endpoint
1 parent d9165bc commit 8ce69f2

14 files changed: +1056 additions, −6 deletions

.stats.yml

Lines changed: 4 additions & 4 deletions
@@ -1,4 +1,4 @@
-configured_endpoints: 4
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/meta%2Fllama-api-bfa0267b010dcc4b39e62dfbd698ac6f9421f3212c44b3408b9b154bd6c67a8b.yml
-openapi_spec_hash: 7f424537bc7ea7638e3934ef721b8d71
-config_hash: fd80f72884b7fef46327f3fb70dcb1c4
+configured_endpoints: 7
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/meta%2Fllama-api-edf0a308dd29bea2feb29f2e7f04eec4dbfb130ffe52511641783958168f60a4.yml
+openapi_spec_hash: 23af966c58151516aaef00e0af602c01
+config_hash: 431a8aed31c3576451a36d2db8f48c25

README.md

Lines changed: 18 additions & 0 deletions
@@ -172,6 +172,24 @@ Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typ
 
 Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.
 
+## File uploads
+
+Request parameters that correspond to file uploads can be passed as `bytes`, a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, or a tuple of `(filename, contents, media type)`.
+
+```python
+from pathlib import Path
+from llama_api_client import LlamaAPIClient
+
+client = LlamaAPIClient()
+
+client.uploads.part(
+    upload_id="upload_id",
+    data=Path("/path/to/file"),
+)
+```
+
+The async client uses the exact same interface. If you pass a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance, the file contents will be read asynchronously automatically.
+
 ## Handling errors
 
 When the library is unable to connect to the API (for example, due to network connection problems or a timeout), a subclass of `llama_api_client.APIConnectionError` is raised.
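
To make the new README section concrete, here is a minimal sketch of the same upload with the async client, assuming `AsyncLlamaAPIClient` is exported at the package top level like the sync client (the `_client.py` changes below add the `uploads` resource to it):

```python
import asyncio
from pathlib import Path

from llama_api_client import AsyncLlamaAPIClient  # assumed top-level export


async def main() -> None:
    client = AsyncLlamaAPIClient()
    # Passing a PathLike means the file contents are read asynchronously by the client.
    await client.uploads.part(
        upload_id="upload_id",
        data=Path("/path/to/file"),
    )


asyncio.run(main())
```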

api.md

Lines changed: 14 additions & 0 deletions
@@ -35,6 +35,20 @@ Methods:
 - <code title="get /models/{model}">client.models.<a href="./src/llama_api_client/resources/models.py">retrieve</a>(model) -> <a href="./src/llama_api_client/types/llama_model.py">LlamaModel</a></code>
 - <code title="get /models">client.models.<a href="./src/llama_api_client/resources/models.py">list</a>() -> <a href="./src/llama_api_client/types/model_list_response.py">ModelListResponse</a></code>
 
+# Uploads
+
+Types:
+
+```python
+from llama_api_client.types import UploadCreateResponse, UploadGetResponse, UploadPartResponse
+```
+
+Methods:
+
+- <code title="post /uploads">client.uploads.<a href="./src/llama_api_client/resources/uploads.py">create</a>(\*\*<a href="src/llama_api_client/types/upload_create_params.py">params</a>) -> <a href="./src/llama_api_client/types/upload_create_response.py">UploadCreateResponse</a></code>
+- <code title="get /uploads/{upload_id}">client.uploads.<a href="./src/llama_api_client/resources/uploads.py">get</a>(upload_id) -> <a href="./src/llama_api_client/types/upload_get_response.py">UploadGetResponse</a></code>
+- <code title="post /uploads/{upload_id}">client.uploads.<a href="./src/llama_api_client/resources/uploads.py">part</a>(upload_id, \*\*<a href="src/llama_api_client/types/upload_part_params.py">params</a>) -> <a href="./src/llama_api_client/types/upload_part_response.py">UploadPartResponse</a></code>
+
 # Moderations
 
 Types:
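
The three new methods suggest a create → part → get flow. As an illustration only (the diff does not show the request parameter names, so the `filename` argument and the `id` field on the response below are hypothetical), a minimal sketch might look like:

```python
from pathlib import Path
from llama_api_client import LlamaAPIClient

client = LlamaAPIClient()

# POST /uploads — start an upload; the exact parameters are hypothetical here.
upload = client.uploads.create(filename="report.pdf")

# POST /uploads/{upload_id} — send the file contents as a part.
client.uploads.part(
    upload_id=upload.id,  # assumes the create response exposes an `id`
    data=Path("/path/to/report.pdf"),
)

# GET /uploads/{upload_id} — fetch the upload's current state.
status = client.uploads.get(upload.id)
print(status)
```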

src/llama_api_client/_client.py

Lines changed: 9 additions & 1 deletion
@@ -21,7 +21,7 @@
 )
 from ._utils import is_given, get_async_library
 from ._version import __version__
-from .resources import models, moderations
+from .resources import models, uploads, moderations
 from ._streaming import Stream as Stream, AsyncStream as AsyncStream
 from ._exceptions import APIStatusError, LlamaAPIClientError
 from ._base_client import (
@@ -46,6 +46,7 @@
 class LlamaAPIClient(SyncAPIClient):
     chat: chat.ChatResource
     models: models.ModelsResource
+    uploads: uploads.UploadsResource
     moderations: moderations.ModerationsResource
     with_raw_response: LlamaAPIClientWithRawResponse
     with_streaming_response: LlamaAPIClientWithStreamedResponse
@@ -106,6 +107,7 @@ def __init__(
 
         self.chat = chat.ChatResource(self)
         self.models = models.ModelsResource(self)
+        self.uploads = uploads.UploadsResource(self)
         self.moderations = moderations.ModerationsResource(self)
         self.with_raw_response = LlamaAPIClientWithRawResponse(self)
         self.with_streaming_response = LlamaAPIClientWithStreamedResponse(self)
@@ -218,6 +220,7 @@ def _make_status_error(
 class AsyncLlamaAPIClient(AsyncAPIClient):
     chat: chat.AsyncChatResource
     models: models.AsyncModelsResource
+    uploads: uploads.AsyncUploadsResource
     moderations: moderations.AsyncModerationsResource
     with_raw_response: AsyncLlamaAPIClientWithRawResponse
     with_streaming_response: AsyncLlamaAPIClientWithStreamedResponse
@@ -278,6 +281,7 @@ def __init__(
 
         self.chat = chat.AsyncChatResource(self)
         self.models = models.AsyncModelsResource(self)
+        self.uploads = uploads.AsyncUploadsResource(self)
         self.moderations = moderations.AsyncModerationsResource(self)
         self.with_raw_response = AsyncLlamaAPIClientWithRawResponse(self)
         self.with_streaming_response = AsyncLlamaAPIClientWithStreamedResponse(self)
@@ -391,27 +395,31 @@ class LlamaAPIClientWithRawResponse:
     def __init__(self, client: LlamaAPIClient) -> None:
         self.chat = chat.ChatResourceWithRawResponse(client.chat)
         self.models = models.ModelsResourceWithRawResponse(client.models)
+        self.uploads = uploads.UploadsResourceWithRawResponse(client.uploads)
         self.moderations = moderations.ModerationsResourceWithRawResponse(client.moderations)
 
 
 class AsyncLlamaAPIClientWithRawResponse:
     def __init__(self, client: AsyncLlamaAPIClient) -> None:
         self.chat = chat.AsyncChatResourceWithRawResponse(client.chat)
         self.models = models.AsyncModelsResourceWithRawResponse(client.models)
+        self.uploads = uploads.AsyncUploadsResourceWithRawResponse(client.uploads)
         self.moderations = moderations.AsyncModerationsResourceWithRawResponse(client.moderations)
 
 
 class LlamaAPIClientWithStreamedResponse:
     def __init__(self, client: LlamaAPIClient) -> None:
         self.chat = chat.ChatResourceWithStreamingResponse(client.chat)
         self.models = models.ModelsResourceWithStreamingResponse(client.models)
+        self.uploads = uploads.UploadsResourceWithStreamingResponse(client.uploads)
         self.moderations = moderations.ModerationsResourceWithStreamingResponse(client.moderations)
 
 
 class AsyncLlamaAPIClientWithStreamedResponse:
     def __init__(self, client: AsyncLlamaAPIClient) -> None:
         self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat)
         self.models = models.AsyncModelsResourceWithStreamingResponse(client.models)
+        self.uploads = uploads.AsyncUploadsResourceWithStreamingResponse(client.uploads)
         self.moderations = moderations.AsyncModerationsResourceWithStreamingResponse(client.moderations)
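
Beyond plain `client.uploads`, the wiring above also attaches the new resource to the raw- and streamed-response wrappers. A minimal sketch of raw-response access, assuming the wrapper mirrors the resource's methods (the usual pattern for these generated clients):

```python
from pathlib import Path
from llama_api_client import LlamaAPIClient

client = LlamaAPIClient()

# client.with_raw_response.uploads is the UploadsResourceWithRawResponse set up above;
# calling part() here is assumed to return the raw HTTP response rather than the parsed model.
raw = client.with_raw_response.uploads.part(
    upload_id="upload_id",
    data=Path("/path/to/file"),
)
```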

src/llama_api_client/_files.py

Lines changed: 1 addition & 1 deletion
@@ -34,7 +34,7 @@ def assert_is_file_content(obj: object, *, key: str | None = None) -> None:
     if not is_file_content(obj):
         prefix = f"Expected entry at `{key}`" if key is not None else f"Expected file input `{obj!r}`"
         raise RuntimeError(
-            f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead."
+            f"{prefix} to be bytes, an io.IOBase instance, PathLike or a tuple but received {type(obj)} instead. See https://github.com/meta-llama/llama-api-python/tree/main#file-uploads"
         ) from None
 
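For reference, the file-content forms this error message (and the README section it now links to) accepts look like the following; the tuple ordering `(filename, contents, media type)` comes from the README diff above, and the paths are placeholders:

```python
from pathlib import Path

# Forms accepted as file content, per the error message above:
as_bytes = b"raw file contents"                                    # bytes
as_path = Path("/path/to/file")                                    # os.PathLike
as_file = open("/path/to/file", "rb")                              # io.IOBase instance
as_tuple = ("file.pdf", b"raw file contents", "application/pdf")   # (filename, contents, media type)
```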

src/llama_api_client/resources/__init__.py

Lines changed: 14 additions & 0 deletions
@@ -16,6 +16,14 @@
     ModelsResourceWithStreamingResponse,
     AsyncModelsResourceWithStreamingResponse,
 )
+from .uploads import (
+    UploadsResource,
+    AsyncUploadsResource,
+    UploadsResourceWithRawResponse,
+    AsyncUploadsResourceWithRawResponse,
+    UploadsResourceWithStreamingResponse,
+    AsyncUploadsResourceWithStreamingResponse,
+)
 from .moderations import (
     ModerationsResource,
     AsyncModerationsResource,
@@ -38,6 +46,12 @@
     "AsyncModelsResourceWithRawResponse",
     "ModelsResourceWithStreamingResponse",
     "AsyncModelsResourceWithStreamingResponse",
+    "UploadsResource",
+    "AsyncUploadsResource",
+    "UploadsResourceWithRawResponse",
+    "AsyncUploadsResourceWithRawResponse",
+    "UploadsResourceWithStreamingResponse",
+    "AsyncUploadsResourceWithStreamingResponse",
     "ModerationsResource",
     "AsyncModerationsResource",
     "ModerationsResourceWithRawResponse",
