@@ -21,7 +21,7 @@
 )
 from ._utils import is_given, get_async_library
 from ._version import __version__
-from .resources import models, moderations
+from .resources import models, uploads, moderations
 from ._streaming import Stream as Stream, AsyncStream as AsyncStream
 from ._exceptions import APIStatusError, LlamaAPIClientError
 from ._base_client import (
@@ -46,6 +46,7 @@
 class LlamaAPIClient(SyncAPIClient):
     chat: chat.ChatResource
     models: models.ModelsResource
+    uploads: uploads.UploadsResource
     moderations: moderations.ModerationsResource
     with_raw_response: LlamaAPIClientWithRawResponse
     with_streaming_response: LlamaAPIClientWithStreamedResponse
@@ -106,6 +107,7 @@ def __init__(
 
         self.chat = chat.ChatResource(self)
         self.models = models.ModelsResource(self)
+        self.uploads = uploads.UploadsResource(self)
         self.moderations = moderations.ModerationsResource(self)
         self.with_raw_response = LlamaAPIClientWithRawResponse(self)
         self.with_streaming_response = LlamaAPIClientWithStreamedResponse(self)
@@ -218,6 +220,7 @@ def _make_status_error(
 class AsyncLlamaAPIClient(AsyncAPIClient):
     chat: chat.AsyncChatResource
     models: models.AsyncModelsResource
+    uploads: uploads.AsyncUploadsResource
     moderations: moderations.AsyncModerationsResource
     with_raw_response: AsyncLlamaAPIClientWithRawResponse
     with_streaming_response: AsyncLlamaAPIClientWithStreamedResponse
@@ -278,6 +281,7 @@ def __init__(
 
         self.chat = chat.AsyncChatResource(self)
         self.models = models.AsyncModelsResource(self)
+        self.uploads = uploads.AsyncUploadsResource(self)
         self.moderations = moderations.AsyncModerationsResource(self)
         self.with_raw_response = AsyncLlamaAPIClientWithRawResponse(self)
         self.with_streaming_response = AsyncLlamaAPIClientWithStreamedResponse(self)
@@ -391,27 +395,31 @@ class LlamaAPIClientWithRawResponse:
     def __init__(self, client: LlamaAPIClient) -> None:
         self.chat = chat.ChatResourceWithRawResponse(client.chat)
         self.models = models.ModelsResourceWithRawResponse(client.models)
+        self.uploads = uploads.UploadsResourceWithRawResponse(client.uploads)
         self.moderations = moderations.ModerationsResourceWithRawResponse(client.moderations)
 
 
 class AsyncLlamaAPIClientWithRawResponse:
     def __init__(self, client: AsyncLlamaAPIClient) -> None:
         self.chat = chat.AsyncChatResourceWithRawResponse(client.chat)
         self.models = models.AsyncModelsResourceWithRawResponse(client.models)
+        self.uploads = uploads.AsyncUploadsResourceWithRawResponse(client.uploads)
         self.moderations = moderations.AsyncModerationsResourceWithRawResponse(client.moderations)
 
 
 class LlamaAPIClientWithStreamedResponse:
     def __init__(self, client: LlamaAPIClient) -> None:
         self.chat = chat.ChatResourceWithStreamingResponse(client.chat)
         self.models = models.ModelsResourceWithStreamingResponse(client.models)
+        self.uploads = uploads.UploadsResourceWithStreamingResponse(client.uploads)
         self.moderations = moderations.ModerationsResourceWithStreamingResponse(client.moderations)
 
 
 class AsyncLlamaAPIClientWithStreamedResponse:
     def __init__(self, client: AsyncLlamaAPIClient) -> None:
         self.chat = chat.AsyncChatResourceWithStreamingResponse(client.chat)
         self.models = models.AsyncModelsResourceWithStreamingResponse(client.models)
+        self.uploads = uploads.AsyncUploadsResourceWithStreamingResponse(client.uploads)
         self.moderations = moderations.AsyncModerationsResourceWithStreamingResponse(client.moderations)
 
 
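
For context, a minimal usage sketch of what this change wires up. This is not taken from the diff: the top-level `llama_api_client` import path and the constructor arguments are assumptions, and no upload method calls are shown because their signatures are not part of this change.

    from llama_api_client import LlamaAPIClient, AsyncLlamaAPIClient

    # Constructor arguments (e.g. api_key) omitted; package-level exports are assumed here.
    client = LlamaAPIClient()

    # The new resource is registered next to chat, models, and moderations,
    # and is mirrored on both response wrappers:
    client.uploads                          # uploads.UploadsResource
    client.with_raw_response.uploads        # uploads.UploadsResourceWithRawResponse
    client.with_streaming_response.uploads  # uploads.UploadsResourceWithStreamingResponse

    # The async client exposes the equivalent async resource:
    async_client = AsyncLlamaAPIClient()
    async_client.uploads                    # uploads.AsyncUploadsResource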