```diff
@@ -33,6 +33,7 @@
     datasets,
     inference,
     providers,
+    responses,
     telemetry,
     vector_io,
     benchmarks,
```
```diff
@@ -71,6 +72,7 @@ class LlamaStackClient(SyncAPIClient):
     toolgroups: toolgroups.ToolgroupsResource
     tools: tools.ToolsResource
     tool_runtime: tool_runtime.ToolRuntimeResource
+    responses: responses.ResponsesResource
     agents: agents.AgentsResource
     datasets: datasets.DatasetsResource
     eval: eval.EvalResource
@@ -153,6 +155,7 @@ def __init__(
         self.toolgroups = toolgroups.ToolgroupsResource(self)
         self.tools = tools.ToolsResource(self)
         self.tool_runtime = tool_runtime.ToolRuntimeResource(self)
+        self.responses = responses.ResponsesResource(self)
         self.agents = agents.AgentsResource(self)
         self.datasets = datasets.DatasetsResource(self)
         self.eval = eval.EvalResource(self)
```
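The two hunks above declare the new resource on the synchronous client and wire it up in `__init__`, so it becomes reachable as `client.responses`. A minimal usage sketch follows; the `create` method, its arguments, the model id, and the `base_url` are assumptions for illustration — the diff only confirms that the attribute exists.

```python
# Minimal sketch: calling the new resource on the sync client.
# Only `client.responses` is shown in this diff; everything else here
# (method name, parameters, model id, server URL) is assumed.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

response = client.responses.create(  # hypothetical method
    model="meta-llama/Llama-3.2-3B-Instruct",  # hypothetical model id
    input="Say hello in one sentence.",
)
print(response)
```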
```diff
@@ -287,6 +290,7 @@ class AsyncLlamaStackClient(AsyncAPIClient):
     toolgroups: toolgroups.AsyncToolgroupsResource
     tools: tools.AsyncToolsResource
     tool_runtime: tool_runtime.AsyncToolRuntimeResource
+    responses: responses.AsyncResponsesResource
     agents: agents.AsyncAgentsResource
     datasets: datasets.AsyncDatasetsResource
     eval: eval.AsyncEvalResource
@@ -369,6 +373,7 @@ def __init__(
         self.toolgroups = toolgroups.AsyncToolgroupsResource(self)
         self.tools = tools.AsyncToolsResource(self)
         self.tool_runtime = tool_runtime.AsyncToolRuntimeResource(self)
+        self.responses = responses.AsyncResponsesResource(self)
         self.agents = agents.AsyncAgentsResource(self)
         self.datasets = datasets.AsyncDatasetsResource(self)
         self.eval = eval.AsyncEvalResource(self)
```
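The async client gets the same treatment via `AsyncResponsesResource`. A sketch mirroring the sync example above; as before, `create` and its arguments are assumptions — the diff only shows that `client.responses` exists on `AsyncLlamaStackClient`.

```python
# Minimal async sketch. Method name and arguments are assumed, not
# confirmed by this diff.
import asyncio

from llama_stack_client import AsyncLlamaStackClient


async def main() -> None:
    client = AsyncLlamaStackClient(base_url="http://localhost:8321")  # assumed local server
    response = await client.responses.create(  # hypothetical method
        model="meta-llama/Llama-3.2-3B-Instruct",  # hypothetical model id
        input="Say hello in one sentence.",
    )
    print(response)


asyncio.run(main())
```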
```diff
@@ -504,6 +509,7 @@ def __init__(self, client: LlamaStackClient) -> None:
         self.toolgroups = toolgroups.ToolgroupsResourceWithRawResponse(client.toolgroups)
         self.tools = tools.ToolsResourceWithRawResponse(client.tools)
         self.tool_runtime = tool_runtime.ToolRuntimeResourceWithRawResponse(client.tool_runtime)
+        self.responses = responses.ResponsesResourceWithRawResponse(client.responses)
         self.agents = agents.AgentsResourceWithRawResponse(client.agents)
         self.datasets = datasets.DatasetsResourceWithRawResponse(client.datasets)
         self.eval = eval.EvalResourceWithRawResponse(client.eval)
@@ -533,6 +539,7 @@ def __init__(self, client: AsyncLlamaStackClient) -> None:
         self.toolgroups = toolgroups.AsyncToolgroupsResourceWithRawResponse(client.toolgroups)
         self.tools = tools.AsyncToolsResourceWithRawResponse(client.tools)
         self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithRawResponse(client.tool_runtime)
+        self.responses = responses.AsyncResponsesResourceWithRawResponse(client.responses)
         self.agents = agents.AsyncAgentsResourceWithRawResponse(client.agents)
         self.datasets = datasets.AsyncDatasetsResourceWithRawResponse(client.datasets)
         self.eval = eval.AsyncEvalResourceWithRawResponse(client.eval)
```
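These two hunks register the new resource on the raw-response wrapper classes. A sketch of how that view is typically reached, assuming this SDK follows the usual Stainless-generated pattern of exposing these wrappers through a `with_raw_response` property (the property itself is not part of this diff, and `create` remains hypothetical):

```python
# Sketch of the raw-response view these wrapper classes back, under the
# assumptions stated above.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

raw = client.with_raw_response.responses.create(  # hypothetical method
    model="meta-llama/Llama-3.2-3B-Instruct",  # hypothetical model id
    input="Say hello in one sentence.",
)
print(raw.headers)  # raw HTTP headers are available on this view
print(raw.parse())  # parse() returns the regular response object
```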
```diff
@@ -564,6 +571,7 @@ def __init__(self, client: LlamaStackClient) -> None:
         self.toolgroups = toolgroups.ToolgroupsResourceWithStreamingResponse(client.toolgroups)
         self.tools = tools.ToolsResourceWithStreamingResponse(client.tools)
         self.tool_runtime = tool_runtime.ToolRuntimeResourceWithStreamingResponse(client.tool_runtime)
+        self.responses = responses.ResponsesResourceWithStreamingResponse(client.responses)
         self.agents = agents.AgentsResourceWithStreamingResponse(client.agents)
         self.datasets = datasets.DatasetsResourceWithStreamingResponse(client.datasets)
         self.eval = eval.EvalResourceWithStreamingResponse(client.eval)
@@ -595,6 +603,7 @@ def __init__(self, client: AsyncLlamaStackClient) -> None:
         self.toolgroups = toolgroups.AsyncToolgroupsResourceWithStreamingResponse(client.toolgroups)
         self.tools = tools.AsyncToolsResourceWithStreamingResponse(client.tools)
         self.tool_runtime = tool_runtime.AsyncToolRuntimeResourceWithStreamingResponse(client.tool_runtime)
+        self.responses = responses.AsyncResponsesResourceWithStreamingResponse(client.responses)
         self.agents = agents.AsyncAgentsResourceWithStreamingResponse(client.agents)
         self.datasets = datasets.AsyncDatasetsResourceWithStreamingResponse(client.datasets)
         self.eval = eval.AsyncEvalResourceWithStreamingResponse(client.eval)
```
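Finally, the streaming-response wrappers. A sketch of the context-manager pattern such wrappers typically support, assuming a `with_streaming_response` property and an `iter_text()` iterator on the response — neither is confirmed by this diff, which only adds the wrapper classes:

```python
# Sketch of the streaming-response view, under the assumptions stated above.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")  # assumed local server

with client.with_streaming_response.responses.create(  # hypothetical method
    model="meta-llama/Llama-3.2-3B-Instruct",  # hypothetical model id
    input="Say hello in one sentence.",
) as response:
    for chunk in response.iter_text():  # stream the body incrementally
        print(chunk, end="")
```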