@@ -43,12 +43,18 @@ inference_client = GradientAI(
 )
 agent_client = GradientAI(
     agent_key=os.environ.get("GRADIENTAI_AGENT_KEY"),  # This is the default and can be omitted
-    agent_endpoint="https://my-cool-agent.agents.do-ai.run",
+    agent_endpoint="https://my-agent.agents.do-ai.run",
 )

-print(api_client.agents.list())
+## API
+api_response = api_client.agents.list()
+print("--- API")
+if api_response.agents:
+    print(api_response.agents[0].name)

-completion = inference_client.chat.completions.create(
+
+## Serverless Inference
+inference_response = inference_client.chat.completions.create(
     messages=[
         {
             "role": "user",
@@ -58,7 +64,22 @@ completion = inference_client.chat.completions.create(
     model="llama3.3-70b-instruct",
 )

-print(completion.choices[0].message)
+print("--- Serverless Inference")
+print(inference_response.choices[0].message.content)
+
+## Agent Inference
+agent_response = agent_client.agents.chat.completions.create(
+    messages=[
+        {
+            "role": "user",
+            "content": "What is the capital of Portugal?",
+        }
+    ],
+    model="llama3.3-70b-instruct",
+)
+
+print("--- Agent Inference")
+print(agent_response.choices[0].message.content)
 ```

 While you can provide an `api_key`, `inference_key` keyword argument,
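Putting the pieces together, here is a self-contained sketch of the quickstart as it would read after this change. The `api_client` and `inference_client` constructors sit above the hunk and are not shown in the diff, so the `GRADIENTAI_API_KEY` and `GRADIENTAI_INFERENCE_KEY` environment-variable names, the `from gradientai import GradientAI` import, and the prompt reused for the serverless call are assumptions; only `GRADIENTAI_AGENT_KEY`, the `agent_endpoint`, and the three method calls appear in the changed lines themselves.

```python
import os

from gradientai import GradientAI  # assumed import path for the SDK client

# Control-plane client; the GRADIENTAI_API_KEY variable name is an assumption.
api_client = GradientAI(api_key=os.environ.get("GRADIENTAI_API_KEY"))

# Serverless inference client; the GRADIENTAI_INFERENCE_KEY variable name is an assumption.
inference_client = GradientAI(inference_key=os.environ.get("GRADIENTAI_INFERENCE_KEY"))

# Agent client, pointed at a deployed agent endpoint (both values shown in the diff).
agent_client = GradientAI(
    agent_key=os.environ.get("GRADIENTAI_AGENT_KEY"),
    agent_endpoint="https://my-agent.agents.do-ai.run",
)

## API
api_response = api_client.agents.list()
print("--- API")
if api_response.agents:
    print(api_response.agents[0].name)

## Serverless Inference
inference_response = inference_client.chat.completions.create(
    messages=[{"role": "user", "content": "What is the capital of Portugal?"}],
    model="llama3.3-70b-instruct",
)
print("--- Serverless Inference")
print(inference_response.choices[0].message.content)

## Agent Inference
agent_response = agent_client.agents.chat.completions.create(
    messages=[{"role": "user", "content": "What is the capital of Portugal?"}],
    model="llama3.3-70b-instruct",
)
print("--- Agent Inference")
print(agent_response.choices[0].message.content)
```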