"""
LLM Caller Utility

A utility class for making calls to various LLM APIs with a consistent interface.
"""

import os
from typing import List, Optional, Union

import anthropic
import vertexai
from openai import AsyncAzureOpenAI, AsyncOpenAI
from vertexai.language_models import TextGenerationModel

class LLMCaller:
    """A utility class for making LLM API calls."""

    @staticmethod
    def detect_provider() -> str:
        """Determine the provider based on available API credentials."""
        if os.getenv("ANTHROPIC_API_KEY"):
            return "anthropic"
        if os.getenv("OPENAI_API_KEY"):
            return "openai"
        if os.getenv("GOOGLE_APPLICATION_CREDENTIALS"):
            return "google"
        if os.getenv("AZURE_OPENAI_API_KEY"):
            return "azure"
        raise ValueError("No LLM API credentials found in environment")

    @staticmethod
    def get_client(provider: Optional[str] = None):
        """Get the appropriate LLM client based on environment configuration."""
        provider = provider or LLMCaller.detect_provider()

        if provider == "anthropic":
            # Async client, since `call` awaits every request.
            return anthropic.AsyncAnthropic()
        elif provider == "openai":
            return AsyncOpenAI()
        elif provider == "google":
            # Vertex AI exposes text models as classes rather than a single
            # client object: initialise the SDK (project/location come from
            # the environment) and hand back the model class, which `call`
            # instantiates per model name.
            vertexai.init()
            return TextGenerationModel
        elif provider == "azure":
            # Azure OpenAI deployments are served through the OpenAI client;
            # endpoint and key come from AZURE_OPENAI_ENDPOINT and
            # AZURE_OPENAI_API_KEY.
            return AsyncAzureOpenAI(
                api_version=os.getenv("OPENAI_API_VERSION", "2024-02-01")
            )
        else:
            raise ValueError(f"Unsupported LLM provider: {provider}")

    @staticmethod
    def get_default_model(provider: str) -> str:
        """Get the default model for a provider, overridable via env vars."""
        defaults = {
            "anthropic": "claude-3-opus-20240229",
            "openai": "gpt-4-turbo-preview",
            "google": "text-bison@002",
            "azure": "gpt-4",
        }
        if provider not in defaults:
            raise ValueError(f"Unsupported LLM provider: {provider}")
        # e.g. ANTHROPIC_DEFAULT_MODEL overrides the built-in default.
        return os.getenv(f"{provider.upper()}_DEFAULT_MODEL", defaults[provider])

    @staticmethod
    async def call(
        system: str,
        user: str,
        provider: Optional[str] = None,
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: int = 1000,
        stop: Optional[Union[str, List[str]]] = None,
    ) -> str:
        """
        Make a call to an LLM API.

        Args:
            system: System message/prompt
            user: User message/prompt
            provider: LLM provider (anthropic, openai, google, azure)
            model: Model to use (defaults to the provider's default); for
                azure this is the deployment name
            temperature: Sampling temperature (0.0 to 1.0)
            max_tokens: Maximum tokens in the response
            stop: Optional stop sequence(s)

        Returns:
            The LLM's response text
        """
        provider = provider or LLMCaller.detect_provider()
        client = LLMCaller.get_client(provider)
        model = model or LLMCaller.get_default_model(provider)
        # Anthropic and Vertex AI expect stop sequences as a list of strings.
        stop_list = [stop] if isinstance(stop, str) else stop

        if provider == "anthropic":
            response = await client.messages.create(
                model=model,
                max_tokens=max_tokens,
                temperature=temperature,
                system=system,
                messages=[{"role": "user", "content": user}],
                stop_sequences=stop_list or [],
            )
            return response.content[0].text

        elif provider == "openai":
            response = await client.chat.completions.create(
                model=model,
                temperature=temperature,
                max_tokens=max_tokens,
                messages=[
                    {"role": "system", "content": system},
                    {"role": "user", "content": user},
                ],
                stop=stop,
            )
            return response.choices[0].message.content

        elif provider == "google":
            # Vertex AI text models take a single prompt string, so the
            # system and user messages are concatenated.
            google_model = client.from_pretrained(model)
            response = await google_model.predict_async(
                f"{system}\n\n{user}",
                temperature=temperature,
                max_output_tokens=max_tokens,
                stop_sequences=stop_list,
            )
            return response.text

        elif provider == "azure":
            # With the AsyncAzureOpenAI client, `model` is the deployment name.
            response = await client.chat.completions.create(
                model=model,
                temperature=temperature,
                max_tokens=max_tokens,
                messages=[
                    {"role": "system", "content": system},
                    {"role": "user", "content": user},
                ],
                stop=stop,
            )
            return response.choices[0].message.content

        else:
            raise ValueError(f"Unsupported LLM provider: {provider}")

    @staticmethod
    def format_prompt(template: str, **kwargs) -> str:
        """Format a prompt template with variables."""
        return template.format(**kwargs)
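

# A minimal usage sketch (hypothetical prompt values): assumes credentials for
# at least one provider (e.g. ANTHROPIC_API_KEY) are set in the environment so
# that detect_provider can resolve a provider automatically.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        prompt = LLMCaller.format_prompt(
            "Summarize {topic} in one sentence.", topic="vector databases"
        )
        reply = await LLMCaller.call(
            system="You are a concise technical assistant.",
            user=prompt,
            temperature=0.2,
            max_tokens=200,
        )
        print(reply)

    asyncio.run(_demo())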