Skip to content

Commit 1e7b9cf

Browse files
(fix) Pass through spend tracking - ensure custom_llm_provider is tracked for Vertex, Google AI Studio, Anthropic (#8882)
* fix track custom llm provider on pass through routes * fix use correct provider for google ai studio * fix tracking custom llm provider on pass through route * ui fix get provider logo * update tests to track custom llm provider * test_anthropic_streaming_with_headers * Potential fix for code scanning alert no. 2263: Incomplete URL substring sanitization Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com> --------- Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
1 parent 047d1b1 commit 1e7b9cf

File tree

7 files changed

+37
-2
lines changed

7 files changed

+37
-2
lines changed

litellm/proxy/pass_through_endpoints/llm_provider_handlers/anthropic_passthrough_logging_handler.py

+3
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,9 @@ def _create_anthropic_response_logging_payload(
123123
litellm_model_response.id = logging_obj.litellm_call_id
124124
litellm_model_response.model = model
125125
logging_obj.model_call_details["model"] = model
126+
logging_obj.model_call_details["custom_llm_provider"] = (
127+
litellm.LlmProviders.ANTHROPIC.value
128+
)
126129
return kwargs
127130
except Exception as e:
128131
verbose_proxy_logger.exception(

litellm/proxy/pass_through_endpoints/llm_provider_handlers/vertex_passthrough_logging_handler.py

+16-1
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
import re
33
from datetime import datetime
44
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
5-
5+
from urllib.parse import urlparse
66
import httpx
77

88
import litellm
@@ -66,6 +66,9 @@ def vertex_passthrough_handler(
6666
start_time=start_time,
6767
end_time=end_time,
6868
logging_obj=logging_obj,
69+
custom_llm_provider=VertexPassthroughLoggingHandler._get_custom_llm_provider_from_url(
70+
url_route
71+
),
6972
)
7073

7174
return {
@@ -167,6 +170,9 @@ def _handle_logging_vertex_collected_chunks(
167170
start_time=start_time,
168171
end_time=end_time,
169172
logging_obj=litellm_logging_obj,
173+
custom_llm_provider=VertexPassthroughLoggingHandler._get_custom_llm_provider_from_url(
174+
url_route
175+
),
170176
)
171177

172178
return {
@@ -213,6 +219,13 @@ def extract_model_from_url(url: str) -> str:
213219
return match.group(1)
214220
return "unknown"
215221

222+
@staticmethod
223+
def _get_custom_llm_provider_from_url(url: str) -> str:
224+
parsed_url = urlparse(url)
225+
if parsed_url.hostname and parsed_url.hostname.endswith("generativelanguage.googleapis.com"):
226+
return litellm.LlmProviders.GEMINI.value
227+
return litellm.LlmProviders.VERTEX_AI.value
228+
216229
@staticmethod
217230
def _create_vertex_response_logging_payload_for_generate_content(
218231
litellm_model_response: Union[ModelResponse, TextCompletionResponse],
@@ -221,6 +234,7 @@ def _create_vertex_response_logging_payload_for_generate_content(
221234
start_time: datetime,
222235
end_time: datetime,
223236
logging_obj: LiteLLMLoggingObj,
237+
custom_llm_provider: str,
224238
):
225239
"""
226240
Create the standard logging object for Vertex passthrough generateContent (streaming and non-streaming)
@@ -240,4 +254,5 @@ def _create_vertex_response_logging_payload_for_generate_content(
240254
litellm_model_response.id = logging_obj.litellm_call_id
241255
logging_obj.model = litellm_model_response.model or model
242256
logging_obj.model_call_details["model"] = logging_obj.model
257+
logging_obj.model_call_details["custom_llm_provider"] = custom_llm_provider
243258
return kwargs

tests/pass_through_tests/test_anthropic_passthrough.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -162,6 +162,7 @@ async def test_anthropic_basic_completion_with_headers():
162162
), "Should have user API key in metadata"
163163

164164
assert "claude" in log_entry["model"]
165+
assert log_entry["custom_llm_provider"] == "anthropic"
165166

166167

167168
@pytest.mark.asyncio
@@ -230,7 +231,7 @@ async def test_anthropic_streaming_with_headers():
230231
print("anthropic_api_output_tokens", anthropic_api_output_tokens)
231232

232233
# Wait for spend to be logged
233-
await asyncio.sleep(10)
234+
await asyncio.sleep(20)
234235

235236
# Check spend logs for this specific request
236237
async with session.get(
@@ -301,3 +302,4 @@ async def test_anthropic_streaming_with_headers():
301302
assert "claude" in log_entry["model"]
302303

303304
assert log_entry["end_user"] == "test-user-1"
305+
assert log_entry["custom_llm_provider"] == "anthropic"

tests/pass_through_tests/test_gemini_with_spend.test.js

+2
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,7 @@ describe('Gemini AI Tests', () => {
6262
expect(spendData[0].request_tags).toEqual(['gemini-js-sdk', 'pass-through-endpoint']);
6363
expect(spendData[0].metadata).toHaveProperty('user_api_key');
6464
expect(spendData[0].model).toContain('gemini');
65+
expect(spendData[0].custom_llm_provider).toBe('gemini');
6566
expect(spendData[0].spend).toBeGreaterThan(0);
6667
}, 25000);
6768

@@ -119,5 +120,6 @@ describe('Gemini AI Tests', () => {
119120
expect(spendData[0].metadata).toHaveProperty('user_api_key');
120121
expect(spendData[0].model).toContain('gemini');
121122
expect(spendData[0].spend).toBeGreaterThan(0);
123+
expect(spendData[0].custom_llm_provider).toBe('gemini');
122124
}, 25000);
123125
});

tests/pass_through_tests/test_vertex_with_spend.test.js

+2
Original file line numberDiff line numberDiff line change
@@ -121,6 +121,7 @@ describe('Vertex AI Tests', () => {
121121
expect(spendData[0].metadata).toHaveProperty('user_api_key');
122122
expect(spendData[0].model).toContain('gemini');
123123
expect(spendData[0].spend).toBeGreaterThan(0);
124+
expect(spendData[0].custom_llm_provider).toBe('vertex_ai');
124125
}, 25000);
125126

126127
test('should successfully generate streaming content with tags', async () => {
@@ -190,5 +191,6 @@ describe('Vertex AI Tests', () => {
190191
expect(spendData[0].metadata).toHaveProperty('user_api_key');
191192
expect(spendData[0].model).toContain('gemini');
192193
expect(spendData[0].spend).toBeGreaterThan(0);
194+
expect(spendData[0].custom_llm_provider).toBe('vertex_ai');
193195
}, 25000);
194196
});

ui/litellm-dashboard/src/components/provider_info_helpers.tsx

+7
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,13 @@ export const getProviderLogoAndName = (providerValue: string): { logo: string, d
6262
return { logo: "", displayName: "-" };
6363
}
6464

65+
// Handle special case for "gemini" provider value
66+
if (providerValue.toLowerCase() === "gemini") {
67+
const displayName = Providers.Google_AI_Studio;
68+
const logo = providerLogoMap[displayName];
69+
return { logo, displayName };
70+
}
71+
6572
// Find the enum key by matching provider_map values
6673
const enumKey = Object.keys(provider_map).find(
6774
key => provider_map[key].toLowerCase() === providerValue.toLowerCase()

ui/litellm-dashboard/src/components/view_logs/index.tsx

+4
Original file line numberDiff line numberDiff line change
@@ -598,6 +598,10 @@ function RequestViewer({ row }: { row: Row<LogEntry> }) {
598598
<span className="font-medium w-1/3">Model:</span>
599599
<span>{row.original.model}</span>
600600
</div>
601+
<div className="flex">
602+
<span className="font-medium w-1/3">Custom LLM Provider:</span>
603+
<span>{row.original.custom_llm_provider}</span>
604+
</div>
601605
<div className="flex">
602606
<span className="font-medium w-1/3">Api Base:</span>
603607
<span>{row.original.api_base}</span>

0 commit comments

Comments (0)