OpenAI-compatible provider #86

Will there be an OpenAI-compatible provider? I would like to use other models (e.g. Kimi and GLM) that implement the OpenAI API. In the Vercel AI SDK this works well via https://ai-sdk.dev/providers/openai-compatible-providers.
Replies: 3 comments 2 replies

There's an adapter for this. You can use `@tanstack/ai-openai` and point its `baseURL` at any OpenAI-compatible provider:

```ts
import { openai, type OpenAIConfig } from "@tanstack/ai-openai";

const config: OpenAIConfig = {
  apiKey: process.env.OPEN_AI_COMPATIBLE_API_KEY!,
  organization: "org-...",
  baseURL: "https://api.openai.com/v1", // here you can use any OpenAI-compatible provider base URL instead
};
```

Reference: https://tanstack.com/ai/latest/docs/adapters/openai
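For the models mentioned in the question, the idea would be to swap in that provider's endpoint. A minimal sketch, assuming only `apiKey` and `baseURL` are required; the URL and env var name below are placeholders, so check your provider's docs for its real OpenAI-compatible endpoint:

```ts
// Placeholder base URL: substitute your provider's real OpenAI-compatible endpoint.
const kimiConfig: OpenAIConfig = {
  apiKey: process.env.KIMI_API_KEY!, // hypothetical env var name
  baseURL: "https://your-kimi-provider.example/v1",
};
```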
We still haven't gotten around to implementing anything outside the 4 standard adapters we offer atm. We tried to cover 90% of cases, but we're aware the other 10% is missing. We definitely plan on tackling this, Bedrock, and all the other adapters in the future!
I needed this for using OpenCode's Zen with the Big Pickle model, and Opus 4.5 was able to vibe up a working implementation.

```ts
/**
 * OpenAI-compatible adapter that uses the Chat Completions API format.
 * Works with any API that follows the OpenAI Chat Completions spec.
 */
import type {
  ModelMessage,
  StreamChunk,
  Tool,
  ToolCall,
} from '@tanstack/ai';
import { convertZodToJsonSchema } from '@tanstack/ai';

export interface OpenAICompatibleConfig {
  apiKey: string;
  baseURL: string;
  model: string;
}

export interface OpenAICompatibleChatOptions {
  model: string;
  messages: ModelMessage[];
  systemPrompts?: string[];
  tools?: Tool[];
  temperature?: number;
  maxTokens?: number;
}

// OpenAI Chat Completions API message format
interface ChatCompletionMessage {
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string | null;
  tool_calls?: ToolCall[];
  tool_call_id?: string;
}

export class OpenAICompatibleAdapter {
  name = 'openai-compatible' as const;
  private apiKey: string;
  private baseURL: string;

  constructor(config: OpenAICompatibleConfig) {
    this.apiKey = config.apiKey;
    this.baseURL = config.baseURL;
  }

  async *chatStream(options: OpenAICompatibleChatOptions): AsyncGenerator<StreamChunk> {
    const timestamp = Date.now();
    const requestId = `oai-compat-${timestamp}-${Math.random().toString(36).substring(7)}`;

    // Convert messages to OpenAI format
    const messages: ChatCompletionMessage[] = [];

    // Add system prompts first
    if (options.systemPrompts?.length) {
      messages.push({
        role: 'system',
        content: options.systemPrompts.join('\n'),
      });
    }

    // Add conversation messages
    for (const msg of options.messages) {
      if (msg.role === 'tool') {
        // Handle both camelCase (toolCallId) and snake_case (tool_call_id) property names
        const toolCallId = msg.toolCallId || (msg as Record<string, unknown>).tool_call_id;
        if (!toolCallId) {
          console.warn('Tool message missing toolCallId:', msg);
          continue;
        }
        messages.push({
          role: 'tool',
          content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),
          tool_call_id: toolCallId as string,
        });
      } else if (msg.role === 'assistant' && msg.toolCalls?.length) {
        // Ensure tool_calls are in OpenAI format (id, type, function)
        const formattedToolCalls = msg.toolCalls.map((tc: Record<string, unknown>) => ({
          id: tc.id || tc.toolCallId,
          type: tc.type || 'function',
          function: tc.function,
        }));
        messages.push({
          role: 'assistant',
          content: typeof msg.content === 'string' ? msg.content : null,
          tool_calls: formattedToolCalls,
        });
      } else {
        messages.push({
          role: msg.role as 'user' | 'assistant',
          content: typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content),
        });
      }
    }

    // Convert Tool[] to OpenAI tool format
    const openAITools = options.tools?.map(tool => ({
      type: 'function' as const,
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.inputSchema ? convertZodToJsonSchema(tool.inputSchema) : { type: 'object', properties: {} },
      },
    }));

    const body: Record<string, unknown> = {
      model: options.model,
      messages,
      stream: true,
    };
    if (openAITools?.length) {
      body.tools = openAITools;
    }
    if (options.temperature !== undefined) {
      body.temperature = options.temperature;
    }
    if (options.maxTokens !== undefined) {
      body.max_tokens = options.maxTokens;
    }

    try {
      const response = await fetch(`${this.baseURL}/chat/completions`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${this.apiKey}`,
        },
        body: JSON.stringify(body),
      });

      if (!response.ok) {
        const errorText = await response.text();
        yield {
          type: 'error',
          id: requestId,
          model: options.model,
          timestamp,
          error: { message: `API error ${response.status}: ${errorText}` },
        };
        return;
      }

      const reader = response.body?.getReader();
      if (!reader) {
        yield {
          type: 'error',
          id: requestId,
          model: options.model,
          timestamp,
          error: { message: 'No response body' },
        };
        return;
      }

      const decoder = new TextDecoder();
      let buffer = '';
      let accumulatedContent = '';
      const toolCalls = new Map<number, { id: string; name: string; arguments: string }>();
      let finishReason: string | null = null;

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (!line.startsWith('data: ')) continue;
          const data = line.slice(6).trim();
          if (data === '[DONE]') continue;

          try {
            const chunk = JSON.parse(data);
            const choice = chunk.choices?.[0];
            if (!choice) continue;

            const delta = choice.delta;
            finishReason = choice.finish_reason || finishReason;

            // Handle content
            if (delta?.content) {
              accumulatedContent += delta.content;
              yield {
                type: 'content',
                id: requestId,
                model: options.model,
                timestamp,
                delta: delta.content,
                content: accumulatedContent,
                role: 'assistant',
              };
            }

            // Handle tool calls (argument fragments arrive incrementally, keyed by index)
            if (delta?.tool_calls) {
              for (const tc of delta.tool_calls) {
                const index = tc.index ?? 0;
                let existing = toolCalls.get(index);
                if (!existing) {
                  existing = { id: tc.id || '', name: '', arguments: '' };
                  toolCalls.set(index, existing);
                }
                if (tc.id) existing.id = tc.id;
                if (tc.function?.name) existing.name = tc.function.name;
                if (tc.function?.arguments) existing.arguments += tc.function.arguments;

                yield {
                  type: 'tool_call',
                  id: requestId,
                  model: options.model,
                  timestamp,
                  index,
                  toolCall: {
                    id: existing.id,
                    type: 'function',
                    function: {
                      name: existing.name,
                      arguments: existing.arguments,
                    },
                  },
                };
              }
            }
          } catch (e) {
            // Skip invalid JSON
            console.warn('Failed to parse SSE chunk:', data, e);
          }
        }
      }

      // Emit tool-input-available for completed tool calls
      for (const [, tc] of toolCalls) {
        try {
          const parsedArgs = JSON.parse(tc.arguments);
          yield {
            type: 'tool-input-available',
            id: requestId,
            model: options.model,
            timestamp,
            toolCallId: tc.id,
            toolName: tc.name,
            input: parsedArgs,
          };
        } catch (e) {
          console.warn('Failed to parse tool arguments:', tc.arguments, e);
        }
      }

      // Emit done
      yield {
        type: 'done',
        id: requestId,
        model: options.model,
        timestamp,
        finishReason: (finishReason as 'stop' | 'tool_calls' | null) || 'stop',
      };
    } catch (error: unknown) {
      const message = error instanceof Error ? error.message : 'Unknown error';
      yield {
        type: 'error',
        id: requestId,
        model: options.model,
        timestamp,
        error: { message },
      };
    }
  }
}

export function createOpenAICompatible(config: OpenAICompatibleConfig): OpenAICompatibleAdapter {
  return new OpenAICompatibleAdapter(config);
}
```
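For reference, a quick usage sketch of the adapter above. The base URL, model name, and env var are placeholders, and I'm assuming a plain `{ role, content }` object satisfies `ModelMessage`:

```ts
// Placeholder endpoint and model; swap in whatever OpenAI-compatible service you use.
const adapter = createOpenAICompatible({
  apiKey: process.env.OPENAI_COMPATIBLE_API_KEY!, // hypothetical env var name
  baseURL: "https://your-provider.example/v1",
  model: "your-model-id",
});

for await (const chunk of adapter.chatStream({
  model: "your-model-id",
  messages: [{ role: "user", content: "Hello!" } as ModelMessage],
})) {
  if (chunk.type === "content") process.stdout.write(chunk.delta);
  else if (chunk.type === "error") console.error(chunk.error.message);
}
```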