diff --git a/bun.lock b/bun.lock
index 746360f1b51..e56967dff6f 100644
--- a/bun.lock
+++ b/bun.lock
@@ -437,8 +437,10 @@
"@tailwindcss/vite": "catalog:",
"@tsconfig/node22": "catalog:",
"@types/bun": "catalog:",
+ "@types/dompurify": "3.2.0",
"@types/katex": "0.16.7",
"@types/luxon": "catalog:",
+ "@types/strip-ansi": "5.2.1",
"tailwindcss": "catalog:",
"typescript": "catalog:",
"vite": "catalog:",
@@ -1807,6 +1809,8 @@
"@types/deep-eql": ["@types/deep-eql@4.0.2", "", {}, "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw=="],
+ "@types/dompurify": ["@types/dompurify@3.2.0", "", { "dependencies": { "dompurify": "*" } }, "sha512-Fgg31wv9QbLDA0SpTOXO3MaxySc4DKGLi8sna4/Utjo4r3ZRPdCt4UQee8BWr+Q5z21yifghREPJGYaEOEIACg=="],
+
"@types/estree": ["@types/estree@1.0.8", "", {}, "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w=="],
"@types/estree-jsx": ["@types/estree-jsx@1.0.5", "", { "dependencies": { "@types/estree": "*" } }, "sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg=="],
@@ -1869,6 +1873,8 @@
"@types/serve-static": ["@types/serve-static@1.15.10", "", { "dependencies": { "@types/http-errors": "*", "@types/node": "*", "@types/send": "<1" } }, "sha512-tRs1dB+g8Itk72rlSI2ZrW6vZg0YrLI81iQSTkMmOqnqCaNr/8Ek4VwWcN5vZgCYWbg/JJSGBlUaYGAOP73qBw=="],
+ "@types/strip-ansi": ["@types/strip-ansi@5.2.1", "", { "dependencies": { "strip-ansi": "*" } }, "sha512-1l5iM0LBkVU8JXxnIoBqNvg+yyOXxPeN6DNoD+7A9AN1B8FhYPSeIXgyNqwIqg1uzipTgVC2hmuDzB0u9qw/PA=="],
+
"@types/trusted-types": ["@types/trusted-types@2.0.7", "", {}, "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw=="],
"@types/tsscmp": ["@types/tsscmp@1.0.2", "", {}, "sha512-cy7BRSU8GYYgxjcx0Py+8lo5MthuDhlyu076KUcYzVNXL23luYgRHkMG2fIFEc6neckeh/ntP82mw+U4QjZq+g=="],
diff --git a/packages/opencode/src/auth/index.ts b/packages/opencode/src/auth/index.ts
index ce948b92ac8..0d8aa9f9a11 100644
--- a/packages/opencode/src/auth/index.ts
+++ b/packages/opencode/src/auth/index.ts
@@ -20,6 +20,7 @@ export namespace Auth {
.object({
type: z.literal("api"),
key: z.string(),
+ host: z.string().optional(), // For providers like Databricks that need a host URL
})
.meta({ ref: "ApiAuth" })
diff --git a/packages/opencode/src/cli/cmd/auth.ts b/packages/opencode/src/cli/cmd/auth.ts
index bbaecfd8c71..d0bec52de3b 100644
--- a/packages/opencode/src/cli/cmd/auth.ts
+++ b/packages/opencode/src/cli/cmd/auth.ts
@@ -276,6 +276,7 @@ export const AuthLoginCommand = cmd({
google: 4,
openrouter: 5,
vercel: 6,
+ databricks: 7,
}
let provider = await prompts.autocomplete({
message: "Select provider",
@@ -344,6 +345,19 @@ export const AuthLoginCommand = cmd({
)
}
+ if (provider === "databricks") {
+ prompts.log.info(
+ "Databricks Foundation Model APIs authentication:\n" +
+ " Enter your workspace URL and Personal Access Token\n" +
+ " Create token at: Workspace > Settings > Developer > Access tokens\n\n" +
+ "Authentication options (in priority order):\n" +
+ " 1. PAT: Enter your Personal Access Token below, or set DATABRICKS_TOKEN\n" +
+ " 2. OAuth M2M: Set DATABRICKS_CLIENT_ID + DATABRICKS_CLIENT_SECRET\n" +
+ " 3. Azure AD Service Principal: Set ARM_CLIENT_ID + ARM_CLIENT_SECRET + ARM_TENANT_ID\n" +
+ " 4. Azure CLI (auto): For Azure Databricks, will use 'az account get-access-token' if logged in",
+ )
+ }
+
if (provider === "opencode") {
prompts.log.info("Create an api key at https://opencode.ai/auth")
}
@@ -358,6 +372,22 @@ export const AuthLoginCommand = cmd({
)
}
+ // For Databricks, prompt for host first
+ let host: string | undefined
+ if (provider === "databricks") {
+ const hostInput = await prompts.text({
+ message: "Enter your Databricks workspace URL",
+ placeholder: "https://your-workspace.cloud.databricks.com",
+ validate: (x) => {
+ if (!x || x.length === 0) return "Required"
+ if (!x.startsWith("https://")) return "Must start with https://"
+ return undefined
+ },
+ })
+ if (prompts.isCancel(hostInput)) throw new UI.CancelledError()
+ host = hostInput.replace(/\/$/, "") // Remove trailing slash
+ }
+
const key = await prompts.password({
message: "Enter your API key",
validate: (x) => (x && x.length > 0 ? undefined : "Required"),
@@ -366,6 +396,7 @@ export const AuthLoginCommand = cmd({
await Auth.set(provider, {
type: "api",
key,
+ host,
})
prompts.outro("Done")
diff --git a/packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx b/packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx
index dc3f337370a..dc28f0f2738 100644
--- a/packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx
+++ b/packages/opencode/src/cli/cmd/tui/component/dialog-provider.tsx
@@ -1,4 +1,4 @@
-import { createMemo, createSignal, onMount, Show } from "solid-js"
+import { createMemo, createSignal, onMount, Show, createEffect } from "solid-js"
import { useSync } from "@tui/context/sync"
import { map, pipe, sortBy } from "remeda"
import { DialogSelect } from "@tui/ui/dialog-select"
@@ -97,6 +97,10 @@ export function createDialogProviderOptions() {
}
}
if (method.type === "api") {
+ // Databricks requires both host and API key
+ if (provider.id === "databricks") {
+ return dialog.replace(() => )
+ }
return dialog.replace(() => )
}
},
@@ -212,6 +216,119 @@ function CodeMethod(props: CodeMethodProps) {
)
}
+interface DatabricksApiMethodProps {
+ providerID: string
+ title: string
+}
+function DatabricksApiMethod(props: DatabricksApiMethodProps) {
+ const { theme } = useTheme()
+ const dialog = useDialog()
+ const sdk = useSDK()
+ const sync = useSync()
+ // Get host from environment variable
+ const envHost = typeof process !== "undefined" ? process.env["DATABRICKS_HOST"] : undefined
+
+ // Check if we have a valid token in the CLI cache for this host
+ onMount(async () => {
+ if (!envHost) return
+
+ const normalizedHost = envHost.replace(/\/$/, "")
+ const homedir = typeof process !== "undefined" ? (process.env["HOME"] ?? process.env["USERPROFILE"]) : undefined
+ if (!homedir) return
+
+ try {
+ const tokenCachePath = `${homedir}/.databricks/token-cache.json`
+ const file = Bun.file(tokenCachePath)
+ if (!(await file.exists())) return
+
+ const cache = (await file.json()) as {
+ tokens: Record
+ }
+
+ const tokenEntry = cache.tokens[normalizedHost]
+ if (!tokenEntry) return
+
+ // Check if token is valid or can be refreshed
+ const expiry = new Date(tokenEntry.expiry)
+ const hasValidToken = expiry.getTime() - 5 * 60 * 1000 > Date.now()
+ const canRefresh = Boolean(tokenEntry.refresh_token)
+
+ if (hasValidToken || canRefresh) {
+ // We have CLI auth available, skip prompts and go straight to model selection
+ // Dispose and bootstrap to pick up the CLI token
+ await sdk.client.instance.dispose()
+ await sync.bootstrap()
+ dialog.replace(() => )
+ }
+ } catch {
+ // Token cache not available or invalid, continue with normal flow
+ }
+ })
+
+ return (
+ (
+
+ Enter your Databricks workspace URL
+ Examples:
+ • https://dbc-xxx.cloud.databricks.com (AWS/GCP)
+ • https://adb-xxx.azuredatabricks.net (Azure)
+
+ )}
+ onConfirm={(value) => {
+ if (!value) return
+ // Remove trailing slash if present
+ const cleanHost = value.replace(/\/$/, "")
+ dialog.replace(() => (
+
+ ))
+ }}
+ />
+ )
+}
+
+interface DatabricksApiKeyMethodProps {
+ providerID: string
+ title: string
+ host: string
+}
+function DatabricksApiKeyMethod(props: DatabricksApiKeyMethodProps) {
+ const dialog = useDialog()
+ const sdk = useSDK()
+ const sync = useSync()
+ const { theme } = useTheme()
+
+ return (
+
+ Enter your Databricks Personal Access Token
+ Create at: Workspace → Settings → Developer → Access tokens
+
+ }
+ onConfirm={async (value) => {
+ if (!value) return
+ sdk.client.auth.set({
+ providerID: props.providerID,
+ auth: {
+ type: "api",
+ key: value,
+ host: props.host,
+ },
+ })
+ await sdk.client.instance.dispose()
+ await sync.bootstrap()
+ dialog.replace(() => )
+ }}
+ />
+ )
+}
+
interface ApiMethodProps {
providerID: string
title: string
diff --git a/packages/opencode/src/cli/cmd/tui/ui/dialog-prompt.tsx b/packages/opencode/src/cli/cmd/tui/ui/dialog-prompt.tsx
index b2965241243..68d8b776600 100644
--- a/packages/opencode/src/cli/cmd/tui/ui/dialog-prompt.tsx
+++ b/packages/opencode/src/cli/cmd/tui/ui/dialog-prompt.tsx
@@ -20,8 +20,12 @@ export function DialogPrompt(props: DialogPromptProps) {
useKeyboard((evt) => {
if (evt.name === "return") {
+ evt.preventDefault()
props.onConfirm?.(textarea.plainText)
}
+ if (evt.name === "escape") {
+ props.onCancel?.()
+ }
})
onMount(() => {
@@ -47,6 +51,12 @@ export function DialogPrompt(props: DialogPromptProps) {
onSubmit={() => {
props.onConfirm?.(textarea.plainText)
}}
+ onKeyDown={(e) => {
+ if (e.name === "return") {
+ e.preventDefault()
+ props.onConfirm?.(textarea.plainText)
+ }
+ }}
height={3}
keyBindings={[{ name: "return", action: "submit" }]}
ref={(val: TextareaRenderable) => (textarea = val)}
diff --git a/packages/opencode/src/provider/auth.ts b/packages/opencode/src/provider/auth.ts
index e6681ff0891..1276178c94f 100644
--- a/packages/opencode/src/provider/auth.ts
+++ b/packages/opencode/src/provider/auth.ts
@@ -121,11 +121,13 @@ export namespace ProviderAuth {
z.object({
providerID: z.string(),
key: z.string(),
+ host: z.string().optional(),
}),
async (input) => {
await Auth.set(input.providerID, {
type: "api",
key: input.key,
+ host: input.host,
})
},
)
diff --git a/packages/opencode/src/provider/provider.ts b/packages/opencode/src/provider/provider.ts
index e79cb170894..78549208022 100644
--- a/packages/opencode/src/provider/provider.ts
+++ b/packages/opencode/src/provider/provider.ts
@@ -506,6 +506,923 @@ export namespace Provider {
},
}
},
+ databricks: async (input) => {
+ // Azure Databricks resource ID for OAuth/AAD authentication
+ // This is the official Azure AD application ID for Azure Databricks
+ // See: https://learn.microsoft.com/en-us/azure/databricks/dev-tools/auth/oauth-m2m
+ const AZURE_DATABRICKS_RESOURCE_ID = "2ff814a6-3304-4ab8-85cb-cd0e6f879c1d"
+
+ const config = await Config.get()
+ const providerConfig = config.provider?.["databricks"]
+ const auth = await Auth.get("databricks")
+
+ // Helper to read host from ~/.databrickscfg profile
+ const getHostFromProfile = async (profileName: string): Promise => {
+ try {
+ const homedir = Env.get("HOME") ?? Env.get("USERPROFILE")
+ if (!homedir) return undefined
+ const configPath = Env.get("DATABRICKS_CONFIG_FILE") ?? `${homedir}/.databrickscfg`
+ const file = Bun.file(configPath)
+ if (!(await file.exists())) return undefined
+
+ const content = await file.text()
+ const lines = content.split("\n")
+
+ let currentSection = ""
+ for (const line of lines) {
+ const trimmed = line.trim()
+ // Check for section header [profile-name]
+ const sectionMatch = trimmed.match(/^\[(.+)\]$/)
+ if (sectionMatch) {
+ currentSection = sectionMatch[1]
+ continue
+ }
+ // Check for host = value in the target section
+ if (currentSection === profileName) {
+ const hostMatch = trimmed.match(/^host\s*=\s*(.+)$/)
+ if (hostMatch) {
+ return hostMatch[1].trim().replace(/\/$/, "")
+ }
+ }
+ }
+ return undefined
+ } catch {
+ return undefined
+ }
+ }
+
+ // Host resolution: 1) stored auth, 2) config file, 3) env var, 4) profile from ~/.databrickscfg
+ const authHost = auth?.type === "api" ? auth.host : undefined
+ const configHost = providerConfig?.options?.baseURL ?? providerConfig?.options?.host
+ const envHost = Env.get("DATABRICKS_HOST")
+ const profileName = Env.get("DATABRICKS_CONFIG_PROFILE") ?? providerConfig?.options?.profile ?? "DEFAULT"
+ const profileHost = await getHostFromProfile(profileName)
+ const host = authHost ?? configHost ?? envHost ?? profileHost
+
+ if (!host) return { autoload: false }
+
+ // Authentication precedence:
+ // 1. PAT token (DATABRICKS_TOKEN or stored auth)
+ // 2. OAuth M2M (DATABRICKS_CLIENT_ID + DATABRICKS_CLIENT_SECRET) for Azure
+ // 3. Azure AD Service Principal (azure_client_id + azure_client_secret + azure_tenant_id)
+ const token = Env.get("DATABRICKS_TOKEN") ?? (auth?.type === "api" ? auth.key : undefined)
+
+ // OAuth M2M credentials for Azure Databricks
+ // Note: Standard OAuth auth type doesn't include clientId/clientSecret fields,
+ // so we use type assertion. In practice, these come from env vars or config.
+ const clientId =
+ Env.get("DATABRICKS_CLIENT_ID") ??
+ providerConfig?.options?.clientId ??
+ (auth?.type === "oauth" ? (auth as any).clientId : undefined)
+ const clientSecret =
+ Env.get("DATABRICKS_CLIENT_SECRET") ??
+ providerConfig?.options?.clientSecret ??
+ (auth?.type === "oauth" ? (auth as any).clientSecret : undefined)
+
+ // Azure AD Service Principal credentials
+ const azureClientId = Env.get("ARM_CLIENT_ID") ?? providerConfig?.options?.azureClientId
+ const azureClientSecret = Env.get("ARM_CLIENT_SECRET") ?? providerConfig?.options?.azureClientSecret
+ const azureTenantId = Env.get("ARM_TENANT_ID") ?? providerConfig?.options?.azureTenantId
+
+ // Determine which auth method to use
+ const hasOAuthM2M = clientId && clientSecret
+ const hasAzureAD = azureClientId && azureClientSecret && azureTenantId
+ const hasPAT = Boolean(token)
+ // Check if Azure CLI is available for Azure Databricks workspaces
+ const isAzureDatabricks = host.includes("azuredatabricks.net")
+
+ // Check for Databricks CLI token cache
+ const hasDatabricksCLI = await (async () => {
+ try {
+ const homedir = Env.get("HOME") ?? Env.get("USERPROFILE")
+ if (!homedir) return false
+ const tokenCachePath = `${homedir}/.databricks/token-cache.json`
+ const file = Bun.file(tokenCachePath)
+ return await file.exists()
+ } catch {
+ return false
+ }
+ })()
+
+ if (!hasPAT && !hasOAuthM2M && !hasAzureAD && !isAzureDatabricks && !hasDatabricksCLI) return { autoload: false }
+
+ // Databricks Foundation Model APIs use OpenAI-compatible endpoints
+ // The base URL format is: https:///serving-endpoints
+ // If baseURL is already a full path (includes /serving-endpoints), use it as-is
+ const baseURL = host.includes("/serving-endpoints")
+ ? host.replace(/\/$/, "")
+ : host.replace(/\/$/, "") + "/serving-endpoints"
+
+ // For OAuth M2M, we need to fetch an access token
+ let accessToken: string | undefined = token
+ if (!accessToken && hasOAuthM2M) {
+ // Fetch OAuth token from Databricks OIDC endpoint
+ const tokenEndpoint = `${host.replace(/\/$/, "")}/oidc/v1/token`
+ try {
+ const response = await fetch(tokenEndpoint, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/x-www-form-urlencoded",
+ Authorization: `Basic ${Buffer.from(`${clientId}:${clientSecret}`).toString("base64")}`,
+ },
+ body: "grant_type=client_credentials&scope=all-apis",
+ })
+ if (response.ok) {
+ const data = (await response.json()) as { access_token: string }
+ accessToken = data.access_token
+ } else {
+ log.debug("Failed to fetch Databricks OAuth token", {
+ status: response.status,
+ statusText: response.statusText,
+ })
+ }
+ } catch (e) {
+ log.debug("Failed to fetch Databricks OAuth token", {
+ error: e instanceof Error ? e.message : "Unknown error",
+ })
+ }
+ }
+
+ // For Azure AD Service Principal, we need to fetch an Azure AD token first
+ if (!accessToken && hasAzureAD) {
+ try {
+ // Get Azure AD token for Databricks resource
+ const aadTokenEndpoint = `https://login.microsoftonline.com/${azureTenantId}/oauth2/v2.0/token`
+ const response = await fetch(aadTokenEndpoint, {
+ method: "POST",
+ headers: { "Content-Type": "application/x-www-form-urlencoded" },
+ body: new URLSearchParams({
+ grant_type: "client_credentials",
+ client_id: azureClientId,
+ client_secret: azureClientSecret,
+ scope: `${AZURE_DATABRICKS_RESOURCE_ID}/.default`,
+ }).toString(),
+ })
+ if (response.ok) {
+ const data = (await response.json()) as { access_token: string }
+ accessToken = data.access_token
+ } else {
+ log.debug("Failed to fetch Azure AD token for Databricks", {
+ status: response.status,
+ statusText: response.statusText,
+ })
+ }
+ } catch (e) {
+ log.debug("Failed to fetch Azure AD token for Databricks", {
+ error: e instanceof Error ? e.message : "Unknown error",
+ })
+ }
+ }
+
+ // Try Databricks CLI token cache (from `databricks auth login`)
+ if (!accessToken) {
+ try {
+ const homedir = Env.get("HOME") ?? Env.get("USERPROFILE")
+ if (homedir) {
+ const tokenCachePath = `${homedir}/.databricks/token-cache.json`
+ const file = Bun.file(tokenCachePath)
+ if (await file.exists()) {
+ const cacheContent = await file.text()
+ const cache = JSON.parse(cacheContent) as {
+ version: number
+ tokens: Record<
+ string,
+ {
+ access_token: string
+ token_type: string
+ refresh_token: string
+ expiry: string
+ expires_in?: number
+ }
+ >
+ }
+
+ // Normalize host for lookup (remove trailing slash)
+ const normalizedHost = host.replace(/\/$/, "")
+
+ // Find token for this host
+ const tokenEntry = cache.tokens[normalizedHost]
+ if (tokenEntry) {
+ const expiry = new Date(tokenEntry.expiry)
+ const now = new Date()
+
+ // Check if token is still valid (with 5 minute buffer)
+ if (expiry.getTime() - 5 * 60 * 1000 > now.getTime()) {
+ accessToken = tokenEntry.access_token
+ log.info("Using Databricks CLI token cache for authentication")
+ } else if (tokenEntry.refresh_token) {
+ // Token expired, try to refresh it
+ log.debug("Databricks CLI token expired, attempting refresh")
+ const tokenEndpoint = `${normalizedHost}/oidc/v1/token`
+ try {
+ const response = await fetch(tokenEndpoint, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/x-www-form-urlencoded",
+ },
+ body: new URLSearchParams({
+ grant_type: "refresh_token",
+ refresh_token: tokenEntry.refresh_token,
+ client_id: "databricks-cli",
+ }).toString(),
+ })
+ if (response.ok) {
+ const data = (await response.json()) as {
+ access_token: string
+ refresh_token?: string
+ expires_in?: number
+ }
+ accessToken = data.access_token
+ log.info("Refreshed Databricks CLI token successfully")
+
+ // Update the token cache with new tokens
+ cache.tokens[normalizedHost] = {
+ ...tokenEntry,
+ access_token: data.access_token,
+ refresh_token: data.refresh_token ?? tokenEntry.refresh_token,
+ expiry: new Date(Date.now() + (data.expires_in ?? 3600) * 1000).toISOString(),
+ expires_in: data.expires_in ?? 3600,
+ }
+ await Bun.write(tokenCachePath, JSON.stringify(cache, null, 2))
+ } else {
+ log.debug("Failed to refresh Databricks CLI token", {
+ status: response.status,
+ statusText: response.statusText,
+ })
+ }
+ } catch (refreshError) {
+ log.debug("Failed to refresh Databricks CLI token", {
+ error: refreshError instanceof Error ? refreshError.message : "Unknown error",
+ })
+ }
+ }
+ }
+ }
+ }
+ } catch (e) {
+ log.debug("Failed to read Databricks CLI token cache", {
+ error: e instanceof Error ? e.message : "Unknown error",
+ })
+ }
+ }
+
+ // For Azure Databricks, try Azure CLI as a fallback
+ if (!accessToken && isAzureDatabricks) {
+ try {
+ // Try to get token from Azure CLI
+ const proc = Bun.spawn(
+ ["az", "account", "get-access-token", "--resource", AZURE_DATABRICKS_RESOURCE_ID, "-o", "json"],
+ { stdout: "pipe", stderr: "pipe" },
+ )
+ const output = await new Response(proc.stdout).text()
+ const exitCode = await proc.exited
+ if (exitCode === 0) {
+ try {
+ const data = JSON.parse(output) as { accessToken: string }
+ accessToken = data.accessToken
+ log.info("Using Azure CLI token for Databricks authentication")
+ } catch (parseError) {
+ log.debug("Failed to parse Azure CLI token response", {
+ error: parseError instanceof Error ? parseError.message : "Unknown error",
+ })
+ }
+ } else {
+ log.debug("Azure CLI returned non-zero exit code", { exitCode })
+ }
+ } catch (e) {
+ log.debug("Azure CLI not available for Databricks auth", {
+ error: e instanceof Error ? e.message : "Unknown error",
+ })
+ }
+ }
+
+ if (!accessToken) return { autoload: false }
+
+ // Store normalized host for token lookups
+ const normalizedHost = host.replace(/\/$/, "")
+
+ // Function to get fresh token from Databricks CLI token cache
+ const getFreshToken = async (): Promise => {
+ try {
+ const homedir = Env.get("HOME") ?? Env.get("USERPROFILE")
+ if (homedir) {
+ const tokenCachePath = `${homedir}/.databricks/token-cache.json`
+ const file = Bun.file(tokenCachePath)
+ if (await file.exists()) {
+ const cacheContent = await file.text()
+ const cache = JSON.parse(cacheContent) as {
+ version: number
+ tokens: Record<
+ string,
+ {
+ access_token: string
+ refresh_token?: string
+ expiry: string
+ expires_in?: number
+ }
+ >
+ }
+
+ const tokenEntry = cache.tokens[normalizedHost]
+ if (tokenEntry) {
+ const expiry = new Date(tokenEntry.expiry)
+ const now = new Date()
+
+ // Check if cached token is still valid (with 5 minute buffer)
+ if (expiry.getTime() - 5 * 60 * 1000 > now.getTime()) {
+ return tokenEntry.access_token
+ }
+
+ // Token expired, try to refresh it if we have a refresh_token
+ if (tokenEntry.refresh_token) {
+ log.debug("Databricks CLI token expired during session, attempting refresh")
+ const tokenEndpoint = `${normalizedHost}/oidc/v1/token`
+ try {
+ const response = await fetch(tokenEndpoint, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/x-www-form-urlencoded",
+ },
+ body: new URLSearchParams({
+ grant_type: "refresh_token",
+ refresh_token: tokenEntry.refresh_token,
+ client_id: "databricks-cli",
+ }).toString(),
+ })
+ if (response.ok) {
+ const data = (await response.json()) as {
+ access_token: string
+ refresh_token?: string
+ expires_in?: number
+ }
+ log.info("Refreshed Databricks CLI token successfully during session")
+
+ // Update the token cache with new tokens
+ cache.tokens[normalizedHost] = {
+ ...tokenEntry,
+ access_token: data.access_token,
+ refresh_token: data.refresh_token ?? tokenEntry.refresh_token,
+ expiry: new Date(Date.now() + (data.expires_in ?? 3600) * 1000).toISOString(),
+ expires_in: data.expires_in ?? 3600,
+ }
+ await Bun.write(tokenCachePath, JSON.stringify(cache, null, 2))
+
+ return data.access_token
+ } else {
+ log.debug("Failed to refresh Databricks CLI token during session", {
+ status: response.status,
+ statusText: response.statusText,
+ })
+ }
+ } catch (refreshError) {
+ log.debug("Failed to refresh Databricks CLI token during session", {
+ error: refreshError instanceof Error ? refreshError.message : "Unknown error",
+ })
+ }
+ }
+
+ // Token expired and refresh failed or no refresh token available
+ log.warn("Databricks CLI token expired. Run `databricks auth login --profile ` to refresh.")
+ }
+ }
+ }
+ } catch (e) {
+ log.debug("Failed to read Databricks CLI token cache", {
+ error: e instanceof Error ? e.message : "Unknown error",
+ })
+ }
+
+ // Fall back to the token we got at initialization
+ return accessToken
+ }
+
+ // Define default Databricks Foundation Model API endpoints
+ // These are the pay-per-token endpoints available in most workspaces
+ // Users can override or add more models in their opencode.json config
+ const defaultModels: Record = {
+ // OpenAI GPT Models
+ "databricks-gpt-5-2": {
+ id: "databricks-gpt-5-2",
+ name: "GPT-5.2 (Databricks)",
+ family: "gpt-5",
+ attachment: true,
+ reasoning: true,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-12-17",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 1.25, output: 10, cache_read: 0.125 },
+ limit: { context: 400000, output: 128000 },
+ options: {},
+ },
+ "databricks-gpt-5-1": {
+ id: "databricks-gpt-5-1",
+ name: "GPT-5.1 (Databricks)",
+ family: "gpt-5",
+ attachment: true,
+ reasoning: true,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-10-10",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 1.25, output: 10, cache_read: 0.125 },
+ limit: { context: 400000, output: 128000 },
+ options: {},
+ },
+ "databricks-gpt-5-1-codex-max": {
+ id: "databricks-gpt-5-1-codex-max",
+ name: "GPT-5.1 Codex Max (Databricks)",
+ family: "gpt-5-codex",
+ attachment: true,
+ reasoning: true,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-10-10",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 2.5, output: 20, cache_read: 0.25 },
+ limit: { context: 400000, output: 128000 },
+ options: {},
+ },
+ "databricks-gpt-5": {
+ id: "databricks-gpt-5",
+ name: "GPT-5 (Databricks)",
+ family: "gpt-5",
+ attachment: true,
+ reasoning: true,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-06-12",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 1.25, output: 10, cache_read: 0.125 },
+ limit: { context: 400000, output: 128000 },
+ options: {},
+ },
+ "databricks-gpt-5-mini": {
+ id: "databricks-gpt-5-mini",
+ name: "GPT-5 mini (Databricks)",
+ family: "gpt-5-mini",
+ attachment: true,
+ reasoning: true,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-06-12",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 0.15, output: 0.6, cache_read: 0.015 },
+ limit: { context: 400000, output: 128000 },
+ options: {},
+ },
+ "databricks-gpt-5-nano": {
+ id: "databricks-gpt-5-nano",
+ name: "GPT-5 nano (Databricks)",
+ family: "gpt-5-nano",
+ attachment: true,
+ reasoning: false,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-06-12",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 0.05, output: 0.2, cache_read: 0.005 },
+ limit: { context: 400000, output: 128000 },
+ options: {},
+ },
+ "databricks-gpt-5-1-codex-mini": {
+ id: "databricks-gpt-5-1-codex-mini",
+ name: "GPT-5.1 Codex Mini (Databricks)",
+ family: "gpt-5.1-codex",
+ attachment: true,
+ reasoning: true,
+ tool_call: false, // Only supports Responses API, not Chat Completions API
+ temperature: true,
+ release_date: "2025-09-15",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 0.15, output: 0.6, cache_read: 0.015 },
+ limit: { context: 400000, output: 128000 },
+ options: {},
+ },
+ "databricks-gpt-oss-120b": {
+ id: "databricks-gpt-oss-120b",
+ name: "GPT OSS 120B (Databricks)",
+ family: "gpt-oss",
+ attachment: false,
+ reasoning: true,
+ tool_call: false, // OSS models don't support full JSON Schema (e.g., maxLength) for tool parameters
+ temperature: true,
+ release_date: "2025-11-01",
+ modalities: { input: ["text"], output: ["text"] },
+ cost: { input: 0.5, output: 1.5 },
+ limit: { context: 128000, output: 32000 },
+ options: {},
+ },
+ "databricks-gpt-oss-20b": {
+ id: "databricks-gpt-oss-20b",
+ name: "GPT OSS 20B (Databricks)",
+ family: "gpt-oss",
+ attachment: false,
+ reasoning: true,
+ tool_call: false, // OSS models don't support full JSON Schema (e.g., maxLength) for tool parameters
+ temperature: true,
+ release_date: "2025-11-01",
+ modalities: { input: ["text"], output: ["text"] },
+ cost: { input: 0.1, output: 0.3 },
+ limit: { context: 128000, output: 32000 },
+ options: {},
+ },
+ // Google Gemini Models
+ "databricks-gemini-3-pro": {
+ id: "databricks-gemini-3-pro",
+ name: "Gemini 3 Pro (Databricks)",
+ family: "gemini-3",
+ attachment: true,
+ reasoning: true,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-11-20",
+ modalities: { input: ["text", "image", "audio", "video"], output: ["text"] },
+ cost: { input: 2, output: 12, cache_read: 0.2 },
+ limit: { context: 1000000, output: 65536 },
+ options: {},
+ },
+ "databricks-gemini-3-flash": {
+ id: "databricks-gemini-3-flash",
+ name: "Gemini 3 Flash (Databricks)",
+ family: "gemini-3",
+ attachment: true,
+ reasoning: false,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-11-20",
+ modalities: { input: ["text", "image", "audio", "video"], output: ["text"] },
+ cost: { input: 0.5, output: 3, cache_read: 0.05 },
+ limit: { context: 1000000, output: 65536 },
+ options: {},
+ },
+ "databricks-gemini-2-5-pro": {
+ id: "databricks-gemini-2-5-pro",
+ name: "Gemini 2.5 Pro (Databricks)",
+ family: "gemini-2.5",
+ attachment: true,
+ reasoning: true,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-04-10",
+ modalities: { input: ["text", "image", "audio", "video"], output: ["text", "audio"] },
+ cost: { input: 1.25, output: 10, cache_read: 0.125 },
+ limit: { context: 1000000, output: 65536 },
+ options: {},
+ },
+ "databricks-gemini-2-5-flash": {
+ id: "databricks-gemini-2-5-flash",
+ name: "Gemini 2.5 Flash (Databricks)",
+ family: "gemini-2.5",
+ attachment: true,
+ reasoning: true,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-04-10",
+ modalities: { input: ["text", "image", "audio", "video"], output: ["text"] },
+ cost: { input: 0.15, output: 0.6, cache_read: 0.015 },
+ limit: { context: 1000000, output: 65536 },
+ options: {},
+ },
+ "databricks-gemma-3-12b": {
+ id: "databricks-gemma-3-12b",
+ name: "Gemma 3 12B (Databricks)",
+ family: "gemma-3",
+ attachment: true,
+ reasoning: false,
+ tool_call: false, // Smaller model with limited tool support
+ temperature: true,
+ release_date: "2025-11-01",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 0.1, output: 0.3 },
+ limit: { context: 128000, output: 8192 },
+ options: {},
+ },
+ // Anthropic Claude Models
+ "databricks-claude-sonnet-4": {
+ id: "databricks-claude-sonnet-4",
+ name: "Claude Sonnet 4 (Databricks)",
+ family: "claude-sonnet",
+ attachment: true,
+ reasoning: false,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-05-22",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 3, output: 15, cache_read: 0.3 },
+ limit: { context: 200000, output: 64000 },
+ options: {},
+ },
+ "databricks-claude-sonnet-4-5": {
+ id: "databricks-claude-sonnet-4-5",
+ name: "Claude Sonnet 4.5 (Databricks)",
+ family: "claude-sonnet",
+ attachment: true,
+ reasoning: true,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-10-22",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 3, output: 15, cache_read: 0.3 },
+ limit: { context: 200000, output: 64000 },
+ options: {},
+ },
+ "databricks-claude-haiku-4-5": {
+ id: "databricks-claude-haiku-4-5",
+ name: "Claude Haiku 4.5 (Databricks)",
+ family: "claude-haiku",
+ attachment: true,
+ reasoning: false,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-10-22",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 0.8, output: 4, cache_read: 0.08 },
+ limit: { context: 200000, output: 8192 },
+ options: {},
+ },
+ "databricks-claude-opus-4-5": {
+ id: "databricks-claude-opus-4-5",
+ name: "Claude Opus 4.5 (Databricks)",
+ family: "claude-opus",
+ attachment: true,
+ reasoning: true,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-10-22",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 15, output: 75, cache_read: 1.5 },
+ limit: { context: 200000, output: 32000 },
+ options: {},
+ },
+ "databricks-meta-llama-3-3-70b-instruct": {
+ id: "databricks-meta-llama-3-3-70b-instruct",
+ name: "Meta Llama 3.3 70B Instruct (Databricks)",
+ family: "llama-3.3",
+ attachment: false,
+ reasoning: false,
+ tool_call: false, // Llama models have unreliable tool support via OpenAI-compatible API
+ temperature: true,
+ release_date: "2024-12-06",
+ modalities: { input: ["text"], output: ["text"] },
+ cost: { input: 0.65, output: 2.56 },
+ limit: { context: 128000, output: 4096 },
+ options: {},
+ },
+ "databricks-claude-3-7-sonnet": {
+ id: "databricks-claude-3-7-sonnet",
+ name: "Claude 3.7 Sonnet (Databricks)",
+ family: "claude-sonnet",
+ attachment: true,
+ reasoning: true,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-02-24",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 3, output: 15, cache_read: 0.3 },
+ limit: { context: 200000, output: 64000 },
+ options: {},
+ },
+ "databricks-claude-opus-4-1": {
+ id: "databricks-claude-opus-4-1",
+ name: "Claude Opus 4.1 (Databricks)",
+ family: "claude-opus",
+ attachment: true,
+ reasoning: true,
+ tool_call: true,
+ temperature: true,
+ release_date: "2025-04-16",
+ modalities: { input: ["text", "image"], output: ["text"] },
+ cost: { input: 15, output: 75, cache_read: 1.5 },
+ limit: { context: 200000, output: 32000 },
+ options: {},
+ },
+ "databricks-llama-4-maverick": {
+ id: "databricks-llama-4-maverick",
+ name: "Llama 4 Maverick (Databricks)",
+ family: "llama-4",
+ attachment: false,
+ reasoning: false,
+ tool_call: false, // Llama models have unreliable tool support via OpenAI-compatible API
+ temperature: true,
+ release_date: "2025-04-05",
+ modalities: { input: ["text"], output: ["text"] },
+ cost: { input: 0.2, output: 0.6 },
+ limit: { context: 1048576, output: 65536 },
+ options: {},
+ },
+ "databricks-meta-llama-3-1-405b-instruct": {
+ id: "databricks-meta-llama-3-1-405b-instruct",
+ name: "Meta Llama 3.1 405B Instruct (Databricks)",
+ family: "llama-3.1",
+ attachment: false,
+ reasoning: false,
+ tool_call: false, // Llama models have unreliable tool support via OpenAI-compatible API
+ temperature: true,
+ release_date: "2024-07-23",
+ modalities: { input: ["text"], output: ["text"] },
+ cost: { input: 3, output: 3 },
+ limit: { context: 128000, output: 4096 },
+ options: {},
+ },
+ "databricks-meta-llama-3-1-8b-instruct": {
+ id: "databricks-meta-llama-3-1-8b-instruct",
+ name: "Meta Llama 3.1 8B Instruct (Databricks)",
+ family: "llama-3.1",
+ attachment: false,
+ reasoning: false,
+ tool_call: false, // Llama models have unreliable tool support via OpenAI-compatible API
+ temperature: true,
+ release_date: "2024-07-23",
+ modalities: { input: ["text"], output: ["text"] },
+ cost: { input: 0.1, output: 0.1 },
+ limit: { context: 128000, output: 4096 },
+ options: {},
+ },
+ // Qwen Models
+ "databricks-qwen3-next-80b-a3b-instruct": {
+ id: "databricks-qwen3-next-80b-a3b-instruct",
+ name: "Qwen3 Next 80B A3B Instruct (Databricks)",
+ family: "qwen3",
+ attachment: false,
+ reasoning: false,
+ tool_call: false, // Qwen models have unreliable tool support via OpenAI-compatible API
+ temperature: true,
+ release_date: "2025-11-01",
+ modalities: { input: ["text"], output: ["text"] },
+ cost: { input: 0.5, output: 1.5 },
+ limit: { context: 512000, output: 32768 },
+ options: {},
+ },
+ }
+
+ // Transform ModelsDev.Model to Provider.Model format
+ function toProviderModel(model: ModelsDev.Model): Model {
+ return {
+ id: model.id,
+ providerID: "databricks",
+ name: model.name,
+ family: model.family,
+ api: {
+ id: model.id,
+ url: baseURL,
+ npm: "@ai-sdk/openai-compatible",
+ },
+ status: "active",
+ headers: {},
+ options: model.options ?? {},
+ cost: {
+ input: model.cost?.input ?? 0,
+ output: model.cost?.output ?? 0,
+ cache: {
+ read: model.cost?.cache_read ?? 0,
+ write: model.cost?.cache_write ?? 0,
+ },
+ },
+ limit: {
+ context: model.limit.context,
+ output: model.limit.output,
+ },
+ capabilities: {
+ temperature: model.temperature,
+ reasoning: model.reasoning,
+ attachment: model.attachment,
+ toolcall: model.tool_call,
+ input: {
+ text: model.modalities?.input?.includes("text") ?? false,
+ audio: model.modalities?.input?.includes("audio") ?? false,
+ image: model.modalities?.input?.includes("image") ?? false,
+ video: model.modalities?.input?.includes("video") ?? false,
+ pdf: model.modalities?.input?.includes("pdf") ?? false,
+ },
+ output: {
+ text: model.modalities?.output?.includes("text") ?? false,
+ audio: model.modalities?.output?.includes("audio") ?? false,
+ image: model.modalities?.output?.includes("image") ?? false,
+ video: model.modalities?.output?.includes("video") ?? false,
+ pdf: model.modalities?.output?.includes("pdf") ?? false,
+ },
+ interleaved: false,
+ },
+ release_date: model.release_date,
+ variants: {},
+ }
+ }
+
+ // Add default models to the input provider if not already defined
+ // Only include models that support tool calling since opencode requires it
+ for (const [modelID, model] of Object.entries(defaultModels)) {
+ if (!input.models[modelID] && model.tool_call) {
+ input.models[modelID] = toProviderModel(model)
+ }
+ }
+
+ // Custom fetch that gets fresh token before each request and fixes empty content
+ const databricksFetch = async (input: RequestInfo | URL, init?: RequestInit) => {
+ const freshToken = await getFreshToken()
+ const headers = new Headers(init?.headers)
+ headers.set("Authorization", `Bearer ${freshToken}`)
+
+ // Fix empty content issue: Databricks API rejects messages with empty string content
+ // The AI SDK sends content: "" for assistant messages with only tool calls
+ let body = init?.body
+ let isGeminiModel = false
+ if (body && typeof body === "string") {
+ try {
+ const parsed = JSON.parse(body)
+ // Detect if this is a Gemini model request
+ isGeminiModel = parsed.model?.includes("gemini") ?? false
+
+ if (parsed.messages && Array.isArray(parsed.messages)) {
+ parsed.messages = parsed.messages.map((msg: any) => {
+ // For assistant messages with tool_calls but empty content, set content to null
+ if (msg.role === "assistant" && msg.tool_calls && msg.content === "") {
+ return { ...msg, content: null }
+ }
+ return msg
+ })
+ body = JSON.stringify(parsed)
+ }
+ } catch {
+ // If parsing fails, use original body
+ }
+ }
+
+ const response = await fetch(input, { ...init, body, headers })
+
+ // For Gemini models, transform streaming responses
+ // Gemini returns content as array [{type:"text", text:"..."}] but AI SDK expects string
+ if (isGeminiModel && response.body) {
+ const originalBody = response.body
+ const transformStream = new TransformStream({
+ transform(chunk, controller) {
+ const text = new TextDecoder().decode(chunk)
+ const lines = text.split("\n")
+ const transformedLines = lines.map((line) => {
+ if (!line.startsWith("data: ") || line === "data: [DONE]") {
+ return line
+ }
+
+ try {
+ const jsonStr = line.slice(6) // Remove "data: " prefix
+ if (!jsonStr.trim()) return line
+
+ const data = JSON.parse(jsonStr)
+
+ // Transform choices[].delta.content from array to string
+ if (data.choices && Array.isArray(data.choices)) {
+ for (const choice of data.choices) {
+ if (choice.delta && Array.isArray(choice.delta.content)) {
+ // Extract text from content array
+ const textParts = choice.delta.content
+ .filter((part: any) => part.type === "text" && part.text)
+ .map((part: any) => part.text)
+ choice.delta.content = textParts.join("")
+ }
+ }
+ return "data: " + JSON.stringify(data)
+ }
+ } catch {
+ // If parsing fails, return original line
+ }
+ return line
+ })
+
+ controller.enqueue(new TextEncoder().encode(transformedLines.join("\n")))
+ },
+ })
+
+ const transformedBody = originalBody.pipeThrough(transformStream)
+ return new Response(transformedBody, {
+ status: response.status,
+ statusText: response.statusText,
+ headers: response.headers,
+ })
+ }
+
+ return response
+ }
+
+ return {
+ autoload: true,
+ async getModel(sdk: any, modelID: string, _options?: Record) {
+ return sdk.languageModel(modelID)
+ },
+ options: {
+ baseURL,
+ apiKey: accessToken,
+ // Disable stream_options to prevent "unknown field" errors with Databricks OSS models
+ includeUsage: false,
+ headers: {
+ "User-Agent": "opencode",
+ // Prevent Claude beta headers from breaking Databricks Model Serving
+ "x-databricks-disable-beta-headers": "true",
+ },
+ // Use custom fetch that refreshes token when expired
+ fetch: databricksFetch,
+ },
+ }
+ },
}
export const Model = z
@@ -712,6 +1629,19 @@ export namespace Provider {
}
}
+ // Add Databricks provider for Foundation Model APIs
+ // This provider is not in models.dev so we create it programmatically
+ if (!database["databricks"]) {
+ database["databricks"] = {
+ id: "databricks",
+ name: "Databricks",
+ source: "custom",
+ env: ["DATABRICKS_TOKEN"],
+ options: {},
+ models: {},
+ }
+ }
+
function mergeProvider(providerID: string, provider: Partial) {
const existing = providers[providerID]
if (existing) {
diff --git a/packages/opencode/src/provider/transform.ts b/packages/opencode/src/provider/transform.ts
index 39b25a4b5b4..be60cdee911 100644
--- a/packages/opencode/src/provider/transform.ts
+++ b/packages/opencode/src/provider/transform.ts
@@ -45,9 +45,9 @@ export namespace ProviderTransform {
model: Provider.Model,
options: Record,
): ModelMessage[] {
- // Anthropic rejects messages with empty content - filter out empty string messages
+ // Anthropic and Databricks reject messages with empty content - filter out empty string messages
// and remove empty text/reasoning parts from array content
- if (model.api.npm === "@ai-sdk/anthropic") {
+ if (model.api.npm === "@ai-sdk/anthropic" || model.providerID === "databricks") {
msgs = msgs
.map((msg) => {
if (typeof msg.content === "string") {
@@ -57,7 +57,7 @@ export namespace ProviderTransform {
if (!Array.isArray(msg.content)) return msg
const filtered = msg.content.filter((part) => {
if (part.type === "text" || part.type === "reasoning") {
- return part.text !== ""
+ return (part as any).text !== ""
}
return true
})
@@ -249,7 +249,9 @@ export namespace ProviderTransform {
model.api.id.includes("claude") ||
model.id.includes("anthropic") ||
model.id.includes("claude") ||
- model.api.npm === "@ai-sdk/anthropic"
+ model.api.npm === "@ai-sdk/anthropic" ||
+ // Apply caching for Databricks models that support it (GPT, Gemini)
+ (model.providerID === "databricks" && model.cost.cache.read > 0)
) {
msgs = applyCaching(msgs, model.providerID)
}
@@ -723,6 +725,87 @@ export namespace ProviderTransform {
}
export function schema(model: Provider.Model, schema: JSONSchema.BaseSchema) {
+ // Databricks requires type: "object" on tool parameter schemas
+ if (model.providerID === "databricks") {
+ const ensureType = (obj: any): any => {
+ if (obj === null || typeof obj !== "object") {
+ return obj
+ }
+ if (Array.isArray(obj)) {
+ return obj.map(ensureType)
+ }
+ const result: any = { ...obj }
+ // If schema has properties but no type, add type: "object"
+ if (result.properties && !result.type) {
+ result.type = "object"
+ }
+ // Recursively process nested schemas
+ for (const [key, value] of Object.entries(result)) {
+ if (typeof value === "object" && value !== null) {
+ result[key] = ensureType(value)
+ }
+ }
+ return result
+ }
+ schema = ensureType(schema)
+
+ // For Databricks Gemini models, strip $schema and resolve $ref references
+ // Gemini API rejects tool schemas containing $schema field
+ if (model.id.includes("gemini")) {
+ const sanitizeForGemini = (obj: any, defs?: Record, resolving?: Set): any => {
+ if (obj === null || typeof obj !== "object") {
+ return obj
+ }
+
+ if (Array.isArray(obj)) {
+ return obj.map((item) => sanitizeForGemini(item, defs, resolving))
+ }
+
+ const result: any = {}
+ const seen = resolving ?? new Set()
+
+ // Collect $defs/definitions for reference resolution
+ const definitions = obj.$defs ?? obj.definitions ?? defs
+
+ for (const [key, value] of Object.entries(obj)) {
+ // Strip $schema, $defs, and definitions fields
+ if (key === "$schema" || key === "$defs" || key === "definitions") {
+ continue
+ }
+
+ // Resolve $ref references inline
+ if (key === "$ref" && typeof value === "string" && definitions) {
+ const refPath = value.replace(/^#\/(\$defs|definitions)\//, "")
+ // Detect circular references
+ if (seen.has(refPath)) {
+ Object.assign(result, { type: "object" })
+ continue
+ }
+ const resolved = definitions[refPath]
+ if (resolved) {
+ seen.add(refPath)
+ // Merge resolved reference into result (without the $ref key)
+ const sanitized = sanitizeForGemini(resolved, definitions, seen)
+ Object.assign(result, sanitized)
+ seen.delete(refPath)
+ continue
+ }
+ }
+
+ if (typeof value === "object" && value !== null) {
+ result[key] = sanitizeForGemini(value, definitions, seen)
+ } else {
+ result[key] = value
+ }
+ }
+
+ return result
+ }
+
+ schema = sanitizeForGemini(schema)
+ }
+ }
+
/*
if (["openai", "azure"].includes(providerID)) {
if (schema.type === "object" && schema.properties) {
diff --git a/packages/opencode/src/server/routes/provider.ts b/packages/opencode/src/server/routes/provider.ts
index 872b48be79d..b5547851625 100644
--- a/packages/opencode/src/server/routes/provider.ts
+++ b/packages/opencode/src/server/routes/provider.ts
@@ -40,6 +40,17 @@ export const ProviderRoutes = lazy(() =>
const enabled = config.enabled_providers ? new Set(config.enabled_providers) : undefined
const allProviders = await ModelsDev.get()
+
+ // Add Databricks if not already present (it's not in models.dev)
+ if (!allProviders["databricks"]) {
+ allProviders["databricks"] = {
+ id: "databricks",
+ name: "Databricks",
+ env: ["DATABRICKS_TOKEN"],
+ models: {},
+ }
+ }
+
const filteredProviders: Record = {}
for (const [key, value] of Object.entries(allProviders)) {
if ((enabled ? enabled.has(key) : true) && !disabled.has(key)) {
@@ -54,7 +65,14 @@ export const ProviderRoutes = lazy(() =>
)
return c.json({
all: Object.values(providers),
- default: mapValues(providers, (item) => Provider.sort(Object.values(item.models))[0].id),
+ default: Object.fromEntries(
+ Object.entries(providers)
+ .map(([key, item]) => {
+ const sorted = Provider.sort(Object.values(item.models))
+ return sorted[0] ? [key, sorted[0].id] : undefined
+ })
+ .filter((entry): entry is [string, string] => entry !== undefined),
+ ),
connected: Object.keys(connected),
})
},
diff --git a/packages/opencode/src/session/message-v2.ts b/packages/opencode/src/session/message-v2.ts
index 6358c6c5e9b..29443af6ffd 100644
--- a/packages/opencode/src/session/message-v2.ts
+++ b/packages/opencode/src/session/message-v2.ts
@@ -441,7 +441,8 @@ export namespace MessageV2 {
const toModelOutput = (output: unknown) => {
if (typeof output === "string") {
- return { type: "text", value: output }
+ // Ensure non-empty text for APIs that reject empty content (Databricks, Anthropic)
+ return { type: "text", value: output || "[No output]" }
}
if (typeof output === "object") {
@@ -456,7 +457,8 @@ export namespace MessageV2 {
return {
type: "content",
value: [
- { type: "text", text: outputObject.text },
+ // Ensure non-empty text for APIs that reject empty content
+ { type: "text", text: outputObject.text || "[No output]" },
...attachments.map((attachment) => ({
type: "media",
mediaType: attachment.mime,
@@ -483,7 +485,8 @@ export namespace MessageV2 {
}
result.push(userMessage)
for (const part of msg.parts) {
- if (part.type === "text" && !part.ignored)
+ // Skip empty or ignored text parts - some APIs reject empty text content blocks
+ if (part.type === "text" && !part.ignored && part.text)
userMessage.parts.push({
type: "text",
text: part.text,
@@ -530,7 +533,8 @@ export namespace MessageV2 {
parts: [],
}
for (const part of msg.parts) {
- if (part.type === "text")
+ // Skip empty text parts - some APIs (Databricks, Anthropic) reject empty text content blocks
+ if (part.type === "text" && part.text)
assistantMessage.parts.push({
type: "text",
text: part.text,
@@ -543,7 +547,10 @@ export namespace MessageV2 {
if (part.type === "tool") {
toolNames.add(part.tool)
if (part.state.status === "completed") {
- const outputText = part.state.time.compacted ? "[Old tool result content cleared]" : part.state.output
+ // Ensure non-empty output text - some APIs reject empty text content blocks
+ const outputText = part.state.time.compacted
+ ? "[Old tool result content cleared]"
+ : (part.state.output || "[No output]")
const attachments = part.state.time.compacted ? [] : (part.state.attachments ?? [])
const output =
attachments.length > 0
@@ -583,7 +590,8 @@ export namespace MessageV2 {
...(differentModel ? {} : { callProviderMetadata: part.metadata }),
})
}
- if (part.type === "reasoning") {
+ // Skip empty reasoning parts - some APIs reject empty text content blocks
+ if (part.type === "reasoning" && part.text) {
assistantMessage.parts.push({
type: "reasoning",
text: part.text,
diff --git a/packages/opencode/test/preload.ts b/packages/opencode/test/preload.ts
index c1b03ea8213..f35836c70b2 100644
--- a/packages/opencode/test/preload.ts
+++ b/packages/opencode/test/preload.ts
@@ -51,6 +51,13 @@ delete process.env["DEEPSEEK_API_KEY"]
delete process.env["FIREWORKS_API_KEY"]
delete process.env["CEREBRAS_API_KEY"]
delete process.env["SAMBANOVA_API_KEY"]
+delete process.env["DATABRICKS_HOST"]
+delete process.env["DATABRICKS_TOKEN"]
+delete process.env["DATABRICKS_CLIENT_ID"]
+delete process.env["DATABRICKS_CLIENT_SECRET"]
+delete process.env["ARM_CLIENT_ID"]
+delete process.env["ARM_CLIENT_SECRET"]
+delete process.env["ARM_TENANT_ID"]
// Now safe to import from src/
const { Log } = await import("../src/util/log")
diff --git a/packages/opencode/test/provider/databricks.test.ts b/packages/opencode/test/provider/databricks.test.ts
new file mode 100644
index 00000000000..edf307f0dee
--- /dev/null
+++ b/packages/opencode/test/provider/databricks.test.ts
@@ -0,0 +1,1509 @@
+import { test, expect, mock } from "bun:test"
+import path from "path"
+
+// === Mocks ===
+// These mocks are required because Provider.list() triggers:
+// 1. BunProc.install() for various packages
+// 2. Plugin.list() which calls BunProc.install() for default plugins
+// Without mocks, these would attempt real package installations that timeout in tests.
+
+mock.module("../../src/bun/index", () => ({
+ BunProc: {
+ install: async (pkg: string) => pkg,
+ run: async () => {
+ throw new Error("BunProc.run should not be called in tests")
+ },
+ which: () => process.execPath,
+ InstallFailedError: class extends Error {},
+ },
+}))
+
+mock.module("@aws-sdk/credential-providers", () => ({
+ fromNodeProviderChain: () => async () => ({
+ accessKeyId: "mock-access-key-id",
+ secretAccessKey: "mock-secret-access-key",
+ }),
+}))
+
+const mockPlugin = () => ({})
+mock.module("opencode-copilot-auth", () => ({ default: mockPlugin }))
+mock.module("opencode-anthropic-auth", () => ({ default: mockPlugin }))
+mock.module("@gitlab/opencode-gitlab-auth", () => ({ default: mockPlugin }))
+
+// Import after mocks are set up
+const { tmpdir } = await import("../fixture/fixture")
+const { Instance } = await import("../../src/project/instance")
+const { Provider } = await import("../../src/provider/provider")
+const { Env } = await import("../../src/env")
+const { Global } = await import("../../src/global")
+
+test("Databricks: loads when DATABRICKS_HOST and DATABRICKS_TOKEN are set", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ }),
+ )
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ expect(providers["databricks"].name).toBe("Databricks")
+ },
+ })
+})
+
+test("Databricks: does not load when only DATABRICKS_HOST is set (no auth)", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ }),
+ )
+ },
+ })
+
+ // Backup and clear auth.json to ensure no stored Databricks auth
+ const authPath = path.join(Global.Path.data, "auth.json")
+ const authFile = Bun.file(authPath)
+ const existingAuth = (await authFile.exists()) ? await authFile.text() : null
+ await Bun.write(authPath, JSON.stringify({}))
+
+ // Save and override HOME to prevent finding real ~/.databricks/token-cache.json
+ const originalHome = process.env.HOME
+ process.env.HOME = tmp.path
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.remove("DATABRICKS_TOKEN") // Explicitly clear token
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeUndefined()
+ },
+ })
+ } finally {
+ // Restore HOME
+ if (originalHome) process.env.HOME = originalHome
+ // Restore auth.json
+ if (existingAuth !== null) {
+ await Bun.write(authPath, existingAuth)
+ }
+ }
+})
+
+test("Databricks: config host takes precedence over DATABRICKS_HOST env var", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ provider: {
+ databricks: {
+ options: {
+ host: "https://config-workspace.cloud.databricks.com",
+ },
+ },
+ },
+ }),
+ )
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://env-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ // baseURL should use config host
+ expect(providers["databricks"].options.baseURL).toContain("config-workspace")
+ },
+ })
+})
+
+test("Databricks: baseURL option takes precedence", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ provider: {
+ databricks: {
+ options: {
+ baseURL: "https://custom-url.cloud.databricks.com/serving-endpoints",
+ },
+ },
+ },
+ }),
+ )
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://env-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ expect(providers["databricks"].options.baseURL).toBe("https://custom-url.cloud.databricks.com/serving-endpoints")
+ },
+ })
+})
+
+test("Databricks: includes default models", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ }),
+ )
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ const models = Object.keys(providers["databricks"].models)
+ // Should include Claude models (tool-calling capable)
+ expect(models.some((m) => m.includes("claude"))).toBe(true)
+ // Should include GPT models (tool-calling capable)
+ expect(models.some((m) => m.includes("gpt-5"))).toBe(true)
+ // Should include Gemini models (tool-calling capable)
+ expect(models.some((m) => m.includes("gemini"))).toBe(true)
+ },
+ })
+})
+
+test("Databricks: custom models via config", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ provider: {
+ databricks: {
+ models: {
+ "custom-endpoint": {
+ name: "Custom Endpoint",
+ tool_call: true,
+ limit: { context: 100000, output: 10000 },
+ },
+ },
+ },
+ },
+ }),
+ )
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ expect(providers["databricks"].models["custom-endpoint"]).toBeDefined()
+ expect(providers["databricks"].models["custom-endpoint"].name).toBe("Custom Endpoint")
+ },
+ })
+})
+
+test("Databricks: loads when bearer token from auth.json is present", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ }),
+ )
+ },
+ })
+
+ const authPath = path.join(Global.Path.data, "auth.json")
+
+ // Backup existing auth.json if it exists
+ const authFile = Bun.file(authPath)
+ const existingAuth = (await authFile.exists()) ? await authFile.text() : null
+
+ // Write test auth
+ await Bun.write(
+ authPath,
+ JSON.stringify({
+ databricks: {
+ type: "api",
+ key: "test-bearer-token",
+ },
+ }),
+ )
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ // No DATABRICKS_TOKEN env var - using auth.json instead
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ },
+ })
+ } finally {
+ // Restore original auth.json or delete if it didn't exist
+ if (existingAuth !== null) {
+ await Bun.write(authPath, existingAuth)
+ } else {
+ await Bun.write(authPath, JSON.stringify({}))
+ }
+ }
+})
+
+test("Databricks: appends /serving-endpoints to host URL", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ }),
+ )
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ expect(providers["databricks"].options.baseURL).toBe(
+ "https://my-workspace.cloud.databricks.com/serving-endpoints",
+ )
+ },
+ })
+})
+
+test("Databricks: does not duplicate /serving-endpoints if already present", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ provider: {
+ databricks: {
+ options: {
+ host: "https://my-workspace.cloud.databricks.com/serving-endpoints",
+ },
+ },
+ },
+ }),
+ )
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ // Should not duplicate /serving-endpoints
+ expect(providers["databricks"].options.baseURL).toBe(
+ "https://my-workspace.cloud.databricks.com/serving-endpoints",
+ )
+ },
+ })
+})
+
+test("Databricks: sets User-Agent header", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ }),
+ )
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ expect(providers["databricks"].options.headers["User-Agent"]).toBe("opencode")
+ },
+ })
+})
+
+test("Databricks: sets x-databricks-disable-beta-headers header", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ }),
+ )
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ expect(providers["databricks"].options.headers["x-databricks-disable-beta-headers"]).toBe("true")
+ },
+ })
+})
+
+test("Databricks: sets includeUsage to false", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ }),
+ )
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ expect(providers["databricks"].options.includeUsage).toBe(false)
+ },
+ })
+})
+
+// OAuth M2M tests - note: these test the config parsing, not actual token fetching
+// since we'd need to mock the OAuth endpoint
+
+test("Databricks: OAuth M2M credentials via config", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ provider: {
+ databricks: {
+ options: {
+ host: "https://my-workspace.cloud.databricks.com",
+ clientId: "test-client-id",
+ clientSecret: "test-client-secret",
+ },
+ },
+ },
+ }),
+ )
+ },
+ })
+ // This test verifies that the config is parsed correctly
+ // The actual OAuth flow would require mocking the fetch call
+ await Instance.provide({
+ directory: tmp.path,
+ fn: async () => {
+ // Without a way to mock the OAuth endpoint, this will return autoload: false
+ // because the token fetch will fail. We're just verifying config parsing works.
+ const providers = await Provider.list()
+ // Provider won't load because OAuth token fetch fails (no mock endpoint)
+ // This is expected behavior - we'd need to mock fetch for a full test
+ },
+ })
+})
+
+test("Databricks: model capabilities are set correctly", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(
+ path.join(dir, "opencode.json"),
+ JSON.stringify({
+ $schema: "https://opencode.ai/config.json",
+ }),
+ )
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ // Check Claude model capabilities
+ const claudeModel = providers["databricks"].models["databricks-claude-sonnet-4"]
+ expect(claudeModel).toBeDefined()
+ expect(claudeModel.capabilities.toolcall).toBe(true)
+ expect(claudeModel.capabilities.attachment).toBe(true)
+
+ // Check GPT model capabilities
+ const gptModel = providers["databricks"].models["databricks-gpt-5"]
+ expect(gptModel).toBeDefined()
+ expect(gptModel.capabilities.toolcall).toBe(true)
+ expect(gptModel.capabilities.attachment).toBe(true)
+ },
+ })
+})
+
+// Model family tests - verify all model types are present with correct capabilities
+
+test("Databricks: GPT-5 models have correct capabilities", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ const models = providers["databricks"].models
+
+ // GPT-5.2
+ const gpt52 = models["databricks-gpt-5-2"]
+ expect(gpt52).toBeDefined()
+ expect(gpt52.family).toBe("gpt-5")
+ expect(gpt52.capabilities.reasoning).toBe(true)
+ expect(gpt52.capabilities.toolcall).toBe(true)
+ expect(gpt52.capabilities.attachment).toBe(true)
+ expect(gpt52.capabilities.input.image).toBe(true)
+
+ // GPT-5.1
+ const gpt51 = models["databricks-gpt-5-1"]
+ expect(gpt51).toBeDefined()
+ expect(gpt51.family).toBe("gpt-5")
+ expect(gpt51.capabilities.reasoning).toBe(true)
+
+ // GPT-5.1 Codex Max
+ const codexMax = models["databricks-gpt-5-1-codex-max"]
+ expect(codexMax).toBeDefined()
+ expect(codexMax.family).toBe("gpt-5-codex")
+ expect(codexMax.capabilities.reasoning).toBe(true)
+
+ // GPT-5.1 Codex Mini - excluded (only supports Responses API, not Chat Completions API)
+ expect(models["databricks-gpt-5-1-codex-mini"]).toBeUndefined()
+
+ // GPT-5
+ const gpt5 = models["databricks-gpt-5"]
+ expect(gpt5).toBeDefined()
+ expect(gpt5.family).toBe("gpt-5")
+ expect(gpt5.capabilities.reasoning).toBe(true)
+
+ // GPT-5 mini
+ const gpt5Mini = models["databricks-gpt-5-mini"]
+ expect(gpt5Mini).toBeDefined()
+ expect(gpt5Mini.family).toBe("gpt-5-mini")
+ expect(gpt5Mini.capabilities.reasoning).toBe(true)
+
+ // GPT-5 nano - no reasoning
+ const gpt5Nano = models["databricks-gpt-5-nano"]
+ expect(gpt5Nano).toBeDefined()
+ expect(gpt5Nano.family).toBe("gpt-5-nano")
+ expect(gpt5Nano.capabilities.reasoning).toBe(false)
+
+ // GPT OSS models are excluded - they don't support tool calling reliably
+ expect(models["databricks-gpt-oss-120b"]).toBeUndefined()
+ expect(models["databricks-gpt-oss-20b"]).toBeUndefined()
+ },
+ })
+})
+
+test("Databricks: Gemini models have correct capabilities", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ const models = providers["databricks"].models
+
+ // Gemini 3 Pro
+ const gemini3Pro = models["databricks-gemini-3-pro"]
+ expect(gemini3Pro).toBeDefined()
+ expect(gemini3Pro.family).toBe("gemini-3")
+ expect(gemini3Pro.capabilities.reasoning).toBe(true)
+ expect(gemini3Pro.capabilities.input.image).toBe(true)
+ expect(gemini3Pro.capabilities.input.audio).toBe(true)
+ expect(gemini3Pro.capabilities.input.video).toBe(true)
+ expect(gemini3Pro.limit.context).toBe(1000000) // 1M context
+
+ // Gemini 3 Flash - no reasoning
+ const gemini3Flash = models["databricks-gemini-3-flash"]
+ expect(gemini3Flash).toBeDefined()
+ expect(gemini3Flash.family).toBe("gemini-3")
+ expect(gemini3Flash.capabilities.reasoning).toBe(false)
+
+ // Gemini 2.5 Pro
+ const gemini25Pro = models["databricks-gemini-2-5-pro"]
+ expect(gemini25Pro).toBeDefined()
+ expect(gemini25Pro.family).toBe("gemini-2.5")
+ expect(gemini25Pro.capabilities.reasoning).toBe(true)
+
+ // Gemini 2.5 Flash
+ const gemini25Flash = models["databricks-gemini-2-5-flash"]
+ expect(gemini25Flash).toBeDefined()
+ expect(gemini25Flash.family).toBe("gemini-2.5")
+ expect(gemini25Flash.capabilities.reasoning).toBe(true)
+
+ // Gemma 3 12B is excluded - limited tool support
+ expect(models["databricks-gemma-3-12b"]).toBeUndefined()
+ },
+ })
+})
+
+test("Databricks: Claude models have correct capabilities", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ const models = providers["databricks"].models
+
+ // Claude Sonnet 4 - no reasoning
+ const claudeSonnet4 = models["databricks-claude-sonnet-4"]
+ expect(claudeSonnet4).toBeDefined()
+ expect(claudeSonnet4.family).toBe("claude-sonnet")
+ expect(claudeSonnet4.capabilities.reasoning).toBe(false)
+ expect(claudeSonnet4.capabilities.attachment).toBe(true)
+ expect(claudeSonnet4.capabilities.input.image).toBe(true)
+
+ // Claude Sonnet 4.5 - with reasoning
+ const claudeSonnet45 = models["databricks-claude-sonnet-4-5"]
+ expect(claudeSonnet45).toBeDefined()
+ expect(claudeSonnet45.family).toBe("claude-sonnet")
+ expect(claudeSonnet45.capabilities.reasoning).toBe(true)
+
+ // Claude Haiku 4.5
+ const claudeHaiku = models["databricks-claude-haiku-4-5"]
+ expect(claudeHaiku).toBeDefined()
+ expect(claudeHaiku.family).toBe("claude-haiku")
+ expect(claudeHaiku.capabilities.reasoning).toBe(false)
+
+ // Claude Opus 4.5
+ const claudeOpus45 = models["databricks-claude-opus-4-5"]
+ expect(claudeOpus45).toBeDefined()
+ expect(claudeOpus45.family).toBe("claude-opus")
+ expect(claudeOpus45.capabilities.reasoning).toBe(true)
+
+ // Claude 3.7 Sonnet
+ const claude37 = models["databricks-claude-3-7-sonnet"]
+ expect(claude37).toBeDefined()
+ expect(claude37.family).toBe("claude-sonnet")
+ expect(claude37.capabilities.reasoning).toBe(true)
+
+ // Claude Opus 4.1
+ const claudeOpus41 = models["databricks-claude-opus-4-1"]
+ expect(claudeOpus41).toBeDefined()
+ expect(claudeOpus41.family).toBe("claude-opus")
+ expect(claudeOpus41.capabilities.reasoning).toBe(true)
+ },
+ })
+})
+
+test("Databricks: non-tool-calling models are excluded", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ const models = providers["databricks"].models
+
+ // Llama models are excluded - unreliable tool support via OpenAI-compatible API
+ expect(models["databricks-llama-4-maverick"]).toBeUndefined()
+ expect(models["databricks-meta-llama-3-3-70b-instruct"]).toBeUndefined()
+ expect(models["databricks-meta-llama-3-1-405b-instruct"]).toBeUndefined()
+ expect(models["databricks-meta-llama-3-1-8b-instruct"]).toBeUndefined()
+
+ // Qwen models are excluded - unreliable tool support via OpenAI-compatible API
+ expect(models["databricks-qwen3-next-80b-a3b-instruct"]).toBeUndefined()
+ },
+ })
+})
+
+test("Databricks: all models have required API configuration and tool support", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ const models = providers["databricks"].models
+
+ // All models should use openai-compatible SDK and support tool calling
+ for (const [modelId, model] of Object.entries(models)) {
+ expect(model.api.npm).toBe("@ai-sdk/openai-compatible")
+ expect(model.providerID).toBe("databricks")
+ expect(model.api.url).toContain("serving-endpoints")
+ expect(model.status).toBe("active")
+ // All included models must support tool calling
+ expect(model.capabilities.toolcall).toBe(true)
+ }
+ },
+ })
+})
+
+test("Databricks: model costs are set correctly", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ },
+ })
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "test-token")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ const models = providers["databricks"].models
+
+ // GPT-5 models should have cache pricing
+ const gpt5 = models["databricks-gpt-5"]
+ expect(gpt5.cost.input).toBeGreaterThan(0)
+ expect(gpt5.cost.output).toBeGreaterThan(0)
+ expect(gpt5.cost.cache.read).toBeGreaterThan(0)
+
+ // Gemini models should have cache pricing
+ const gemini = models["databricks-gemini-3-pro"]
+ expect(gemini.cost.input).toBeGreaterThan(0)
+ expect(gemini.cost.output).toBeGreaterThan(0)
+ expect(gemini.cost.cache.read).toBeGreaterThan(0)
+
+ // Claude models should have cache pricing
+ const claude = models["databricks-claude-sonnet-4"]
+ expect(claude.cost.input).toBeGreaterThan(0)
+ expect(claude.cost.output).toBeGreaterThan(0)
+ expect(claude.cost.cache.read).toBeGreaterThan(0)
+ },
+ })
+})
+
+// === Databricks CLI Token Cache Tests ===
+
+test("Databricks: loads provider using valid CLI token cache", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ // Create .databricks directory and token cache
+ const databricksDir = path.join(dir, ".databricks")
+ await Bun.write(path.join(databricksDir, ".gitkeep"), "")
+
+ // Create a valid token cache with a token that expires in 1 hour
+ const futureExpiry = new Date(Date.now() + 60 * 60 * 1000).toISOString()
+ await Bun.write(
+ path.join(databricksDir, "token-cache.json"),
+ JSON.stringify({
+ version: 1,
+ tokens: {
+ "https://my-workspace.cloud.databricks.com": {
+ access_token: "cli-cached-token",
+ token_type: "Bearer",
+ refresh_token: "cli-refresh-token",
+ expiry: futureExpiry,
+ expires_in: 3600,
+ },
+ },
+ }),
+ )
+ },
+ })
+
+ // Clear auth.json to ensure we're not using stored auth
+ const authPath = path.join(Global.Path.data, "auth.json")
+ const authFile = Bun.file(authPath)
+ const existingAuth = (await authFile.exists()) ? await authFile.text() : null
+ await Bun.write(authPath, JSON.stringify({}))
+
+ const originalHome = process.env.HOME
+ process.env.HOME = tmp.path
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.remove("DATABRICKS_TOKEN") // No env token - should use CLI cache
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ expect(providers["databricks"].name).toBe("Databricks")
+ },
+ })
+ } finally {
+ if (originalHome) process.env.HOME = originalHome
+ if (existingAuth !== null) {
+ await Bun.write(authPath, existingAuth)
+ }
+ }
+})
+
+test("Databricks: does not load with expired CLI token and no refresh token", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ // Create .databricks directory and token cache
+ const databricksDir = path.join(dir, ".databricks")
+ await Bun.write(path.join(databricksDir, ".gitkeep"), "")
+
+ // Create an expired token cache without refresh token
+ const pastExpiry = new Date(Date.now() - 60 * 60 * 1000).toISOString()
+ await Bun.write(
+ path.join(databricksDir, "token-cache.json"),
+ JSON.stringify({
+ version: 1,
+ tokens: {
+ "https://my-workspace.cloud.databricks.com": {
+ access_token: "expired-token",
+ token_type: "Bearer",
+ refresh_token: "", // Empty refresh token
+ expiry: pastExpiry,
+ expires_in: 3600,
+ },
+ },
+ }),
+ )
+ },
+ })
+
+ // Clear auth.json
+ const authPath = path.join(Global.Path.data, "auth.json")
+ const authFile = Bun.file(authPath)
+ const existingAuth = (await authFile.exists()) ? await authFile.text() : null
+ await Bun.write(authPath, JSON.stringify({}))
+
+ const originalHome = process.env.HOME
+ process.env.HOME = tmp.path
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.remove("DATABRICKS_TOKEN")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ // Provider should not load because token is expired and can't be refreshed
+ expect(providers["databricks"]).toBeUndefined()
+ },
+ })
+ } finally {
+ if (originalHome) process.env.HOME = originalHome
+ if (existingAuth !== null) {
+ await Bun.write(authPath, existingAuth)
+ }
+ }
+})
+
+test("Databricks: does not load when CLI token cache has no matching host", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ // Create .databricks directory and token cache
+ const databricksDir = path.join(dir, ".databricks")
+ await Bun.write(path.join(databricksDir, ".gitkeep"), "")
+
+ // Create a token cache for a different host
+ const futureExpiry = new Date(Date.now() + 60 * 60 * 1000).toISOString()
+ await Bun.write(
+ path.join(databricksDir, "token-cache.json"),
+ JSON.stringify({
+ version: 1,
+ tokens: {
+ "https://other-workspace.cloud.databricks.com": {
+ access_token: "other-workspace-token",
+ token_type: "Bearer",
+ refresh_token: "other-refresh-token",
+ expiry: futureExpiry,
+ expires_in: 3600,
+ },
+ },
+ }),
+ )
+ },
+ })
+
+ // Clear auth.json
+ const authPath = path.join(Global.Path.data, "auth.json")
+ const authFile = Bun.file(authPath)
+ const existingAuth = (await authFile.exists()) ? await authFile.text() : null
+ await Bun.write(authPath, JSON.stringify({}))
+
+ const originalHome = process.env.HOME
+ process.env.HOME = tmp.path
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.remove("DATABRICKS_TOKEN")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ // Provider should not load because no token for this host
+ expect(providers["databricks"]).toBeUndefined()
+ },
+ })
+ } finally {
+ if (originalHome) process.env.HOME = originalHome
+ if (existingAuth !== null) {
+ await Bun.write(authPath, existingAuth)
+ }
+ }
+})
+
+test("Databricks: CLI token cache handles trailing slash in host normalization", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ // Create .databricks directory and token cache
+ const databricksDir = path.join(dir, ".databricks")
+ await Bun.write(path.join(databricksDir, ".gitkeep"), "")
+
+ // Create a token cache WITHOUT trailing slash
+ const futureExpiry = new Date(Date.now() + 60 * 60 * 1000).toISOString()
+ await Bun.write(
+ path.join(databricksDir, "token-cache.json"),
+ JSON.stringify({
+ version: 1,
+ tokens: {
+ "https://my-workspace.cloud.databricks.com": {
+ access_token: "normalized-token",
+ token_type: "Bearer",
+ refresh_token: "refresh-token",
+ expiry: futureExpiry,
+ expires_in: 3600,
+ },
+ },
+ }),
+ )
+ },
+ })
+
+ // Clear auth.json
+ const authPath = path.join(Global.Path.data, "auth.json")
+ const authFile = Bun.file(authPath)
+ const existingAuth = (await authFile.exists()) ? await authFile.text() : null
+ await Bun.write(authPath, JSON.stringify({}))
+
+ const originalHome = process.env.HOME
+ process.env.HOME = tmp.path
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ // Set host WITH trailing slash - should still match
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com/")
+ Env.remove("DATABRICKS_TOKEN")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ // Provider should load because host normalization removes trailing slash
+ expect(providers["databricks"]).toBeDefined()
+ },
+ })
+ } finally {
+ if (originalHome) process.env.HOME = originalHome
+ if (existingAuth !== null) {
+ await Bun.write(authPath, existingAuth)
+ }
+ }
+})
+
+test("Databricks: respects 5-minute buffer for token expiry", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ // Create .databricks directory and token cache
+ const databricksDir = path.join(dir, ".databricks")
+ await Bun.write(path.join(databricksDir, ".gitkeep"), "")
+
+ // Create a token that expires in 3 minutes (less than 5 minute buffer)
+ // This should be treated as expired
+ const nearExpiry = new Date(Date.now() + 3 * 60 * 1000).toISOString()
+ await Bun.write(
+ path.join(databricksDir, "token-cache.json"),
+ JSON.stringify({
+ version: 1,
+ tokens: {
+ "https://my-workspace.cloud.databricks.com": {
+ access_token: "near-expiry-token",
+ token_type: "Bearer",
+ refresh_token: "", // No refresh token
+ expiry: nearExpiry,
+ expires_in: 180,
+ },
+ },
+ }),
+ )
+ },
+ })
+
+ // Clear auth.json
+ const authPath = path.join(Global.Path.data, "auth.json")
+ const authFile = Bun.file(authPath)
+ const existingAuth = (await authFile.exists()) ? await authFile.text() : null
+ await Bun.write(authPath, JSON.stringify({}))
+
+ const originalHome = process.env.HOME
+ process.env.HOME = tmp.path
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.remove("DATABRICKS_TOKEN")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ // Provider should not load because token is within 5-minute buffer and no refresh token
+ expect(providers["databricks"]).toBeUndefined()
+ },
+ })
+ } finally {
+ if (originalHome) process.env.HOME = originalHome
+ if (existingAuth !== null) {
+ await Bun.write(authPath, existingAuth)
+ }
+ }
+})
+
+test("Databricks: handles malformed token cache gracefully", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ // Create .databricks directory with malformed token cache
+ const databricksDir = path.join(dir, ".databricks")
+ await Bun.write(path.join(databricksDir, ".gitkeep"), "")
+ await Bun.write(path.join(databricksDir, "token-cache.json"), "{ invalid json }")
+ },
+ })
+
+ // Clear auth.json
+ const authPath = path.join(Global.Path.data, "auth.json")
+ const authFile = Bun.file(authPath)
+ const existingAuth = (await authFile.exists()) ? await authFile.text() : null
+ await Bun.write(authPath, JSON.stringify({}))
+
+ const originalHome = process.env.HOME
+ process.env.HOME = tmp.path
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.remove("DATABRICKS_TOKEN")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ // Provider should not load but also should not throw
+ expect(providers["databricks"]).toBeUndefined()
+ },
+ })
+ } finally {
+ if (originalHome) process.env.HOME = originalHome
+ if (existingAuth !== null) {
+ await Bun.write(authPath, existingAuth)
+ }
+ }
+})
+
+test("Databricks: env token takes precedence over CLI token cache", async () => {
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ // Create .databricks directory and token cache
+ const databricksDir = path.join(dir, ".databricks")
+ await Bun.write(path.join(databricksDir, ".gitkeep"), "")
+
+ const futureExpiry = new Date(Date.now() + 60 * 60 * 1000).toISOString()
+ await Bun.write(
+ path.join(databricksDir, "token-cache.json"),
+ JSON.stringify({
+ version: 1,
+ tokens: {
+ "https://my-workspace.cloud.databricks.com": {
+ access_token: "cli-token-should-not-be-used",
+ token_type: "Bearer",
+ refresh_token: "refresh-token",
+ expiry: futureExpiry,
+ expires_in: 3600,
+ },
+ },
+ }),
+ )
+ },
+ })
+
+ const originalHome = process.env.HOME
+ process.env.HOME = tmp.path
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", "https://my-workspace.cloud.databricks.com")
+ Env.set("DATABRICKS_TOKEN", "env-token-takes-precedence")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ expect(providers["databricks"]).toBeDefined()
+ // Provider should load using env token (PAT takes precedence)
+ },
+ })
+ } finally {
+ if (originalHome) process.env.HOME = originalHome
+ }
+})
+
+test("Databricks: refreshes expired CLI token using refresh_token and updates cache", async () => {
+ const testHost = "https://my-workspace.cloud.databricks.com"
+ const expiredToken = "expired-access-token"
+ const refreshToken = "valid-refresh-token"
+ const newAccessToken = "new-refreshed-access-token"
+ const newRefreshToken = "new-refresh-token"
+
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ // Create .databricks directory and token cache with expired token
+ const databricksDir = path.join(dir, ".databricks")
+ await Bun.write(path.join(databricksDir, ".gitkeep"), "")
+
+ // Create an expired token cache WITH refresh token
+ const pastExpiry = new Date(Date.now() - 60 * 60 * 1000).toISOString()
+ await Bun.write(
+ path.join(databricksDir, "token-cache.json"),
+ JSON.stringify({
+ version: 1,
+ tokens: {
+ [testHost]: {
+ access_token: expiredToken,
+ token_type: "Bearer",
+ refresh_token: refreshToken,
+ expiry: pastExpiry,
+ expires_in: 3600,
+ },
+ },
+ }),
+ )
+ },
+ })
+
+ // Clear auth.json
+ const authPath = path.join(Global.Path.data, "auth.json")
+ const authFile = Bun.file(authPath)
+ const existingAuth = (await authFile.exists()) ? await authFile.text() : null
+ await Bun.write(authPath, JSON.stringify({}))
+
+ const originalHome = process.env.HOME
+ process.env.HOME = tmp.path
+
+ // Mock fetch to intercept the token refresh request
+ const originalFetch = globalThis.fetch
+ const fetchMock = mock(async (input: RequestInfo | URL, init?: RequestInit) => {
+ const url = input.toString()
+ if (url === `${testHost}/oidc/v1/token`) {
+ // Verify the refresh token request
+ const body = init?.body?.toString() ?? ""
+ expect(body).toContain("grant_type=refresh_token")
+ expect(body).toContain(`refresh_token=${refreshToken}`)
+ expect(body).toContain("client_id=databricks-cli")
+
+ return new Response(
+ JSON.stringify({
+ access_token: newAccessToken,
+ refresh_token: newRefreshToken,
+ expires_in: 3600,
+ }),
+ { status: 200, headers: { "Content-Type": "application/json" } },
+ )
+ }
+ // For any other requests, use the original fetch
+ return originalFetch(input, init)
+ })
+ globalThis.fetch = fetchMock as unknown as typeof fetch
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", testHost)
+ Env.remove("DATABRICKS_TOKEN")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ // Provider should load because token was refreshed
+ expect(providers["databricks"]).toBeDefined()
+ expect(providers["databricks"].name).toBe("Databricks")
+
+ // Verify the token refresh endpoint was called
+ expect(fetchMock).toHaveBeenCalled()
+
+ // Verify the token cache was updated with new tokens
+ const tokenCachePath = path.join(tmp.path, ".databricks", "token-cache.json")
+ const updatedCache = JSON.parse(await Bun.file(tokenCachePath).text())
+ expect(updatedCache.tokens[testHost].access_token).toBe(newAccessToken)
+ expect(updatedCache.tokens[testHost].refresh_token).toBe(newRefreshToken)
+ // Verify expiry was updated to future
+ const newExpiry = new Date(updatedCache.tokens[testHost].expiry)
+ expect(newExpiry.getTime()).toBeGreaterThan(Date.now())
+ },
+ })
+ } finally {
+ globalThis.fetch = originalFetch
+ if (originalHome) process.env.HOME = originalHome
+ if (existingAuth !== null) {
+ await Bun.write(authPath, existingAuth)
+ }
+ }
+})
+
+test("Databricks: falls back gracefully when token refresh fails", async () => {
+ const testHost = "https://my-workspace.cloud.databricks.com"
+
+ await using tmp = await tmpdir({
+ init: async (dir) => {
+ await Bun.write(path.join(dir, "opencode.json"), JSON.stringify({ $schema: "https://opencode.ai/config.json" }))
+ // Create .databricks directory and token cache with expired token
+ const databricksDir = path.join(dir, ".databricks")
+ await Bun.write(path.join(databricksDir, ".gitkeep"), "")
+
+ // Create an expired token cache WITH refresh token
+ const pastExpiry = new Date(Date.now() - 60 * 60 * 1000).toISOString()
+ await Bun.write(
+ path.join(databricksDir, "token-cache.json"),
+ JSON.stringify({
+ version: 1,
+ tokens: {
+ [testHost]: {
+ access_token: "expired-token",
+ token_type: "Bearer",
+ refresh_token: "invalid-refresh-token",
+ expiry: pastExpiry,
+ expires_in: 3600,
+ },
+ },
+ }),
+ )
+ },
+ })
+
+ // Clear auth.json
+ const authPath = path.join(Global.Path.data, "auth.json")
+ const authFile = Bun.file(authPath)
+ const existingAuth = (await authFile.exists()) ? await authFile.text() : null
+ await Bun.write(authPath, JSON.stringify({}))
+
+ const originalHome = process.env.HOME
+ process.env.HOME = tmp.path
+
+ // Mock fetch to return an error for token refresh
+ const originalFetch = globalThis.fetch
+ const fetchMock = mock(async (input: RequestInfo | URL, _init?: RequestInit) => {
+ const url = input.toString()
+ if (url === `${testHost}/oidc/v1/token`) {
+ // Return 401 Unauthorized for invalid refresh token
+ return new Response(JSON.stringify({ error: "invalid_grant" }), {
+ status: 401,
+ headers: { "Content-Type": "application/json" },
+ })
+ }
+ return originalFetch(input, _init)
+ })
+ globalThis.fetch = fetchMock as unknown as typeof fetch
+
+ try {
+ await Instance.provide({
+ directory: tmp.path,
+ init: async () => {
+ Env.set("DATABRICKS_HOST", testHost)
+ Env.remove("DATABRICKS_TOKEN")
+ },
+ fn: async () => {
+ const providers = await Provider.list()
+ // Provider should NOT load because refresh failed and no other auth available
+ expect(providers["databricks"]).toBeUndefined()
+
+ // Verify the token refresh endpoint was called
+ expect(fetchMock).toHaveBeenCalled()
+ },
+ })
+ } finally {
+ globalThis.fetch = originalFetch
+ if (originalHome) process.env.HOME = originalHome
+ if (existingAuth !== null) {
+ await Bun.write(authPath, existingAuth)
+ }
+ }
+})
+
+// === Provider Route: Empty Models Handling ===
+
+test("Provider.sort with empty models array does not crash", () => {
+ // Provider.sort([]) returns [], so accessing [0].id would crash
+ const sorted = Provider.sort([])
+ expect(sorted).toEqual([])
+ expect(sorted[0]).toBeUndefined()
+})
+
+test("Provider route: default model map handles providers with empty models", async () => {
+ // Simulate the route logic that crashed at provider.ts:68
+ // mapValues(providers, (item) => Provider.sort(Object.values(item.models))[0].id)
+ // When models is {}, this crashes because [0] is undefined
+ const providers: Record = {
+ databricks: {
+ id: "databricks",
+ name: "Databricks",
+ models: {},
+ },
+ openai: {
+ id: "openai",
+ name: "OpenAI",
+ models: {
+ "gpt-5": {
+ id: "gpt-5",
+ name: "GPT-5",
+ },
+ },
+ },
+ }
+
+ // This is the fixed logic - should not crash
+ const defaults: Record = {}
+ for (const [key, item] of Object.entries(providers)) {
+ const sorted = Provider.sort(Object.values(item.models))
+ if (sorted[0]) {
+ defaults[key] = sorted[0].id
+ }
+ }
+
+ // Provider with empty models should be excluded from defaults
+ expect(defaults["databricks"]).toBeUndefined()
+ // Provider with models should have a default
+ expect(defaults["openai"]).toBe("gpt-5")
+})
+
+// === Gemini Stream Transform Tests ===
+
+test("Gemini stream transform: converts content array to string", () => {
+ // Simulate the transform logic from provider.ts:1357-1393
+ const sseLine = JSON.stringify({
+ choices: [
+ {
+ delta: {
+ content: [{ type: "text", text: "Hello world" }],
+ },
+ },
+ ],
+ })
+
+ const data = JSON.parse(sseLine)
+ if (data.choices && Array.isArray(data.choices)) {
+ for (const choice of data.choices) {
+ if (choice.delta && Array.isArray(choice.delta.content)) {
+ const textParts = choice.delta.content
+ .filter((part: any) => part.type === "text" && part.text)
+ .map((part: any) => part.text)
+ choice.delta.content = textParts.join("")
+ }
+ }
+ }
+
+ expect(data.choices[0].delta.content).toBe("Hello world")
+})
+
+test("Gemini stream transform: handles multiple text parts", () => {
+ const data: any = {
+ choices: [
+ {
+ delta: {
+ content: [
+ { type: "text", text: "Hello " },
+ { type: "text", text: "world" },
+ ],
+ },
+ },
+ ],
+ }
+
+ for (const choice of data.choices) {
+ if (choice.delta && Array.isArray(choice.delta.content)) {
+ const textParts = choice.delta.content
+ .filter((part: any) => part.type === "text" && part.text)
+ .map((part: any) => part.text)
+ choice.delta.content = textParts.join("")
+ }
+ }
+
+ expect(data.choices[0].delta.content).toBe("Hello world")
+})
+
+test("Gemini stream transform: handles thoughtSignature parts", () => {
+ // thoughtSignature parts should be filtered out - only text parts are extracted
+ const data: any = {
+ choices: [
+ {
+ delta: {
+ content: [
+ { type: "text", text: "The answer is 42" },
+ { type: "thoughtSignature", thoughtSignature: "abc123" },
+ ],
+ },
+ },
+ ],
+ }
+
+ for (const choice of data.choices) {
+ if (choice.delta && Array.isArray(choice.delta.content)) {
+ const textParts = choice.delta.content
+ .filter((part: any) => part.type === "text" && part.text)
+ .map((part: any) => part.text)
+ choice.delta.content = textParts.join("")
+ }
+ }
+
+ // Only text content should remain, thoughtSignature should be filtered out
+ expect(data.choices[0].delta.content).toBe("The answer is 42")
+})
+
+test("Gemini stream transform: handles empty content array", () => {
+ const data: any = {
+ choices: [
+ {
+ delta: {
+ content: [],
+ },
+ },
+ ],
+ }
+
+ for (const choice of data.choices) {
+ if (choice.delta && Array.isArray(choice.delta.content)) {
+ const textParts = choice.delta.content
+ .filter((part: any) => part.type === "text" && part.text)
+ .map((part: any) => part.text)
+ choice.delta.content = textParts.join("")
+ }
+ }
+
+ expect(data.choices[0].delta.content).toBe("")
+})
+
+test("Gemini stream transform: passes through string content unchanged", () => {
+ // When content is already a string, it should not be transformed
+ const data = {
+ choices: [
+ {
+ delta: {
+ content: "Already a string",
+ },
+ },
+ ],
+ }
+
+ for (const choice of data.choices) {
+ if (choice.delta && Array.isArray(choice.delta.content)) {
+ const textParts = choice.delta.content
+ .filter((part: any) => part.type === "text" && part.text)
+ .map((part: any) => part.text)
+ choice.delta.content = textParts.join("")
+ }
+ }
+
+ // String content should pass through unchanged
+ expect(data.choices[0].delta.content).toBe("Already a string")
+})
diff --git a/packages/opencode/test/provider/transform.test.ts b/packages/opencode/test/provider/transform.test.ts
index d483539f1f2..e7e092629a4 100644
--- a/packages/opencode/test/provider/transform.test.ts
+++ b/packages/opencode/test/provider/transform.test.ts
@@ -590,6 +590,442 @@ describe("ProviderTransform.message - empty image handling", () => {
})
})
+describe("ProviderTransform.message - databricks empty content filtering", () => {
+ // Test with Databricks Claude (Anthropic model via OpenAI-compatible API)
+ const databricksClaudeModel = {
+ id: "databricks-claude-sonnet-4",
+ providerID: "databricks",
+ api: {
+ id: "databricks-claude-sonnet-4",
+ url: "https://my-workspace.cloud.databricks.com/serving-endpoints",
+ npm: "@ai-sdk/openai-compatible",
+ },
+ name: "Claude Sonnet 4 (Databricks)",
+ capabilities: {
+ temperature: true,
+ reasoning: false,
+ attachment: true,
+ toolcall: true,
+ input: { text: true, audio: false, image: true, video: false, pdf: false },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
+ interleaved: false,
+ },
+ cost: {
+ input: 3,
+ output: 15,
+ cache: { read: 0.3, write: 0 },
+ },
+ limit: {
+ context: 200000,
+ output: 64000,
+ },
+ status: "active",
+ options: {},
+ headers: {},
+ } as any
+
+ // Test with Databricks GPT-5 (OpenAI model via OpenAI-compatible API)
+ const databricksGptModel = {
+ id: "databricks-gpt-5",
+ providerID: "databricks",
+ api: {
+ id: "databricks-gpt-5",
+ url: "https://my-workspace.cloud.databricks.com/serving-endpoints",
+ npm: "@ai-sdk/openai-compatible",
+ },
+ name: "GPT-5 (Databricks)",
+ capabilities: {
+ temperature: true,
+ reasoning: true,
+ attachment: true,
+ toolcall: true,
+ input: { text: true, audio: false, image: true, video: false, pdf: false },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
+ interleaved: false,
+ },
+ cost: {
+ input: 1.25,
+ output: 10,
+ cache: { read: 0.125, write: 0 },
+ },
+ limit: {
+ context: 400000,
+ output: 128000,
+ },
+ status: "active",
+ options: {},
+ headers: {},
+ } as any
+
+ // Test with Databricks Gemini (Google model via OpenAI-compatible API)
+ const databricksGeminiModel = {
+ id: "databricks-gemini-3-pro",
+ providerID: "databricks",
+ api: {
+ id: "databricks-gemini-3-pro",
+ url: "https://my-workspace.cloud.databricks.com/serving-endpoints",
+ npm: "@ai-sdk/openai-compatible",
+ },
+ name: "Gemini 3 Pro (Databricks)",
+ capabilities: {
+ temperature: true,
+ reasoning: true,
+ attachment: true,
+ toolcall: true,
+ input: { text: true, audio: true, image: true, video: true, pdf: false },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
+ interleaved: false,
+ },
+ cost: {
+ input: 2,
+ output: 12,
+ cache: { read: 0.2, write: 0 },
+ },
+ limit: {
+ context: 1000000,
+ output: 65536,
+ },
+ status: "active",
+ options: {},
+ headers: {},
+ } as any
+
+ // Use databricksClaudeModel as the default for existing tests
+ const databricksModel = databricksClaudeModel
+
+ test("filters out messages with empty string content", () => {
+ const msgs = [
+ { role: "user", content: "Hello" },
+ { role: "assistant", content: "" },
+ { role: "user", content: "World" },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksModel, {})
+
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("Hello")
+ expect(result[1].content).toBe("World")
+ })
+
+ test("filters out empty text parts from array content", () => {
+ const msgs = [
+ {
+ role: "assistant",
+ content: [
+ { type: "text", text: "" },
+ { type: "text", text: "Hello" },
+ { type: "text", text: "" },
+ ],
+ },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksModel, {})
+
+ expect(result).toHaveLength(1)
+ expect(result[0].content).toHaveLength(1)
+ expect(result[0].content[0]).toMatchObject({ type: "text", text: "Hello" })
+ })
+
+ test("keeps tool-call parts when text parts are empty", () => {
+ const msgs = [
+ {
+ role: "assistant",
+ content: [
+ { type: "text", text: "" },
+ { type: "tool-call", toolCallId: "123", toolName: "bash", input: { command: "ls" } },
+ ],
+ },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksModel, {})
+
+ expect(result).toHaveLength(1)
+ expect(result[0].content).toHaveLength(1)
+ expect(result[0].content[0]).toMatchObject({
+ type: "tool-call",
+ toolCallId: "123",
+ toolName: "bash",
+ input: { command: "ls" },
+ })
+ })
+
+ test("keeps tool-result parts when text parts are empty", () => {
+ const msgs = [
+ {
+ role: "tool",
+ content: [{ type: "tool-result", toolCallId: "123", toolName: "bash", result: "output" }],
+ },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksModel, {})
+
+ expect(result).toHaveLength(1)
+ expect(result[0].content).toHaveLength(1)
+ })
+
+ test("removes entire message when all parts are empty", () => {
+ const msgs = [
+ { role: "user", content: "Hello" },
+ {
+ role: "assistant",
+ content: [
+ { type: "text", text: "" },
+ { type: "reasoning", text: "" },
+ ],
+ },
+ { role: "user", content: "World" },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksModel, {})
+
+ expect(result).toHaveLength(2)
+ expect(result[0].content).toBe("Hello")
+ expect(result[1].content).toBe("World")
+ })
+
+ test("handles assistant message with only tool call (no text)", () => {
+ const msgs = [
+ { role: "user", content: "Run a command" },
+ {
+ role: "assistant",
+ content: [{ type: "tool-call", toolCallId: "call_123", toolName: "bash", input: { command: "ls" } }],
+ },
+ {
+ role: "tool",
+ content: [{ type: "tool-result", toolCallId: "call_123", toolName: "bash", result: "file1.txt" }],
+ },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksModel, {})
+
+ expect(result).toHaveLength(3)
+ // Assistant message should just have tool call, no text
+ expect(result[1].content).toHaveLength(1)
+ expect(result[1].content[0]).toMatchObject({ type: "tool-call", toolCallId: "call_123" })
+ // Tool result should be preserved
+ expect(result[2].content).toHaveLength(1)
+ expect(result[2].content[0]).toMatchObject({ type: "tool-result", toolCallId: "call_123" })
+ })
+
+ test("handles empty text alongside tool call (empty text should be filtered)", () => {
+ const msgs = [
+ {
+ role: "assistant",
+ content: [
+ { type: "text", text: "" },
+ { type: "tool-call", toolCallId: "call_123", toolName: "bash", input: { command: "ls" } },
+ { type: "text", text: "" },
+ ],
+ },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksModel, {})
+
+ expect(result).toHaveLength(1)
+ // Empty text parts should be filtered, only tool call remains
+ expect(result[0].content).toHaveLength(1)
+ expect(result[0].content[0]).toMatchObject({ type: "tool-call" })
+ })
+
+ // Explicit tool calling tests for each Databricks model type
+
+ describe("Databricks Claude (Anthropic) - tool calling", () => {
+ test("filters empty text and keeps tool calls", () => {
+ const msgs = [
+ { role: "user", content: "Run a command" },
+ {
+ role: "assistant",
+ content: [
+ { type: "text", text: "" },
+ { type: "tool-call", toolCallId: "claude_call_1", toolName: "bash", input: { command: "echo hello" } },
+ ],
+ },
+ {
+ role: "tool",
+ content: [{ type: "tool-result", toolCallId: "claude_call_1", toolName: "bash", result: "hello" }],
+ },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksClaudeModel, {})
+
+ expect(result).toHaveLength(3)
+ // Assistant message: empty text filtered, tool call preserved
+ expect(result[1].content).toHaveLength(1)
+ expect(result[1].content[0]).toMatchObject({
+ type: "tool-call",
+ toolCallId: "claude_call_1",
+ toolName: "bash",
+ })
+ // Tool result preserved
+ expect(result[2].content[0]).toMatchObject({
+ type: "tool-result",
+ toolCallId: "claude_call_1",
+ })
+ })
+
+ test("handles multiple tool calls with empty text", () => {
+ const msgs = [
+ {
+ role: "assistant",
+ content: [
+ { type: "text", text: "" },
+ { type: "tool-call", toolCallId: "call_1", toolName: "read", input: { file: "foo.ts" } },
+ { type: "text", text: "" },
+ { type: "tool-call", toolCallId: "call_2", toolName: "edit", input: { file: "bar.ts" } },
+ { type: "text", text: "" },
+ ],
+ },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksClaudeModel, {})
+
+ expect(result).toHaveLength(1)
+ // All empty text parts filtered, both tool calls preserved
+ expect(result[0].content).toHaveLength(2)
+ expect(result[0].content[0]).toMatchObject({ type: "tool-call", toolCallId: "call_1" })
+ expect(result[0].content[1]).toMatchObject({ type: "tool-call", toolCallId: "call_2" })
+ })
+ })
+
+ describe("Databricks GPT-5 (OpenAI) - tool calling", () => {
+ test("filters empty text and keeps tool calls", () => {
+ const msgs = [
+ { role: "user", content: "List files" },
+ {
+ role: "assistant",
+ content: [
+ { type: "text", text: "" },
+ { type: "tool-call", toolCallId: "gpt_call_1", toolName: "bash", input: { command: "ls -la" } },
+ ],
+ },
+ {
+ role: "tool",
+ content: [{ type: "tool-result", toolCallId: "gpt_call_1", toolName: "bash", result: "total 0\ndrwxr-xr-x" }],
+ },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksGptModel, {})
+
+ expect(result).toHaveLength(3)
+ // Assistant message: empty text filtered, tool call preserved
+ expect(result[1].content).toHaveLength(1)
+ expect(result[1].content[0]).toMatchObject({
+ type: "tool-call",
+ toolCallId: "gpt_call_1",
+ toolName: "bash",
+ })
+ // Tool result preserved
+ expect(result[2].content[0]).toMatchObject({
+ type: "tool-result",
+ toolCallId: "gpt_call_1",
+ })
+ })
+
+ test("handles reasoning with tool calls (empty reasoning filtered)", () => {
+ const msgs = [
+ {
+ role: "assistant",
+ content: [
+ { type: "reasoning", text: "" },
+ { type: "tool-call", toolCallId: "gpt_reason_call", toolName: "read", input: { file: "config.json" } },
+ ],
+ },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksGptModel, {})
+
+ expect(result).toHaveLength(1)
+ // Empty reasoning filtered, tool call preserved
+ expect(result[0].content).toHaveLength(1)
+ expect(result[0].content[0]).toMatchObject({ type: "tool-call", toolCallId: "gpt_reason_call" })
+ })
+ })
+
+ describe("Databricks Gemini (Google) - tool calling", () => {
+ test("filters empty text and keeps tool calls", () => {
+ const msgs = [
+ { role: "user", content: "Search for files" },
+ {
+ role: "assistant",
+ content: [
+ { type: "text", text: "" },
+ { type: "tool-call", toolCallId: "gemini_call_1", toolName: "glob", input: { pattern: "**/*.ts" } },
+ ],
+ },
+ {
+ role: "tool",
+ content: [
+ { type: "tool-result", toolCallId: "gemini_call_1", toolName: "glob", result: "src/index.ts\nsrc/app.ts" },
+ ],
+ },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksGeminiModel, {})
+
+ expect(result).toHaveLength(3)
+ // Assistant message: empty text filtered, tool call preserved
+ expect(result[1].content).toHaveLength(1)
+ expect(result[1].content[0]).toMatchObject({
+ type: "tool-call",
+ toolCallId: "gemini_call_1",
+ toolName: "glob",
+ })
+ // Tool result preserved
+ expect(result[2].content[0]).toMatchObject({
+ type: "tool-result",
+ toolCallId: "gemini_call_1",
+ })
+ })
+
+ test("handles multi-turn conversation with tools", () => {
+ const msgs = [
+ { role: "user", content: "Read the config" },
+ {
+ role: "assistant",
+ content: [
+ { type: "text", text: "" },
+ { type: "tool-call", toolCallId: "gem_1", toolName: "read", input: { file: "config.json" } },
+ ],
+ },
+ {
+ role: "tool",
+ content: [{ type: "tool-result", toolCallId: "gem_1", toolName: "read", result: '{"debug": true}' }],
+ },
+ {
+ role: "assistant",
+ content: [
+ { type: "text", text: "The config has debug enabled. Let me update it." },
+ {
+ type: "tool-call",
+ toolCallId: "gem_2",
+ toolName: "edit",
+ input: { file: "config.json", content: '{"debug": false}' },
+ },
+ ],
+ },
+ {
+ role: "tool",
+ content: [{ type: "tool-result", toolCallId: "gem_2", toolName: "edit", result: "File updated" }],
+ },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksGeminiModel, {})
+
+ expect(result).toHaveLength(5)
+ // First assistant: only tool call (empty text filtered)
+ expect(result[1].content).toHaveLength(1)
+ expect(result[1].content[0]).toMatchObject({ type: "tool-call" })
+ // Second assistant: text + tool call preserved
+ expect(result[3].content).toHaveLength(2)
+ expect(result[3].content[0]).toMatchObject({
+ type: "text",
+ text: "The config has debug enabled. Let me update it.",
+ })
+ expect(result[3].content[1]).toMatchObject({ type: "tool-call" })
+ })
+ })
+})
+
describe("ProviderTransform.message - anthropic empty content filtering", () => {
const anthropicModel = {
id: "anthropic/claude-3-5-sonnet",
@@ -1928,3 +2364,655 @@ describe("ProviderTransform.variants", () => {
})
})
})
+
+describe("ProviderTransform.message - Databricks prompt caching", () => {
+ const databricksGptModel = {
+ id: "databricks-gpt-5",
+ providerID: "databricks",
+ api: {
+ id: "databricks-gpt-5",
+ url: "https://my-workspace.cloud.databricks.com/serving-endpoints",
+ npm: "@ai-sdk/openai-compatible",
+ },
+ name: "GPT-5 (Databricks)",
+ capabilities: {
+ temperature: true,
+ reasoning: true,
+ attachment: true,
+ toolcall: true,
+ input: { text: true, audio: false, image: true, video: false, pdf: false },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
+ interleaved: false,
+ },
+ cost: {
+ input: 1.25,
+ output: 10,
+ cache: { read: 0.125, write: 0 },
+ },
+ limit: {
+ context: 400000,
+ output: 128000,
+ },
+ status: "active",
+ options: {},
+ headers: {},
+ } as any
+
+ const databricksClaudeModel = {
+ id: "databricks-claude-sonnet-4",
+ providerID: "databricks",
+ api: {
+ id: "databricks-claude-sonnet-4",
+ url: "https://my-workspace.cloud.databricks.com/serving-endpoints",
+ npm: "@ai-sdk/openai-compatible",
+ },
+ name: "Claude Sonnet 4 (Databricks)",
+ capabilities: {
+ temperature: true,
+ reasoning: false,
+ attachment: true,
+ toolcall: true,
+ input: { text: true, audio: false, image: true, video: false, pdf: false },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
+ interleaved: false,
+ },
+ cost: {
+ input: 3,
+ output: 15,
+ cache: { read: 0.3, write: 0 },
+ },
+ limit: {
+ context: 200000,
+ output: 64000,
+ },
+ status: "active",
+ options: {},
+ headers: {},
+ } as any
+
+ const databricksNoCacheModel = {
+ id: "databricks-no-cache-model",
+ providerID: "databricks",
+ api: {
+ id: "databricks-no-cache-model",
+ url: "https://my-workspace.cloud.databricks.com/serving-endpoints",
+ npm: "@ai-sdk/openai-compatible",
+ },
+ name: "No Cache Model (Databricks)",
+ capabilities: {
+ temperature: true,
+ reasoning: false,
+ attachment: false,
+ toolcall: true,
+ input: { text: true, audio: false, image: false, video: false, pdf: false },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
+ interleaved: false,
+ },
+ cost: {
+ input: 1,
+ output: 2,
+ cache: { read: 0, write: 0 }, // No cache support
+ },
+ limit: {
+ context: 100000,
+ output: 10000,
+ },
+ status: "active",
+ options: {},
+ headers: {},
+ } as any
+
+ test("applies cache_control to system messages for Databricks GPT model", () => {
+ const msgs = [
+ { role: "system", content: "You are a helpful assistant." },
+ { role: "user", content: "Hello" },
+ { role: "assistant", content: "Hi there!" },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksGptModel, {}) as any[]
+
+ // System message should have cache control
+ const systemMsg = result.find((m) => m.role === "system")
+ expect(systemMsg).toBeDefined()
+ expect(systemMsg!.providerOptions).toBeDefined()
+ expect(systemMsg!.providerOptions.openaiCompatible).toEqual({ cache_control: { type: "ephemeral" } })
+ })
+
+ test("applies cache_control to system messages for Databricks Claude model", () => {
+ const msgs = [
+ { role: "system", content: "You are a coding assistant." },
+ { role: "user", content: "Write code" },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksClaudeModel, {}) as any[]
+
+ const systemMsg = result.find((m) => m.role === "system")
+ expect(systemMsg).toBeDefined()
+ expect(systemMsg!.providerOptions).toBeDefined()
+ expect(systemMsg!.providerOptions.openaiCompatible).toEqual({ cache_control: { type: "ephemeral" } })
+ })
+
+ test("applies cache_control to last messages in conversation", () => {
+ const msgs = [
+ { role: "system", content: "System prompt" },
+ { role: "user", content: "First message" },
+ { role: "assistant", content: "First response" },
+ { role: "user", content: "Second message" },
+ { role: "assistant", content: "Second response" },
+ { role: "user", content: "Third message" },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksGptModel, {}) as any[]
+
+ // Last 2 non-system messages should have cache control
+ const lastTwo = result.filter((m) => m.role !== "system").slice(-2)
+ expect(lastTwo).toHaveLength(2)
+
+ for (const msg of lastTwo) {
+ expect(msg.providerOptions).toBeDefined()
+ expect(msg.providerOptions!.openaiCompatible).toEqual({ cache_control: { type: "ephemeral" } })
+ }
+ })
+
+ test("does not apply caching for Databricks model without cache cost", () => {
+ const msgs = [
+ { role: "system", content: "You are a helpful assistant." },
+ { role: "user", content: "Hello" },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksNoCacheModel, {}) as any[]
+
+ // No cache control should be applied when cache.read is 0
+ const systemMsg = result.find((m) => m.role === "system")
+ expect(systemMsg).toBeDefined()
+ expect(systemMsg!.providerOptions?.openaiCompatible?.cache_control).toBeUndefined()
+ })
+
+ test("applies cache_control to array content for Databricks models", () => {
+ const msgs = [
+ { role: "system", content: "System prompt" },
+ {
+ role: "user",
+ content: [
+ { type: "text", text: "Hello" },
+ { type: "text", text: "World" },
+ ],
+ },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksGptModel, {}) as any[]
+
+ // User message with array content should have cache control on last content part
+ const userMsg = result.find((m) => m.role === "user")
+ expect(userMsg).toBeDefined()
+ expect(userMsg!.content).toHaveLength(2)
+
+ // Last content part should have providerOptions with cache_control
+ const lastPart = userMsg!.content[userMsg!.content.length - 1]
+ expect(lastPart.providerOptions).toBeDefined()
+ expect(lastPart.providerOptions.openaiCompatible).toEqual({ cache_control: { type: "ephemeral" } })
+ })
+
+ test("caching is applied to first 2 system messages", () => {
+ const msgs = [
+ { role: "system", content: "First system message" },
+ { role: "system", content: "Second system message" },
+ { role: "system", content: "Third system message" },
+ { role: "user", content: "Hello" },
+ ] as any[]
+
+ const result = ProviderTransform.message(msgs, databricksGptModel, {}) as any[]
+
+ const systemMsgs = result.filter((m) => m.role === "system")
+
+ // First two system messages should have cache control
+ expect(systemMsgs[0].providerOptions?.openaiCompatible).toEqual({ cache_control: { type: "ephemeral" } })
+ expect(systemMsgs[1].providerOptions?.openaiCompatible).toEqual({ cache_control: { type: "ephemeral" } })
+
+ // Third system message should NOT have cache control
+ expect(systemMsgs[2].providerOptions?.openaiCompatible?.cache_control).toBeUndefined()
+ })
+})
+
+describe("ProviderTransform.schema - Databricks Gemini $schema stripping", () => {
+ const databricksGeminiModel = {
+ id: "databricks-gemini-3-pro",
+ providerID: "databricks",
+ api: {
+ id: "databricks-gemini-3-pro",
+ url: "https://workspace.cloud.databricks.com/serving-endpoints",
+ npm: "@ai-sdk/openai-compatible",
+ },
+ name: "Gemini 3 Pro (Databricks)",
+ capabilities: {
+ temperature: true,
+ reasoning: true,
+ attachment: true,
+ toolcall: true,
+ input: { text: true, audio: true, image: true, video: true, pdf: false },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
+ interleaved: false,
+ },
+ cost: {
+ input: 2,
+ output: 12,
+ cache: { read: 0.2, write: 0 },
+ },
+ limit: {
+ context: 1000000,
+ output: 65536,
+ },
+ status: "active",
+ options: {},
+ headers: {},
+ release_date: "2025-11-20",
+ } as any
+
+ const databricksGptModel2 = {
+ id: "databricks-gpt-5",
+ providerID: "databricks",
+ api: {
+ id: "databricks-gpt-5",
+ url: "https://workspace.cloud.databricks.com/serving-endpoints",
+ npm: "@ai-sdk/openai-compatible",
+ },
+ name: "GPT-5 (Databricks)",
+ capabilities: {
+ temperature: true,
+ reasoning: true,
+ attachment: true,
+ toolcall: true,
+ input: { text: true, audio: false, image: true, video: false, pdf: false },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
+ interleaved: false,
+ },
+ cost: {
+ input: 1.25,
+ output: 10,
+ cache: { read: 0.125, write: 0 },
+ },
+ limit: {
+ context: 400000,
+ output: 128000,
+ },
+ status: "active",
+ options: {},
+ headers: {},
+ release_date: "2025-06-12",
+ } as any
+
+ test("strips $schema field from Databricks Gemini tool schemas", () => {
+ const schema = {
+ $schema: "https://json-schema.org/draft/2020-12/schema",
+ type: "object",
+ properties: {
+ name: { type: "string" },
+ },
+ } as any
+
+ const result = ProviderTransform.schema(databricksGeminiModel, schema) as any
+
+ expect(result.$schema).toBeUndefined()
+ expect(result.type).toBe("object")
+ expect(result.properties.name.type).toBe("string")
+ })
+
+ test("strips $defs and definitions from Databricks Gemini tool schemas", () => {
+ const schema = {
+ type: "object",
+ $defs: {
+ MyType: { type: "string" },
+ },
+ definitions: {
+ AnotherType: { type: "number" },
+ },
+ properties: {
+ name: { type: "string" },
+ },
+ } as any
+
+ const result = ProviderTransform.schema(databricksGeminiModel, schema) as any
+
+ expect(result.$defs).toBeUndefined()
+ expect(result.definitions).toBeUndefined()
+ expect(result.properties.name.type).toBe("string")
+ })
+
+ test("resolves $ref references inline for Databricks Gemini", () => {
+ const schema = {
+ type: "object",
+ $defs: {
+ Address: {
+ type: "object",
+ properties: {
+ street: { type: "string" },
+ city: { type: "string" },
+ },
+ },
+ },
+ properties: {
+ homeAddress: { $ref: "#/$defs/Address" },
+ workAddress: { $ref: "#/$defs/Address" },
+ },
+ } as any
+
+ const result = ProviderTransform.schema(databricksGeminiModel, schema) as any
+
+ // $defs should be stripped
+ expect(result.$defs).toBeUndefined()
+
+ // $ref should be resolved inline
+ expect(result.properties.homeAddress.type).toBe("object")
+ expect(result.properties.homeAddress.properties.street.type).toBe("string")
+ expect(result.properties.homeAddress.$ref).toBeUndefined()
+
+ expect(result.properties.workAddress.type).toBe("object")
+ expect(result.properties.workAddress.properties.city.type).toBe("string")
+ })
+
+ test("resolves nested $ref references", () => {
+ const schema = {
+ type: "object",
+ $defs: {
+ Person: {
+ type: "object",
+ properties: {
+ name: { type: "string" },
+ address: { $ref: "#/$defs/Address" },
+ },
+ },
+ Address: {
+ type: "object",
+ properties: {
+ city: { type: "string" },
+ },
+ },
+ },
+ properties: {
+ person: { $ref: "#/$defs/Person" },
+ },
+ } as any
+
+ const result = ProviderTransform.schema(databricksGeminiModel, schema) as any
+
+ expect(result.properties.person.type).toBe("object")
+ expect(result.properties.person.properties.name.type).toBe("string")
+ expect(result.properties.person.properties.address.type).toBe("object")
+ expect(result.properties.person.properties.address.properties.city.type).toBe("string")
+ })
+
+ test("does NOT strip $schema for non-Gemini Databricks models", () => {
+ const schema = {
+ $schema: "https://json-schema.org/draft/2020-12/schema",
+ type: "object",
+ properties: {
+ name: { type: "string" },
+ },
+ } as any
+
+ const result = ProviderTransform.schema(databricksGptModel2, schema) as any
+
+ // GPT models keep $schema (they handle it fine)
+ expect(result.$schema).toBe("https://json-schema.org/draft/2020-12/schema")
+ })
+
+ test("handles schemas with both $schema and $ref", () => {
+ const schema = {
+ $schema: "https://json-schema.org/draft/2020-12/schema",
+ type: "object",
+ $defs: {
+ Item: { type: "string" },
+ },
+ properties: {
+ items: {
+ type: "array",
+ items: { $ref: "#/$defs/Item" },
+ },
+ },
+ } as any
+
+ const result = ProviderTransform.schema(databricksGeminiModel, schema) as any
+
+ expect(result.$schema).toBeUndefined()
+ expect(result.$defs).toBeUndefined()
+ expect(result.properties.items.type).toBe("array")
+ expect(result.properties.items.items.type).toBe("string")
+ })
+
+ test("preserves other schema fields while stripping $schema", () => {
+ const schema = {
+ $schema: "https://json-schema.org/draft/2020-12/schema",
+ type: "object",
+ title: "MyTool",
+ description: "A useful tool",
+ required: ["name"],
+ properties: {
+ name: { type: "string", description: "The name" },
+ },
+ } as any
+
+ const result = ProviderTransform.schema(databricksGeminiModel, schema) as any
+
+ expect(result.$schema).toBeUndefined()
+ expect(result.title).toBe("MyTool")
+ expect(result.description).toBe("A useful tool")
+ expect(result.required).toEqual(["name"])
+ expect(result.properties.name.description).toBe("The name")
+ })
+})
+
+describe("ProviderTransform.schema - Databricks Gemini advanced $ref and $schema handling", () => {
+ const databricksGeminiModel = {
+ id: "databricks-gemini-3-pro",
+ providerID: "databricks",
+ api: {
+ id: "databricks-gemini-3-pro",
+ url: "https://workspace.cloud.databricks.com/serving-endpoints",
+ npm: "@ai-sdk/openai-compatible",
+ },
+ name: "Gemini 3 Pro (Databricks)",
+ capabilities: {
+ temperature: true,
+ reasoning: true,
+ attachment: true,
+ toolcall: true,
+ input: { text: true, audio: true, image: true, video: true, pdf: false },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
+ interleaved: false,
+ },
+ cost: { input: 2, output: 12, cache: { read: 0.2, write: 0 } },
+ limit: { context: 1000000, output: 65536 },
+ status: "active",
+ options: {},
+ headers: {},
+ } as any
+
+ const openaiModel = {
+ id: "gpt-5",
+ providerID: "openai",
+ api: {
+ id: "gpt-5",
+ url: "https://api.openai.com",
+ npm: "@ai-sdk/openai",
+ },
+ name: "GPT-5",
+ capabilities: {
+ temperature: true,
+ reasoning: true,
+ attachment: true,
+ toolcall: true,
+ input: { text: true, audio: false, image: true, video: false, pdf: false },
+ output: { text: true, audio: false, image: false, video: false, pdf: false },
+ interleaved: false,
+ },
+ cost: { input: 1.25, output: 10, cache: { read: 0.125, write: 0 } },
+ limit: { context: 400000, output: 128000 },
+ status: "active",
+ options: {},
+ headers: {},
+ } as any
+
+ test("strips $schema from MCP tool schemas with deeply nested properties", () => {
+ // MCP tools include $schema at root - Gemini rejects this
+ const schema = {
+ $schema: "https://json-schema.org/draft/2020-12/schema",
+ type: "object",
+ properties: {
+ query: { type: "string" },
+ options: {
+ type: "object",
+ properties: {
+ nested: {
+ type: "object",
+ $schema: "https://json-schema.org/draft/2020-12/schema",
+ properties: {
+ deep: { type: "string" },
+ },
+ },
+ },
+ },
+ },
+ } as any
+
+ const result = ProviderTransform.schema(databricksGeminiModel, schema) as any
+
+ expect(result.$schema).toBeUndefined()
+ // Nested $schema should also be stripped
+ expect(result.properties.options.properties.nested.$schema).toBeUndefined()
+ expect(result.properties.options.properties.nested.type).toBe("object")
+ expect(result.properties.options.properties.nested.properties.deep.type).toBe("string")
+ })
+
+ test("handles circular $ref without infinite loop", () => {
+ // TreeNode references itself - must not infinite loop
+ const schema = {
+ type: "object",
+ $defs: {
+ TreeNode: {
+ type: "object",
+ properties: {
+ value: { type: "string" },
+ children: {
+ type: "array",
+ items: { $ref: "#/$defs/TreeNode" },
+ },
+ },
+ },
+ },
+ properties: {
+ root: { $ref: "#/$defs/TreeNode" },
+ },
+ } as any
+
+ // Should not hang - must complete within reasonable time
+ const result = ProviderTransform.schema(databricksGeminiModel, schema) as any
+
+ // Root should be resolved
+ expect(result.properties.root.type).toBe("object")
+ expect(result.properties.root.properties.value.type).toBe("string")
+ // Circular ref should be replaced with {type: "object"} fallback
+ expect(result.properties.root.properties.children.type).toBe("array")
+ expect(result.properties.root.properties.children.items).toBeDefined()
+ // Should not have $ref remaining
+ expect(result.properties.root.properties.children.items.$ref).toBeUndefined()
+ })
+
+ test("expands $ref with definitions (legacy format)", () => {
+ const schema = {
+ type: "object",
+ definitions: {
+ Color: {
+ type: "string",
+ enum: ["red", "green", "blue"],
+ },
+ },
+ properties: {
+ favoriteColor: { $ref: "#/definitions/Color" },
+ },
+ } as any
+
+ const result = ProviderTransform.schema(databricksGeminiModel, schema) as any
+
+ expect(result.definitions).toBeUndefined()
+ expect(result.properties.favoriteColor.type).toBe("string")
+ expect(result.properties.favoriteColor.enum).toEqual(["red", "green", "blue"])
+ expect(result.properties.favoriteColor.$ref).toBeUndefined()
+ })
+
+ test("preserves description alongside $ref", () => {
+ const schema = {
+ type: "object",
+ $defs: {
+ Address: {
+ type: "object",
+ properties: {
+ street: { type: "string" },
+ },
+ },
+ },
+ properties: {
+ home: {
+ $ref: "#/$defs/Address",
+ description: "Home address override",
+ },
+ },
+ } as any
+
+ const result = ProviderTransform.schema(databricksGeminiModel, schema) as any
+
+ // The resolved ref should be inlined
+ expect(result.properties.home.type).toBe("object")
+ expect(result.properties.home.properties.street.type).toBe("string")
+ expect(result.properties.home.$ref).toBeUndefined()
+ // The local description should be preserved (overrides resolved ref)
+ expect(result.properties.home.description).toBe("Home address override")
+ })
+
+ test("does not expand $ref for non-Databricks providers", () => {
+ const schema = {
+ type: "object",
+ $defs: {
+ Item: { type: "string" },
+ },
+ properties: {
+ name: { $ref: "#/$defs/Item" },
+ },
+ } as any
+
+ const result = ProviderTransform.schema(openaiModel, schema) as any
+
+ // OpenAI provider should keep $ref as-is
+ expect(result.properties.name.$ref).toBe("#/$defs/Item")
+ expect(result.$defs).toBeDefined()
+ })
+
+ test("expands $ref in array items", () => {
+ const schema = {
+ type: "object",
+ $defs: {
+ QuestionOption: {
+ type: "object",
+ properties: {
+ label: { type: "string" },
+ description: { type: "string" },
+ },
+ required: ["label", "description"],
+ },
+ },
+ properties: {
+ options: {
+ type: "array",
+ items: { $ref: "#/$defs/QuestionOption" },
+ },
+ },
+ } as any
+
+ const result = ProviderTransform.schema(databricksGeminiModel, schema) as any
+
+ expect(result.$defs).toBeUndefined()
+ expect(result.properties.options.type).toBe("array")
+ expect(result.properties.options.items.type).toBe("object")
+ expect(result.properties.options.items.properties.label.type).toBe("string")
+ expect(result.properties.options.items.$ref).toBeUndefined()
+ })
+})
diff --git a/packages/sdk/js/src/v2/gen/types.gen.ts b/packages/sdk/js/src/v2/gen/types.gen.ts
index a8c61c4daae..f2b43e9cd8b 100644
--- a/packages/sdk/js/src/v2/gen/types.gen.ts
+++ b/packages/sdk/js/src/v2/gen/types.gen.ts
@@ -1811,28 +1811,6 @@ export type BadRequestError = {
success: false
}
-export type OAuth = {
- type: "oauth"
- refresh: string
- access: string
- expires: number
- accountId?: string
- enterpriseUrl?: string
-}
-
-export type ApiAuth = {
- type: "api"
- key: string
-}
-
-export type WellKnownAuth = {
- type: "wellknown"
- key: string
- token: string
-}
-
-export type Auth = OAuth | ApiAuth | WellKnownAuth
-
export type NotFoundError = {
name: "NotFoundError"
data: {
@@ -2156,6 +2134,29 @@ export type FormatterStatus = {
enabled: boolean
}
+export type OAuth = {
+ type: "oauth"
+ refresh: string
+ access: string
+ expires: number
+ accountId?: string
+ enterpriseUrl?: string
+}
+
+export type ApiAuth = {
+ type: "api"
+ key: string
+ host?: string
+}
+
+export type WellKnownAuth = {
+ type: "wellknown"
+ key: string
+ token: string
+}
+
+export type Auth = OAuth | ApiAuth | WellKnownAuth
+
export type GlobalHealthData = {
body?: never
path?: never
diff --git a/packages/ui/package.json b/packages/ui/package.json
index 5db88629fd6..23cdab1eb50 100644
--- a/packages/ui/package.json
+++ b/packages/ui/package.json
@@ -30,8 +30,10 @@
"@tailwindcss/vite": "catalog:",
"@tsconfig/node22": "catalog:",
"@types/bun": "catalog:",
+ "@types/dompurify": "3.2.0",
"@types/katex": "0.16.7",
"@types/luxon": "catalog:",
+ "@types/strip-ansi": "5.2.1",
"tailwindcss": "catalog:",
"typescript": "catalog:",
"vite": "catalog:",
diff --git a/packages/web/src/content/docs/providers.mdx b/packages/web/src/content/docs/providers.mdx
index 2a803945288..cb917ce9f10 100644
--- a/packages/web/src/content/docs/providers.mdx
+++ b/packages/web/src/content/docs/providers.mdx
@@ -1740,6 +1740,150 @@ Some useful routing options:
---
+### Databricks
+
+To use Databricks Foundation Model APIs with OpenCode:
+
+#### Quick Start with Databricks CLI (Recommended)
+
+If you have the [Databricks CLI](https://docs.databricks.com/en/dev-tools/cli/index.html) installed and authenticated, OpenCode will automatically detect and use your credentials:
+
+1. Authenticate with the Databricks CLI:
+
+ ```bash
+ databricks auth login --host https://your-workspace.cloud.databricks.com
+ ```
+
+2. Run the `/connect` command and search for **Databricks**.
+
+ ```txt
+ /connect
+ ```
+
+ :::tip
+ If you have valid CLI credentials, OpenCode will automatically detect them and skip the manual authentication prompts.
+ :::
+
+3. Run the `/models` command to see available Databricks models.
+
+ ```txt
+ /models
+ ```
+
+#### Manual Setup
+
+If you don't have the Databricks CLI, you can authenticate manually:
+
+1. Run the `/connect` command and search for **Databricks**.
+
+ ```txt
+ /connect
+ ```
+
+2. Enter your Databricks workspace URL and Personal Access Token.
+
+ ```txt
+ ┌ Databricks Host URL
+ │ https://your-workspace.cloud.databricks.com
+ │
+ └ enter
+ ```
+
+ ```txt
+ ┌ API key (Personal Access Token)
+ │
+ │
+ └ enter
+ ```
+
+ :::tip
+ Create a Personal Access Token at: Workspace → Settings → Developer → Access tokens
+ :::
+
+3. Run the `/models` command to see available Databricks models.
+
+ ```txt
+ /models
+ ```
+
+#### Authentication Methods
+
+Databricks supports multiple authentication methods (in priority order):
+
+1. **Stored credentials**: Credentials saved via `/connect` command
+2. **Configuration file**: Host URL from `opencode.json` config
+3. **Environment variables**: `DATABRICKS_HOST` and `DATABRICKS_TOKEN`
+4. **Databricks CLI profile**: Host from `~/.databrickscfg` and token from `~/.databricks/token-cache.json`
+5. **OAuth M2M**: Set `DATABRICKS_CLIENT_ID` and `DATABRICKS_CLIENT_SECRET` environment variables
+6. **Azure AD Service Principal**: Set `ARM_CLIENT_ID`, `ARM_CLIENT_SECRET`, and `ARM_TENANT_ID` environment variables
+7. **Azure CLI**: Automatically used for Azure Databricks workspaces when logged in with `az login`
+
+#### Using Databricks CLI Profiles
+
+OpenCode can read your workspace host from `~/.databrickscfg` profiles:
+
+```ini title="~/.databrickscfg"
+[DEFAULT]
+host = https://your-workspace.cloud.databricks.com
+
+[staging]
+host = https://staging-workspace.cloud.databricks.com
+```
+
+To use a specific profile, set the `DATABRICKS_CONFIG_PROFILE` environment variable or configure it in `opencode.json`:
+
+```bash
+DATABRICKS_CONFIG_PROFILE=staging opencode
+```
+
+Or in your config:
+
+```json title="opencode.json"
+{
+ "$schema": "https://opencode.ai/config.json",
+ "provider": {
+ "databricks": {
+ "options": {
+ "profile": "staging"
+ }
+ }
+ }
+}
+```
+
+:::tip
+When using Databricks CLI authentication, OpenCode automatically refreshes expired tokens from the CLI token cache (`~/.databricks/token-cache.json`). If your token expires, simply run `databricks auth login --profile ` to refresh it.
+:::
+
+#### Configuration
+
+You can also configure Databricks through your `opencode.json` file:
+
+```json title="opencode.json"
+{
+ "$schema": "https://opencode.ai/config.json",
+ "provider": {
+ "databricks": {
+ "options": {
+ "baseURL": "https://your-workspace.cloud.databricks.com/serving-endpoints"
+ },
+ "models": {
+ "custom-model": {
+ "name": "My Custom Model"
+ }
+ }
+ }
+ }
+}
+```
+
+#### Workspace URL Formats
+
+- **AWS/GCP**: `https://dbc-xxx.cloud.databricks.com`
+- **Azure**: `https://adb-xxx.azuredatabricks.net`
+
+---
+
## Custom provider
To add any **OpenAI-compatible** provider that's not listed in the `/connect` command: