From 19ace4436fb5f28de94d342143c067457af986ef Mon Sep 17 00:00:00 2001
From: massaindustries
Date: Thu, 29 Jan 2026 12:36:44 +0000
Subject: [PATCH 1/4] add-regolo-01

---
 providers/regolo-ai/logo.svg                  | 46 +++++++++++++++++++
 .../models/Llama-3.1-8B-Instruct.toml         | 24 ++++++++++
 .../models/Llama-3.3-70B-Instruct.toml        | 24 ++++++++++
 providers/regolo-ai/models/Qwen3-8B.toml      | 24 ++++++++++
 providers/regolo-ai/models/Qwen3-VL-32b.toml  | 26 +++++++++++
 .../regolo-ai/models/deepseek-r1-70b.toml     | 24 ++++++++++
 providers/regolo-ai/models/gpt-oss-120b.toml  | 24 ++++++++++
 .../regolo-ai/models/iQuest-coder-v1-40b.toml | 26 +++++++++++
 .../models/maestrale-chat-v0.4-beta.toml      | 24 ++++++++++
 .../regolo-ai/models/mistral-small3.2.toml    | 24 ++++++++++
 providers/regolo-ai/models/qwen3-30b.toml     | 24 ++++++++++
 .../regolo-ai/models/qwen3-coder-30b.toml     | 24 ++++++++++
 providers/regolo-ai/provider.toml             |  5 ++
 13 files changed, 319 insertions(+)
 create mode 100644 providers/regolo-ai/logo.svg
 create mode 100644 providers/regolo-ai/models/Llama-3.1-8B-Instruct.toml
 create mode 100644 providers/regolo-ai/models/Llama-3.3-70B-Instruct.toml
 create mode 100644 providers/regolo-ai/models/Qwen3-8B.toml
 create mode 100644 providers/regolo-ai/models/Qwen3-VL-32b.toml
 create mode 100644 providers/regolo-ai/models/deepseek-r1-70b.toml
 create mode 100644 providers/regolo-ai/models/gpt-oss-120b.toml
 create mode 100644 providers/regolo-ai/models/iQuest-coder-v1-40b.toml
 create mode 100644 providers/regolo-ai/models/maestrale-chat-v0.4-beta.toml
 create mode 100644 providers/regolo-ai/models/mistral-small3.2.toml
 create mode 100644 providers/regolo-ai/models/qwen3-30b.toml
 create mode 100644 providers/regolo-ai/models/qwen3-coder-30b.toml
 create mode 100644 providers/regolo-ai/provider.toml

diff --git a/providers/regolo-ai/logo.svg b/providers/regolo-ai/logo.svg
new file mode 100644
index 000000000..320803a36
--- /dev/null
+++ b/providers/regolo-ai/logo.svg
@@ -0,0 +1,46 @@
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/providers/regolo-ai/models/Llama-3.1-8B-Instruct.toml b/providers/regolo-ai/models/Llama-3.1-8B-Instruct.toml
new file mode 100644
index 000000000..e75d7de2e
--- /dev/null
+++ b/providers/regolo-ai/models/Llama-3.1-8B-Instruct.toml
@@ -0,0 +1,24 @@
+name = "Llama-3.1-8B-Instruct"
+family = "llama-3.1"
+release_date = "2025-04-07"
+last_updated = "2025-04-07"
+attachment = false
+reasoning = false
+temperature = true
+tool_call = true
+open_weights = false
+
+[description]
+text = "Llama-3.1-8B-Instruct is a compact multilingual language model by Meta, designed for instruction following and conversational use, with reliable performance in text understanding, code generation, and structured outputs."
+
+[cost]
+input = 0.05 # € per 1M tokens (0.00000005 * 1_000_000)
+output = 0.25 # € per 1M tokens (0.00000025 * 1_000_000)
+
+[limit]
+context = 120000
+output = 120000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/regolo-ai/models/Llama-3.3-70B-Instruct.toml b/providers/regolo-ai/models/Llama-3.3-70B-Instruct.toml
new file mode 100644
index 000000000..5a8a12c0b
--- /dev/null
+++ b/providers/regolo-ai/models/Llama-3.3-70B-Instruct.toml
@@ -0,0 +1,24 @@
+name = "Llama-3.3-70B-Instruct"
+family = "llama-3.3"
+release_date = "2025-04-28"
+last_updated = "2025-04-28"
+attachment = false
+reasoning = false
+temperature = true
+tool_call = true
+open_weights = false
+
+[description]
+text = "Llama-3.3-70B-Instruct is a 70B-parameter multilingual language model by Meta, designed for instruction following, long-context understanding, and structured reasoning, with strong capabilities in code generation and complex analytical tasks."
+
+[cost]
+input = 0.60 # € per 1M tokens (0.0000006 * 1_000_000)
+output = 2.70 # € per 1M tokens (0.0000027 * 1_000_000)
+
+[limit]
+context = 16000
+output = 16000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/regolo-ai/models/Qwen3-8B.toml b/providers/regolo-ai/models/Qwen3-8B.toml
new file mode 100644
index 000000000..cb4658b8c
--- /dev/null
+++ b/providers/regolo-ai/models/Qwen3-8B.toml
@@ -0,0 +1,24 @@
+name = "Qwen3-8B"
+family = "qwen3-8b"
+release_date = "2025-04-28"
+last_updated = "2025-04-28"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = false
+
+[description]
+text = "Qwen3-8B is a versatile 8.2-billion-parameter language model that can switch between a step-by-step 'thinking mode' for complex tasks and a fast 'non-thinking mode' for general conversation."
+
+[cost]
+input = 0.07 # € per 1M tokens (placeholder from Regolo pricing)
+output = 0.35 # € per 1M tokens (placeholder from Regolo pricing)
+
+[limit]
+context = 40960
+output = 40960
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/regolo-ai/models/Qwen3-VL-32b.toml b/providers/regolo-ai/models/Qwen3-VL-32b.toml
new file mode 100644
index 000000000..f09f4d1e8
--- /dev/null
+++ b/providers/regolo-ai/models/Qwen3-VL-32b.toml
@@ -0,0 +1,26 @@
+name = "qwen3-vl-32b"
+family = "qwen3-vl-32b"
+release_date = "2025-01-01"
+last_updated = "2025-01-01"
+attachment = true
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = false
+
+[description]
+text = "Qwen3-VL-32B is a 32-billion-parameter multimodal vision-language model from the Qwen3 family, supporting text, image, and video inputs."
+
+[cost]
+# Prices need verification against Regolo pricing page
+input = 0.50 # € per 1M tokens
+output = 2.50 # € per 1M tokens
+
+[limit]
+context = 128000
+output = 128000
+
+[modalities]
+input = ["text", "image"]
+output = ["text"]
+
diff --git a/providers/regolo-ai/models/deepseek-r1-70b.toml b/providers/regolo-ai/models/deepseek-r1-70b.toml
new file mode 100644
index 000000000..8565a6674
--- /dev/null
+++ b/providers/regolo-ai/models/deepseek-r1-70b.toml
@@ -0,0 +1,24 @@
+name = "deepseek-r1-70b"
+family = "deepseek-r1"
+release_date = "2025-01-20"
+last_updated = "2025-01-20"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = false
+
+[description]
+text = "DeepSeek-R1-Distill-Llama-70B is a 70B-parameter distilled LLM, combining reasoning, speed, and accuracy for code, math, and complex logic tasks."
+
+[cost]
+input = 0.60 # € per 1M tokens (0.0000006 * 1_000_000)
+output = 2.70 # € per 1M tokens (0.0000027 * 1_000_000)
+
+[limit]
+context = 128000
+output = 128000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/regolo-ai/models/gpt-oss-120b.toml b/providers/regolo-ai/models/gpt-oss-120b.toml
new file mode 100644
index 000000000..debc1d613
--- /dev/null
+++ b/providers/regolo-ai/models/gpt-oss-120b.toml
@@ -0,0 +1,24 @@
+name = "gpt-oss-120b"
+family = "gpt-oss"
+release_date = "2025-08-05"
+last_updated = "2025-08-05"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = false
+
+[description]
+text = "GPT-OSS-120B is an open-weight 117B-parameter Mixture-of-Experts model by OpenAI, using only 5.1B active parameters per token. Supports reasoning, chain-of-thought, tool use, and fine-tuning."
+
+[cost]
+input = 1.00 # € per 1M tokens (0.000001 * 1_000_000)
+output = 4.20 # € per 1M tokens (0.0000042 * 1_000_000)
+
+[limit]
+context = 128000
+output = 128000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/regolo-ai/models/iQuest-coder-v1-40b.toml b/providers/regolo-ai/models/iQuest-coder-v1-40b.toml
new file mode 100644
index 000000000..f511c623b
--- /dev/null
+++ b/providers/regolo-ai/models/iQuest-coder-v1-40b.toml
@@ -0,0 +1,26 @@
+name = "iQuest-coder-v1-40b"
+family = "iquest-coder-v1"
+release_date = "2025-12-10"
+last_updated = "2025-12-10"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = false
+
+[description]
+text = "iQuest-coder-v1-40b is a 40-billion-parameter open-source code generation model optimized for instruction-following and tool-calling tasks."
+
+[cost]
+# Prices need to be verified against Regolo AI pricing page
+input = 0.80 # € per 1M tokens
+output = 2.80 # € per 1M tokens
+
+[limit]
+context = 128000
+output = 128000
+
+[modalities]
+input = ["text"]
+output = ["text"]
+
diff --git a/providers/regolo-ai/models/maestrale-chat-v0.4-beta.toml b/providers/regolo-ai/models/maestrale-chat-v0.4-beta.toml
new file mode 100644
index 000000000..3b0a50d63
--- /dev/null
+++ b/providers/regolo-ai/models/maestrale-chat-v0.4-beta.toml
@@ -0,0 +1,24 @@
+name = "maestrale-chat-v0.4-beta"
+family = "maestrale-chat-v0.4-beta"
+release_date = "2025-01-01"
+last_updated = "2025-01-01"
+attachment = false
+reasoning = false
+temperature = true
+tool_call = true
+open_weights = false
+
+[description]
+text = "A Mistral-7B model adapted to Italian through continued pre-training on a curated, large-scale, high-quality corpus. Powered by mii-llm."
+
+[cost]
+input = 0.05 # € per 1M tokens (0.00000005 * 1_000_000)
+output = 0.25 # € per 1M tokens (0.00000025 * 1_000_000)
+
+[limit]
+context = 64000
+output = 64000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/regolo-ai/models/mistral-small3.2.toml b/providers/regolo-ai/models/mistral-small3.2.toml
new file mode 100644
index 000000000..cdef2e083
--- /dev/null
+++ b/providers/regolo-ai/models/mistral-small3.2.toml
@@ -0,0 +1,24 @@
+name = "mistral-small3.2"
+family = "mistral-small3.2"
+release_date = "2025-01-31"
+last_updated = "2025-01-31"
+attachment = false
+reasoning = true
+temperature = true
+tool_call = true
+open_weights = false
+
+[description]
+text = "Mistral-Small-3.2-24B-Instruct-2506 is a 24B multimodal instruction-tuned model optimized for reasoning and STEM; it supports robust function calling, reduces repetition, and handles both text and vision inputs efficiently."
+
+[cost]
+input = 0.50 # € per 1M tokens (0.0000005 * 1_000_000)
+output = 2.20 # € per 1M tokens (0.0000022 * 1_000_000)
+
+[limit]
+context = 120000
+output = 120000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/regolo-ai/models/qwen3-30b.toml b/providers/regolo-ai/models/qwen3-30b.toml
new file mode 100644
index 000000000..f88526f78
--- /dev/null
+++ b/providers/regolo-ai/models/qwen3-30b.toml
@@ -0,0 +1,24 @@
+name = "qwen3-30b"
+family = "qwen3"
+release_date = "2025-04-28"
+last_updated = "2025-04-28"
+attachment = false
+reasoning = false
+temperature = true
+tool_call = true
+open_weights = false
+
+[description]
+text = "Qwen3-30B is a large multilingual instruction-tuned model from the Qwen3 family, built on a mixture-of-experts architecture and optimized for conversational reasoning, structured outputs, and long-context dialogue across diverse domains."
+
+[cost]
+input = 0.50 # € per 1M tokens (0.0000005 * 1_000_000)
+output = 1.80 # € per 1M tokens (0.0000018 * 1_000_000)
+
+[limit]
+context = 32000
+output = 32000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/regolo-ai/models/qwen3-coder-30b.toml b/providers/regolo-ai/models/qwen3-coder-30b.toml
new file mode 100644
index 000000000..7db53715b
--- /dev/null
+++ b/providers/regolo-ai/models/qwen3-coder-30b.toml
@@ -0,0 +1,24 @@
+name = "qwen3-coder-30b"
+family = "qwen3-coder-30b"
+release_date = "2025-01-01"
+last_updated = "2025-01-01"
+attachment = false
+reasoning = false
+temperature = true
+tool_call = true
+open_weights = false
+
+[description]
+text = "Qwen3-Coder-30B is a large-scale MoE coding model designed for complex software tasks, featuring strong code understanding, long-context reasoning, and reliable structured outputs for repository-level analysis and agentic programming workflows."
+
+[cost]
+input = 0.50 # € per 1M tokens (0.0000005 * 1_000_000)
+output = 2.00 # € per 1M tokens (0.0000020 * 1_000_000)
+
+[limit]
+context = 120000
+output = 120000
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/regolo-ai/provider.toml b/providers/regolo-ai/provider.toml
new file mode 100644
index 000000000..c0aab7eb4
--- /dev/null
+++ b/providers/regolo-ai/provider.toml
@@ -0,0 +1,5 @@
+name = "Regolo AI"
+env = ["REGOLO_API_KEY"]
+npm = "@ai-sdk/openai-compatible"
+doc = "https://docs.regolo.ai/"
+api = "https://api.regolo.ai/v1"

From a46966efef29ae3def125b5914f35617f85dd081 Mon Sep 17 00:00:00 2001
From: massaindustries
Date: Thu, 29 Jan 2026 15:14:02 +0000
Subject: [PATCH 2/4] add-regolo-02

---
 .../regolo-ai/models/iQuest-coder-v1-40b.toml | 26 ------------------
 .../models/maestrale-chat-v0.4-beta.toml      | 24 ----------------
 2 files changed, 50 deletions(-)
 delete mode 100644 providers/regolo-ai/models/iQuest-coder-v1-40b.toml
 delete mode 100644 providers/regolo-ai/models/maestrale-chat-v0.4-beta.toml

diff --git a/providers/regolo-ai/models/iQuest-coder-v1-40b.toml b/providers/regolo-ai/models/iQuest-coder-v1-40b.toml
deleted file mode 100644
index f511c623b..000000000
--- a/providers/regolo-ai/models/iQuest-coder-v1-40b.toml
+++ /dev/null
@@ -1,26 +0,0 @@
-name = "iQuest-coder-v1-40b"
-family = "iquest-coder-v1"
-release_date = "2025-12-10"
-last_updated = "2025-12-10"
-attachment = false
-reasoning = true
-temperature = true
-tool_call = true
-open_weights = false
-
-[description]
-text = "iQuest-coder-v1-40b is a 40-billion-parameter open-source code generation model optimized for instruction-following and tool-calling tasks."
-
-[cost]
-# Prices need to be verified against Regolo AI pricing page
-input = 0.80 # € per 1M tokens
-output = 2.80 # € per 1M tokens
-
-[limit]
-context = 128000
-output = 128000
-
-[modalities]
-input = ["text"]
-output = ["text"]
-
diff --git a/providers/regolo-ai/models/maestrale-chat-v0.4-beta.toml b/providers/regolo-ai/models/maestrale-chat-v0.4-beta.toml
deleted file mode 100644
index 3b0a50d63..000000000
--- a/providers/regolo-ai/models/maestrale-chat-v0.4-beta.toml
+++ /dev/null
@@ -1,24 +0,0 @@
-name = "maestrale-chat-v0.4-beta"
-family = "maestrale-chat-v0.4-beta"
-release_date = "2025-01-01"
-last_updated = "2025-01-01"
-attachment = false
-reasoning = false
-temperature = true
-tool_call = true
-open_weights = false
-
-[description]
-text = "A Mistral-7B model adapted to Italian through continued pre-training on a curated, large-scale, high-quality corpus. Powered by mii-llm."
-
-[cost]
-input = 0.05 # € per 1M tokens (0.00000005 * 1_000_000)
-output = 0.25 # € per 1M tokens (0.00000025 * 1_000_000)
-
-[limit]
-context = 64000
-output = 64000
-
-[modalities]
-input = ["text"]
-output = ["text"]

From bb42d0b85573e11d47e01d3b7cf34aa9b8e1c3bd Mon Sep 17 00:00:00 2001
From: massaindustries
Date: Thu, 5 Feb 2026 09:09:30 +0000
Subject: [PATCH 3/4] add qwen-next-coder

---
 providers/regolo-ai/models/qwen3-30b.toml     | 24 -------------------
 .../models/qwen3-coder-next-fp8.toml          | 24 +++++++++++++++++++
 .../{Qwen3-VL-32b.toml => qwen3-vl-32b.toml}  |  0
 3 files changed, 24 insertions(+), 24 deletions(-)
 delete mode 100644 providers/regolo-ai/models/qwen3-30b.toml
 create mode 100644 providers/regolo-ai/models/qwen3-coder-next-fp8.toml
 rename providers/regolo-ai/models/{Qwen3-VL-32b.toml => qwen3-vl-32b.toml} (100%)

diff --git a/providers/regolo-ai/models/qwen3-30b.toml b/providers/regolo-ai/models/qwen3-30b.toml
deleted file mode 100644
index f88526f78..000000000
--- a/providers/regolo-ai/models/qwen3-30b.toml
+++ /dev/null
@@ -1,24 +0,0 @@
-name = "qwen3-30b"
-family = "qwen3"
-release_date = "2025-04-28"
-last_updated = "2025-04-28"
-attachment = false
-reasoning = false
-temperature = true
-tool_call = true
-open_weights = false
-
-[description]
-text = "Qwen3-30B is a large multilingual instruction-tuned model from the Qwen3 family, built on a mixture-of-experts architecture and optimized for conversational reasoning, structured outputs, and long-context dialogue across diverse domains."
-
-[cost]
-input = 0.50 # € per 1M tokens (0.0000005 * 1_000_000)
-output = 1.80 # € per 1M tokens (0.0000018 * 1_000_000)
-
-[limit]
-context = 32000
-output = 32000
-
-[modalities]
-input = ["text"]
-output = ["text"]
diff --git a/providers/regolo-ai/models/qwen3-coder-next-fp8.toml b/providers/regolo-ai/models/qwen3-coder-next-fp8.toml
new file mode 100644
index 000000000..4fcbf2b9d
--- /dev/null
+++ b/providers/regolo-ai/models/qwen3-coder-next-fp8.toml
@@ -0,0 +1,24 @@
+name = "qwen3-coder-next"
+family = "qwen3-coder-next-fp8"
+release_date = "2025-02-05"
+last_updated = "2025-02-05"
+attachment = false
+reasoning = false
+temperature = true
+tool_call = true
+open_weights = false
+
+[description]
+text = "Qwen3-Coder-Next-FP8 is an MoE coding model with 3B activated parameters (80B total), designed for coding agents and local development. It features a 256K context length and advanced tool-calling capabilities."
+
+[cost]
+input = 0.50 # € per 1M tokens
+output = 2.00 # € per 1M tokens
+
+[limit]
+context = 262144
+output = 65536
+
+[modalities]
+input = ["text"]
+output = ["text"]
diff --git a/providers/regolo-ai/models/Qwen3-VL-32b.toml b/providers/regolo-ai/models/qwen3-vl-32b.toml
similarity index 100%
rename from providers/regolo-ai/models/Qwen3-VL-32b.toml
rename to providers/regolo-ai/models/qwen3-vl-32b.toml

From 6ecd9ec5090476783c25b680aaa68b8aabacc5d1 Mon Sep 17 00:00:00 2001
From: massaindustries
Date: Thu, 5 Feb 2026 09:11:08 +0000
Subject: [PATCH 4/4] add qwen-next-coder-2

---
 .../models/{qwen3-coder-next-fp8.toml => qwen3-coder-next.toml} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename providers/regolo-ai/models/{qwen3-coder-next-fp8.toml => qwen3-coder-next.toml} (100%)

diff --git a/providers/regolo-ai/models/qwen3-coder-next-fp8.toml b/providers/regolo-ai/models/qwen3-coder-next.toml
similarity index 100%
rename from providers/regolo-ai/models/qwen3-coder-next-fp8.toml
rename to providers/regolo-ai/models/qwen3-coder-next.toml
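For reviewers: a minimal sketch of how the provider.toml entry above might be wired up through the declared @ai-sdk/openai-compatible package. It assumes the standard createOpenAICompatible / generateText API from the Vercel AI SDK; the base URL and the REGOLO_API_KEY environment variable come from provider.toml, and the model ID is one of the model files added in these patches. The prompt string is purely illustrative.

    import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
    import { generateText } from "ai";

    // Base URL and API key env var taken from providers/regolo-ai/provider.toml.
    const regolo = createOpenAICompatible({
      name: "regolo-ai",
      baseURL: "https://api.regolo.ai/v1",
      apiKey: process.env.REGOLO_API_KEY,
    });

    // Model ID matches one of the model files added above (qwen3-coder-next.toml).
    const { text } = await generateText({
      model: regolo("qwen3-coder-next"),
      prompt: "Write a short TOML snippet that sets context = 262144.",
    });
    console.log(text);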