diff --git a/packages/global/core/ai/model.d.ts b/packages/global/core/ai/model.d.ts
index aef25e0cb7f2..9fa75b27790c 100644
--- a/packages/global/core/ai/model.d.ts
+++ b/packages/global/core/ai/model.d.ts
@@ -16,6 +16,7 @@ type BaseModelItemType = {
 
   isActive?: boolean;
   isCustom?: boolean;
+  isDefault?: boolean;
 
   // If has requestUrl, it will request the model directly
   requestUrl?: string;
diff --git a/packages/service/core/ai/config/embedding/Embedding-V1.json b/packages/service/core/ai/config/embedding/Embedding-V1.json
deleted file mode 100644
index 7c71ad428e40..000000000000
--- a/packages/service/core/ai/config/embedding/Embedding-V1.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "provider": "Ernie",
-  "model": "Embedding-V1",
-  "name": "Embedding-V1",
-
-  "defaultToken": 512,
-  "maxToken": 1000,
-
-  "charsPointsPrice": 0
-}
diff --git a/packages/service/core/ai/config/embedding/baidu-tao-8k.json b/packages/service/core/ai/config/embedding/baidu-tao-8k.json
deleted file mode 100644
index 606aa906d40b..000000000000
--- a/packages/service/core/ai/config/embedding/baidu-tao-8k.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "provider": "Ernie",
-  "model": "tao-8k",
-  "name": "tao-8k",
-
-  "defaultToken": 512,
-  "maxToken": 8000,
-
-  "charsPointsPrice": 0
-}
diff --git a/packages/service/core/ai/config/embedding/bge-m3.json b/packages/service/core/ai/config/embedding/bge-m3.json
deleted file mode 100644
index 4963c7302b79..000000000000
--- a/packages/service/core/ai/config/embedding/bge-m3.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "provider": "BAAI",
-  "model": "bge-m3",
-  "name": "bge-m3",
-
-  "defaultToken": 512,
-  "maxToken": 8000,
-
-  "charsPointsPrice": 0
-}
diff --git a/packages/service/core/ai/config/embedding/doubao-embedding-large.json b/packages/service/core/ai/config/embedding/doubao-embedding-large.json
deleted file mode 100644
index ee474f99067b..000000000000
--- a/packages/service/core/ai/config/embedding/doubao-embedding-large.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "provider": "Doubao",
-  "model": "Doubao-embedding-large",
-  "name": "Doubao-embedding-large",
-
-  "defaultToken": 512,
-  "maxToken": 4096,
-
-  "charsPointsPrice": 0
-}
diff --git a/packages/service/core/ai/config/embedding/doubao-embedding.json b/packages/service/core/ai/config/embedding/doubao-embedding.json
deleted file mode 100644
index acdde31eb8a1..000000000000
--- a/packages/service/core/ai/config/embedding/doubao-embedding.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "provider": "Doubao",
-  "model": "Doubao-embedding",
-  "name": "Doubao-embedding",
-
-  "defaultToken": 512,
-  "maxToken": 4096,
-
-  "charsPointsPrice": 0
-}
diff --git a/packages/service/core/ai/config/embedding/hunyuan-embedding.json b/packages/service/core/ai/config/embedding/hunyuan-embedding.json
deleted file mode 100644
index 725e572f570c..000000000000
--- a/packages/service/core/ai/config/embedding/hunyuan-embedding.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "provider": "Hunyuan",
-  "model": "hunyuan-embedding",
-  "name": "hunyuan-embedding",
-
-  "defaultToken": 512,
-  "maxToken": 1024,
-
-  "charsPointsPrice": 0
-}
diff --git a/packages/service/core/ai/config/embedding/siliconflow-bge-m3.json b/packages/service/core/ai/config/embedding/siliconflow-bge-m3.json
deleted file mode 100644
index 31509ccf74d4..000000000000
--- a/packages/service/core/ai/config/embedding/siliconflow-bge-m3.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
-  "provider": "Siliconflow",
-  "model": "BAAI/bge-m3",
-  "name": "BAAI/bge-m3",
-
-  "defaultToken": 512,
-  "maxToken": 8000,
-
"charsPointsPrice": 0 -} diff --git a/packages/service/core/ai/config/embedding/text-embedding-004.json b/packages/service/core/ai/config/embedding/text-embedding-004.json deleted file mode 100644 index 9b63190b2652..000000000000 --- a/packages/service/core/ai/config/embedding/text-embedding-004.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "provider": "Gemini", - "model": "text-embedding-004", - "name": "text-embedding-004", - - "defaultToken": 512, - "maxToken": 2000, - - "charsPointsPrice": 0 -} diff --git a/packages/service/core/ai/config/embedding/text-embedding-3-large.json b/packages/service/core/ai/config/embedding/text-embedding-3-large.json deleted file mode 100644 index 5a37f9c7775c..000000000000 --- a/packages/service/core/ai/config/embedding/text-embedding-3-large.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "provider": "OpenAI", - "model": "text-embedding-3-large", - "name": "text-embedding-3-large", - - "defaultToken": 512, - "maxToken": 8000, - - "charsPointsPrice": 0, - - "defaultConfig": { - "dimensions": 1024 - } -} diff --git a/packages/service/core/ai/config/embedding/text-embedding-3-small.json b/packages/service/core/ai/config/embedding/text-embedding-3-small.json deleted file mode 100644 index 096e63d05e12..000000000000 --- a/packages/service/core/ai/config/embedding/text-embedding-3-small.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "provider": "OpenAI", - "model": "text-embedding-3-small", - "name": "text-embedding-3-small", - - "defaultToken": 512, - "maxToken": 8000, - - "charsPointsPrice": 0 -} diff --git a/packages/service/core/ai/config/embedding/text-embedding-ada-002.json b/packages/service/core/ai/config/embedding/text-embedding-ada-002.json deleted file mode 100644 index cdec0bae71bd..000000000000 --- a/packages/service/core/ai/config/embedding/text-embedding-ada-002.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "provider": "OpenAI", - "model": "text-embedding-ada-002", - "name": "text-embedding-ada-002", - - "defaultToken": 512, - "maxToken": 8000, - - "charsPointsPrice": 0 -} diff --git a/packages/service/core/ai/config/embedding/zhipu-embedding-3.json b/packages/service/core/ai/config/embedding/zhipu-embedding-3.json deleted file mode 100644 index 275a13d2b058..000000000000 --- a/packages/service/core/ai/config/embedding/zhipu-embedding-3.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "provider": "ChatGLM", - "model": "embedding-3", - "name": "embedding-3", - - "defaultToken": 512, - "maxToken": 8000, - "defaultConfig": { - "dimensions": 1024 - }, - - "charsPointsPrice": 0 -} diff --git a/packages/service/core/ai/config/llm/Doubao-lite-128k.json b/packages/service/core/ai/config/llm/Doubao-lite-128k.json deleted file mode 100644 index 8919f377a0f4..000000000000 --- a/packages/service/core/ai/config/llm/Doubao-lite-128k.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Doubao", - "model": "Doubao-lite-128k", - "name": "Doubao-lite-128k", - - "maxContext": 128000, - "maxResponse": 4000, - "quoteMaxToken": 120000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/Doubao-lite-32k.json b/packages/service/core/ai/config/llm/Doubao-lite-32k.json deleted file mode 100644 index 05596cdf9ce1..000000000000 --- 
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Doubao",
-  "model": "Doubao-lite-32k",
-  "name": "Doubao-lite-32k",
-
-  "maxContext": 32000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 32000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/Doubao-lite-4k.json b/packages/service/core/ai/config/llm/Doubao-lite-4k.json
deleted file mode 100644
index 626d21702c4d..000000000000
--- a/packages/service/core/ai/config/llm/Doubao-lite-4k.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Doubao",
-  "model": "Doubao-lite-4k",
-  "name": "Doubao-lite-4k",
-
-  "maxContext": 4000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 4000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/Doubao-pro-128k.json b/packages/service/core/ai/config/llm/Doubao-pro-128k.json
deleted file mode 100644
index 552ca08c8371..000000000000
--- a/packages/service/core/ai/config/llm/Doubao-pro-128k.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Doubao",
-  "model": "Doubao-pro-128k",
-  "name": "Doubao-pro-128k",
-
-  "maxContext": 128000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 120000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/Doubao-pro-32k.json b/packages/service/core/ai/config/llm/Doubao-pro-32k.json
deleted file mode 100644
index ada380a24b2d..000000000000
--- a/packages/service/core/ai/config/llm/Doubao-pro-32k.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Doubao",
-  "model": "Doubao-pro-32k",
-  "name": "Doubao-pro-32k",
-
-  "maxContext": 32000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 32000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/Doubao-pro-4k.json b/packages/service/core/ai/config/llm/Doubao-pro-4k.json
deleted file mode 100644
index e6b3cedc79db..000000000000
--- a/packages/service/core/ai/config/llm/Doubao-pro-4k.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Doubao",
-  "model": "Doubao-pro-4k",
-  "name": "Doubao-pro-4k",
-
-  "maxContext": 4000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 4000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": true,
-  "functionCall": false,
"defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/Doubao-vision-lite-32k.json b/packages/service/core/ai/config/llm/Doubao-vision-lite-32k.json deleted file mode 100644 index 6decf23de3be..000000000000 --- a/packages/service/core/ai/config/llm/Doubao-vision-lite-32k.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Doubao", - "model": "Doubao-vision-lite-32k", - "name": "Doubao-vision-lite-32k", - - "maxContext": 32000, - "maxResponse": 4000, - "quoteMaxToken": 32000, - "maxTemperature": 1, - - "vision": true, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/Doubao-vision-pro-32k.json b/packages/service/core/ai/config/llm/Doubao-vision-pro-32k.json deleted file mode 100644 index 3867616bb7ba..000000000000 --- a/packages/service/core/ai/config/llm/Doubao-vision-pro-32k.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Doubao", - "model": "Doubao-vision-pro-32k", - "name": "Doubao-vision-pro-32k", - - "maxContext": 32000, - "maxResponse": 4000, - "quoteMaxToken": 32000, - "maxTemperature": 1, - - "vision": true, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/ERNIE-4.0-8K.json b/packages/service/core/ai/config/llm/ERNIE-4.0-8K.json deleted file mode 100644 index 5fec2f058b15..000000000000 --- a/packages/service/core/ai/config/llm/ERNIE-4.0-8K.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Ernie", - "model": "ERNIE-4.0-8K", - "name": "ERNIE-4.0-8K", - - "maxContext": 8000, - "maxResponse": 2048, - "quoteMaxToken": 5000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/ERNIE-4.0-Turbo-8K.json b/packages/service/core/ai/config/llm/ERNIE-4.0-Turbo-8K.json deleted file mode 100644 index 7ff86c06a7e2..000000000000 --- a/packages/service/core/ai/config/llm/ERNIE-4.0-Turbo-8K.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Ernie", - "model": "ERNIE-4.0-Turbo-8K", - "name": "ERNIE-4.0-Turbo-8K", - - "maxContext": 8000, - "maxResponse": 2048, - "quoteMaxToken": 5000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} 
-}
diff --git a/packages/service/core/ai/config/llm/ERNIE-Lite-8K.json b/packages/service/core/ai/config/llm/ERNIE-Lite-8K.json
deleted file mode 100644
index 492ccda11d35..000000000000
--- a/packages/service/core/ai/config/llm/ERNIE-Lite-8K.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Ernie",
-  "model": "ERNIE-Lite-8K",
-  "name": "ERNIE-lite-8k",
-
-  "maxContext": 8000,
-  "maxResponse": 2048,
-  "quoteMaxToken": 6000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": false,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/ERNIE-Speed-128K.json b/packages/service/core/ai/config/llm/ERNIE-Speed-128K.json
deleted file mode 100644
index 76555c3d4fd9..000000000000
--- a/packages/service/core/ai/config/llm/ERNIE-Speed-128K.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Ernie",
-  "model": "ERNIE-Speed-128K",
-  "name": "ERNIE-Speed-128K",
-
-  "maxContext": 128000,
-  "maxResponse": 4096,
-  "quoteMaxToken": 120000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": false,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/MiniMax-Text-01.json b/packages/service/core/ai/config/llm/MiniMax-Text-01.json
deleted file mode 100644
index 54f3f0b916cf..000000000000
--- a/packages/service/core/ai/config/llm/MiniMax-Text-01.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "MiniMax",
-  "model": "MiniMax-Text-01",
-  "name": "MiniMax-Text-01",
-
-  "maxContext": 1000000,
-  "maxResponse": 1000000,
-  "quoteMaxToken": 100000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": false,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/SparkDesk-lite.json b/packages/service/core/ai/config/llm/SparkDesk-lite.json
deleted file mode 100644
index e554973574dc..000000000000
--- a/packages/service/core/ai/config/llm/SparkDesk-lite.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "provider": "SparkDesk",
-  "model": "lite",
-  "name": "SparkDesk-lite",
-  "maxContext": 32000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 32000,
-  "maxTemperature": 1,
-  "vision": false,
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "usedInExtractFields": true,
-  "usedInToolCall": true,
-  "usedInQueryExtension": true,
-  "toolChoice": false,
-  "functionCall": false,
-  "customCQPrompt": "",
-  "customExtractPrompt": "",
-  "defaultSystemChatPrompt": ""
-}
diff --git a/packages/service/core/ai/config/llm/SparkDesk-max-32k.json b/packages/service/core/ai/config/llm/SparkDesk-max-32k.json
deleted file mode 100644
index 74bd46ea1c36..000000000000
--- a/packages/service/core/ai/config/llm/SparkDesk-max-32k.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "SparkDesk",
-  "model": "max-32k",
-  "name": "SparkDesk-max-32k",
-
"maxContext": 32000, - "maxResponse": 8000, - "quoteMaxToken": 32000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/SparkDesk-max.json b/packages/service/core/ai/config/llm/SparkDesk-max.json deleted file mode 100644 index 41b55201e56c..000000000000 --- a/packages/service/core/ai/config/llm/SparkDesk-max.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "SparkDesk", - "model": "generalv3.5", - "name": "SparkDesk-max", - "maxContext": 32000, - "maxResponse": 8000, - "quoteMaxToken": 32000, - "maxTemperature": 1, - "vision": false, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/SparkDesk-pro-128k.json b/packages/service/core/ai/config/llm/SparkDesk-pro-128k.json deleted file mode 100644 index 86567a8c7fd5..000000000000 --- a/packages/service/core/ai/config/llm/SparkDesk-pro-128k.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "SparkDesk", - "model": "pro-128k", - "name": "SparkDesk-Pro-128k", - "maxContext": 128000, - "maxResponse": 4000, - "quoteMaxToken": 128000, - "maxTemperature": 1, - "vision": false, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/SparkDesk-pro.json b/packages/service/core/ai/config/llm/SparkDesk-pro.json deleted file mode 100644 index 524cc35ca1b8..000000000000 --- a/packages/service/core/ai/config/llm/SparkDesk-pro.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "SparkDesk", - "model": "generalv3", - "name": "SparkDesk-Pro", - "maxContext": 8000, - "maxResponse": 8000, - "quoteMaxToken": 8000, - "maxTemperature": 1, - "vision": false, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/SparkDesk-v4.0.json b/packages/service/core/ai/config/llm/SparkDesk-v4.0.json deleted file mode 100644 index 75577d820340..000000000000 --- a/packages/service/core/ai/config/llm/SparkDesk-v4.0.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "SparkDesk", - "model": "4.0Ultra", - "name": "SparkDesk-v4.0 Ultra", - - "maxContext": 8000, - "maxResponse": 8000, - "quoteMaxToken": 8000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git 
deleted file mode 100644
index 53c82cc909e1..000000000000
--- a/packages/service/core/ai/config/llm/abab6.5s-chat.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "MiniMax",
-  "model": "abab6.5s-chat",
-  "name": "MiniMax-abab6.5s",
-
-  "maxContext": 245000,
-  "maxResponse": 10000,
-  "quoteMaxToken": 240000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": false,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/claude-3-5-haiku-20241022.json b/packages/service/core/ai/config/llm/claude-3-5-haiku-20241022.json
deleted file mode 100644
index 27f8f7a0d407..000000000000
--- a/packages/service/core/ai/config/llm/claude-3-5-haiku-20241022.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Claude",
-  "model": "claude-3-5-haiku-20241022",
-  "name": "claude-3-5-haiku-20241022",
-
-  "maxContext": 200000,
-  "maxResponse": 8000,
-  "quoteMaxToken": 100000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/claude-3-5-sonnet-20240620.json b/packages/service/core/ai/config/llm/claude-3-5-sonnet-20240620.json
deleted file mode 100644
index 950d699f2c4c..000000000000
--- a/packages/service/core/ai/config/llm/claude-3-5-sonnet-20240620.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Claude",
-  "model": "claude-3-5-sonnet-20240620",
-  "name": "Claude-3-5-sonnet-20240620",
-
-  "maxContext": 200000,
-  "maxResponse": 8000,
-  "quoteMaxToken": 100000,
-  "maxTemperature": 1,
-
-  "vision": true,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/claude-3-5-sonnet-20241022.json b/packages/service/core/ai/config/llm/claude-3-5-sonnet-20241022.json
deleted file mode 100644
index 7bd9cad45bb9..000000000000
--- a/packages/service/core/ai/config/llm/claude-3-5-sonnet-20241022.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Claude",
-  "model": "claude-3-5-sonnet-20241022",
-  "name": "Claude-3-5-sonnet-20241022",
-
-  "maxContext": 200000,
-  "maxResponse": 8000,
-  "quoteMaxToken": 100000,
-  "maxTemperature": 1,
-
-  "vision": true,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/claude-3-opus-20240229.json b/packages/service/core/ai/config/llm/claude-3-opus-20240229.json
deleted file mode 100644
index f361d5084c59..000000000000
--- a/packages/service/core/ai/config/llm/claude-3-opus-20240229.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Claude",
-  "model": "claude-3-opus-20240229",
-  "name": "claude-3-opus-20240229",
-
-  "maxContext": 200000,
-  "maxResponse": 4096,
-  "quoteMaxToken": 100000,
-  "maxTemperature": 1,
-
-  "vision": true,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/deepseek-chat.json b/packages/service/core/ai/config/llm/deepseek-chat.json
deleted file mode 100644
index 3e80722a39eb..000000000000
--- a/packages/service/core/ai/config/llm/deepseek-chat.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-  "provider": "DeepSeek",
-  "model": "deepseek-chat",
-  "name": "Deepseek-chat",
-
-  "maxContext": 64000,
-  "maxResponse": 4096,
-  "quoteMaxToken": 60000,
-  "maxTemperature": 1.5,
-
-  "vision": false,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true
-}
diff --git a/packages/service/core/ai/config/llm/deepseek-reasoner.json b/packages/service/core/ai/config/llm/deepseek-reasoner.json
deleted file mode 100644
index 782b65a27716..000000000000
--- a/packages/service/core/ai/config/llm/deepseek-reasoner.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
-  "provider": "DeepSeek",
-  "model": "deepseek-reasoner",
-  "name": "Deepseek-reasoner",
-
-  "maxContext": 64000,
-  "maxResponse": 4096,
-  "quoteMaxToken": 60000,
-  "maxTemperature": 1.5,
-
-  "vision": false,
-  "toolChoice": false,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {
-    "temperature": null
-  },
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/gemini-1.5-flash.json b/packages/service/core/ai/config/llm/gemini-1.5-flash.json
deleted file mode 100644
index f01b2497c2f7..000000000000
--- a/packages/service/core/ai/config/llm/gemini-1.5-flash.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Gemini",
-  "model": "gemini-1.5-flash",
-  "name": "Gemini-1.5-flash",
-
-  "maxContext": 1000000,
-  "maxResponse": 8000,
-  "quoteMaxToken": 60000,
-  "maxTemperature": 1,
-
-  "vision": true,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/gemini-1.5-pro.json b/packages/service/core/ai/config/llm/gemini-1.5-pro.json
deleted file mode 100644
index 2255259beb0c..000000000000
--- a/packages/service/core/ai/config/llm/gemini-1.5-pro.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Gemini",
-  "model": "gemini-1.5-pro",
-  "name": "Gemini-1.5-pro",
-
-  "maxContext": 2000000,
-  "maxResponse": 8000,
-  "quoteMaxToken": 60000,
-  "maxTemperature": 1,
-
"vision": true, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/gemini-2.0-flash-exp.json b/packages/service/core/ai/config/llm/gemini-2.0-flash-exp.json deleted file mode 100644 index 0697f35d2700..000000000000 --- a/packages/service/core/ai/config/llm/gemini-2.0-flash-exp.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Gemini", - "model": "gemini-2.0-flash-exp", - "name": "Gemini-2.0-flash-exp", - - "maxContext": 1000000, - "maxResponse": 8000, - "quoteMaxToken": 60000, - "maxTemperature": 1, - - "vision": true, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/gemini-2.0-flash-thinking-exp.json b/packages/service/core/ai/config/llm/gemini-2.0-flash-thinking-exp.json deleted file mode 100644 index c0287ee87fb1..000000000000 --- a/packages/service/core/ai/config/llm/gemini-2.0-flash-thinking-exp.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Gemini", - "model": "gemini-2.0-flash-thinking-exp-01-21", - "name": "Gemini-2.0-flash-thinking-exp", - - "maxContext": 1000000, - "maxResponse": 8000, - "quoteMaxToken": 60000, - "maxTemperature": 1, - - "vision": true, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/gemini-exp-1206.json b/packages/service/core/ai/config/llm/gemini-exp-1206.json deleted file mode 100644 index 3702c04bde62..000000000000 --- a/packages/service/core/ai/config/llm/gemini-exp-1206.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Gemini", - "model": "gemini-exp-1206", - "name": "gemini-exp-1206", - - "maxContext": 128000, - "maxResponse": 8000, - "quoteMaxToken": 120000, - "maxTemperature": 1, - - "vision": true, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/glm-4-air.json b/packages/service/core/ai/config/llm/glm-4-air.json deleted file mode 100644 index 52792a3b6c0e..000000000000 --- a/packages/service/core/ai/config/llm/glm-4-air.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "ChatGLM", - "model": "glm-4-air", - "name": "glm-4-air", - - "maxContext": 128000, - "maxResponse": 4000, - "quoteMaxToken": 120000, - "maxTemperature": 0.99, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - 
"customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/glm-4-flash.json b/packages/service/core/ai/config/llm/glm-4-flash.json deleted file mode 100644 index f68338fb6645..000000000000 --- a/packages/service/core/ai/config/llm/glm-4-flash.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "ChatGLM", - "model": "glm-4-flash", - "name": "glm-4-flash", - - "maxContext": 128000, - "maxResponse": 4000, - "quoteMaxToken": 120000, - "maxTemperature": 0.99, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/glm-4-long.json b/packages/service/core/ai/config/llm/glm-4-long.json deleted file mode 100644 index 15d6fa36ba4d..000000000000 --- a/packages/service/core/ai/config/llm/glm-4-long.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "ChatGLM", - "model": "glm-4-long", - "name": "glm-4-long", - - "maxContext": 1000000, - "maxResponse": 4000, - "quoteMaxToken": 900000, - "maxTemperature": 0.99, - - "vision": false, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/glm-4-plus.json b/packages/service/core/ai/config/llm/glm-4-plus.json deleted file mode 100644 index 7c87b9be6228..000000000000 --- a/packages/service/core/ai/config/llm/glm-4-plus.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "ChatGLM", - "model": "glm-4-plus", - "name": "GLM-4-plus", - - "maxContext": 128000, - "maxResponse": 4000, - "quoteMaxToken": 120000, - "maxTemperature": 0.99, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/glm-4v-flash.json b/packages/service/core/ai/config/llm/glm-4v-flash.json deleted file mode 100644 index b22035c6b5e2..000000000000 --- a/packages/service/core/ai/config/llm/glm-4v-flash.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "ChatGLM", - "model": "glm-4v-flash", - "name": "glm-4v-flash", - - "maxContext": 8000, - "maxResponse": 1000, - "quoteMaxToken": 6000, - "maxTemperature": 0.99, - - "vision": true, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/glm-4v-plus.json b/packages/service/core/ai/config/llm/glm-4v-plus.json deleted file mode 100644 index c6d43ea3c800..000000000000 --- a/packages/service/core/ai/config/llm/glm-4v-plus.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": 
"ChatGLM", - "model": "glm-4v-plus", - "name": "GLM-4v-plus", - - "maxContext": 8000, - "maxResponse": 1000, - "quoteMaxToken": 6000, - "maxTemperature": 0.99, - - "vision": true, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/gpt-3.5-turbo.json b/packages/service/core/ai/config/llm/gpt-3.5-turbo.json deleted file mode 100644 index 28f49c1cd7c4..000000000000 --- a/packages/service/core/ai/config/llm/gpt-3.5-turbo.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "provider": "OpenAI", - "model": "gpt-3.5-turbo", - "name": "gpt-3.5-turbo", - - "maxContext": 16000, - "maxResponse": 4000, - "quoteMaxToken": 13000, - "maxTemperature": 1.2, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true -} diff --git a/packages/service/core/ai/config/llm/gpt-4-turbo.json b/packages/service/core/ai/config/llm/gpt-4-turbo.json deleted file mode 100644 index 4a75e5aebe36..000000000000 --- a/packages/service/core/ai/config/llm/gpt-4-turbo.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "provider": "OpenAI", - "model": "gpt-4-turbo", - "name": "gpt-4-turbo", - - "maxContext": 128000, - "maxResponse": 4000, - "quoteMaxToken": 60000, - "maxTemperature": 1.2, - - "vision": true, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true -} diff --git a/packages/service/core/ai/config/llm/gpt-4o-mini.json b/packages/service/core/ai/config/llm/gpt-4o-mini.json deleted file mode 100644 index 1a70d53de73b..000000000000 --- a/packages/service/core/ai/config/llm/gpt-4o-mini.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "provider": "OpenAI", - "model": "gpt-4o-mini", - "name": "GPT-4o-mini", - - "maxContext": 128000, - "maxResponse": 16000, - "quoteMaxToken": 60000, - "maxTemperature": 1.2, - - "vision": true, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/gpt-4o.json b/packages/service/core/ai/config/llm/gpt-4o.json deleted file mode 100644 index 8eed54e855a3..000000000000 --- a/packages/service/core/ai/config/llm/gpt-4o.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "OpenAI", - "model": "gpt-4o", - "name": "GPT-4o", - - "maxContext": 128000, - "maxResponse": 4000, - "quoteMaxToken": 60000, - "maxTemperature": 1.2, - - "vision": true, - "toolChoice": true, - "functionCall": true, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git 
deleted file mode 100644
index ab5e5977316c..000000000000
--- a/packages/service/core/ai/config/llm/groq-llama-3.1-8b-instant.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-  "provider": "Groq",
-  "model": "llama-3.1-8b-instant",
-  "name": "Groq-llama-3.1-8b-instant",
-
-  "maxContext": 128000,
-  "maxResponse": 8000,
-  "quoteMaxToken": 60000,
-  "maxTemperature": 1.2,
-
-  "vision": true,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {}
-}
diff --git a/packages/service/core/ai/config/llm/groq-llama-3.3-70b-versatile copy.json b/packages/service/core/ai/config/llm/groq-llama-3.3-70b-versatile copy.json
deleted file mode 100644
index ebf95b00ebfe..000000000000
--- a/packages/service/core/ai/config/llm/groq-llama-3.3-70b-versatile copy.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
-  "provider": "Groq",
-  "model": "llama-3.3-70b-versatile",
-  "name": "Groq-llama-3.3-70b-versatile",
-
-  "maxContext": 128000,
-  "maxResponse": 8000,
-  "quoteMaxToken": 60000,
-  "maxTemperature": 1.2,
-
-  "vision": true,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {}
-}
diff --git a/packages/service/core/ai/config/llm/hunyuan-large.json b/packages/service/core/ai/config/llm/hunyuan-large.json
deleted file mode 100644
index d038004fa2a4..000000000000
--- a/packages/service/core/ai/config/llm/hunyuan-large.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Hunyuan",
-  "model": "hunyuan-large",
-  "name": "hunyuan-large",
-
-  "maxContext": 28000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 20000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": false,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/hunyuan-lite.json b/packages/service/core/ai/config/llm/hunyuan-lite.json
deleted file mode 100644
index 1e63e08ba44f..000000000000
--- a/packages/service/core/ai/config/llm/hunyuan-lite.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Hunyuan",
-  "model": "hunyuan-lite",
-  "name": "hunyuan-lite",
-
-  "maxContext": 250000,
-  "maxResponse": 6000,
-  "quoteMaxToken": 100000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": false,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/hunyuan-pro.json b/packages/service/core/ai/config/llm/hunyuan-pro.json
deleted file mode 100644
index 474726214d07..000000000000
--- a/packages/service/core/ai/config/llm/hunyuan-pro.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
"provider": "Hunyuan", - "model": "hunyuan-pro", - "name": "hunyuan-pro", - - "maxContext": 28000, - "maxResponse": 4000, - "quoteMaxToken": 28000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/hunyuan-standard.json b/packages/service/core/ai/config/llm/hunyuan-standard.json deleted file mode 100644 index 41ba34c6de07..000000000000 --- a/packages/service/core/ai/config/llm/hunyuan-standard.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Hunyuan", - "model": "hunyuan-standard", - "name": "hunyuan-standard", - - "maxContext": 32000, - "maxResponse": 2000, - "quoteMaxToken": 20000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/hunyuan-turbo-vision.json b/packages/service/core/ai/config/llm/hunyuan-turbo-vision.json deleted file mode 100644 index 340d1e125c1f..000000000000 --- a/packages/service/core/ai/config/llm/hunyuan-turbo-vision.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Hunyuan", - "model": "hunyuan-turbo-vision", - "name": "hunyuan-turbo-vision", - - "maxContext": 6000, - "maxResponse": 2000, - "quoteMaxToken": 6000, - "maxTemperature": 1, - - "vision": true, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/hunyuan-turbo.json b/packages/service/core/ai/config/llm/hunyuan-turbo.json deleted file mode 100644 index 6039977c5ac0..000000000000 --- a/packages/service/core/ai/config/llm/hunyuan-turbo.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Hunyuan", - "model": "hunyuan-turbo", - "name": "hunyuan-turbo", - - "maxContext": 28000, - "maxResponse": 4000, - "quoteMaxToken": 20000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/hunyuan-vision.json b/packages/service/core/ai/config/llm/hunyuan-vision.json deleted file mode 100644 index ffc08ecfc4da..000000000000 --- a/packages/service/core/ai/config/llm/hunyuan-vision.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Hunyuan", - "model": "hunyuan-vision", - "name": "hunyuan-vision", - - "maxContext": 6000, - "maxResponse": 2000, - "quoteMaxToken": 4000, - "maxTemperature": 1, - - "vision": true, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - 
"usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/internlm2-pro-chat.json b/packages/service/core/ai/config/llm/internlm2-pro-chat.json deleted file mode 100644 index 2128f68fbe04..000000000000 --- a/packages/service/core/ai/config/llm/internlm2-pro-chat.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Intern", - "model": "internlm2-pro-chat", - "name": "internlm2-pro-chat", - - "maxContext": 32000, - "maxResponse": 8000, - "quoteMaxToken": 32000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/internlm3-8b-instruct.json b/packages/service/core/ai/config/llm/internlm3-8b-instruct.json deleted file mode 100644 index 53acf23a7f80..000000000000 --- a/packages/service/core/ai/config/llm/internlm3-8b-instruct.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Intern", - "model": "internlm3-8b-instruct", - "name": "internlm3-8b-instruct", - - "maxContext": 32000, - "maxResponse": 8000, - "quoteMaxToken": 32000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/ministral-3b-latest.json b/packages/service/core/ai/config/llm/ministral-3b-latest.json deleted file mode 100644 index 395113bd1896..000000000000 --- a/packages/service/core/ai/config/llm/ministral-3b-latest.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "MistralAI", - "model": "ministral-3b-latest", - "name": "Ministral-3b-latest", - - "maxContext": 130000, - "maxResponse": 8000, - "quoteMaxToken": 60000, - "maxTemperature": 1.2, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/ministral-8b-latest.json b/packages/service/core/ai/config/llm/ministral-8b-latest.json deleted file mode 100644 index a9841d84abef..000000000000 --- a/packages/service/core/ai/config/llm/ministral-8b-latest.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "MistralAI", - "model": "ministral-8b-latest", - "name": "Ministral-8b-latest", - - "maxContext": 130000, - "maxResponse": 8000, - "quoteMaxToken": 60000, - "maxTemperature": 1.2, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git 
deleted file mode 100644
index 3a627d57acca..000000000000
--- a/packages/service/core/ai/config/llm/mistral-large-latest.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "MistralAI",
-  "model": "mistral-large-latest",
-  "name": "Mistral-large-latest",
-
-  "maxContext": 130000,
-  "maxResponse": 8000,
-  "quoteMaxToken": 60000,
-  "maxTemperature": 1.2,
-
-  "vision": false,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/mistral-small-latest.json b/packages/service/core/ai/config/llm/mistral-small-latest.json
deleted file mode 100644
index f28fc06ead72..000000000000
--- a/packages/service/core/ai/config/llm/mistral-small-latest.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "MistralAI",
-  "model": "mistral-small-latest",
-  "name": "Mistral-small-latest",
-
-  "maxContext": 32000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 32000,
-  "maxTemperature": 1.2,
-
-  "vision": false,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/moonshot-v1-128k.json b/packages/service/core/ai/config/llm/moonshot-v1-128k.json
deleted file mode 100644
index 50d144cc32ee..000000000000
--- a/packages/service/core/ai/config/llm/moonshot-v1-128k.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Moonshot",
-  "model": "moonshot-v1-128k",
-  "name": "moonshot-v1-128k",
-
-  "maxContext": 128000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 60000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/moonshot-v1-32k.json b/packages/service/core/ai/config/llm/moonshot-v1-32k.json
deleted file mode 100644
index dc6f0f4bc7ac..000000000000
--- a/packages/service/core/ai/config/llm/moonshot-v1-32k.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Moonshot",
-  "model": "moonshot-v1-32k",
-  "name": "moonshot-v1-32k",
-
-  "maxContext": 32000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 32000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/moonshot-v1-8k.json b/packages/service/core/ai/config/llm/moonshot-v1-8k.json
deleted file mode 100644
index 56b75a2df051..000000000000
--- a/packages/service/core/ai/config/llm/moonshot-v1-8k.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Moonshot",
-  "model": "moonshot-v1-8k",
-  "name": "moonshot-v1-8k",
-
-  "maxContext": 8000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 6000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": true,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {},
-  "fieldMap": {}
-}
diff --git a/packages/service/core/ai/config/llm/o1-mini.json b/packages/service/core/ai/config/llm/o1-mini.json
deleted file mode 100644
index aa8a4f8090dc..000000000000
--- a/packages/service/core/ai/config/llm/o1-mini.json
+++ /dev/null
@@ -1,28 +0,0 @@
-{
-  "provider": "OpenAI",
-  "model": "o1-mini",
-  "name": "o1-mini",
-
-  "maxContext": 128000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 120000,
-  "maxTemperature": 1.2,
-
-  "vision": false,
-  "toolChoice": false,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {
-    "temperature": 1,
-    "max_tokens": null
-  }
-}
diff --git a/packages/service/core/ai/config/llm/o1-preview.json b/packages/service/core/ai/config/llm/o1-preview.json
deleted file mode 100644
index c13b7d5251b5..000000000000
--- a/packages/service/core/ai/config/llm/o1-preview.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
-  "provider": "OpenAI",
-  "model": "o1-preview",
-  "name": "o1-preview",
-
-  "maxContext": 128000,
-  "maxResponse": 4000,
-  "quoteMaxToken": 120000,
-  "maxTemperature": 1.2,
-
-  "vision": false,
-  "toolChoice": false,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {
-    "temperature": 1,
-    "max_tokens": null,
-    "stream": false
-  }
-}
diff --git a/packages/service/core/ai/config/llm/o1.json b/packages/service/core/ai/config/llm/o1.json
deleted file mode 100644
index 35d5ec0c1b74..000000000000
--- a/packages/service/core/ai/config/llm/o1.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
-  "provider": "OpenAI",
-  "model": "o1",
-  "name": "o1",
-
-  "maxContext": 195000,
-  "maxResponse": 8000,
-  "quoteMaxToken": 120000,
-  "maxTemperature": 1.2,
-
-  "vision": false,
-  "toolChoice": false,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
-  "usedInClassify": true,
-  "customCQPrompt": "",
-  "usedInExtractFields": true,
-  "usedInQueryExtension": true,
-  "customExtractPrompt": "",
-  "usedInToolCall": true,
-
-  "defaultConfig": {
-    "temperature": 1,
-    "max_tokens": null,
-    "stream": false
-  }
-}
diff --git a/packages/service/core/ai/config/llm/qwen-coder-turbo.json b/packages/service/core/ai/config/llm/qwen-coder-turbo.json
deleted file mode 100644
index e9e9f9f0523c..000000000000
--- a/packages/service/core/ai/config/llm/qwen-coder-turbo.json
+++ /dev/null
@@ -1,26 +0,0 @@
-{
-  "provider": "Qwen",
-  "model": "qwen-coder-turbo",
-  "name": "qwen-coder-turbo",
-
-  "maxContext": 128000,
-  "maxResponse": 8000,
-  "quoteMaxToken": 50000,
-  "maxTemperature": 1,
-
-  "vision": false,
-  "toolChoice": false,
-  "functionCall": false,
-  "defaultSystemChatPrompt": "",
-
-  "datasetProcess": true,
"usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/qwen-max.json b/packages/service/core/ai/config/llm/qwen-max.json deleted file mode 100644 index e4874053584c..000000000000 --- a/packages/service/core/ai/config/llm/qwen-max.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Qwen", - "model": "qwen-max", - "name": "Qwen-max", - - "maxContext": 8000, - "maxResponse": 4000, - "quoteMaxToken": 6000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/qwen-plus.json b/packages/service/core/ai/config/llm/qwen-plus.json deleted file mode 100644 index 95a9015293e5..000000000000 --- a/packages/service/core/ai/config/llm/qwen-plus.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Qwen", - "model": "qwen-plus", - "name": "Qwen-plus", - - "maxContext": 64000, - "maxResponse": 8000, - "quoteMaxToken": 60000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/qwen-turbo.json b/packages/service/core/ai/config/llm/qwen-turbo.json deleted file mode 100644 index f3e5bc14314e..000000000000 --- a/packages/service/core/ai/config/llm/qwen-turbo.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Qwen", - "model": "qwen-turbo", - "name": "Qwen-turbo", - - "maxContext": 128000, - "maxResponse": 8000, - "quoteMaxToken": 100000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/qwen-vl-max.json b/packages/service/core/ai/config/llm/qwen-vl-max.json deleted file mode 100644 index 4474ec2c36fc..000000000000 --- a/packages/service/core/ai/config/llm/qwen-vl-max.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Qwen", - "model": "qwen-vl-max", - "name": "qwen-vl-max", - - "maxContext": 32000, - "maxResponse": 2000, - "quoteMaxToken": 20000, - "maxTemperature": 1.2, - - "vision": true, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/qwen-vl-plus.json b/packages/service/core/ai/config/llm/qwen-vl-plus.json deleted file mode 100644 index 700988627aef..000000000000 --- 
a/packages/service/core/ai/config/llm/qwen-vl-plus.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "provider": "Qwen", - "model": "qwen-vl-plus", - "name": "qwen-vl-plus", - - "maxContext": 32000, - "maxResponse": 2000, - "quoteMaxToken": 20000, - "maxTemperature": 1.2, - - "vision": true, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true -} diff --git a/packages/service/core/ai/config/llm/qwen2.5-14b-instruct.json b/packages/service/core/ai/config/llm/qwen2.5-14b-instruct.json deleted file mode 100644 index 3ac48e54a833..000000000000 --- a/packages/service/core/ai/config/llm/qwen2.5-14b-instruct.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Qwen", - "model": "qwen2.5-14b-instruct", - "name": "qwen2.5-14b-instruct", - - "maxContext": 128000, - "maxResponse": 8000, - "quoteMaxToken": 50000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/qwen2.5-32b-instruct.json b/packages/service/core/ai/config/llm/qwen2.5-32b-instruct.json deleted file mode 100644 index add3479dda26..000000000000 --- a/packages/service/core/ai/config/llm/qwen2.5-32b-instruct.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Qwen", - "model": "qwen2.5-32b-instruct", - "name": "qwen2.5-32b-instruct", - - "maxContext": 128000, - "maxResponse": 8000, - "quoteMaxToken": 50000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/qwen2.5-72b-instruct.json b/packages/service/core/ai/config/llm/qwen2.5-72b-instruct.json deleted file mode 100644 index b04e62045f9c..000000000000 --- a/packages/service/core/ai/config/llm/qwen2.5-72b-instruct.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Qwen", - "model": "qwen2.5-72b-instruct", - "name": "Qwen2.5-72B-instruct", - - "maxContext": 128000, - "maxResponse": 8000, - "quoteMaxToken": 50000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/qwen2.5-7b-instruct.json b/packages/service/core/ai/config/llm/qwen2.5-7b-instruct.json deleted file mode 100644 index c68ab80eabd2..000000000000 --- a/packages/service/core/ai/config/llm/qwen2.5-7b-instruct.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Qwen", - "model": "qwen2.5-7b-instruct", - "name": "qwen2.5-7b-instruct", - - "maxContext": 128000, - "maxResponse": 8000, - "quoteMaxToken": 50000, - "maxTemperature": 1, - - "vision": 
false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/siliconflow-Qwen2-VL-72B-Instruct.json b/packages/service/core/ai/config/llm/siliconflow-Qwen2-VL-72B-Instruct.json deleted file mode 100644 index 82ee180c877e..000000000000 --- a/packages/service/core/ai/config/llm/siliconflow-Qwen2-VL-72B-Instruct.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "provider": "Siliconflow", - "model": "Qwen/Qwen2-VL-72B-Instruct", - "name": "Qwen/Qwen2-VL-72B-Instruct", - "maxContext": 32000, - "maxResponse": 4000, - "quoteMaxToken": 32000, - "maxTemperature": 1, - "charsPointsPrice": 0, - "censor": false, - "vision": true, - "datasetProcess": false, - "usedInClassify": false, - "usedInExtractFields": false, - "usedInToolCall": false, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "", - "defaultConfig": {} -} diff --git a/packages/service/core/ai/config/llm/siliconflow-deepseek-v2.5.json b/packages/service/core/ai/config/llm/siliconflow-deepseek-v2.5.json deleted file mode 100644 index 69fd77e70e92..000000000000 --- a/packages/service/core/ai/config/llm/siliconflow-deepseek-v2.5.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Siliconflow", - "model": "deepseek-ai/DeepSeek-V2.5", - "name": "deepseek-ai/DeepSeek-V2.5", - - "maxContext": 32000, - "maxResponse": 4000, - "quoteMaxToken": 32000, - "maxTemperature": 1, - - "vision": true, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/siliconflow-qwen2.5-72b-instruct.json b/packages/service/core/ai/config/llm/siliconflow-qwen2.5-72b-instruct.json deleted file mode 100644 index 48f13a039464..000000000000 --- a/packages/service/core/ai/config/llm/siliconflow-qwen2.5-72b-instruct.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Siliconflow", - "model": "Qwen/Qwen2.5-72B-Instruct", - "name": "Qwen/Qwen2.5-72B-Instruct", - - "maxContext": 128000, - "maxResponse": 8000, - "quoteMaxToken": 50000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": true, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/step-1-128k.json b/packages/service/core/ai/config/llm/step-1-128k.json deleted file mode 100644 index ed5c5e931fcf..000000000000 --- a/packages/service/core/ai/config/llm/step-1-128k.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "StepFun", - "model": "step-1-128k", - "name": "step-1-128k", - "maxContext": 128000, - "maxResponse": 8000, - "quoteMaxToken": 128000, - "maxTemperature": 2, - "vision": false, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - 
"toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/step-1-256k.json b/packages/service/core/ai/config/llm/step-1-256k.json deleted file mode 100644 index 4d13c2813a80..000000000000 --- a/packages/service/core/ai/config/llm/step-1-256k.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "StepFun", - "model": "step-1-256k", - "name": "step-1-256k", - "maxContext": 256000, - "maxResponse": 8000, - "quoteMaxToken": 256000, - "maxTemperature": 2, - "vision": false, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/step-1-32k.json b/packages/service/core/ai/config/llm/step-1-32k.json deleted file mode 100644 index 6172becab9b7..000000000000 --- a/packages/service/core/ai/config/llm/step-1-32k.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "StepFun", - "model": "step-1-32k", - "name": "step-1-32k", - "maxContext": 32000, - "maxResponse": 8000, - "quoteMaxToken": 32000, - "maxTemperature": 2, - "vision": false, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/step-1-8k.json b/packages/service/core/ai/config/llm/step-1-8k.json deleted file mode 100644 index 0d2c5cac7c91..000000000000 --- a/packages/service/core/ai/config/llm/step-1-8k.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "StepFun", - "model": "step-1-8k", - "name": "step-1-8k", - "maxContext": 8000, - "maxResponse": 8000, - "quoteMaxToken": 8000, - "maxTemperature": 2, - "vision": false, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/step-1-flash.json b/packages/service/core/ai/config/llm/step-1-flash.json deleted file mode 100644 index ffeabc0b2d0d..000000000000 --- a/packages/service/core/ai/config/llm/step-1-flash.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "StepFun", - "model": "step-1-flash", - "name": "step-1-flash", - "maxContext": 8000, - "maxResponse": 4000, - "quoteMaxToken": 6000, - "maxTemperature": 2, - "vision": false, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/step-1o-vision-32k.json b/packages/service/core/ai/config/llm/step-1o-vision-32k.json deleted file mode 100644 index b3252ed019c6..000000000000 --- a/packages/service/core/ai/config/llm/step-1o-vision-32k.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "StepFun", - "model": "step-1o-vision-32k", - "name": "step-1o-vision-32k", - "maxContext": 32000, - "quoteMaxToken": 32000, - "maxResponse": 8000, - "maxTemperature": 
2, - "vision": true, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/step-1v-32k.json b/packages/service/core/ai/config/llm/step-1v-32k.json deleted file mode 100644 index 5f50ff1912a9..000000000000 --- a/packages/service/core/ai/config/llm/step-1v-32k.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "StepFun", - "model": "step-1v-32k", - "name": "step-1v-32k", - "maxContext": 32000, - "quoteMaxToken": 32000, - "maxResponse": 8000, - "maxTemperature": 2, - "vision": true, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/step-1v-8k.json b/packages/service/core/ai/config/llm/step-1v-8k.json deleted file mode 100644 index 967a0696041e..000000000000 --- a/packages/service/core/ai/config/llm/step-1v-8k.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "StepFun", - "model": "step-1v-8k", - "name": "step-1v-8k", - "maxContext": 8000, - "maxResponse": 8000, - "quoteMaxToken": 8000, - "maxTemperature": 2, - "vision": true, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/step-2-16k-exp.json b/packages/service/core/ai/config/llm/step-2-16k-exp.json deleted file mode 100644 index 985a2a2f932b..000000000000 --- a/packages/service/core/ai/config/llm/step-2-16k-exp.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "StepFun", - "model": "step-2-16k-exp", - "name": "step-2-16k-exp", - "maxContext": 16000, - "maxResponse": 4000, - "quoteMaxToken": 4000, - "maxTemperature": 2, - "vision": false, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/step-2-16k.json b/packages/service/core/ai/config/llm/step-2-16k.json deleted file mode 100644 index 22e413347061..000000000000 --- a/packages/service/core/ai/config/llm/step-2-16k.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "StepFun", - "model": "step-2-16k", - "name": "step-2-16k", - "maxContext": 16000, - "maxResponse": 4000, - "quoteMaxToken": 4000, - "maxTemperature": 2, - "vision": false, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/step-2-mini.json b/packages/service/core/ai/config/llm/step-2-mini.json deleted file mode 100644 index 3baaaa0194df..000000000000 --- a/packages/service/core/ai/config/llm/step-2-mini.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "provider": "StepFun", - 
"model": "step-2-mini", - "name": "step-2-mini", - "maxContext": 8000, - "maxResponse": 4000, - "quoteMaxToken": 6000, - "maxTemperature": 2, - "vision": false, - "datasetProcess": true, - "usedInClassify": true, - "usedInExtractFields": true, - "usedInToolCall": true, - "usedInQueryExtension": true, - "toolChoice": false, - "functionCall": false, - "customCQPrompt": "", - "customExtractPrompt": "", - "defaultSystemChatPrompt": "" -} diff --git a/packages/service/core/ai/config/llm/yi-lightning.json b/packages/service/core/ai/config/llm/yi-lightning.json deleted file mode 100644 index 3f93984b6a02..000000000000 --- a/packages/service/core/ai/config/llm/yi-lightning.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Yi", - "model": "yi-lightning", - "name": "yi-lightning", - - "maxContext": 16000, - "maxResponse": 4000, - "quoteMaxToken": 12000, - "maxTemperature": 1, - - "vision": false, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/llm/yi-vision-v2.json b/packages/service/core/ai/config/llm/yi-vision-v2.json deleted file mode 100644 index 493b6385a2f7..000000000000 --- a/packages/service/core/ai/config/llm/yi-vision-v2.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "provider": "Yi", - "model": "yi-vision-v2", - "name": "yi-vision-v2", - - "maxContext": 16000, - "maxResponse": 4000, - "quoteMaxToken": 12000, - "maxTemperature": 1, - - "vision": true, - "toolChoice": false, - "functionCall": false, - "defaultSystemChatPrompt": "", - - "datasetProcess": true, - "usedInClassify": true, - "customCQPrompt": "", - "usedInExtractFields": true, - "usedInQueryExtension": true, - "customExtractPrompt": "", - "usedInToolCall": true, - - "defaultConfig": {}, - "fieldMap": {} -} diff --git a/packages/service/core/ai/config/provider/AliCloud.json b/packages/service/core/ai/config/provider/AliCloud.json new file mode 100644 index 000000000000..703ed71d03d0 --- /dev/null +++ b/packages/service/core/ai/config/provider/AliCloud.json @@ -0,0 +1,4 @@ +{ + "provider": "AliCloud", + "list": [] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/BAAI.json b/packages/service/core/ai/config/provider/BAAI.json new file mode 100644 index 000000000000..86237c65461d --- /dev/null +++ b/packages/service/core/ai/config/provider/BAAI.json @@ -0,0 +1,17 @@ +{ + "provider": "BAAI", + "list": [ + { + "model": "bge-m3", + "name": "bge-m3", + "defaultToken": 512, + "maxToken": 8000, + "type": "embedding" + }, + { + "model": "bge-reranker-v2-m3", + "name": "bge-reranker-v2-m3", + "type": "rerank" + } + ] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/Baichuan.json b/packages/service/core/ai/config/provider/Baichuan.json new file mode 100644 index 000000000000..e5599e5c610a --- /dev/null +++ b/packages/service/core/ai/config/provider/Baichuan.json @@ -0,0 +1,4 @@ +{ + "provider": "Baichuan", + "list": [] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/ChatGLM.json b/packages/service/core/ai/config/provider/ChatGLM.json new file mode 100644 index 000000000000..e687be24333f --- /dev/null +++ b/packages/service/core/ai/config/provider/ChatGLM.json @@ -0,0 +1,147 @@ +{ + "provider": "ChatGLM", + "list": [ + { + 
"model": "glm-4-air", + "name": "glm-4-air", + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 0.99, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "glm-4-flash", + "name": "glm-4-flash", + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 0.99, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "glm-4-long", + "name": "glm-4-long", + "maxContext": 1000000, + "maxResponse": 4000, + "quoteMaxToken": 900000, + "maxTemperature": 0.99, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "glm-4-plus", + "name": "GLM-4-plus", + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 0.99, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "glm-4v-flash", + "name": "glm-4v-flash", + "maxContext": 8000, + "maxResponse": 1000, + "quoteMaxToken": 6000, + "maxTemperature": 0.99, + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "glm-4v-plus", + "name": "GLM-4v-plus", + "maxContext": 8000, + "maxResponse": 1000, + "quoteMaxToken": 6000, + "maxTemperature": 0.99, + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "embedding-3", + "name": "embedding-3", + "defaultToken": 512, + "maxToken": 8000, + "defaultConfig": { + "dimensions": 1024 + }, + "type": "embedding" + } + ] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/Claude.json b/packages/service/core/ai/config/provider/Claude.json new file mode 100644 index 000000000000..0b43edc2190b --- /dev/null +++ b/packages/service/core/ai/config/provider/Claude.json @@ -0,0 +1,93 @@ +{ + "provider": "Claude", + "list": [ + 
{ + "model": "claude-3-5-haiku-20241022", + "name": "claude-3-5-haiku-20241022", + "maxContext": 200000, + "maxResponse": 8000, + "quoteMaxToken": 100000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "claude-3-5-sonnet-20240620", + "name": "Claude-3-5-sonnet-20240620", + "maxContext": 200000, + "maxResponse": 8000, + "quoteMaxToken": 100000, + "maxTemperature": 1, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "claude-3-5-sonnet-20241022", + "name": "Claude-3-5-sonnet-20241022", + "maxContext": 200000, + "maxResponse": 8000, + "quoteMaxToken": 100000, + "maxTemperature": 1, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "claude-3-opus-20240229", + "name": "claude-3-opus-20240229", + "maxContext": 200000, + "maxResponse": 4096, + "quoteMaxToken": 100000, + "maxTemperature": 1, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + } + ] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/DeepSeek.json b/packages/service/core/ai/config/provider/DeepSeek.json new file mode 100644 index 000000000000..1fb0b525fe91 --- /dev/null +++ b/packages/service/core/ai/config/provider/DeepSeek.json @@ -0,0 +1,49 @@ +{ + "provider": "DeepSeek", + "list": [ + { + "model": "deepseek-chat", + "name": "Deepseek-chat", + "maxContext": 64000, + "maxResponse": 4096, + "quoteMaxToken": 60000, + "maxTemperature": 1.5, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "type": "llm" + }, + { + "model": "deepseek-reasoner", + "name": "Deepseek-reasoner", + "maxContext": 64000, + "maxResponse": 4096, + "quoteMaxToken": 60000, + "maxTemperature": 1.5, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": { + "temperature": null + }, + "fieldMap": {}, + "type": "llm" + } + ] +} \ No newline at end of file diff --git 
a/packages/service/core/ai/config/provider/Doubao.json b/packages/service/core/ai/config/provider/Doubao.json new file mode 100644 index 000000000000..dfb2e22d82ee --- /dev/null +++ b/packages/service/core/ai/config/provider/Doubao.json @@ -0,0 +1,195 @@ +{ + "provider": "Doubao", + "list": [ + { + "model": "Doubao-lite-128k", + "name": "Doubao-lite-128k", + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "Doubao-lite-32k", + "name": "Doubao-lite-32k", + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 32000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "Doubao-lite-4k", + "name": "Doubao-lite-4k", + "maxContext": 4000, + "maxResponse": 4000, + "quoteMaxToken": 4000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "Doubao-pro-128k", + "name": "Doubao-pro-128k", + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "Doubao-pro-32k", + "name": "Doubao-pro-32k", + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 32000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "Doubao-pro-4k", + "name": "Doubao-pro-4k", + "maxContext": 4000, + "maxResponse": 4000, + "quoteMaxToken": 4000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "Doubao-vision-lite-32k", + "name": "Doubao-vision-lite-32k", + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 32000, + "maxTemperature": 1, + "vision": 
true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "Doubao-vision-pro-32k", + "name": "Doubao-vision-pro-32k", + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 32000, + "maxTemperature": 1, + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "Doubao-embedding-large", + "name": "Doubao-embedding-large", + "defaultToken": 512, + "maxToken": 4096, + "type": "embedding" + }, + { + "model": "Doubao-embedding", + "name": "Doubao-embedding", + "defaultToken": 512, + "maxToken": 4096, + "type": "embedding" + } + ] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/Ernie.json b/packages/service/core/ai/config/provider/Ernie.json new file mode 100644 index 000000000000..0eaf786998b8 --- /dev/null +++ b/packages/service/core/ai/config/provider/Ernie.json @@ -0,0 +1,107 @@ +{ + "provider": "Ernie", + "list": [ + { + "model": "ERNIE-4.0-8K", + "name": "ERNIE-4.0-8K", + "maxContext": 8000, + "maxResponse": 2048, + "quoteMaxToken": 5000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "ERNIE-4.0-Turbo-8K", + "name": "ERNIE-4.0-Turbo-8K", + "maxContext": 8000, + "maxResponse": 2048, + "quoteMaxToken": 5000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "ERNIE-Lite-8K", + "name": "ERNIE-lite-8k", + "maxContext": 8000, + "maxResponse": 2048, + "quoteMaxToken": 6000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "ERNIE-Speed-128K", + "name": "ERNIE-Speed-128K", + "maxContext": 128000, + "maxResponse": 4096, + "quoteMaxToken": 120000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": 
"Embedding-V1", + "name": "Embedding-V1", + "defaultToken": 512, + "maxToken": 1000, + "type": "embedding" + }, + { + "model": "tao-8k", + "name": "tao-8k", + "defaultToken": 512, + "maxToken": 8000, + "type": "embedding" + } + ] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/FishAudio.json b/packages/service/core/ai/config/provider/FishAudio.json new file mode 100644 index 000000000000..083d565d5134 --- /dev/null +++ b/packages/service/core/ai/config/provider/FishAudio.json @@ -0,0 +1,4 @@ +{ + "provider": "FishAudio", + "list": [] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/Gemini.json b/packages/service/core/ai/config/provider/Gemini.json new file mode 100644 index 000000000000..859a49a9bac8 --- /dev/null +++ b/packages/service/core/ai/config/provider/Gemini.json @@ -0,0 +1,122 @@ +{ + "provider": "Gemini", + "list": [ + { + "model": "gemini-1.5-flash", + "name": "Gemini-1.5-flash", + "maxContext": 1000000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "gemini-1.5-pro", + "name": "Gemini-1.5-pro", + "maxContext": 2000000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "gemini-2.0-flash-exp", + "name": "Gemini-2.0-flash-exp", + "maxContext": 1000000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "gemini-2.0-flash-thinking-exp-01-21", + "name": "Gemini-2.0-flash-thinking-exp", + "maxContext": 1000000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "gemini-exp-1206", + "name": "gemini-exp-1206", + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 120000, + "maxTemperature": 1, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": 
"text-embedding-004", + "name": "text-embedding-004", + "defaultToken": 512, + "maxToken": 2000, + "type": "embedding" + } + ] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/Grok.json b/packages/service/core/ai/config/provider/Grok.json new file mode 100644 index 000000000000..3c6336109b2d --- /dev/null +++ b/packages/service/core/ai/config/provider/Grok.json @@ -0,0 +1,4 @@ +{ + "provider": "Grok", + "list": [] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/Groq.json b/packages/service/core/ai/config/provider/Groq.json new file mode 100644 index 000000000000..22fc70b49c44 --- /dev/null +++ b/packages/service/core/ai/config/provider/Groq.json @@ -0,0 +1,47 @@ +{ + "provider": "Groq", + "list": [ + { + "model": "llama-3.1-8b-instant", + "name": "Groq-llama-3.1-8b-instant", + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "type": "llm" + }, + { + "model": "llama-3.3-70b-versatile", + "name": "Groq-llama-3.3-70b-versatile", + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "type": "llm" + } + ] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/Hunyuan.json b/packages/service/core/ai/config/provider/Hunyuan.json new file mode 100644 index 000000000000..cf6a4317abad --- /dev/null +++ b/packages/service/core/ai/config/provider/Hunyuan.json @@ -0,0 +1,166 @@ +{ + "provider": "Hunyuan", + "list": [ + { + "model": "hunyuan-large", + "name": "hunyuan-large", + "maxContext": 28000, + "maxResponse": 4000, + "quoteMaxToken": 20000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "hunyuan-lite", + "name": "hunyuan-lite", + "maxContext": 250000, + "maxResponse": 6000, + "quoteMaxToken": 100000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "hunyuan-pro", + "name": "hunyuan-pro", + "maxContext": 28000, + "maxResponse": 4000, + "quoteMaxToken": 28000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + 
"usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "hunyuan-standard", + "name": "hunyuan-standard", + "maxContext": 32000, + "maxResponse": 2000, + "quoteMaxToken": 20000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "hunyuan-turbo-vision", + "name": "hunyuan-turbo-vision", + "maxContext": 6000, + "maxResponse": 2000, + "quoteMaxToken": 6000, + "maxTemperature": 1, + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "hunyuan-turbo", + "name": "hunyuan-turbo", + "maxContext": 28000, + "maxResponse": 4000, + "quoteMaxToken": 20000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "hunyuan-vision", + "name": "hunyuan-vision", + "maxContext": 6000, + "maxResponse": 2000, + "quoteMaxToken": 4000, + "maxTemperature": 1, + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "hunyuan-embedding", + "name": "hunyuan-embedding", + "defaultToken": 512, + "maxToken": 1024, + "type": "embedding" + } + ] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/Intern.json b/packages/service/core/ai/config/provider/Intern.json new file mode 100644 index 000000000000..45adf4a52f6c --- /dev/null +++ b/packages/service/core/ai/config/provider/Intern.json @@ -0,0 +1,49 @@ +{ + "provider": "Intern", + "list": [ + { + "model": "internlm2-pro-chat", + "name": "internlm2-pro-chat", + "maxContext": 32000, + "maxResponse": 8000, + "quoteMaxToken": 32000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "internlm3-8b-instruct", + "name": "internlm3-8b-instruct", + "maxContext": 32000, + "maxResponse": 8000, + "quoteMaxToken": 32000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + 
"usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + } + ] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/Meta.json b/packages/service/core/ai/config/provider/Meta.json new file mode 100644 index 000000000000..a99db0bc8b42 --- /dev/null +++ b/packages/service/core/ai/config/provider/Meta.json @@ -0,0 +1,4 @@ +{ + "provider": "Meta", + "list": [] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/MiniMax.json b/packages/service/core/ai/config/provider/MiniMax.json new file mode 100644 index 000000000000..f05f1504d62b --- /dev/null +++ b/packages/service/core/ai/config/provider/MiniMax.json @@ -0,0 +1,240 @@ +{ + "provider": "MiniMax", + "list": [ + { + "model": "MiniMax-Text-01", + "name": "MiniMax-Text-01", + "maxContext": 1000000, + "maxResponse": 1000000, + "quoteMaxToken": 100000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "abab6.5s-chat", + "name": "MiniMax-abab6.5s", + "maxContext": 245000, + "maxResponse": 10000, + "quoteMaxToken": 240000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "speech-01-turbo", + "name": "Minimax-speech-01-turbo", + "voices": [ + { + "label": "minimax-male-qn-qingse", + "value": "male-qn-qingse" + }, + { + "label": "minimax-male-qn-jingying", + "value": "male-qn-jingying" + }, + { + "label": "minimax-male-qn-badao", + "value": "male-qn-badao" + }, + { + "label": "minimax-male-qn-daxuesheng", + "value": "male-qn-daxuesheng" + }, + { + "label": "minimax-female-shaonv", + "value": "female-shaonv" + }, + { + "label": "minimax-female-yujie", + "value": "female-yujie" + }, + { + "label": "minimax-female-chengshu", + "value": "female-chengshu" + }, + { + "label": "minimax-female-tianmei", + "value": "female-tianmei" + }, + { + "label": "minimax-presenter_male", + "value": "presenter_male" + }, + { + "label": "minimax-presenter_female", + "value": "presenter_female" + }, + { + "label": "minimax-audiobook_male_1", + "value": "audiobook_male_1" + }, + { + "label": "minimax-audiobook_male_2", + "value": "audiobook_male_2" + }, + { + "label": "minimax-audiobook_female_1", + "value": "audiobook_female_1" + }, + { + "label": "minimax-audiobook_female_2", + "value": "audiobook_female_2" + }, + { + "label": "minimax-male-qn-qingse-jingpin", + "value": "male-qn-qingse-jingpin" + }, + { + "label": "minimax-male-qn-jingying-jingpin", + "value": "male-qn-jingying-jingpin" + }, + { + "label": "minimax-male-qn-badao-jingpin", + "value": "male-qn-badao-jingpin" + }, + { + "label": "minimax-male-qn-daxuesheng-jingpin", + "value": "male-qn-daxuesheng-jingpin" + }, + { + "label": "minimax-female-shaonv-jingpin", + "value": "female-shaonv-jingpin" + }, + { + "label": "minimax-female-yujie-jingpin", + "value": 
"female-yujie-jingpin" + }, + { + "label": "minimax-female-chengshu-jingpin", + "value": "female-chengshu-jingpin" + }, + { + "label": "minimax-female-tianmei-jingpin", + "value": "female-tianmei-jingpin" + }, + { + "label": "minimax-clever_boy", + "value": "clever_boy" + }, + { + "label": "minimax-cute_boy", + "value": "cute_boy" + }, + { + "label": "minimax-lovely_girl", + "value": "lovely_girl" + }, + { + "label": "minimax-cartoon_pig", + "value": "cartoon_pig" + }, + { + "label": "minimax-bingjiao_didi", + "value": "bingjiao_didi" + }, + { + "label": "minimax-junlang_nanyou", + "value": "junlang_nanyou" + }, + { + "label": "minimax-chunzhen_xuedi", + "value": "chunzhen_xuedi" + }, + { + "label": "minimax-lengdan_xiongzhang", + "value": "lengdan_xiongzhang" + }, + { + "label": "minimax-badao_shaoye", + "value": "badao_shaoye" + }, + { + "label": "minimax-tianxin_xiaoling", + "value": "tianxin_xiaoling" + }, + { + "label": "minimax-qiaopi_mengmei", + "value": "qiaopi_mengmei" + }, + { + "label": "minimax-wumei_yujie", + "value": "wumei_yujie" + }, + { + "label": "minimax-diadia_xuemei", + "value": "diadia_xuemei" + }, + { + "label": "minimax-danya_xuejie", + "value": "danya_xuejie" + }, + { + "label": "minimax-Santa_Claus", + "value": "Santa_Claus" + }, + { + "label": "minimax-Grinch", + "value": "Grinch" + }, + { + "label": "minimax-Rudolph", + "value": "Rudolph" + }, + { + "label": "minimax-Arnold", + "value": "Arnold" + }, + { + "label": "minimax-Charming_Santa", + "value": "Charming_Santa" + }, + { + "label": "minimax-Charming_Lady", + "value": "Charming_Lady" + }, + { + "label": "minimax-Sweet_Girl", + "value": "Sweet_Girl" + }, + { + "label": "minimax-Cute_Elf", + "value": "Cute_Elf" + }, + { + "label": "minimax-Attractive_Girl", + "value": "Attractive_Girl" + }, + { + "label": "minimax-Serene_Woman", + "value": "Serene_Woman" + } + ], + "type": "tts" + } + ] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/MistralAI.json b/packages/service/core/ai/config/provider/MistralAI.json new file mode 100644 index 000000000000..3d271db2aba4 --- /dev/null +++ b/packages/service/core/ai/config/provider/MistralAI.json @@ -0,0 +1,93 @@ +{ + "provider": "MistralAI", + "list": [ + { + "model": "ministral-3b-latest", + "name": "Ministral-3b-latest", + "maxContext": 130000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "ministral-8b-latest", + "name": "Ministral-8b-latest", + "maxContext": 130000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "mistral-large-latest", + "name": "Mistral-large-latest", + "maxContext": 130000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + "vision": false, + "toolChoice": true, + "functionCall": false, + 
"defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "mistral-small-latest", + "name": "Mistral-small-latest", + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 32000, + "maxTemperature": 1.2, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + } + ] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/Moka.json b/packages/service/core/ai/config/provider/Moka.json new file mode 100644 index 000000000000..90b68c9fffcc --- /dev/null +++ b/packages/service/core/ai/config/provider/Moka.json @@ -0,0 +1,4 @@ +{ + "provider": "Moka", + "list": [] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/Moonshot.json b/packages/service/core/ai/config/provider/Moonshot.json new file mode 100644 index 000000000000..9b733677c4d8 --- /dev/null +++ b/packages/service/core/ai/config/provider/Moonshot.json @@ -0,0 +1,71 @@ +{ + "provider": "Moonshot", + "list": [ + { + "model": "moonshot-v1-8k", + "name": "moonshot-v1-8k", + "maxContext": 8000, + "maxResponse": 4000, + "quoteMaxToken": 6000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "moonshot-v1-32k", + "name": "moonshot-v1-32k", + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 32000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "moonshot-v1-128k", + "name": "moonshot-v1-128k", + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 60000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + } + ] +} diff --git a/packages/service/core/ai/config/provider/Ollama.json b/packages/service/core/ai/config/provider/Ollama.json new file mode 100644 index 000000000000..6343aa95071e --- /dev/null +++ b/packages/service/core/ai/config/provider/Ollama.json @@ -0,0 +1,4 @@ +{ + "provider": "Ollama", + "list": [] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/OpenAI.json b/packages/service/core/ai/config/provider/OpenAI.json new file mode 100644 index 000000000000..fd573c6206a8 --- /dev/null 
+++ b/packages/service/core/ai/config/provider/OpenAI.json @@ -0,0 +1,222 @@ +{ + "provider": "OpenAI", + "list": [ + { + "model": "gpt-4o-mini", + "name": "GPT-4o-mini", + "maxContext": 128000, + "maxResponse": 16000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "gpt-4o", + "name": "GPT-4o", + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + "vision": true, + "toolChoice": true, + "functionCall": true, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "o1-mini", + "name": "o1-mini", + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 1.2, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": { + "temperature": 1, + "max_tokens": null + }, + "type": "llm" + }, + { + "model": "o1-preview", + "name": "o1-preview", + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 120000, + "maxTemperature": 1.2, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": { + "temperature": 1, + "max_tokens": null, + "stream": false + }, + "type": "llm" + }, + { + "model": "o1", + "name": "o1", + "maxContext": 195000, + "maxResponse": 8000, + "quoteMaxToken": 120000, + "maxTemperature": 1.2, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": { + "temperature": 1, + "max_tokens": null, + "stream": false + }, + "type": "llm" + }, + { + "model": "gpt-3.5-turbo", + "name": "gpt-3.5-turbo", + "maxContext": 16000, + "maxResponse": 4000, + "quoteMaxToken": 13000, + "maxTemperature": 1.2, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "type": "llm" + }, + { + "model": "gpt-4-turbo", + "name": "gpt-4-turbo", + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 60000, + "maxTemperature": 1.2, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + 
"usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "type": "llm" + }, + { + "model": "text-embedding-3-large", + "name": "text-embedding-3-large", + "defaultToken": 512, + "maxToken": 8000, + "defaultConfig": { + "dimensions": 1024 + }, + "type": "embedding" + }, + { + "model": "text-embedding-3-small", + "name": "text-embedding-3-small", + "defaultToken": 512, + "maxToken": 8000, + "type": "embedding" + }, + { + "model": "text-embedding-ada-002", + "name": "text-embedding-ada-002", + "defaultToken": 512, + "maxToken": 8000, + "type": "embedding" + }, + { + "model": "tts-1", + "name": "TTS1", + "voices": [ + { + "label": "Alloy", + "value": "alloy" + }, + { + "label": "Echo", + "value": "echo" + }, + { + "label": "Fable", + "value": "fable" + }, + { + "label": "Onyx", + "value": "onyx" + }, + { + "label": "Nova", + "value": "nova" + }, + { + "label": "Shimmer", + "value": "shimmer" + } + ], + "type": "tts" + }, + { + "model": "whisper-1", + "name": "whisper-1", + "type": "stt" + } + ] +} diff --git a/packages/service/core/ai/config/provider/Other.json b/packages/service/core/ai/config/provider/Other.json new file mode 100644 index 000000000000..4405e6086c32 --- /dev/null +++ b/packages/service/core/ai/config/provider/Other.json @@ -0,0 +1,4 @@ +{ + "provider": "Other", + "list": [] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/provider/Qwen.json b/packages/service/core/ai/config/provider/Qwen.json new file mode 100644 index 000000000000..cde27ac13037 --- /dev/null +++ b/packages/service/core/ai/config/provider/Qwen.json @@ -0,0 +1,223 @@ +{ + "provider": "Qwen", + "list": [ + { + "model": "qwen-turbo", + "name": "Qwen-turbo", + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 100000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "qwen-plus", + "name": "Qwen-plus", + "maxContext": 64000, + "maxResponse": 8000, + "quoteMaxToken": 60000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "qwen-vl-plus", + "name": "qwen-vl-plus", + "maxContext": 32000, + "maxResponse": 2000, + "quoteMaxToken": 20000, + "maxTemperature": 1.2, + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "type": "llm" + }, + { + "model": "qwen-max", + "name": "Qwen-max", + "maxContext": 8000, + "maxResponse": 4000, + "quoteMaxToken": 6000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + 
"usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "qwen-vl-max", + "name": "qwen-vl-max", + "maxContext": 32000, + "maxResponse": 2000, + "quoteMaxToken": 20000, + "maxTemperature": 1.2, + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "qwen-coder-turbo", + "name": "qwen-coder-turbo", + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 50000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "qwen2.5-7b-instruct", + "name": "qwen2.5-7b-instruct", + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 50000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "qwen2.5-14b-instruct", + "name": "qwen2.5-14b-instruct", + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 50000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "qwen2.5-32b-instruct", + "name": "qwen2.5-32b-instruct", + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 50000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "qwen2.5-72b-instruct", + "name": "Qwen2.5-72B-instruct", + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 50000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + } + ] +} diff --git a/packages/service/core/ai/config/provider/Siliconflow.json b/packages/service/core/ai/config/provider/Siliconflow.json new file mode 100644 index 000000000000..f1e8a042a4ab --- /dev/null +++ b/packages/service/core/ai/config/provider/Siliconflow.json @@ -0,0 +1,204 @@ +{ + "provider": "Siliconflow", + "list": [ + { + "model": 
"Qwen/Qwen2.5-72B-Instruct", + "name": "Qwen/Qwen2.5-72B-Instruct", + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 50000, + "maxTemperature": 1, + "vision": false, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "Qwen/Qwen2-VL-72B-Instruct", + "name": "Qwen/Qwen2-VL-72B-Instruct", + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 32000, + "maxTemperature": 1, + "censor": false, + "vision": true, + "datasetProcess": false, + "usedInClassify": false, + "usedInExtractFields": false, + "usedInToolCall": false, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "defaultConfig": {}, + "type": "llm" + }, + { + "model": "deepseek-ai/DeepSeek-V2.5", + "name": "deepseek-ai/DeepSeek-V2.5", + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 32000, + "maxTemperature": 1, + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "BAAI/bge-m3", + "name": "BAAI/bge-m3", + "defaultToken": 512, + "maxToken": 8000, + "type": "embedding" + }, + { + "model": "FunAudioLLM/CosyVoice2-0.5B", + "name": "FunAudioLLM/CosyVoice2-0.5B", + "voices": [ + { + "label": "alex", + "value": "FunAudioLLM/CosyVoice2-0.5B:alex" + }, + { + "label": "anna", + "value": "FunAudioLLM/CosyVoice2-0.5B:anna" + }, + { + "label": "bella", + "value": "FunAudioLLM/CosyVoice2-0.5B:bella" + }, + { + "label": "benjamin", + "value": "FunAudioLLM/CosyVoice2-0.5B:benjamin" + }, + { + "label": "charles", + "value": "FunAudioLLM/CosyVoice2-0.5B:charles" + }, + { + "label": "claire", + "value": "FunAudioLLM/CosyVoice2-0.5B:claire" + }, + { + "label": "david", + "value": "FunAudioLLM/CosyVoice2-0.5B:david" + }, + { + "label": "diana", + "value": "FunAudioLLM/CosyVoice2-0.5B:diana" + } + ], + "type": "tts" + }, + { + "model": "RVC-Boss/GPT-SoVITS", + "name": "RVC-Boss/GPT-SoVITS", + "voices": [ + { + "label": "alex", + "value": "RVC-Boss/GPT-SoVITS:alex" + }, + { + "label": "anna", + "value": "RVC-Boss/GPT-SoVITS:anna" + }, + { + "label": "bella", + "value": "RVC-Boss/GPT-SoVITS:bella" + }, + { + "label": "benjamin", + "value": "RVC-Boss/GPT-SoVITS:benjamin" + }, + { + "label": "charles", + "value": "RVC-Boss/GPT-SoVITS:charles" + }, + { + "label": "claire", + "value": "RVC-Boss/GPT-SoVITS:claire" + }, + { + "label": "david", + "value": "RVC-Boss/GPT-SoVITS:david" + }, + { + "label": "diana", + "value": "RVC-Boss/GPT-SoVITS:diana" + } + ], + "type": "tts" + }, + { + "model": "fishaudio/fish-speech-1.5", + "name": "fish-speech-1.5", + "voices": [ + { + "label": "alex", + "value": "fishaudio/fish-speech-1.5:alex" + }, + { + "label": "anna", + "value": "fishaudio/fish-speech-1.5:anna" + }, + { + "label": "bella", + "value": "fishaudio/fish-speech-1.5:bella" + }, + { + "label": "benjamin", + "value": "fishaudio/fish-speech-1.5:benjamin" + }, + { + "label": "charles", + "value": "fishaudio/fish-speech-1.5:charles" + }, + { 
+ "label": "claire", + "value": "fishaudio/fish-speech-1.5:claire" + }, + { + "label": "david", + "value": "fishaudio/fish-speech-1.5:david" + }, + { + "label": "diana", + "value": "fishaudio/fish-speech-1.5:diana" + } + ], + "type": "tts" + }, + { + "model": "FunAudioLLM/SenseVoiceSmall", + "name": "FunAudioLLM/SenseVoiceSmall", + "type": "stt" + }, + { + "model": "BAAI/bge-reranker-v2-m3", + "name": "BAAI/bge-reranker-v2-m3", + "type": "rerank" + } + ] +} diff --git a/packages/service/core/ai/config/provider/SparkDesk.json b/packages/service/core/ai/config/provider/SparkDesk.json new file mode 100644 index 000000000000..095136850bcf --- /dev/null +++ b/packages/service/core/ai/config/provider/SparkDesk.json @@ -0,0 +1,129 @@ +{ + "provider": "SparkDesk", + "list": [ + { + "model": "lite", + "name": "SparkDesk-lite", + "maxContext": 32000, + "maxResponse": 4000, + "quoteMaxToken": 32000, + "maxTemperature": 1, + "vision": false, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "generalv3", + "name": "SparkDesk-Pro", + "maxContext": 8000, + "maxResponse": 8000, + "quoteMaxToken": 8000, + "maxTemperature": 1, + "vision": false, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "pro-128k", + "name": "SparkDesk-Pro-128k", + "maxContext": 128000, + "maxResponse": 4000, + "quoteMaxToken": 128000, + "maxTemperature": 1, + "vision": false, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "generalv3.5", + "name": "SparkDesk-max", + "maxContext": 8000, + "maxResponse": 8000, + "quoteMaxToken": 8000, + "maxTemperature": 1, + "vision": false, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "max-32k", + "name": "SparkDesk-max-32k", + "maxContext": 32000, + "maxResponse": 8000, + "quoteMaxToken": 32000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "4.0Ultra", + "name": "SparkDesk-v4.0 Ultra", + "maxContext": 8000, + "maxResponse": 8000, + "quoteMaxToken": 8000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + 
"customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + } + ] +} diff --git a/packages/service/core/ai/config/provider/StepFun.json b/packages/service/core/ai/config/provider/StepFun.json new file mode 100644 index 000000000000..87c896117c39 --- /dev/null +++ b/packages/service/core/ai/config/provider/StepFun.json @@ -0,0 +1,308 @@ +{ + "provider": "StepFun", + "list": [ + { + "model": "step-1-flash", + "name": "step-1-flash", + "maxContext": 8000, + "maxResponse": 4000, + "quoteMaxToken": 6000, + "maxTemperature": 2, + "vision": false, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "step-1-8k", + "name": "step-1-8k", + "maxContext": 8000, + "maxResponse": 8000, + "quoteMaxToken": 8000, + "maxTemperature": 2, + "vision": false, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "step-1-32k", + "name": "step-1-32k", + "maxContext": 32000, + "maxResponse": 8000, + "quoteMaxToken": 32000, + "maxTemperature": 2, + "vision": false, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "step-1-128k", + "name": "step-1-128k", + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 128000, + "maxTemperature": 2, + "vision": false, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "step-1-256k", + "name": "step-1-256k", + "maxContext": 256000, + "maxResponse": 8000, + "quoteMaxToken": 256000, + "maxTemperature": 2, + "vision": false, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "step-1o-vision-32k", + "name": "step-1o-vision-32k", + "maxContext": 32000, + "quoteMaxToken": 32000, + "maxResponse": 8000, + "maxTemperature": 2, + "vision": true, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "step-1v-8k", + "name": "step-1v-8k", + "maxContext": 8000, + "maxResponse": 8000, + "quoteMaxToken": 8000, + "maxTemperature": 2, + "vision": true, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": 
false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "step-1v-32k", + "name": "step-1v-32k", + "maxContext": 32000, + "quoteMaxToken": 32000, + "maxResponse": 8000, + "maxTemperature": 2, + "vision": true, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "step-2-mini", + "name": "step-2-mini", + "maxContext": 8000, + "maxResponse": 4000, + "quoteMaxToken": 6000, + "maxTemperature": 2, + "vision": false, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "step-2-16k", + "name": "step-2-16k", + "maxContext": 16000, + "maxResponse": 4000, + "quoteMaxToken": 4000, + "maxTemperature": 2, + "vision": false, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "step-2-16k-exp", + "name": "step-2-16k-exp", + "maxContext": 16000, + "maxResponse": 4000, + "quoteMaxToken": 4000, + "maxTemperature": 2, + "vision": false, + "datasetProcess": true, + "usedInClassify": true, + "usedInExtractFields": true, + "usedInToolCall": true, + "usedInQueryExtension": true, + "toolChoice": false, + "functionCall": false, + "customCQPrompt": "", + "customExtractPrompt": "", + "defaultSystemChatPrompt": "", + "type": "llm" + }, + { + "model": "step-tts-mini", + "name": "step-tts-mini", + "voices": [ + { + "label": "cixingnansheng", + "value": "cixingnansheng" + }, + { + "label": "zhengpaiqingnian", + "value": "zhengpaiqingnian" + }, + { + "label": "yuanqinansheng", + "value": "yuanqinansheng" + }, + { + "label": "qingniandaxuesheng", + "value": "qingniandaxuesheng" + }, + { + "label": "boyinnansheng", + "value": "boyinnansheng" + }, + { + "label": "ruyananshi", + "value": "ruyananshi" + }, + { + "label": "shenchennanyin", + "value": "shenchennanyin" + }, + { + "label": "qinqienvsheng", + "value": "qinqienvsheng" + }, + { + "label": "wenrounvsheng", + "value": "wenrounvsheng" + }, + { + "label": "jilingshaonv", + "value": "jilingshaonv" + }, + { + "label": "yuanqishaonv", + "value": "yuanqishaonv" + }, + { + "label": "ruanmengnvsheng", + "value": "ruanmengnvsheng" + }, + { + "label": "youyanvsheng", + "value": "youyanvsheng" + }, + { + "label": "lengyanyujie", + "value": "lengyanyujie" + }, + { + "label": "shuangkuaijiejie", + "value": "shuangkuaijiejie" + }, + { + "label": "wenjingxuejie", + "value": "wenjingxuejie" + }, + { + "label": "linjiajiejie", + "value": "linjiajiejie" + }, + { + "label": "linjiameimei", + "value": "linjiameimei" + }, + { + "label": "zhixingjiejie", + "value": "zhixingjiejie" + } + ], + "type": "tts" + } + ] +} diff --git a/packages/service/core/ai/config/provider/Yi.json b/packages/service/core/ai/config/provider/Yi.json new file mode 100644 index 000000000000..b43a9ba90887 --- /dev/null +++ 
b/packages/service/core/ai/config/provider/Yi.json @@ -0,0 +1,49 @@ +{ + "provider": "Yi", + "list": [ + { + "model": "yi-lightning", + "name": "yi-lightning", + "maxContext": 16000, + "maxResponse": 4000, + "quoteMaxToken": 12000, + "maxTemperature": 1, + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + }, + { + "model": "yi-vision-v2", + "name": "yi-vision-v2", + "maxContext": 16000, + "maxResponse": 4000, + "quoteMaxToken": 12000, + "maxTemperature": 1, + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + "defaultConfig": {}, + "fieldMap": {}, + "type": "llm" + } + ] +} \ No newline at end of file diff --git a/packages/service/core/ai/config/rerank/bge-reranker-v2-m3.json b/packages/service/core/ai/config/rerank/bge-reranker-v2-m3.json deleted file mode 100644 index 3cc1a33b5a42..000000000000 --- a/packages/service/core/ai/config/rerank/bge-reranker-v2-m3.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "provider": "BAAI", - "model": "bge-reranker-v2-m3", - "name": "bge-reranker-v2-m3", - "charsPointsPrice": 0 -} diff --git a/packages/service/core/ai/config/rerank/siliconflow-bge-reranker-v2-m3.json b/packages/service/core/ai/config/rerank/siliconflow-bge-reranker-v2-m3.json deleted file mode 100644 index 96219e21888a..000000000000 --- a/packages/service/core/ai/config/rerank/siliconflow-bge-reranker-v2-m3.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "provider": "Siliconflow", - "model": "BAAI/bge-reranker-v2-m3", - "name": "BAAI/bge-reranker-v2-m3", - "charsPointsPrice": 0 -} diff --git a/packages/service/core/ai/config/stt/FunAudioLLM-SenseVoiceSmall.json b/packages/service/core/ai/config/stt/FunAudioLLM-SenseVoiceSmall.json deleted file mode 100644 index d923b42fa5c5..000000000000 --- a/packages/service/core/ai/config/stt/FunAudioLLM-SenseVoiceSmall.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "provider": "Siliconflow", - "model": "FunAudioLLM/SenseVoiceSmall", - "name": "FunAudioLLM/SenseVoiceSmall", - "charsPointsPrice": 0 -} diff --git a/packages/service/core/ai/config/stt/whisper-1.json b/packages/service/core/ai/config/stt/whisper-1.json deleted file mode 100644 index 2d8639786266..000000000000 --- a/packages/service/core/ai/config/stt/whisper-1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "provider": "OpenAI", - "model": "whisper-1", - "name": "whisper-1", - "charsPointsPrice": 0 -} diff --git a/packages/service/core/ai/config/tts/Minimax-speech-01-turbo.json b/packages/service/core/ai/config/tts/Minimax-speech-01-turbo.json deleted file mode 100644 index 7bf8babe4e2b..000000000000 --- a/packages/service/core/ai/config/tts/Minimax-speech-01-turbo.json +++ /dev/null @@ -1,192 +0,0 @@ -{ - "provider": "MiniMax", - "model": "speech-01-turbo", - "name": "Minimax-speech-01-turbo", - "charsPointsPrice": 0, - "voices": [ - { - "label": "minimax-male-qn-qingse", - "value": "male-qn-qingse" - }, - { - "label": "minimax-male-qn-jingying", - "value": "male-qn-jingying" - }, - { - "label": "minimax-male-qn-badao", - "value": "male-qn-badao" - }, - { - "label": 
"minimax-male-qn-daxuesheng", - "value": "male-qn-daxuesheng" - }, - { - "label": "minimax-female-shaonv", - "value": "female-shaonv" - }, - { - "label": "minimax-female-yujie", - "value": "female-yujie" - }, - { - "label": "minimax-female-chengshu", - "value": "female-chengshu" - }, - { - "label": "minimax-female-tianmei", - "value": "female-tianmei" - }, - { - "label": "minimax-presenter_male", - "value": "presenter_male" - }, - { - "label": "minimax-presenter_female", - "value": "presenter_female" - }, - { - "label": "minimax-audiobook_male_1", - "value": "audiobook_male_1" - }, - { - "label": "minimax-audiobook_male_2", - "value": "audiobook_male_2" - }, - { - "label": "minimax-audiobook_female_1", - "value": "audiobook_female_1" - }, - { - "label": "minimax-audiobook_female_2", - "value": "audiobook_female_2" - }, - { - "label": "minimax-male-qn-qingse-jingpin", - "value": "male-qn-qingse-jingpin" - }, - { - "label": "minimax-male-qn-jingying-jingpin", - "value": "male-qn-jingying-jingpin" - }, - { - "label": "minimax-male-qn-badao-jingpin", - "value": "male-qn-badao-jingpin" - }, - { - "label": "minimax-male-qn-daxuesheng-jingpin", - "value": "male-qn-daxuesheng-jingpin" - }, - { - "label": "minimax-female-shaonv-jingpin", - "value": "female-shaonv-jingpin" - }, - { - "label": "minimax-female-yujie-jingpin", - "value": "female-yujie-jingpin" - }, - { - "label": "minimax-female-chengshu-jingpin", - "value": "female-chengshu-jingpin" - }, - { - "label": "minimax-female-tianmei-jingpin", - "value": "female-tianmei-jingpin" - }, - { - "label": "minimax-clever_boy", - "value": "clever_boy" - }, - { - "label": "minimax-cute_boy", - "value": "cute_boy" - }, - { - "label": "minimax-lovely_girl", - "value": "lovely_girl" - }, - { - "label": "minimax-cartoon_pig", - "value": "cartoon_pig" - }, - { - "label": "minimax-bingjiao_didi", - "value": "bingjiao_didi" - }, - { - "label": "minimax-junlang_nanyou", - "value": "junlang_nanyou" - }, - { - "label": "minimax-chunzhen_xuedi", - "value": "chunzhen_xuedi" - }, - { - "label": "minimax-lengdan_xiongzhang", - "value": "lengdan_xiongzhang" - }, - { - "label": "minimax-badao_shaoye", - "value": "badao_shaoye" - }, - { - "label": "minimax-tianxin_xiaoling", - "value": "tianxin_xiaoling" - }, - { - "label": "minimax-qiaopi_mengmei", - "value": "qiaopi_mengmei" - }, - { - "label": "minimax-wumei_yujie", - "value": "wumei_yujie" - }, - { - "label": "minimax-diadia_xuemei", - "value": "diadia_xuemei" - }, - { - "label": "minimax-danya_xuejie", - "value": "danya_xuejie" - }, - { - "label": "minimax-Santa_Claus", - "value": "Santa_Claus" - }, - { - "label": "minimax-Grinch", - "value": "Grinch" - }, - { - "label": "minimax-Rudolph", - "value": "Rudolph" - }, - { - "label": "minimax-Arnold", - "value": "Arnold" - }, - { - "label": "minimax-Charming_Santa", - "value": "Charming_Santa" - }, - { - "label": "minimax-Charming_Lady", - "value": "Charming_Lady" - }, - { - "label": "minimax-Sweet_Girl", - "value": "Sweet_Girl" - }, - { - "label": "minimax-Cute_Elf", - "value": "Cute_Elf" - }, - { - "label": "minimax-Attractive_Girl", - "value": "Attractive_Girl" - }, - { - "label": "minimax-Serene_Woman", - "value": "Serene_Woman" - } - ] -} diff --git a/packages/service/core/ai/config/tts/siliconflow-CosyVoice2-0.5B.json b/packages/service/core/ai/config/tts/siliconflow-CosyVoice2-0.5B.json deleted file mode 100644 index e03e39de006a..000000000000 --- a/packages/service/core/ai/config/tts/siliconflow-CosyVoice2-0.5B.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - 
"provider": "Siliconflow", - "model": "FunAudioLLM/CosyVoice2-0.5B", - "name": "FunAudioLLM/CosyVoice2-0.5B", - "charsPointsPrice": 0, - "voices": [ - { - "label": "alex", - "value": "FunAudioLLM/CosyVoice2-0.5B:alex" - }, - { - "label": "anna", - "value": "FunAudioLLM/CosyVoice2-0.5B:anna" - }, - { - "label": "bella", - "value": "FunAudioLLM/CosyVoice2-0.5B:bella" - }, - { - "label": "benjamin", - "value": "FunAudioLLM/CosyVoice2-0.5B:benjamin" - }, - { - "label": "charles", - "value": "FunAudioLLM/CosyVoice2-0.5B:charles" - }, - { - "label": "claire", - "value": "FunAudioLLM/CosyVoice2-0.5B:claire" - }, - { - "label": "david", - "value": "FunAudioLLM/CosyVoice2-0.5B:david" - }, - { - "label": "diana", - "value": "FunAudioLLM/CosyVoice2-0.5B:diana" - } - ] -} diff --git a/packages/service/core/ai/config/tts/siliconflow-RVC-Boss-GPT-SoVITS.json b/packages/service/core/ai/config/tts/siliconflow-RVC-Boss-GPT-SoVITS.json deleted file mode 100644 index f1c18a5deacc..000000000000 --- a/packages/service/core/ai/config/tts/siliconflow-RVC-Boss-GPT-SoVITS.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "provider": "Siliconflow", - "model": "RVC-Boss/GPT-SoVITS", - "name": "RVC-Boss/GPT-SoVITS", - "charsPointsPrice": 0, - "voices": [ - { - "label": "alex", - "value": "RVC-Boss/GPT-SoVITS:alex" - }, - { - "label": "anna", - "value": "RVC-Boss/GPT-SoVITS:anna" - }, - { - "label": "bella", - "value": "RVC-Boss/GPT-SoVITS:bella" - }, - { - "label": "benjamin", - "value": "RVC-Boss/GPT-SoVITS:benjamin" - }, - { - "label": "charles", - "value": "RVC-Boss/GPT-SoVITS:charles" - }, - { - "label": "claire", - "value": "RVC-Boss/GPT-SoVITS:claire" - }, - { - "label": "david", - "value": "RVC-Boss/GPT-SoVITS:david" - }, - { - "label": "diana", - "value": "RVC-Boss/GPT-SoVITS:diana" - } - ] -} diff --git a/packages/service/core/ai/config/tts/siliconflow-fish-speech-1.5.json b/packages/service/core/ai/config/tts/siliconflow-fish-speech-1.5.json deleted file mode 100644 index 45fa6b4c42bb..000000000000 --- a/packages/service/core/ai/config/tts/siliconflow-fish-speech-1.5.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "provider": "Siliconflow", - "model": "fishaudio/fish-speech-1.5", - "name": "fish-speech-1.5", - "charsPointsPrice": 0, - "voices": [ - { - "label": "alex", - "value": "fishaudio/fish-speech-1.5:alex" - }, - { - "label": "anna", - "value": "fishaudio/fish-speech-1.5:anna" - }, - { - "label": "bella", - "value": "fishaudio/fish-speech-1.5:bella" - }, - { - "label": "benjamin", - "value": "fishaudio/fish-speech-1.5:benjamin" - }, - { - "label": "charles", - "value": "fishaudio/fish-speech-1.5:charles" - }, - { - "label": "claire", - "value": "fishaudio/fish-speech-1.5:claire" - }, - { - "label": "david", - "value": "fishaudio/fish-speech-1.5:david" - }, - { - "label": "diana", - "value": "fishaudio/fish-speech-1.5:diana" - } - ] -} diff --git a/packages/service/core/ai/config/tts/step-tts-mini.json b/packages/service/core/ai/config/tts/step-tts-mini.json deleted file mode 100644 index 688590f2adfa..000000000000 --- a/packages/service/core/ai/config/tts/step-tts-mini.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "provider": "StepFun", - "model": "step-tts-mini", - "name": "step-tts-mini", - "charsPointsPrice": 0, - "voices": [ - { - "label": "cixingnansheng", - "value": "cixingnansheng" - }, - { - "label": "zhengpaiqingnian", - "value": "zhengpaiqingnian" - }, - { - "label": "yuanqinansheng", - "value": "yuanqinansheng" - }, - { - "label": "qingniandaxuesheng", - "value": "qingniandaxuesheng" - }, - { - "label": 
"boyinnansheng", - "value": "boyinnansheng" - }, - { - "label": "ruyananshi", - "value": "ruyananshi" - }, - { - "label": "shenchennanyin", - "value": "shenchennanyin" - }, - { - "label": "qinqienvsheng", - "value": "qinqienvsheng" - }, - { - "label": "wenrounvsheng", - "value": "wenrounvsheng" - }, - { - "label": "jilingshaonv", - "value": "jilingshaonv" - }, - { - "label": "yuanqishaonv", - "value": "yuanqishaonv" - }, - { - "label": "ruanmengnvsheng", - "value": "ruanmengnvsheng" - }, - { - "label": "youyanvsheng", - "value": "youyanvsheng" - }, - { - "label": "lengyanyujie", - "value": "lengyanyujie" - }, - { - "label": "shuangkuaijiejie", - "value": "shuangkuaijiejie" - }, - { - "label": "wenjingxuejie", - "value": "wenjingxuejie" - }, - { - "label": "linjiajiejie", - "value": "linjiajiejie" - }, - { - "label": "linjiameimei", - "value": "linjiameimei" - }, - { - "label": "zhixingjiejie", - "value": "zhixingjiejie" - } - ] -} diff --git a/packages/service/core/ai/config/tts/tts-1.json b/packages/service/core/ai/config/tts/tts-1.json deleted file mode 100644 index 80105c227767..000000000000 --- a/packages/service/core/ai/config/tts/tts-1.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "provider": "OpenAI", - "model": "tts-1", - "name": "TTS1", - "charsPointsPrice": 0, - "voices": [ - { - "label": "Alloy", - "value": "alloy" - }, - { - "label": "Echo", - "value": "echo" - }, - { - "label": "Fable", - "value": "fable" - }, - { - "label": "Onyx", - "value": "onyx" - }, - { - "label": "Nova", - "value": "nova" - }, - { - "label": "Shimmer", - "value": "shimmer" - } - ] -} diff --git a/packages/service/core/ai/config/utils.ts b/packages/service/core/ai/config/utils.ts index 4f6f9973f100..8e0397eaf5f8 100644 --- a/packages/service/core/ai/config/utils.ts +++ b/packages/service/core/ai/config/utils.ts @@ -11,8 +11,7 @@ import { ReRankModelItemType } from '@fastgpt/global/core/ai/model.d'; import { debounce } from 'lodash'; - -type FolderBaseType = `${ModelTypeEnum}`; +import { ModelProviderType } from '@fastgpt/global/core/ai/provider'; /* TODO: 分优先级读取: @@ -20,9 +19,9 @@ type FolderBaseType = `${ModelTypeEnum}`; 2. 
没有外部挂载目录,则读取本地的。然后试图拉取云端的进行覆盖。 */ export const loadSystemModels = async (init = false) => { - const getModelNameList = (base: FolderBaseType) => { + const getProviderList = () => { const currentFileUrl = new URL(import.meta.url); - const modelsPath = path.join(path.dirname(currentFileUrl.pathname), base); + const modelsPath = path.join(path.dirname(currentFileUrl.pathname), 'provider'); return fs.readdirSync(modelsPath) as string[]; }; @@ -35,18 +34,33 @@ export const loadSystemModels = async (init = false) => { if (model.type === ModelTypeEnum.llm) { global.llmModelMap.set(model.model, model); global.llmModelMap.set(model.name, model); + if (model.isDefault) { + global.systemDefaultModel.llm = model; + } } else if (model.type === ModelTypeEnum.embedding) { global.embeddingModelMap.set(model.model, model); global.embeddingModelMap.set(model.name, model); + if (model.isDefault) { + global.systemDefaultModel.embedding = model; + } } else if (model.type === ModelTypeEnum.tts) { global.ttsModelMap.set(model.model, model); global.ttsModelMap.set(model.name, model); + if (model.isDefault) { + global.systemDefaultModel.tts = model; + } } else if (model.type === ModelTypeEnum.stt) { global.sttModelMap.set(model.model, model); global.sttModelMap.set(model.name, model); + if (model.isDefault) { + global.systemDefaultModel.stt = model; + } } else if (model.type === ModelTypeEnum.rerank) { global.reRankModelMap.set(model.model, model); global.reRankModelMap.set(model.name, model); + if (model.isDefault) { + global.systemDefaultModel.rerank = model; + } } } }; @@ -60,40 +74,34 @@ export const loadSystemModels = async (init = false) => { global.ttsModelMap = new Map(); global.sttModelMap = new Map(); global.reRankModelMap = new Map(); + // @ts-ignore + global.systemDefaultModel = {}; try { const dbModels = await MongoSystemModel.find({}).lean(); - - const baseList: FolderBaseType[] = [ - ModelTypeEnum.llm, - ModelTypeEnum.embedding, - ModelTypeEnum.tts, - ModelTypeEnum.stt, - ModelTypeEnum.rerank - ]; + const providerList = getProviderList(); // System model await Promise.all( - baseList.map(async (base) => { - const modelList = getModelNameList(base); - const nameList = modelList.map((name) => `${base}/${name}`); - - await Promise.all( - nameList.map(async (name) => { - const fileContent = (await import(`./${name}`))?.default as SystemModelItemType; - - const dbModel = dbModels.find((item) => item.model === fileContent.model); - - const model: any = { - ...fileContent, - ...dbModel?.metadata, - type: dbModel?.metadata?.type || base, - isCustom: false - }; - - pushModel(model); - }) - ); + providerList.map(async (name) => { + const fileContent = (await import(`./provider/${name}`))?.default as { + provider: ModelProviderType; + list: SystemModelItemType[]; + }; + + fileContent.list.forEach((fileModel) => { + const dbModel = dbModels.find((item) => item.model === fileModel.model); + + const modelData: any = { + ...fileModel, + ...dbModel?.metadata, + provider: fileContent.provider, + type: dbModel?.metadata?.type || fileModel.type, + isCustom: false + }; + + pushModel(modelData); + }); }) ); @@ -107,6 +115,23 @@ export const loadSystemModels = async (init = false) => { }); }); + // Default model check + if (!global.systemDefaultModel.llm) { + global.systemDefaultModel.llm = Array.from(global.llmModelMap.values())[0]; + } + if (!global.systemDefaultModel.embedding) { + global.systemDefaultModel.embedding = Array.from(global.embeddingModelMap.values())[0]; + } + if (!global.systemDefaultModel.tts) { + 
global.systemDefaultModel.tts = Array.from(global.ttsModelMap.values())[0]; + } + if (!global.systemDefaultModel.stt) { + global.systemDefaultModel.stt = Array.from(global.sttModelMap.values())[0]; + } + if (!global.systemDefaultModel.rerank) { + global.systemDefaultModel.rerank = Array.from(global.reRankModelMap.values())[0]; + } + console.log('Load models success', JSON.stringify(global.systemActiveModelList, null, 2)); } catch (error) { console.error('Load models error', error); diff --git a/packages/service/core/ai/model.ts b/packages/service/core/ai/model.ts index b829e5056799..185881a23f67 100644 --- a/packages/service/core/ai/model.ts +++ b/packages/service/core/ai/model.ts @@ -1,43 +1,41 @@ import { SystemModelItemType } from './type'; -export const getFirstLLMModel = () => { - return Array.from(global.llmModelMap.values())[0]; -}; +export const getDefaultLLMModel = () => global?.systemDefaultModel.llm!; export const getLLMModel = (model?: string) => { - if (!model) return getFirstLLMModel(); - return global.llmModelMap.get(model) || getFirstLLMModel(); + if (!model) return getDefaultLLMModel(); + return global.llmModelMap.get(model) || getDefaultLLMModel(); }; export const getDatasetModel = (model?: string) => { return ( Array.from(global.llmModelMap.values()) ?.filter((item) => item.datasetProcess) - ?.find((item) => item.model === model || item.name === model) ?? getFirstLLMModel() + ?.find((item) => item.model === model || item.name === model) ?? getDefaultLLMModel() ); }; -export const getFirstEmbeddingModel = () => Array.from(global.embeddingModelMap.values())[0]; +export const getDefaultEmbeddingModel = () => global?.systemDefaultModel.embedding!; export const getEmbeddingModel = (model?: string) => { - if (!model) return getFirstEmbeddingModel(); - return global.embeddingModelMap.get(model) || getFirstEmbeddingModel(); + if (!model) return getDefaultEmbeddingModel(); + return global.embeddingModelMap.get(model) || getDefaultEmbeddingModel(); }; -export const getFirstTTSModel = () => Array.from(global.ttsModelMap.values())[0]; +export const getDefaultTTSModel = () => global?.systemDefaultModel.tts!; export function getTTSModel(model?: string) { - if (!model) return getFirstTTSModel(); - return global.ttsModelMap.get(model) || getFirstTTSModel(); + if (!model) return getDefaultTTSModel(); + return global.ttsModelMap.get(model) || getDefaultTTSModel(); } -export const getFirstSTTModel = () => Array.from(global.sttModelMap.values())[0]; +export const getDefaultSTTModel = () => global?.systemDefaultModel.stt!; export function getSTTModel(model?: string) { - if (!model) return getFirstSTTModel(); - return global.sttModelMap.get(model) || getFirstSTTModel(); + if (!model) return getDefaultSTTModel(); + return global.sttModelMap.get(model) || getDefaultSTTModel(); } -export const getFirstReRankModel = () => Array.from(global.reRankModelMap.values())[0]; +export const getDefaultRerankModel = () => global?.systemDefaultModel.rerank!; export function getReRankModel(model?: string) { - if (!model) return getFirstReRankModel(); - return global.reRankModelMap.get(model) || getFirstReRankModel(); + if (!model) return getDefaultRerankModel(); + return global.reRankModelMap.get(model) || getDefaultRerankModel(); } export const findAIModel = (model: string): SystemModelItemType | undefined => { diff --git a/packages/service/core/ai/rerank/index.ts b/packages/service/core/ai/rerank/index.ts index e4324695932f..d99cd26ad5a8 100644 --- a/packages/service/core/ai/rerank/index.ts +++ 
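The two hunks above define the default-model resolution order: at load time, a model whose stored metadata sets `isDefault` becomes its type's default; any type left unflagged falls back to the first entry of its map; and every `get*Model(model?)` lookup returns the default when the id is missing or unknown. Condensed into a single illustrative helper (the real code keeps five near-identical functions in `packages/service/core/ai/model.ts`):

```ts
// Illustrative condensation of the lookup pattern above, not part of the patch.
function resolveModel<T>(map: Map<string, T>, defaultModel: T, model?: string): T {
  if (!model) return defaultModel; // no id supplied -> system default
  return map.get(model) ?? defaultModel; // unknown id -> system default
}

// e.g. resolveModel(global.llmModelMap, global.systemDefaultModel.llm!, 'gpt-4o');
```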
b/packages/service/core/ai/rerank/index.ts @@ -1,6 +1,6 @@ import { addLog } from '../../../common/system/log'; import { POST } from '../../../common/api/serverRequest'; -import { getFirstReRankModel } from '../model'; +import { getDefaultRerankModel } from '../model'; import { getAxiosConfig } from '../config'; type PostReRankResponse = { @@ -19,7 +19,7 @@ export function reRankRecall({ query: string; documents: { id: string; text: string }[]; }): Promise { - const model = getFirstReRankModel(); + const model = getDefaultRerankModel(); if (!model || !model?.requestUrl) { return Promise.reject('no rerank model'); diff --git a/packages/service/core/ai/type.d.ts b/packages/service/core/ai/type.d.ts index d64c7417a3d3..c014ed6046e8 100644 --- a/packages/service/core/ai/type.d.ts +++ b/packages/service/core/ai/type.d.ts @@ -20,6 +20,14 @@ export type SystemModelItemType = | STTModelType | ReRankModelItemType; +export type SystemDefaultModelType = { + [ModelTypeEnum.llm]?: LLMModelItemType; + [ModelTypeEnum.embedding]?: EmbeddingModelItemType; + [ModelTypeEnum.tts]?: TTSModelType; + [ModelTypeEnum.stt]?: STTModelType; + [ModelTypeEnum.rerank]?: ReRankModelItemType; +}; + declare global { var systemModelList: SystemModelItemType[]; // var systemModelMap: Map; @@ -30,4 +38,5 @@ declare global { var reRankModelMap: Map; var systemActiveModelList: SystemModelItemType[]; + var systemDefaultModel: SystemDefaultModelType; } diff --git a/packages/service/core/dataset/search/controller.ts b/packages/service/core/dataset/search/controller.ts index 555d1a99266c..df458f6320b9 100644 --- a/packages/service/core/dataset/search/controller.ts +++ b/packages/service/core/dataset/search/controller.ts @@ -5,10 +5,9 @@ import { } from '@fastgpt/global/core/dataset/constants'; import { recallFromVectorStore } from '../../../common/vectorStore/controller'; import { getVectorsByText } from '../../ai/embedding'; -import { getEmbeddingModel, getFirstReRankModel } from '../../ai/model'; +import { getEmbeddingModel, getDefaultRerankModel } from '../../ai/model'; import { MongoDatasetData } from '../data/schema'; import { - DatasetDataSchemaType, DatasetDataTextSchemaType, SearchDataResponseItemType } from '@fastgpt/global/core/dataset/type'; @@ -67,7 +66,7 @@ export async function searchDatasetData(props: SearchDatasetDataProps) { /* init params */ searchMode = DatasetSearchModeMap[searchMode] ? searchMode : DatasetSearchModeEnum.embedding; - usingReRank = usingReRank && !!getFirstReRankModel(); + usingReRank = usingReRank && !!getDefaultRerankModel(); // Compatible with topk limit let set = new Set(); diff --git a/packages/web/i18n/en/account.json b/packages/web/i18n/en/account.json index d9526114dd95..d5250b61a083 100644 --- a/packages/web/i18n/en/account.json +++ b/packages/web/i18n/en/account.json @@ -10,6 +10,7 @@ "create_model": "Add new model", "custom_model": "custom model", "default_model": "Default model", + "default_model_config": "Default model configuration", "logout": "Sign out", "model.active": "Active", "model.alias": "Alias", @@ -27,6 +28,7 @@ "model.defaultConfig_tip": "Each request will carry this additional Body parameter.", "model.default_config": "Body extra fields", "model.default_config_tip": "When initiating a conversation request, merge this configuration. 
\nFor example:\n\"\"\"\n{\n \"temperature\": 1,\n \"max_tokens\": null\n}\n\"\"\"", + "model.default_model": "Default model", "model.default_system_chat_prompt": "Default prompt", "model.default_system_chat_prompt_tip": "When the model talks, it will carry this default prompt word.", "model.default_token": "Default tokens", diff --git a/packages/web/i18n/en/common.json b/packages/web/i18n/en/common.json index 99b8a6015a46..25922003d45c 100644 --- a/packages/web/i18n/en/common.json +++ b/packages/web/i18n/en/common.json @@ -208,7 +208,6 @@ "common.Update": "Update", "common.Update Failed": "Update Failed", "common.Update Success": "Updated Successfully", - "common.Update Successful": "Updated Successfully", "common.Username": "Username", "common.Waiting": "Waiting", "common.Warning": "Warning", diff --git a/packages/web/i18n/zh-CN/account.json b/packages/web/i18n/zh-CN/account.json index bfb0622eb1fa..b0091f453140 100644 --- a/packages/web/i18n/zh-CN/account.json +++ b/packages/web/i18n/zh-CN/account.json @@ -10,6 +10,7 @@ "create_model": "新增模型", "custom_model": "自定义模型", "default_model": "预设模型", + "default_model_config": "默认模型配置", "logout": "登出", "model.active": "启用", "model.alias": "别名", @@ -27,6 +28,7 @@ "model.defaultConfig_tip": "每次请求时候,都会携带该额外 Body 参数", "model.default_config": "Body 额外字段", "model.default_config_tip": "发起对话请求时候,合并该配置。例如:\n\"\"\"\n{\n \"temperature\": 1,\n \"max_tokens\": null\n}\n\"\"\"", + "model.default_model": "默认模型", "model.default_system_chat_prompt": "默认提示词", "model.default_system_chat_prompt_tip": "模型对话时,都会携带该默认提示词", "model.default_token": "默认分块长度", diff --git a/packages/web/i18n/zh-CN/common.json b/packages/web/i18n/zh-CN/common.json index b1725937fb91..10fb2575d5e2 100644 --- a/packages/web/i18n/zh-CN/common.json +++ b/packages/web/i18n/zh-CN/common.json @@ -212,7 +212,6 @@ "common.Update": "更新", "common.Update Failed": "更新异常", "common.Update Success": "更新成功", - "common.Update Successful": "更新成功", "common.Username": "用户名", "common.Waiting": "等待中", "common.Warning": "警告", diff --git a/packages/web/i18n/zh-Hant/account.json b/packages/web/i18n/zh-Hant/account.json index 0ad90507037e..5ea906dd06f8 100644 --- a/packages/web/i18n/zh-Hant/account.json +++ b/packages/web/i18n/zh-Hant/account.json @@ -10,6 +10,7 @@ "create_model": "新增模型", "custom_model": "自訂模型", "default_model": "預設模型", + "default_model_config": "預設模型配置", "logout": "登出", "model.active": "啟用", "model.alias": "別名", @@ -26,6 +27,7 @@ "model.defaultConfig_tip": "每次請求時候,都會攜帶該額外 Body 參數", "model.default_config": "Body 額外字段", "model.default_config_tip": "發起對話請求時候,合併該配置。例如:\n\"\"\"\n{\n \"temperature\": 1,\n \"max_tokens\": null\n}\n\"\"\"", + "model.default_model": "預設模型", "model.default_system_chat_prompt": "預設提示詞", "model.default_system_chat_prompt_tip": "模型對話時,都會攜帶該預設提示詞", "model.default_token": "預設分塊長度", diff --git a/packages/web/i18n/zh-Hant/common.json b/packages/web/i18n/zh-Hant/common.json index 718d2da32eab..638c95d611f5 100644 --- a/packages/web/i18n/zh-Hant/common.json +++ b/packages/web/i18n/zh-Hant/common.json @@ -207,7 +207,6 @@ "common.Update": "更新", "common.Update Failed": "更新失敗", "common.Update Success": "更新成功", - "common.Update Successful": "更新成功", "common.Username": "使用者名稱", "common.Waiting": "等待中", "common.Warning": "警告", diff --git a/projects/app/src/components/Select/AIModelSelector.tsx b/projects/app/src/components/Select/AIModelSelector.tsx index 886744b15c8d..852c25d580c0 100644 --- a/projects/app/src/components/Select/AIModelSelector.tsx +++ 
b/projects/app/src/components/Select/AIModelSelector.tsx @@ -3,8 +3,8 @@ import React, { useCallback, useMemo, useState } from 'react'; import { useTranslation } from 'next-i18next'; import { useSystemStore } from '@/web/common/system/useSystemStore'; import MySelect, { SelectProps } from '@fastgpt/web/components/common/MySelect'; -import { HUGGING_FACE_ICON, LOGO_ICON } from '@fastgpt/global/common/system/constants'; -import { Box, Flex, HStack, useDisclosure } from '@chakra-ui/react'; +import { HUGGING_FACE_ICON } from '@fastgpt/global/common/system/constants'; +import { Box, Flex, HStack } from '@chakra-ui/react'; import Avatar from '@fastgpt/web/components/common/Avatar'; import MyTooltip from '@fastgpt/web/components/common/MyTooltip'; import dynamic from 'next/dynamic'; @@ -22,7 +22,8 @@ type Props = SelectProps & { const OneRowSelector = ({ list, onchange, disableTip, ...props }: Props) => { const { t } = useTranslation(); - const { feConfigs, llmModelList, embeddingModelList } = useSystemStore(); + const { llmModelList, embeddingModelList, ttsModelList, sttModelList, reRankModelList } = + useSystemStore(); const avatarSize = useMemo(() => { const size = { @@ -35,7 +36,16 @@ const OneRowSelector = ({ list, onchange, disableTip, ...props }: Props) => { }, [props.size]); const avatarList = list.map((item) => { - const modelData = getModelFromList([...llmModelList, ...embeddingModelList], item.value); + const modelData = getModelFromList( + [ + ...llmModelList, + ...embeddingModelList, + ...ttsModelList, + ...sttModelList, + ...reRankModelList + ], + item.value + ); return { value: item.value, @@ -54,20 +64,6 @@ const OneRowSelector = ({ list, onchange, disableTip, ...props }: Props) => { }; }); - const expandList = useMemo(() => { - return feConfigs?.show_pay - ? 
avatarList.concat({ - label: ( - - - {t('common:support.user.Price')} - - ), - value: 'price' - }) - : avatarList; - }, [feConfigs.show_pay, avatarList, avatarSize, t]); - return ( { { if (e === 'price') { @@ -100,7 +97,8 @@ const OneRowSelector = ({ list, onchange, disableTip, ...props }: Props) => { }; const MultipleRowSelector = ({ list, onchange, disableTip, ...props }: Props) => { const { t } = useTranslation(); - const { llmModelList, embeddingModelList } = useSystemStore(); + const { llmModelList, embeddingModelList, ttsModelList, sttModelList, reRankModelList } = + useSystemStore(); const [value, setValue] = useState([]); const avatarSize = useMemo(() => { @@ -158,7 +156,16 @@ const MultipleRowSelector = ({ list, onchange, disableTip, ...props }: Props) => ); const SelectedModel = useMemo(() => { - const modelData = getModelFromList([...llmModelList, ...embeddingModelList], props.value); + const modelData = getModelFromList( + [ + ...llmModelList, + ...embeddingModelList, + ...ttsModelList, + ...sttModelList, + ...reRankModelList + ], + props.value + ); setValue([modelData.provider, props.value]); @@ -174,7 +181,15 @@ const MultipleRowSelector = ({ list, onchange, disableTip, ...props }: Props) => {modelData?.name} ); - }, [avatarSize, llmModelList, props.value, embeddingModelList]); + }, [ + llmModelList, + embeddingModelList, + ttsModelList, + sttModelList, + reRankModelList, + props.value, + avatarSize + ]); return ( value={value} rowMinWidth="160px" ButtonProps={{ - isDisabled: !!disableTip + isDisabled: !!disableTip, + h: '40px', + ...props }} /> diff --git a/projects/app/src/components/core/ai/SettingLLMModel/index.tsx b/projects/app/src/components/core/ai/SettingLLMModel/index.tsx index 234828d0afd5..9043ec0ff060 100644 --- a/projects/app/src/components/core/ai/SettingLLMModel/index.tsx +++ b/projects/app/src/components/core/ai/SettingLLMModel/index.tsx @@ -1,4 +1,4 @@ -import React, { useMemo } from 'react'; +import React, { useEffect, useMemo } from 'react'; import { useSystemStore } from '@/web/common/system/useSystemStore'; import { LLMModelTypeEnum, llmModelTypeFilterMap } from '@fastgpt/global/core/ai/constants'; import { Box, css, HStack, IconButton, useDisclosure } from '@chakra-ui/react'; @@ -24,7 +24,7 @@ const SettingLLMModel = ({ ...props }: AIChatSettingsModalProps & Props) => { const { t } = useTranslation(); - const { llmModelList } = useSystemStore(); + const { llmModelList, defaultModels } = useSystemStore(); const model = defaultData.model; @@ -39,15 +39,16 @@ const SettingLLMModel = ({ }), [llmModelList, llmModelType] ); + // Set default model - useMount(() => { - if (!model && modelList.length > 0) { + useEffect(() => { + if (!llmModelList.find((item) => item.model === model) && !!defaultModels.llm) { onChange({ ...defaultData, - model: modelList[0].model + model: defaultModels.llm.model }); } - }); + }, [model, defaultData, llmModelList, defaultModels.llm, onChange]); const { isOpen: isOpenAIChatSetting, diff --git a/projects/app/src/global/common/api/systemRes.d.ts b/projects/app/src/global/common/api/systemRes.d.ts index 5778c8d603e9..e2c4831dd62c 100644 --- a/projects/app/src/global/common/api/systemRes.d.ts +++ b/projects/app/src/global/common/api/systemRes.d.ts @@ -8,14 +8,15 @@ import type { import type { FastGPTFeConfigsType } from '@fastgpt/global/common/system/types/index.d'; import { SubPlanType } from '@fastgpt/global/support/wallet/sub/type'; -import { SystemModelItemType } from '@fastgpt/service/core/ai/type'; +import { 
SystemDefaultModelType, SystemModelItemType } from '@fastgpt/service/core/ai/type'; export type InitDateResponse = { bufferId?: string; - feConfigs: FastGPTFeConfigsType; + feConfigs?: FastGPTFeConfigsType; subPlans?: SubPlanType; systemVersion: string; activeModelList?: SystemModelItemType[]; + defaultModels?: SystemDefaultModelType; }; diff --git a/projects/app/src/pageComponents/account/model/ModelConfigTable.tsx b/projects/app/src/pageComponents/account/model/ModelConfigTable.tsx index 33408ba8b83c..1ea52498c121 100644 --- a/projects/app/src/pageComponents/account/model/ModelConfigTable.tsx +++ b/projects/app/src/pageComponents/account/model/ModelConfigTable.tsx @@ -36,7 +36,8 @@ import { getSystemModelDetail, getSystemModelList, getTestModel, - putSystemModel + putSystemModel, + putUpdateDefaultModels } from '@/web/core/ai/config'; import MyBox from '@fastgpt/web/components/common/MyBox'; import { SystemModelItemType } from '@fastgpt/service/core/ai/type'; @@ -54,20 +55,14 @@ import QuestionTip from '@fastgpt/web/components/common/MyTooltip/QuestionTip'; import { putUpdateWithJson } from '@/web/core/ai/config'; import CopyBox from '@fastgpt/web/components/common/String/CopyBox'; import MyIcon from '@fastgpt/web/components/common/Icon'; +import AIModelSelector from '@/components/Select/AIModelSelector'; const MyModal = dynamic(() => import('@fastgpt/web/components/common/MyModal')); const ModelTable = ({ Tab }: { Tab: React.ReactNode }) => { const { t } = useTranslation(); const { userInfo } = useUserStore(); - const { - llmModelList, - embeddingModelList, - ttsModelList, - sttModelList, - reRankModelList, - feConfigs - } = useSystemStore(); + const { defaultModels, feConfigs } = useSystemStore(); const isRoot = userInfo?.username === 'root'; @@ -273,14 +268,7 @@ const ModelTable = ({ Tab }: { Tab: React.ReactNode }) => { } ); const onCreateModel = (type: ModelTypeEnum) => { - const defaultModel = (() => { - if (type === ModelTypeEnum.llm) return llmModelList[0]; - if (type === ModelTypeEnum.embedding) return embeddingModelList[0]; - if (type === ModelTypeEnum.tts) return ttsModelList[0]; - if (type === ModelTypeEnum.stt) return sttModelList[0]; - if (type === ModelTypeEnum.rerank) return reRankModelList[0]; - return llmModelList[0]; - })(); + const defaultModel = defaultModels[type]; setEditModelData({ ...defaultModel, @@ -302,6 +290,11 @@ const ModelTable = ({ Tab }: { Tab: React.ReactNode }) => { onOpen: onOpenJsonConfig, onClose: onCloseJsonConfig } = useDisclosure(); + const { + onOpen: onOpenDefaultModel, + onClose: onCloseDefaultModel, + isOpen: isOpenDefaultModel + } = useDisclosure(); const isLoading = loadingModels || loadingData || updatingModel || testingModel; @@ -313,6 +306,9 @@ const ModelTable = ({ Tab }: { Tab: React.ReactNode }) => { {Tab} + @@ -505,6 +501,9 @@ const ModelTable = ({ Tab }: { Tab: React.ReactNode }) => { {isOpenJsonConfig && ( )} + {isOpenDefaultModel && ( + + )} ); }; @@ -1084,4 +1083,164 @@ const JsonConfigModal = ({ ); }; +const labelStyles = { + fontSize: 'sm', + color: 'myGray.900', + mb: 0.5 +}; +const DefaultModelModal = ({ + onSuccess, + onClose +}: { + onSuccess: () => void; + onClose: () => void; +}) => { + const { t } = useTranslation(); + const { + defaultModels, + llmModelList, + embeddingModelList, + ttsModelList, + sttModelList, + reRankModelList + } = useSystemStore(); + + // Create a copy of defaultModels for local state management + const [defaultData, setDefaultData] = useState(defaultModels); + + const { runAsync, loading } = 
useRequest2(putUpdateDefaultModels, { + onSuccess: () => { + onSuccess(); + onClose(); + }, + successToast: t('common:common.Update Success') + }); + + return ( + + + + {t('common:model.type.chat')} + + ({ + value: item.model, + label: item.name + }))} + onchange={(e) => { + setDefaultData((state) => ({ + ...state, + llm: llmModelList.find((item) => item.model === e) + })); + }} + /> + + + + {t('common:model.type.embedding')} + + ({ + value: item.model, + label: item.name + }))} + onchange={(e) => { + setDefaultData((state) => ({ + ...state, + embedding: embeddingModelList.find((item) => item.model === e) + })); + }} + /> + + + + {t('common:model.type.tts')} + + ({ + value: item.model, + label: item.name + }))} + onchange={(e) => { + setDefaultData((state) => ({ + ...state, + tts: ttsModelList.find((item) => item.model === e) + })); + }} + /> + + + + {t('common:model.type.stt')} + + ({ + value: item.model, + label: item.name + }))} + onchange={(e) => { + setDefaultData((state) => ({ + ...state, + stt: sttModelList.find((item) => item.model === e) + })); + }} + /> + + + + {t('common:model.type.reRank')} + + ({ + value: item.model, + label: item.name + }))} + onchange={(e) => { + setDefaultData((state) => ({ + ...state, + rerank: reRankModelList.find((item) => item.model === e) + })); + }} + /> + + + + + + + + + ); +}; + export default ModelTable; diff --git a/projects/app/src/pageComponents/app/detail/Publish/Link/index.tsx b/projects/app/src/pageComponents/app/detail/Publish/Link/index.tsx index eb53eea04087..61f86b6b0ddb 100644 --- a/projects/app/src/pageComponents/app/detail/Publish/Link/index.tsx +++ b/projects/app/src/pageComponents/app/detail/Publish/Link/index.tsx @@ -233,7 +233,7 @@ const Share = ({ appId }: { appId: string; type: PublishChannelEnum }) => { onEdit={() => { toast({ status: 'success', - title: t('common:common.Update Successful') + title: t('common:common.Update Success') }); refetchShareChatList(); setEditLinkData(undefined); diff --git a/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SelectDatasetParams.tsx b/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SelectDatasetParams.tsx index cd5ebeba7a68..38918d81695a 100644 --- a/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SelectDatasetParams.tsx +++ b/projects/app/src/pageComponents/app/detail/WorkflowComponents/Flow/nodes/render/RenderInput/templates/SelectDatasetParams.tsx @@ -19,7 +19,7 @@ const SelectDatasetParam = ({ inputs = [], nodeId }: RenderInputProps) => { const nodeList = useContextSelector(WorkflowContext, (v) => v.nodeList); const { t } = useTranslation(); - const { llmModelList } = useSystemStore(); + const { defaultModels } = useSystemStore(); const [data, setData] = useState({ searchMode: DatasetSearchModeEnum.embedding, @@ -27,7 +27,7 @@ const SelectDatasetParam = ({ inputs = [], nodeId }: RenderInputProps) => { similarity: 0.5, usingReRank: false, datasetSearchUsingExtensionQuery: true, - datasetSearchExtensionModel: llmModelList[0]?.model, + datasetSearchExtensionModel: defaultModels.llm?.model, datasetSearchExtensionBg: '' }); @@ -45,7 +45,7 @@ const SelectDatasetParam = ({ inputs = [], nodeId }: RenderInputProps) => { }); return maxTokens; - }, [nodeList, llmModelList]); + }, [nodeList]); const { isOpen, onOpen, onClose } = useDisclosure(); diff --git a/projects/app/src/pageComponents/dataset/detail/InputDataModal.tsx 
b/projects/app/src/pageComponents/dataset/detail/InputDataModal.tsx index 7c7031d50765..e39373e6f101 100644 --- a/projects/app/src/pageComponents/dataset/detail/InputDataModal.tsx +++ b/projects/app/src/pageComponents/dataset/detail/InputDataModal.tsx @@ -67,7 +67,7 @@ const InputDataModal = ({ const theme = useTheme(); const { toast } = useToast(); const [currentTab, setCurrentTab] = useState(TabEnum.content); - const { embeddingModelList } = useSystemStore(); + const { embeddingModelList, defaultModels } = useSystemStore(); const { isPc } = useSystem(); const { register, handleSubmit, reset, control } = useForm(); const { @@ -159,10 +159,10 @@ const InputDataModal = ({ const maxToken = useMemo(() => { const vectorModel = embeddingModelList.find((item) => item.model === collection.dataset.vectorModel) || - embeddingModelList[0]; + defaultModels.embedding; return vectorModel?.maxToken || 3000; - }, [collection.dataset.vectorModel, embeddingModelList]); + }, [collection.dataset.vectorModel, defaultModels.embedding, embeddingModelList]); // import new data const { mutate: sureImportData, isLoading: isImporting } = useRequest({ diff --git a/projects/app/src/pageComponents/dataset/detail/Test.tsx b/projects/app/src/pageComponents/dataset/detail/Test.tsx index 14c217d54750..8d1c0529c857 100644 --- a/projects/app/src/pageComponents/dataset/detail/Test.tsx +++ b/projects/app/src/pageComponents/dataset/detail/Test.tsx @@ -48,7 +48,7 @@ type FormType = { const Test = ({ datasetId }: { datasetId: string }) => { const { t } = useTranslation(); const { toast } = useToast(); - const { llmModelList } = useSystemStore(); + const { defaultModels } = useSystemStore(); const datasetDetail = useContextSelector(DatasetPageContext, (v) => v.datasetDetail); const { pushDatasetTestItem } = useSearchTestStore(); const [inputType, setInputType] = useState<'text' | 'file'>('text'); @@ -70,7 +70,7 @@ const Test = ({ datasetId }: { datasetId: string }) => { limit: 5000, similarity: 0, datasetSearchUsingExtensionQuery: true, - datasetSearchExtensionModel: llmModelList[0].model, + datasetSearchExtensionModel: defaultModels.llm?.model, datasetSearchExtensionBg: '' } } diff --git a/projects/app/src/pageComponents/dataset/list/CreateModal.tsx b/projects/app/src/pageComponents/dataset/list/CreateModal.tsx index 5aff9f8616ed..e1145ab7353a 100644 --- a/projects/app/src/pageComponents/dataset/list/CreateModal.tsx +++ b/projects/app/src/pageComponents/dataset/list/CreateModal.tsx @@ -41,7 +41,7 @@ const CreateModal = ({ const { t } = useTranslation(); const { toast } = useToast(); const router = useRouter(); - const { embeddingModelList, datasetModelList } = useSystemStore(); + const { defaultModels, embeddingModelList, datasetModelList } = useSystemStore(); const { isPc } = useSystem(); const datasetTypeMap = useMemo(() => { @@ -78,8 +78,8 @@ const CreateModal = ({ avatar: datasetTypeMap[type].icon, name: '', intro: '', - vectorModel: filterNotHiddenVectorModelList[0].model, - agentModel: datasetModelList[0].model + vectorModel: defaultModels.embedding?.model, + agentModel: defaultModels.llm?.model } }); const { register, setValue, handleSubmit, watch } = form; diff --git a/projects/app/src/pages/api/common/system/getInitData.ts b/projects/app/src/pages/api/common/system/getInitData.ts index 50429696aa38..4ce77b3a8891 100644 --- a/projects/app/src/pages/api/common/system/getInitData.ts +++ b/projects/app/src/pages/api/common/system/getInitData.ts @@ -1,8 +1,13 @@ import type { NextApiResponse } from 'next'; import { 
diff --git a/projects/app/src/pages/api/common/system/getInitData.ts b/projects/app/src/pages/api/common/system/getInitData.ts
index 50429696aa38..4ce77b3a8891 100644
--- a/projects/app/src/pages/api/common/system/getInitData.ts
+++ b/projects/app/src/pages/api/common/system/getInitData.ts
@@ -1,8 +1,13 @@
 import type { NextApiResponse } from 'next';
 import { ApiRequestProps } from '@fastgpt/service/type/next';
 import { NextAPI } from '@/service/middleware/entry';
+import { InitDateResponse } from '@/global/common/api/systemRes';
+import { SystemModelItemType } from '@fastgpt/service/core/ai/type';
 
-async function handler(req: ApiRequestProps<{}, { bufferId?: string }>, res: NextApiResponse) {
+async function handler(
+  req: ApiRequestProps<{}, { bufferId?: string }>,
+  res: NextApiResponse
+): Promise<InitDateResponse> {
   const { bufferId } = req.query;
 
   const activeModelList = global.systemActiveModelList.map((model) => ({
@@ -17,7 +22,7 @@
     queryConfig: undefined,
     requestUrl: undefined,
     requestAuth: undefined
-  }));
+  })) as SystemModelItemType[];
 
   // If bufferId is the same as the current bufferId, return directly
   if (bufferId && global.systemInitBufferId && global.systemInitBufferId === bufferId) {
@@ -31,8 +36,9 @@
     bufferId: global.systemInitBufferId,
     feConfigs: global.feConfigs,
     subPlans: global.subPlans,
+    systemVersion: global.systemVersion || '0.0.0',
     activeModelList,
-    systemVersion: global.systemVersion || '0.0.0'
+    defaultModels: global.systemDefaultModel
  };
 }
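getInitData now ships the configured defaults next to the active model list. The InitDateResponse declaration itself sits in @/global/common/api/systemRes and is not part of this diff; the following is an inferred sketch from the handler's return object. The field names are grounded in the code above, while the optionality is an assumption based on the `res.defaultModels ?? state.defaultModels` guard in useSystemStore further down:

import type { FastGPTFeConfigsType } from '@fastgpt/global/common/system/types';
import type { SubPlanType } from '@fastgpt/global/support/wallet/sub/type';
import type { SystemModelItemType, SystemDefaultModelType } from '@fastgpt/service/core/ai/type';

// Inferred shape only; the real declaration may differ in detail.
export type InitDateResponse = {
  bufferId?: string;
  feConfigs?: FastGPTFeConfigsType;
  subPlans?: SubPlanType;
  systemVersion?: string;
  activeModelList?: SystemModelItemType[];
  defaultModels?: SystemDefaultModelType;
};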
diff --git a/projects/app/src/pages/api/core/ai/agent/createQuestionGuide.ts b/projects/app/src/pages/api/core/ai/agent/createQuestionGuide.ts
index f483ec0cc4c6..625b990098dc 100644
--- a/projects/app/src/pages/api/core/ai/agent/createQuestionGuide.ts
+++ b/projects/app/src/pages/api/core/ai/agent/createQuestionGuide.ts
@@ -16,7 +16,7 @@ import { MongoTeamMember } from '@fastgpt/service/support/user/team/teamMemberSc
 import { TeamMemberRoleEnum } from '@fastgpt/global/support/user/team/constant';
 import { ChatErrEnum } from '@fastgpt/global/common/error/code/chat';
 import { authCert } from '@fastgpt/service/support/permission/auth/common';
-import { getFirstLLMModel } from '@fastgpt/service/core/ai/model';
+import { getDefaultLLMModel } from '@fastgpt/service/core/ai/model';
 
 async function handler(
   req: ApiRequestProps<
@@ -36,7 +36,7 @@ async function handler(
     authApiKey: true
   });
 
-  const qgModel = getFirstLLMModel();
+  const qgModel = getDefaultLLMModel();
 
   const { result, inputTokens, outputTokens } = await createQuestionGuide({
     messages,
@@ -48,6 +48,7 @@
   });
 
   pushQuestionGuideUsage({
+    model: qgModel.model,
     inputTokens,
     outputTokens,
     teamId,
diff --git a/projects/app/src/pages/api/core/ai/agent/v2/createQuestionGuide.ts b/projects/app/src/pages/api/core/ai/agent/v2/createQuestionGuide.ts
index 6c2be8bf87ac..241369eae60c 100644
--- a/projects/app/src/pages/api/core/ai/agent/v2/createQuestionGuide.ts
+++ b/projects/app/src/pages/api/core/ai/agent/v2/createQuestionGuide.ts
@@ -9,7 +9,7 @@ import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
 import { getChatItems } from '@fastgpt/service/core/chat/controller';
 import { chats2GPTMessages } from '@fastgpt/global/core/chat/adapt';
 import { getAppLatestVersion } from '@fastgpt/service/core/app/version/controller';
-import { getFirstLLMModel } from '@fastgpt/service/core/ai/model';
+import { getDefaultLLMModel } from '@fastgpt/service/core/ai/model';
 
 export type CreateQuestionGuideParams = OutLinkChatAuthProps & {
   appId: string;
@@ -51,7 +51,7 @@ async function handler(req: ApiRequestProps<CreateQuestionGuideParams>, res: Nex
   });
 
   const messages = chats2GPTMessages({ messages: histories, reserveId: false });
-  const qgModel = questionGuide?.model || getFirstLLMModel().model;
+  const qgModel = questionGuide?.model || getDefaultLLMModel().model;
 
   const { result, inputTokens, outputTokens } = await createQuestionGuide({
     messages,
@@ -60,6 +60,7 @@ async function handler(req: ApiRequestProps<CreateQuestionGuideParams>, res: Nex
   });
 
   pushQuestionGuideUsage({
+    model: qgModel,
     inputTokens,
     outputTokens,
     teamId,
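Both question-guide endpoints now resolve the model up front and pass it into usage accounting, so pushQuestionGuideUsage bills the model that actually ran instead of re-deriving "the first LLM" on its own. A hypothetical helper condensing what both handlers now do inline (both imports are real per this diff; the helper itself is illustrative):

import { getDefaultLLMModel } from '@fastgpt/service/core/ai/model';
import { pushQuestionGuideUsage } from '@/service/support/wallet/usage/push';

// Hypothetical condensation, not part of the commit.
export function billQuestionGuide(args: {
  configuredModel?: string; // questionGuide?.model from the app config, if any
  inputTokens: number;
  outputTokens: number;
  teamId: string;
  tmbId: string;
}) {
  // The app-configured model wins; otherwise the system default LLM.
  const model = args.configuredModel || getDefaultLLMModel().model;
  pushQuestionGuideUsage({
    model,
    inputTokens: args.inputTokens,
    outputTokens: args.outputTokens,
    teamId: args.teamId,
    tmbId: args.tmbId
  });
}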
diff --git a/projects/app/src/pages/api/core/ai/model/updateDefault.ts b/projects/app/src/pages/api/core/ai/model/updateDefault.ts
new file mode 100644
index 000000000000..00cdc33d21b0
--- /dev/null
+++ b/projects/app/src/pages/api/core/ai/model/updateDefault.ts
@@ -0,0 +1,73 @@
+import type { ApiRequestProps, ApiResponseType } from '@fastgpt/service/type/next';
+import { NextAPI } from '@/service/middleware/entry';
+import { mongoSessionRun } from '@fastgpt/service/common/mongo/sessionRun';
+import { MongoSystemModel } from '@fastgpt/service/core/ai/config/schema';
+import { loadSystemModels } from '@fastgpt/service/core/ai/config/utils';
+import { updateFastGPTConfigBuffer } from '@fastgpt/service/common/system/config/controller';
+import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
+
+export type updateDefaultQuery = {};
+
+export type updateDefaultBody = {
+  [ModelTypeEnum.llm]?: string;
+  [ModelTypeEnum.embedding]?: string;
+  [ModelTypeEnum.tts]?: string;
+  [ModelTypeEnum.stt]?: string;
+  [ModelTypeEnum.rerank]?: string;
+};
+
+export type updateDefaultResponse = {};
+
+async function handler(
+  req: ApiRequestProps<updateDefaultBody, updateDefaultQuery>,
+  res: ApiResponseType<any>
+): Promise<updateDefaultResponse> {
+  const { llm, embedding, tts, stt, rerank } = req.body;
+
+  await mongoSessionRun(async (session) => {
+    await MongoSystemModel.updateMany({}, { $set: { 'metadata.isDefault': false } }, { session });
+
+    if (llm) {
+      await MongoSystemModel.updateOne(
+        { model: llm },
+        { $set: { 'metadata.isDefault': true } },
+        { session }
+      );
+    }
+    if (embedding) {
+      await MongoSystemModel.updateOne(
+        { model: embedding },
+        { $set: { 'metadata.isDefault': true } },
+        { session }
+      );
+    }
+    if (tts) {
+      await MongoSystemModel.updateOne(
+        { model: tts },
+        { $set: { 'metadata.isDefault': true } },
+        { session }
+      );
+    }
+    if (stt) {
+      await MongoSystemModel.updateOne(
+        { model: stt },
+        { $set: { 'metadata.isDefault': true } },
+        { session }
+      );
+    }
+    if (rerank) {
+      await MongoSystemModel.updateOne(
+        { model: rerank },
+        { $set: { 'metadata.isDefault': true } },
+        { session }
+      );
+    }
+  });
+
+  await loadSystemModels(true);
+  await updateFastGPTConfigBuffer();
+
+  return {};
+}
+
+export default NextAPI(handler);
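A minimal client-side call against the new route, using the putUpdateDefaultModels helper added at the end of this diff. The keys of updateDefaultBody are the ModelTypeEnum string values ('llm', 'embedding', and so on), which is why the handler can destructure them directly; the model ids below are placeholders:

import { putUpdateDefaultModels } from '@/web/core/ai/config';

// Placeholder ids: any active model names configured in the system work here.
await putUpdateDefaultModels({
  llm: 'gpt-4o-mini',
  embedding: 'text-embedding-3-small'
});
// Server side, inside one mongo session: every model's metadata.isDefault is
// cleared, then only the named models are re-flagged. loadSystemModels(true)
// and updateFastGPTConfigBuffer() then rebuild the in-memory model maps and
// the init-data buffer, so the next getInitData response carries the new
// defaults.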
diff --git a/projects/app/src/pages/api/core/dataset/create.ts b/projects/app/src/pages/api/core/dataset/create.ts
index a372d6065b25..1bf40c2c3ccf 100644
--- a/projects/app/src/pages/api/core/dataset/create.ts
+++ b/projects/app/src/pages/api/core/dataset/create.ts
@@ -6,7 +6,7 @@ import {
   getLLMModel,
   getEmbeddingModel,
   getDatasetModel,
-  getFirstEmbeddingModel
+  getDefaultEmbeddingModel
 } from '@fastgpt/service/core/ai/model';
 import { checkTeamDatasetLimit } from '@fastgpt/service/support/permission/teamLimit';
 import { WritePermissionVal } from '@fastgpt/global/support/permission/constant';
@@ -32,7 +32,7 @@ async function handler(
     intro,
     type = DatasetTypeEnum.dataset,
     avatar,
-    vectorModel = getFirstEmbeddingModel().model,
+    vectorModel = getDefaultEmbeddingModel().model,
     agentModel = getDatasetModel().model,
     apiServer,
     feishuServer,
diff --git a/projects/app/src/pages/api/v1/audio/transcriptions.ts b/projects/app/src/pages/api/v1/audio/transcriptions.ts
index b6c642cfd924..837d33ff4b61 100644
--- a/projects/app/src/pages/api/v1/audio/transcriptions.ts
+++ b/projects/app/src/pages/api/v1/audio/transcriptions.ts
@@ -9,7 +9,7 @@ import { OutLinkChatAuthProps } from '@fastgpt/global/support/permission/chat';
 import { NextAPI } from '@/service/middleware/entry';
 import { aiTranscriptions } from '@fastgpt/service/core/ai/audio/transcriptions';
 import { useIPFrequencyLimit } from '@fastgpt/service/common/middle/reqFrequencyLimit';
-import { getFirstSTTModel } from '@fastgpt/service/core/ai/model';
+import { getDefaultSTTModel } from '@fastgpt/service/core/ai/model';
 
 const upload = getUploadModel({
   maxSize: 5
@@ -37,7 +37,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
 
     filePaths = [file.path];
 
-    if (!getFirstSTTModel()) {
+    if (!getDefaultSTTModel()) {
       throw new Error('whisper model not found');
     }
@@ -66,7 +66,7 @@ async function handler(req: NextApiRequest, res: NextApiResponse) {
     // }
 
     const result = await aiTranscriptions({
-      model: getFirstSTTModel().model,
+      model: getDefaultSTTModel().model,
       fileStream: fs.createReadStream(file.path)
     });
diff --git a/projects/app/src/service/support/wallet/usage/push.ts b/projects/app/src/service/support/wallet/usage/push.ts
index 9054e40d28ac..407e5aace635 100644
--- a/projects/app/src/service/support/wallet/usage/push.ts
+++ b/projects/app/src/service/support/wallet/usage/push.ts
@@ -5,7 +5,7 @@ import { formatModelChars2Points } from '@fastgpt/service/support/wallet/usage/u
 import { ChatNodeUsageType } from '@fastgpt/global/support/wallet/bill/type';
 import { i18nT } from '@fastgpt/web/i18n/utils';
 import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
-import { getFirstLLMModel, getFirstSTTModel } from '@fastgpt/service/core/ai/model';
+import { getDefaultSTTModel } from '@fastgpt/service/core/ai/model';
 
 export const pushChatUsage = ({
   appName,
@@ -176,21 +176,22 @@
 };
 
 export const pushQuestionGuideUsage = ({
+  model,
   inputTokens,
   outputTokens,
   teamId,
   tmbId
 }: {
+  model: string;
   inputTokens: number;
   outputTokens: number;
   teamId: string;
   tmbId: string;
 }) => {
-  const qgModel = getFirstLLMModel();
   const { totalPoints, modelName } = formatModelChars2Points({
     inputTokens,
     outputTokens,
-    model: qgModel.model,
+    model,
     modelType: ModelTypeEnum.llm
   });
@@ -259,7 +260,7 @@ export function pushWhisperUsage({
   tmbId: string;
   duration: number;
 }) {
-  const whisperModel = getFirstSTTModel();
+  const whisperModel = getDefaultSTTModel();
 
   if (!whisperModel) return;
diff --git a/projects/app/src/web/common/system/useSystemStore.ts b/projects/app/src/web/common/system/useSystemStore.ts
index 515d07559d91..df8f7d883815 100644
--- a/projects/app/src/web/common/system/useSystemStore.ts
+++ b/projects/app/src/web/common/system/useSystemStore.ts
@@ -15,6 +15,7 @@ import { FastGPTFeConfigsType } from '@fastgpt/global/common/system/types';
 import { SubPlanType } from '@fastgpt/global/support/wallet/sub/type';
 import { ModelTypeEnum } from '@fastgpt/global/core/ai/model';
 import { TeamErrEnum } from '@fastgpt/global/common/error/code/team';
+import { SystemDefaultModelType } from '@fastgpt/service/core/ai/type';
 
 type LoginStoreType = { provider: `${OAuthEnum}`; lastRoute: string; state: string };
 
@@ -49,6 +50,7 @@ type State = {
   feConfigs: FastGPTFeConfigsType;
   subPlans?: SubPlanType;
   systemVersion: string;
+  defaultModels: SystemDefaultModelType;
   llmModelList: LLMModelItemType[];
   datasetModelList: LLMModelItemType[];
   embeddingModelList: EmbeddingModelItemType[];
@@ -125,6 +127,7 @@ export const useSystemStore = create<State>()(
       feConfigs: {},
      subPlans: undefined,
      systemVersion: '0.0.0',
+      defaultModels: {},
      llmModelList: [],
      datasetModelList: [],
      embeddingModelList: [],
@@ -155,6 +158,8 @@ export const useSystemStore = create<State>()(
           state.sttModelList =
             res.activeModelList?.filter((item) => item.type === ModelTypeEnum.stt) ??
             state.sttModelList;
+
+          state.defaultModels = res.defaultModels ?? state.defaultModels;
         });
       }
     })),
@@ -166,6 +171,7 @@ export const useSystemStore = create<State>()(
       feConfigs: state.feConfigs,
       subPlans: state.subPlans,
       systemVersion: state.systemVersion,
+      defaultModels: state.defaultModels,
       llmModelList: state.llmModelList,
       datasetModelList: state.datasetModelList,
       embeddingModelList: state.embeddingModelList,
diff --git a/projects/app/src/web/core/ai/config.ts b/projects/app/src/web/core/ai/config.ts
index 62afaad313aa..fa192701deca 100644
--- a/projects/app/src/web/core/ai/config.ts
+++ b/projects/app/src/web/core/ai/config.ts
@@ -4,6 +4,7 @@ import type { updateBody } from '@/pages/api/core/ai/model/update';
 import type { deleteQuery } from '@/pages/api/core/ai/model/delete';
 import type { SystemModelItemType } from '@fastgpt/service/core/ai/type';
 import type { updateWithJsonBody } from '@/pages/api/core/ai/model/updateWithJson';
+import type { updateDefaultBody } from '@/pages/api/core/ai/model/updateDefault';
 
 export const getSystemModelList = () => GET<SystemModelItemType[]>('/core/ai/model/list');
 export const getSystemModelDetail = (model: string) =>
@@ -18,3 +19,6 @@ export const putUpdateWithJson = (data: updateWithJsonBody) =>
   PUT('/core/ai/model/updateWithJson', data);
 export const getTestModel = (model: String) => GET('/core/ai/model/test', { model });
+
+export const putUpdateDefaultModels = (data: updateDefaultBody) =>
+  PUT('/core/ai/model/updateDefault', data);
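Finally, SystemDefaultModelType, which the store imports from @fastgpt/service/core/ai/type, is declared outside this diff. An inferred sketch from its usage: the initial store value is `defaultModels: {}`, every call site uses optional chaining, and the slots hold full model items rather than bare ids (InputDataModal reads `maxToken` from `defaultModels.embedding`). The tts/stt/rerank item type names are not visible here, so they are sketched structurally:

import type {
  LLMModelItemType,
  EmbeddingModelItemType
} from '@fastgpt/global/core/ai/model.d';

// Inferred shape only; the real declaration may name the remaining item
// types differently.
type SystemDefaultModelType = {
  llm?: LLMModelItemType;
  embedding?: EmbeddingModelItemType;
  // Any model item carrying at least a `model` id and a display `name`
  // satisfies the call sites shown in this diff.
  tts?: { model: string; name: string };
  stt?: { model: string; name: string };
  rerank?: { model: string; name: string };
};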