diff --git a/packages/service/core/ai/config/llm/gemini-exp-1206.json b/packages/service/core/ai/config/llm/gemini-exp-1206.json new file mode 100644 index 00000000000..d5ba8954103 --- /dev/null +++ b/packages/service/core/ai/config/llm/gemini-exp-1206.json @@ -0,0 +1,29 @@ +{ + "provider": "Gemini", + "model": "gemini-exp-1206", + "name": "gemini-exp-1206", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 120000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": true, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/hunyuan-pro-32k.json b/packages/service/core/ai/config/llm/hunyuan-pro-32k.json new file mode 100644 index 00000000000..616a4183969 --- /dev/null +++ b/packages/service/core/ai/config/llm/hunyuan-pro-32k.json @@ -0,0 +1,29 @@ +{ + "provider": "Hunyuan", + "model": "hunyuan-pro-32k", + "name": "hunyuan-pro-32k(测试)", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 28000, + "maxResponse": 4000, + "quoteMaxToken": 28000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": true, + "usedInClassify": true, + "customCQPrompt": "", + "usedInExtractFields": true, + "usedInQueryExtension": true, + "customExtractPrompt": "", + "usedInToolCall": true, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/hunyuan-turbo-vision.json b/packages/service/core/ai/config/llm/hunyuan-turbo-vision.json new file mode 100644 index 00000000000..c5f0072f8ff --- /dev/null +++ b/packages/service/core/ai/config/llm/hunyuan-turbo-vision.json @@ -0,0 +1,29 @@ +{ + 
"provider": "Hunyuan", + "model": "hunyuan-turbo-vision", + "name": "hunyuan-turbo-vision", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 6000, + "maxResponse": 2000, + "quoteMaxToken": 6000, + "maxTemperature": 1, + + "vision": true, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": false, + "customCQPrompt": "", + "usedInExtractFields": false, + "usedInQueryExtension": false, + "customExtractPrompt": "", + "usedInToolCall": false, + + "defaultConfig": {}, + "fieldMap": {} +} diff --git a/packages/service/core/ai/config/llm/qwen-coder-turbo.json b/packages/service/core/ai/config/llm/qwen-coder-turbo.json new file mode 100644 index 00000000000..48491c1574c --- /dev/null +++ b/packages/service/core/ai/config/llm/qwen-coder-turbo.json @@ -0,0 +1,29 @@ +{ + "provider": "Qwen", + "model": "qwen-coder-turbo", + "name": "qwen-coder-turbo", + + "censor": false, + "charsPointsPrice": 0, + + "maxContext": 128000, + "maxResponse": 8000, + "quoteMaxToken": 50000, + "maxTemperature": 1, + + "vision": false, + "toolChoice": false, + "functionCall": false, + "defaultSystemChatPrompt": "", + + "datasetProcess": false, + "usedInClassify": false, + "customCQPrompt": "", + "usedInExtractFields": false, + "usedInQueryExtension": false, + "customExtractPrompt": "", + "usedInToolCall": false, + + "defaultConfig": {}, + "fieldMap": {} +}