Skip to content

Commit c477830

Browse files
committed
1.增加适配o1-mini、o1-preview模型。2.修复配置Api Server。 (1. Add support for the o1-mini and o1-preview models. 2. Fix the Api Server configuration.)
1 parent 6666665 commit c477830

File tree

4 files changed: +31 −14 lines

TMessagesProj/src/main/java/org/telegram/messenger/SendMessagesHelper.java

+8-9
Original file line numberDiff line numberDiff line change
@@ -5942,33 +5942,32 @@ protected void performSendMessageRequest(final TLObject req, final MessageObject
59425942
// todo 旧协议兼容
59435943
// 发送请求格式切换,多模态请求,以及旧模态请求(只能发送文本)
59445944
// if (isMultiCompletionRequest(aiModelReal, isOpenAIVision)) {
5945-
if (UserConfig.isSupportImageModel(currentAccount, user.id)) {
5945+
if (UserConfig.isMultiCompletionRequest(currentAccount, user.id)) {
59465946
List<ChatMultiMessage> chatMessageList = getChatMultiCompletionRequest(prompt, msgObj);
5947-
// if (true) return;
59485947

59495948
chatCompletionRequest = ChatCompletionRequest.builder()
59505949
.model(aiModelReal)
5951-
.temperature(temperature != -100 ? temperature : null)
5952-
// gpt-4-vision-preview如果不配置maxTokens,则会按照最短的maxTokens配置。
5953-
// 导致输出文字过短,而图片模型无法进行多轮会话导致无法发送继续,输出更多内容。
5954-
.maxTokens(tokenLimit != -100 ? tokenLimit : 4096)
59555950
.build().setMessages(chatMessageList);
59565951

59575952
} else {
59585953
List<ChatMessage> chatMessageList = getChatCompletionRequest(prompt, msgObj);
59595954

59605955
chatCompletionRequest = ChatCompletionRequest.builder()
59615956
.model(aiModelReal)
5962-
.temperature(temperature != -100 ? temperature : null)
5963-
.maxTokens(tokenLimit != -100 ? tokenLimit : null)
59645957
.build().setMessages(chatMessageList);
59655958
}
59665959

5960+
// o1不支持temperature以及tokenLimit,直接忽略配置
5961+
if (!UserConfig.isJudgeByModelO(aiModel)) {
5962+
chatCompletionRequest.setTemperature(temperature != -100 ? temperature : null);
5963+
chatCompletionRequest.setMaxTokens(tokenLimit != -100 ? tokenLimit : null);
5964+
}
5965+
59675966
BaseMessage baseMessage = new BaseMessage();
59685967
baseMessage.setDialog_id(newMsgObj.dialog_id);
59695968
KeepAliveJob.finishJob();
59705969

5971-
if (getUserConfig().streamResponses) {
5970+
if (getUserConfig().streamResponses && !UserConfig.isJudgeByModelO(aiModel)) {
59725971

59735972
streamMessages.clear();
59745973

TMessagesProj/src/main/java/org/telegram/messenger/UserConfig.java

+18
Original file line numberDiff line numberDiff line change
@@ -524,6 +524,10 @@ private void initAiModelList() {
524524
aiModelList.put(13, new AiModelBean("GPT-4o", "gpt-4o", true));
525525
aiModelList.put(17, new AiModelBean("GPT-4o-0806", "gpt-4o-2024-08-06", true));
526526
aiModelList.put(18, new AiModelBean("GPT-4o-0513", "gpt-4o-2024-05-13", true));
527+
aiModelList.put(19, new AiModelBean("o1 mini", "o1-mini", true));
528+
aiModelList.put(20, new AiModelBean("o1 preview", "o1-preview", true));
529+
aiModelList.put(21, new AiModelBean("o1 mini-2024-09-12", "o1-mini-2024-09-12", true));
530+
aiModelList.put(22, new AiModelBean("o1 preview-2024-09-12", "o1-preview-2024-09-12", true));
527531
aiModelList.put(1, new AiModelBean("GPT-3.5", "gpt-3.5-turbo", true));
528532
aiModelList.put(2, new AiModelBean("GPT-3.5-0613", "gpt-3.5-turbo-0613", false));
529533
aiModelList.put(3, new AiModelBean("GPT-3.5-16k", "gpt-3.5-turbo-16k", true));
@@ -810,6 +814,15 @@ public static boolean isUserGeminiProVision(int currentAccount, long userId) {
810814

811815
}
812816

817+
public static boolean isJudgeByModelO(int aiModel) {
818+
if (aiModel == 19) return true;
819+
if (aiModel == 20) return true;
820+
if (aiModel == 21) return true;
821+
if (aiModel == 22) return true;
822+
823+
return false;
824+
}
825+
813826
public boolean isJudgeByModelGeminiProVision(int aiModel) {
814827

815828
if (aiModel == 802) return true;
@@ -829,6 +842,11 @@ public boolean isJudgeByModelOpenAIVision(int aiModel) {
829842

830843
public static boolean isSupportImageModel(int currentAccount, long userId) {
831844

845+
int aiModel = getUserAiModel(currentAccount, userId);
846+
return isMultiCompletionRequest(currentAccount, userId) && !isJudgeByModelO(aiModel);
847+
}
848+
849+
public static boolean isMultiCompletionRequest(int currentAccount, long userId) {
832850

833851
int aiModel = getUserAiModel(currentAccount, userId);
834852

TMessagesProj/src/main/java/org/telegram/ui/ChangeApiServerActivity.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -236,7 +236,7 @@ private void saveApiServer() {
236236
String newFirst = firstNameField.getText().toString().replace("\n", "");
237237
if (TextUtils.isEmpty(newFirst)) {
238238
newFirst = UserConfig.defaultApiServer;
239-
};
239+
}
240240

241241
String formatUrl = formatUrl(newFirst);
242242
if (TextUtils.isEmpty(formatUrl)) {

openai_service/src/main/java/com/theokanning/openai/service/OpenAiService.java

+4-4
Original file line numberDiff line numberDiff line change
@@ -1494,11 +1494,11 @@ private HashMap<String, String> processOpenAIPrefixUrl(String url) {
14941494
// 匹配到特定格式,拆分后缀
14951495
tempPrefixUrl = path.substring(0, path.length() - pathEnd.length());
14961496

1497-
tempUrl = new URI(uri.getScheme(), uri.getAuthority(), tempPrefixUrl,
1498-
null).toString();
1497+
tempUrl = new URI(uri.getScheme(),null, uri.getHost(), uri.getPort(), tempPrefixUrl,
1498+
null,null).toString();
14991499
} else {
1500-
tempUrl = new URI(uri.getScheme(), uri.getAuthority(), "/v1/",
1501-
null).toString();
1500+
tempUrl = new URI(uri.getScheme(), null, uri.getHost(), uri.getPort(), path + "v1/",
1501+
null,null).toString();
15021502
tempPrefixUrl = fixPrefixUrl;
15031503
}
15041504
} catch (URISyntaxException e) {

0 commit comments

Comments
 (0)