diff --git a/app/api/openai.ts b/app/api/openai.ts
index 2b5deca8b..e97dfbfe7 100644
--- a/app/api/openai.ts
+++ b/app/api/openai.ts
@@ -14,7 +14,7 @@ function getModels(remoteModelRes: OpenAIListModelResponse) {
   if (config.disableGPT4) {
     remoteModelRes.data = remoteModelRes.data.filter(
       (m) =>
-        !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o") || m.id.startsWith("o1")) ||
+        !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o") || m.id.startsWith("o1") || m.id.startsWith("o3")) ||
         m.id.startsWith("gpt-4o-mini"),
     );
   }
diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 5a110b84b..467bb82e0 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -195,7 +195,7 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;
 
     const isDalle3 = _isDalle3(options.config.model);
-    const isO1 = options.config.model.startsWith("o1");
+    const isO1OrO3 = options.config.model.startsWith("o1") || options.config.model.startsWith("o3");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -217,7 +217,7 @@
         const content = visionModel
           ? await preProcessImageContent(v.content)
           : getMessageTextContent(v);
-        if (!(isO1 && v.role === "system"))
+        if (!(isO1OrO3 && v.role === "system"))
           messages.push({ role: v.role, content });
       }
 
@@ -226,16 +226,16 @@
         messages,
         stream: options.config.stream,
         model: modelConfig.model,
-        temperature: !isO1 ? modelConfig.temperature : 1,
-        presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
-        frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
-        top_p: !isO1 ? modelConfig.top_p : 1,
+        temperature: !isO1OrO3 ? modelConfig.temperature : 1,
+        presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
+        frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
+        top_p: !isO1OrO3 ? modelConfig.top_p : 1,
         // max_tokens: Math.max(modelConfig.max_tokens, 1024),
         // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
       };
 
      // O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
-      if (isO1) {
+      if (isO1OrO3) {
         requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
       }
 
@@ -359,7 +359,7 @@
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        isDalle3 || isO1OrO3 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
       );
 
       const res = await fetch(chatPath, chatPayload);
diff --git a/app/components/emoji.tsx b/app/components/emoji.tsx
index d75cdda92..54d1c1c99 100644
--- a/app/components/emoji.tsx
+++ b/app/components/emoji.tsx
@@ -38,7 +38,8 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
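
Two notes on the change, with illustrative sketches; the helper names below are mine, not identifiers from the repo.

First, the `app/api/openai.ts` change extends the `disableGPT4` filter so that o3-prefixed model ids are hidden along with gpt-4, chatgpt-4o, and o1, while `gpt-4o-mini` stays visible. A minimal sketch of that predicate as a standalone helper; `isHiddenWhenGPT4Disabled` is a hypothetical name used only for this illustration:

```ts
// Hypothetical helper mirroring the filter predicate in app/api/openai.ts.
// Returns true when a model id should be hidden under disableGPT4.
function isHiddenWhenGPT4Disabled(id: string): boolean {
  const blocked =
    id.startsWith("gpt-4") ||
    id.startsWith("chatgpt-4o") ||
    id.startsWith("o1") ||
    id.startsWith("o3"); // the prefix this diff adds
  // gpt-4o-mini matches the "gpt-4" prefix but is explicitly allowed through.
  return blocked && !id.startsWith("gpt-4o-mini");
}

// Spot checks:
console.log(isHiddenWhenGPT4Disabled("o3-mini")); // true  (newly hidden)
console.log(isHiddenWhenGPT4Disabled("gpt-4o-mini")); // false (kept)
console.log(isHiddenWhenGPT4Disabled("gpt-3.5-turbo")); // false (never filtered)
```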
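
Second, renaming `isO1` to `isO1OrO3` extends the client's reasoning-model handling to o3: system messages are dropped, the sampling parameters are pinned to their API defaults, `max_completion_tokens` is sent instead of `max_tokens`, and the request timeout is quadrupled. A sketch of the payload construction under those rules, assuming the field shapes visible in the diff; `buildReasoningAwarePayload` and `ModelConfigLike` are stand-ins, not the repo's actual types:

```ts
// Stand-in for the relevant slice of the app's model config.
interface ModelConfigLike {
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens: number;
}

function buildReasoningAwarePayload(
  modelConfig: ModelConfigLike,
  messages: unknown[],
  stream: boolean,
): Record<string, unknown> {
  const isO1OrO3 =
    modelConfig.model.startsWith("o1") ||
    modelConfig.model.startsWith("o3");

  const payload: Record<string, unknown> = {
    messages,
    stream,
    model: modelConfig.model,
    // Reasoning models only accept the default sampling settings, so pin
    // temperature/top_p to 1 and the penalties to 0 when one is selected.
    temperature: !isO1OrO3 ? modelConfig.temperature : 1,
    presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
    frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
    top_p: !isO1OrO3 ? modelConfig.top_p : 1,
  };

  // Reasoning models budget output via max_completion_tokens
  // rather than max_tokens.
  if (isO1OrO3) {
    payload["max_completion_tokens"] = modelConfig.max_tokens;
  }

  return payload;
}
```

One design note: prefix matching on `"o1"`/`"o3"` also catches any future model ids that happen to share those prefixes, which appears intended here given the same pattern is used in the server-side filter.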