Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-05-20 04:30:17 +09:00)

support o3-mini

This commit is contained in:
parent 1e20b64048
commit 8f12beb8f0
@@ -14,7 +14,7 @@ function getModels(remoteModelRes: OpenAIListModelResponse) {
   if (config.disableGPT4) {
     remoteModelRes.data = remoteModelRes.data.filter(
       (m) =>
-        !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o") || m.id.startsWith("o1")) ||
+        !(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o") || m.id.startsWith("o1") || m.id.startsWith("o3")) ||
         m.id.startsWith("gpt-4o-mini"),
     );
   }
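
The hunk above extends the disableGPT4 filter so that o3-family ids are hidden along with gpt-4, chatgpt-4o, and o1, while gpt-4o-mini stays visible. A minimal sketch of the predicate in isolation (the helper name and standalone shape are illustrative, not from the commit):

```ts
// Prefixes hidden when the server config sets disableGPT4.
const hiddenPrefixes = ["gpt-4", "chatgpt-4o", "o1", "o3"];

function isVisibleWhenGPT4Disabled(id: string): boolean {
  const hidden = hiddenPrefixes.some((p) => id.startsWith(p));
  // gpt-4o-mini is explicitly re-allowed even though it matches "gpt-4".
  return !hidden || id.startsWith("gpt-4o-mini");
}

// isVisibleWhenGPT4Disabled("o3-mini")     -> false
// isVisibleWhenGPT4Disabled("gpt-4o-mini") -> true
```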
@@ -195,7 +195,7 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;

     const isDalle3 = _isDalle3(options.config.model);
-    const isO1 = options.config.model.startsWith("o1");
+    const isO1OrO3 = options.config.model.startsWith("o1") || options.config.model.startsWith("o3");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
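
The isO1 flag becomes isO1OrO3: both model families are detected by id prefix and then share the same special-casing throughout the rest of this file. Extracted as a helper purely for illustration (the commit keeps the two startsWith calls inline):

```ts
// Hypothetical helper; equivalent to the inline check in the diff.
function isReasoningModel(model: string): boolean {
  return model.startsWith("o1") || model.startsWith("o3");
}
```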
@@ -217,7 +217,7 @@ export class ChatGPTApi implements LLMApi {
       const content = visionModel
         ? await preProcessImageContent(v.content)
         : getMessageTextContent(v);
-      if (!(isO1 && v.role === "system"))
+      if (!(isO1OrO3 && v.role === "system"))
         messages.push({ role: v.role, content });
     }

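Reasoning models in this client do not receive system-role turns, so the message loop now skips them for o3 as well as o1. A simplified sketch of that rule, assuming a plain-text message shape (the real code may also carry vision content parts):

```ts
type Msg = { role: "system" | "user" | "assistant"; content: string };

// Drop system turns when targeting an o1/o3-family model.
function assembleMessages(input: Msg[], isO1OrO3: boolean): Msg[] {
  return input.filter((m) => !(isO1OrO3 && m.role === "system"));
}
```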
@@ -226,16 +226,16 @@ export class ChatGPTApi implements LLMApi {
       messages,
       stream: options.config.stream,
       model: modelConfig.model,
-      temperature: !isO1 ? modelConfig.temperature : 1,
-      presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
-      frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
-      top_p: !isO1 ? modelConfig.top_p : 1,
+      temperature: !isO1OrO3 ? modelConfig.temperature : 1,
+      presence_penalty: !isO1OrO3 ? modelConfig.presence_penalty : 0,
+      frequency_penalty: !isO1OrO3 ? modelConfig.frequency_penalty : 0,
+      top_p: !isO1OrO3 ? modelConfig.top_p : 1,
       // max_tokens: Math.max(modelConfig.max_tokens, 1024),
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };

     // O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
-    if (isO1) {
+    if (isO1OrO3) {
       requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
     }

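For o1/o3 models the payload pins the sampling parameters to their defaults (temperature 1, penalties 0, top_p 1) and budgets output with max_completion_tokens instead of max_tokens. A self-contained sketch of that branching, with an illustrative config type standing in for modelConfig:

```ts
interface SamplingConfig {
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens: number;
}

function buildPayload(model: string, cfg: SamplingConfig) {
  const isO1OrO3 = model.startsWith("o1") || model.startsWith("o3");
  const payload: Record<string, unknown> = {
    model,
    // Reasoning models only accept the default sampling settings.
    temperature: isO1OrO3 ? 1 : cfg.temperature,
    presence_penalty: isO1OrO3 ? 0 : cfg.presence_penalty,
    frequency_penalty: isO1OrO3 ? 0 : cfg.frequency_penalty,
    top_p: isO1OrO3 ? 1 : cfg.top_p,
  };
  if (isO1OrO3) {
    // Output budget goes through max_completion_tokens for reasoning models.
    payload["max_completion_tokens"] = cfg.max_tokens;
  }
  return payload;
}
```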
@@ -359,7 +359,7 @@ export class ChatGPTApi implements LLMApi {
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        isDalle3 || isO1OrO3 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
       );

       const res = await fetch(chatPath, chatPayload);
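
o3 models now get the same 4x request timeout that dalle3 and o1 already had, since reasoning models can take far longer before the first byte arrives. A sketch of the selection, with an assumed value for REQUEST_TIMEOUT_MS:

```ts
const REQUEST_TIMEOUT_MS = 60_000; // assumed value for the example

function pickTimeout(isDalle3: boolean, isO1OrO3: boolean): number {
  return isDalle3 || isO1OrO3 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS;
}
```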
@@ -38,7 +38,8 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
       <div className="no-dark">
         {props.model?.startsWith("gpt-4") ||
         props.model?.startsWith("chatgpt-4o") ||
-        props.model?.startsWith("o1") ? (
+        props.model?.startsWith("o1") ||
+        props.model?.startsWith("o3") ? (
           <BlackBotIcon className="user-avatar" />
         ) : (
           <BotIcon className="user-avatar" />
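
The avatar component shows the dark bot icon for gpt-4, chatgpt-4o, o1, and now o3 ids. The JSX condition is equivalent to this hypothetical predicate:

```ts
// Illustrative only; the component inlines this chain of startsWith checks.
function usesBlackBotIcon(model?: string): boolean {
  return ["gpt-4", "chatgpt-4o", "o1", "o3"].some(
    (p) => model?.startsWith(p) ?? false,
  );
}
```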
@@ -410,6 +410,7 @@ export const KnowledgeCutOffDate: Record<string, string> = {
   "gpt-4-vision-preview": "2023-04",
   "o1-mini": "2023-10",
   "o1-preview": "2023-10",
+  "o3-mini": "2023-10",
   // After improvements,
   // it's now easier to add "KnowledgeCutOffDate" instead of stupid hardcoding it, as was done previously.
   "gemini-pro": "2023-12",
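
o3-mini joins the KnowledgeCutOffDate table with the same 2023-10 cutoff as the o1 models. A sketch of how such a table is typically consulted (the fallback string here is an assumption, not from the commit):

```ts
const KnowledgeCutOffDate: Record<string, string> = {
  "o1-mini": "2023-10",
  "o1-preview": "2023-10",
  "o3-mini": "2023-10",
};

// Fall back to a placeholder when a model has no recorded cutoff.
function cutoffFor(model: string): string {
  return KnowledgeCutOffDate[model] ?? "unknown";
}
```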