Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-05-20 04:30:17 +09:00)
Commit bf38e50455: Merge branch 'main' of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web

LICENSE (2 lines changed)
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2023-2024 Zhang Yifei
+Copyright (c) 2023-2025 NextChat
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

app/client/platforms/bytedance.ts
@@ -22,7 +22,7 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { fetch } from "@/app/utils/stream";
 
 export interface OpenAIListModelResponse {
@@ -84,10 +84,11 @@ export class DoubaoApi implements LLMApi {
   }
 
   async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content: getMessageTextContent(v),
-    }));
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = await preProcessImageContent(v.content);
+      messages.push({ role: v.role, content });
+    }
 
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
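Note: swapping getMessageTextContent for preProcessImageContent keeps multimodal (image) message content for Doubao instead of flattening everything to text. The .map() also had to become a for...of loop because the new helper is async; mapping with an async callback yields promises, not messages. A minimal TypeScript sketch of that pitfall, with a hypothetical preProcess standing in for the real helper:

    type Msg = { role: string; content: string };

    // Hypothetical async pre-processor; the real preProcessImageContent
    // also handles multimodal (image) content, not just strings.
    async function preProcess(content: string): Promise<string> {
      return content.trim();
    }

    async function buildMessages(input: Msg[]): Promise<Msg[]> {
      // input.map(async (m) => ...) would produce Promise<Msg>[] and need
      // an extra Promise.all; the sequential for...of mirrors the diff above.
      const out: Msg[] = [];
      for (const m of input) {
        out.push({ role: m.role, content: await preProcess(m.content) });
      }
      return out;
    }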

app/client/platforms/deepseek.ts
@@ -5,6 +5,7 @@ import {
   DEEPSEEK_BASE_URL,
   DeepSeek,
   REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   useAccessStore,
@@ -117,10 +118,14 @@ export class DeepSeekApi implements LLMApi {
 
     // console.log(chatPayload);
 
+    const isR1 =
+      options.config.model.endsWith("-reasoner") ||
+      options.config.model.endsWith("-r1");
+
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
     );
 
     if (shouldStream) {
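Note: DeepSeek's reasoning models can take minutes before emitting the first token, so their abort timer is widened. A self-contained sketch of the selection logic, with the constant values taken from the app/constant.ts hunk below:

    const REQUEST_TIMEOUT_MS = 60000;
    const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;

    // Mirrors the isR1 check in the diff above.
    function timeoutFor(model: string): number {
      const isR1 = model.endsWith("-reasoner") || model.endsWith("-r1");
      return isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS;
    }

    timeoutFor("deepseek-reasoner"); // 300000 ms (5 min)
    timeoutFor("deepseek-chat"); // 60000 ms (1 min)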

app/client/platforms/google.ts
@@ -1,4 +1,9 @@
-import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import {
+  ApiPath,
+  Google,
+  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
+} from "@/app/constant";
 import {
   ChatOptions,
   getHeaders,
@@ -69,9 +74,16 @@ export class GeminiProApi implements LLMApi {
         .join("\n\n");
     };
 
+    let content = "";
+    if (Array.isArray(res)) {
+      res.map((item) => {
+        content += getTextFromParts(item?.candidates?.at(0)?.content?.parts);
+      });
+    }
+
     return (
       getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
-      getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
+      content || //getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
       res?.error?.message ||
       ""
     );
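Note: when a Gemini response arrives as an array of streamed chunks, the removed fallback only read the first element; the new branch concatenates the text parts of every chunk (res.map is used purely for side effects here; forEach would state the intent more directly). A simplified sketch, with response types assumed from usage rather than Google's published schema:

    type Part = { text?: string };
    type Chunk = { candidates?: { content?: { parts?: Part[] } }[] };

    // Simplified stand-in for the real getTextFromParts helper.
    function getTextFromPartsSketch(parts?: Part[]): string {
      return (parts ?? []).map((p) => p.text ?? "").join("");
    }

    // Accumulate text across every chunk of an array-shaped response.
    function extractMessage(res: Chunk | Chunk[]): string {
      if (Array.isArray(res)) {
        let content = "";
        for (const item of res) {
          content += getTextFromPartsSketch(item.candidates?.at(0)?.content?.parts);
        }
        return content;
      }
      return getTextFromPartsSketch(res.candidates?.at(0)?.content?.parts);
    }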
@@ -190,10 +202,11 @@ export class GeminiProApi implements LLMApi {
       headers: getHeaders(),
     };
 
+    const isThinking = options.config.model.includes("-thinking");
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
     );
 
     if (shouldStream) {
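Note: this is a substring match, not a suffix match, so any model ID containing "-thinking" (for example gemini-2.0-flash-thinking-exp) gets the extended timeout:

    const isThinking = (model: string) => model.includes("-thinking");

    isThinking("gemini-2.0-flash-thinking-exp"); // true
    isThinking("gemini-2.0-flash"); // false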

app/client/platforms/openai.ts
@@ -8,6 +8,7 @@ import {
   Azure,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   ChatMessageTool,
@@ -195,7 +196,9 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;
 
     const isDalle3 = _isDalle3(options.config.model);
-    const isO1OrO3 = options.config.model.startsWith("o1") || options.config.model.startsWith("o3");
+    const isO1OrO3 =
+      options.config.model.startsWith("o1") ||
+      options.config.model.startsWith("o3");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -359,7 +362,9 @@ export class ChatGPTApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      isDalle3 || isO1OrO3 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+      isDalle3 || isO1OrO3
+        ? REQUEST_TIMEOUT_MS_FOR_THINKING
+        : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
     );
 
     const res = await fetch(chatPath, chatPayload);
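Note: DALL·E 3 (slow b64_json payloads) and the o1/o3 reasoning models previously used an ad-hoc REQUEST_TIMEOUT_MS * 4; both now share the named 5x constant. The controller-plus-timer pattern repeated across these clients, factored into a generic helper purely for illustration (a sketch, not code from the repository):

    // Abort-on-timeout fetch: schedule controller.abort(), clear it on settle.
    async function fetchWithTimeout(
      url: string,
      init: RequestInit,
      timeoutMs: number,
    ): Promise<Response> {
      const controller = new AbortController();
      const timer = setTimeout(() => controller.abort(), timeoutMs);
      try {
        return await fetch(url, { ...init, signal: controller.signal });
      } finally {
        clearTimeout(timer);
      }
    }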

app/client/platforms/siliconflow.ts
@@ -4,7 +4,7 @@ import {
   ApiPath,
   SILICONFLOW_BASE_URL,
   SiliconFlow,
-  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   useAccessStore,
@@ -120,11 +120,11 @@ export class SiliconflowApi implements LLMApi {
 
     // console.log(chatPayload);
 
-    // make a fetch request
-    const requestTimeoutId = setTimeout(() => {
-      console.error("[Request] SiliconFlow API timeout");
-      controller.abort();
-    }, 10 * REQUEST_TIMEOUT_MS);
+    // Use extended timeout for thinking models as they typically require more processing time
+    const requestTimeoutId = setTimeout(
+      () => controller.abort(),
+      REQUEST_TIMEOUT_MS_FOR_THINKING,
+    );
 
     if (shouldStream) {
       const [tools, funcs] = usePluginStore
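Note: beyond adopting the shared constant, this halves SiliconFlow's effective budget, drops the console.error on timeout, and applies the extended timeout to every model, not only thinking ones:

    const REQUEST_TIMEOUT_MS = 60000;
    const oldBudgetMs = 10 * REQUEST_TIMEOUT_MS; // 600000 ms (10 min), logged then aborted
    const newBudgetMs = REQUEST_TIMEOUT_MS * 5; // 300000 ms (5 min), silent abort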

app/client/platforms/xai.ts
@@ -17,7 +17,7 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
 
@@ -62,7 +62,7 @@ export class XAIApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = getMessageTextContent(v);
+      const content = await preProcessImageContent(v.content);
       messages.push({ role: v.role, content });
     }
 
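Note: the same fix as in bytedance.ts above; Grok requests now keep image parts instead of being reduced to text. For illustration, the kind of OpenAI-style multimodal message that now survives intact (shape assumed, values placeholder):

    const visionMessage = {
      role: "user" as const,
      content: [
        { type: "text" as const, text: "What is in this image?" },
        {
          type: "image_url" as const,
          image_url: { url: "data:image/png;base64,..." }, // placeholder payload
        },
      ],
    };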

app/components/emoji.tsx
@@ -54,6 +54,8 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     if (
       modelName.startsWith("gpt") ||
       modelName.startsWith("chatgpt") ||
+      modelName.startsWith("dall-e") ||
+      modelName.startsWith("dalle") ||
       modelName.startsWith("o1") ||
       modelName.startsWith("o3")
     ) {
@@ -80,9 +82,13 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
       LlmIcon = BotIconGrok;
     } else if (modelName.startsWith("hunyuan")) {
       LlmIcon = BotIconHunyuan;
-    } else if (modelName.startsWith("doubao")) {
+    } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
       LlmIcon = BotIconDoubao;
-    } else if (modelName.startsWith("glm")) {
+    } else if (
+      modelName.startsWith("glm") ||
+      modelName.startsWith("cogview-") ||
+      modelName.startsWith("cogvideox-")
+    ) {
       LlmIcon = BotIconChatglm;
     }
 
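Note: avatar selection is plain prefix matching on the model name; "ep-" presumably catches ByteDance (Volcengine) inference endpoint IDs, which start with "ep-" rather than a model family name. A condensed sketch of the dispatch, with icon identifiers from the diff and a hypothetical fallback:

    function iconFor(modelName: string): string {
      if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
        return "BotIconDoubao";
      }
      if (
        modelName.startsWith("glm") ||
        modelName.startsWith("cogview-") ||
        modelName.startsWith("cogvideox-")
      ) {
        return "BotIconChatglm";
      }
      return "BotIconDefault"; // hypothetical fallback
    }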

app/constant.ts
@@ -110,6 +110,7 @@ export const UNFINISHED_INPUT = (id: string) => "unfinished-input-" + id;
 export const STORAGE_KEY = "chatgpt-next-web";
 
 export const REQUEST_TIMEOUT_MS = 60000;
+export const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;
 
 export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown";
 
@@ -589,7 +590,16 @@ const iflytekModels = [
 
 const deepseekModels = ["deepseek-chat", "deepseek-coder", "deepseek-reasoner"];
 
-const xAIModes = ["grok-beta"];
+const xAIModes = [
+  "grok-beta",
+  "grok-2",
+  "grok-2-1212",
+  "grok-2-latest",
+  "grok-vision-beta",
+  "grok-2-vision-1212",
+  "grok-2-vision",
+  "grok-2-vision-latest",
+];
 
 const chatglmModels = [
   "glm-4-plus",