Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git
Synced 2025-05-19 04:00:16 +09:00
Commit c2edfec16f
@@ -22,7 +22,7 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { fetch } from "@/app/utils/stream";
 
 export interface OpenAIListModelResponse {
@@ -84,10 +84,11 @@ export class DoubaoApi implements LLMApi {
   }
 
   async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content: getMessageTextContent(v),
-    }));
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = await preProcessImageContent(v.content);
+      messages.push({ role: v.role, content });
+    }
 
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
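The Doubao client above (and the XAI client further down) now builds its message list with an awaited preProcessImageContent call instead of flattening each message to plain text. A minimal sketch of that pattern, with a hypothetical resolveImages stand-in for the real helper:

```ts
// Sketch only: resolveImages is a hypothetical stand-in for preProcessImageContent.
// The point is that preprocessing is async, so a plain synchronous map() no longer fits.
type Part =
  | { type: "text"; text: string }
  | { type: "image_url"; image_url: { url: string } };
type Content = string | Part[];

async function resolveImages(content: Content): Promise<Content> {
  if (typeof content === "string") return content; // text-only messages pass through
  // a real implementation would resolve image parts (e.g. inline them as data URLs);
  // this stub returns the multimodal parts unchanged
  return content;
}

async function buildMessages(input: { role: string; content: Content }[]) {
  const messages: { role: string; content: Content }[] = [];
  for (const v of input) {
    messages.push({ role: v.role, content: await resolveImages(v.content) });
  }
  return messages;
}
```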
@@ -5,6 +5,7 @@ import {
   DEEPSEEK_BASE_URL,
   DeepSeek,
   REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   useAccessStore,
@@ -117,10 +118,14 @@ export class DeepSeekApi implements LLMApi {
 
       // console.log(chatPayload);
 
+      const isR1 =
+        options.config.model.endsWith("-reasoner") ||
+        options.config.model.endsWith("-r1");
+
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
+        isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
       );
 
       if (shouldStream) {
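All of the timeout tweaks in this commit follow the same setTimeout-plus-AbortController idiom; only the chosen duration changes. A generic sketch of that idiom (url, init, and timeoutMs are placeholders, not values from the commit):

```ts
// Generic abort-on-timeout sketch; not copied from the project, just the underlying web API pattern.
async function fetchWithTimeout(url: string, init: RequestInit, timeoutMs: number) {
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
  try {
    // the request is aborted if it has not completed within timeoutMs
    return await fetch(url, { ...init, signal: controller.signal });
  } finally {
    clearTimeout(timeoutId);
  }
}
```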
@@ -1,4 +1,9 @@
-import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import {
+  ApiPath,
+  Google,
+  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
+} from "@/app/constant";
 import {
   ChatOptions,
   getHeaders,
@@ -69,9 +74,16 @@ export class GeminiProApi implements LLMApi {
         .join("\n\n");
     };
 
+    let content = "";
+    if (Array.isArray(res)) {
+      res.map((item) => {
+        content += getTextFromParts(item?.candidates?.at(0)?.content?.parts);
+      });
+    }
+
     return (
       getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
-      getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
+      content || //getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
       res?.error?.message ||
       ""
     );
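The change above handles the case where the Gemini response arrives as an array of chunks rather than a single object: text is now concatenated from every element's first candidate instead of being read only from the first element. A rough sketch of the two shapes involved (the type names are illustrative, not the project's):

```ts
// Illustrative types; the real response objects come from the Gemini API.
type TextPart = { text?: string };
type Candidate = { content?: { parts?: TextPart[] } };
type GeminiChunk = { candidates?: Candidate[]; error?: { message?: string } };

function extractText(res: GeminiChunk | GeminiChunk[]): string {
  const textOf = (r?: GeminiChunk) =>
    (r?.candidates?.[0]?.content?.parts ?? []).map((p) => p.text ?? "").join("");
  return Array.isArray(res)
    ? res.map(textOf).join("") // concatenate every chunk, not just res[0]
    : textOf(res);
}
```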
@@ -190,10 +202,11 @@ export class GeminiProApi implements LLMApi {
         headers: getHeaders(),
       };
 
+      const isThinking = options.config.model.includes("-thinking");
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
+        isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
       );
 
       if (shouldStream) {
@@ -8,6 +8,7 @@ import {
   Azure,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   ChatMessageTool,
@@ -195,7 +196,9 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;
 
     const isDalle3 = _isDalle3(options.config.model);
-    const isO1OrO3 = options.config.model.startsWith("o1") || options.config.model.startsWith("o3");
+    const isO1OrO3 =
+      options.config.model.startsWith("o1") ||
+      options.config.model.startsWith("o3");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -359,7 +362,9 @@ export class ChatGPTApi implements LLMApi {
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        isDalle3 || isO1OrO3 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        isDalle3 || isO1OrO3
+          ? REQUEST_TIMEOUT_MS_FOR_THINKING
+          : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
       );
 
       const res = await fetch(chatPath, chatPayload);
@@ -4,7 +4,7 @@ import {
   ApiPath,
   SILICONFLOW_BASE_URL,
   SiliconFlow,
-  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   useAccessStore,
@@ -120,11 +120,11 @@ export class SiliconflowApi implements LLMApi {
 
       // console.log(chatPayload);
 
-      // make a fetch request
-      const requestTimeoutId = setTimeout(() => {
-        console.error("[Request] SiliconFlow API timeout");
-        controller.abort();
-      }, 10 * REQUEST_TIMEOUT_MS);
+      // Use extended timeout for thinking models as they typically require more processing time
+      const requestTimeoutId = setTimeout(
+        () => controller.abort(),
+        REQUEST_TIMEOUT_MS_FOR_THINKING,
+      );
 
       if (shouldStream) {
         const [tools, funcs] = usePluginStore
@@ -17,7 +17,7 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
 
@@ -62,7 +62,7 @@ export class XAIApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = getMessageTextContent(v);
+      const content = await preProcessImageContent(v.content);
       messages.push({ role: v.role, content });
     }
 
@@ -54,6 +54,8 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     if (
       modelName.startsWith("gpt") ||
       modelName.startsWith("chatgpt") ||
       modelName.startsWith("dall-e") ||
-      modelName.startsWith("dalle")
+      modelName.startsWith("dalle") ||
+      modelName.startsWith("o1") ||
+      modelName.startsWith("o3")
     ) {
@@ -80,9 +82,13 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
       LlmIcon = BotIconGrok;
     } else if (modelName.startsWith("hunyuan")) {
       LlmIcon = BotIconHunyuan;
-    } else if (modelName.startsWith("doubao")) {
+    } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
       LlmIcon = BotIconDoubao;
-    } else if (modelName.startsWith("glm")) {
+    } else if (
+      modelName.startsWith("glm") ||
+      modelName.startsWith("cogview-") ||
+      modelName.startsWith("cogvideox-")
+    ) {
       LlmIcon = BotIconChatglm;
     }
 
@@ -110,6 +110,7 @@ export const UNFINISHED_INPUT = (id: string) => "unfinished-input-" + id;
 export const STORAGE_KEY = "chatgpt-next-web";
 
 export const REQUEST_TIMEOUT_MS = 60000;
+export const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;
 
 export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown";
 
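For reference, the arithmetic behind the new constant: REQUEST_TIMEOUT_MS is 60000 ms (one minute), so REQUEST_TIMEOUT_MS_FOR_THINKING is 60000 * 5 = 300000 ms (five minutes). A sketch that collects the model-name heuristics the hunks above use to pick between the two (pickTimeout is an illustrative helper, not part of the commit; each provider client inlines its own check, openai.ts also applies the long timeout to DALL·E 3, and siliconflow.ts now always uses the long timeout):

```ts
// Illustrative helper only; mirrors the per-provider checks shown in the diff.
const REQUEST_TIMEOUT_MS = 60000; // 60 s
const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5; // 300 s

function pickTimeout(model: string): number {
  const isThinking =
    model.endsWith("-reasoner") || // DeepSeek reasoner
    model.endsWith("-r1") || // DeepSeek R1
    model.includes("-thinking") || // Gemini thinking variants
    model.startsWith("o1") || // OpenAI reasoning models
    model.startsWith("o3");
  return isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS;
}
```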