Merge pull request #6172 from bestsanmao/bug_fix

fix several bugs
RiverRay 2025-02-09 11:03:44 +08:00 committed by GitHub
commit c2edfec16f
8 changed files with 52 additions and 21 deletions

View File

@@ -22,7 +22,7 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
@@ -84,10 +84,11 @@ export class DoubaoApi implements LLMApi {
   }

   async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content: getMessageTextContent(v),
-    }));
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = await preProcessImageContent(v.content);
+      messages.push({ role: v.role, content });
+    }

     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
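
Note: DoubaoApi.chat() now builds messages with preProcessImageContent instead of getMessageTextContent, so multimodal (image) content is passed through rather than flattened to plain text. Because the preprocessing is async, the .map() call is replaced by a sequential loop. A minimal sketch of that pattern, with a hypothetical preprocess() standing in for preProcessImageContent (everything except the loop shape is an assumption):

  type Msg = { role: "system" | "user" | "assistant"; content: unknown };

  async function buildMessages(
    input: Msg[],
    preprocess: (content: unknown) => Promise<unknown>,
  ): Promise<Msg[]> {
    const messages: Msg[] = [];
    for (const v of input) {
      // Awaiting inside .map() would produce Promise<Msg>[]; loop and await instead.
      const content = await preprocess(v.content);
      messages.push({ role: v.role, content });
    }
    return messages;
  }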

View File

@@ -5,6 +5,7 @@ import {
   DEEPSEEK_BASE_URL,
   DeepSeek,
   REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   useAccessStore,
@@ -117,10 +118,14 @@ export class DeepSeekApi implements LLMApi {

       // console.log(chatPayload);

+      const isR1 =
+        options.config.model.endsWith("-reasoner") ||
+        options.config.model.endsWith("-r1");
+
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
+        isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
       );

       if (shouldStream) {
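
Note: DeepSeek's reasoning models (names ending in -reasoner or -r1) can take much longer before responding, so the abort timer now uses the new extended constant. A minimal sketch of the AbortController + setTimeout pattern this hunk relies on (the helper name and request shape are illustrative, not repo code):

  const REQUEST_TIMEOUT_MS = 60000;
  const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;

  async function fetchWithModelTimeout(model: string, url: string, body: unknown) {
    const controller = new AbortController();
    const isR1 = model.endsWith("-reasoner") || model.endsWith("-r1");
    // Abort the request if no response arrives within the chosen window.
    const timeoutId = setTimeout(
      () => controller.abort(),
      isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
    );
    try {
      return await fetch(url, {
        method: "POST",
        body: JSON.stringify(body),
        signal: controller.signal,
      });
    } finally {
      clearTimeout(timeoutId); // always clear the timer once the request settles
    }
  }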

View File

@@ -1,4 +1,9 @@
-import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import {
+  ApiPath,
+  Google,
+  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
+} from "@/app/constant";
 import {
   ChatOptions,
   getHeaders,
@@ -69,9 +74,16 @@ export class GeminiProApi implements LLMApi {
         .join("\n\n");
     };

+    let content = "";
+    if (Array.isArray(res)) {
+      res.map((item) => {
+        content += getTextFromParts(item?.candidates?.at(0)?.content?.parts);
+      });
+    }
+
     return (
       getTextFromParts(res?.candidates?.at(0)?.content?.parts) ||
-      getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
+      content || //getTextFromParts(res?.at(0)?.candidates?.at(0)?.content?.parts) ||
       res?.error?.message ||
       ""
     );
@@ -190,10 +202,11 @@ export class GeminiProApi implements LLMApi {
       headers: getHeaders(),
     };

+    const isThinking = options.config.model.includes("-thinking");
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
     );

     if (shouldStream) {
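
Note: the message extractor previously only read the first candidate of a single response object; when the Gemini endpoint returns an array of chunks, their texts are now concatenated. The second hunk also gives "-thinking" models the extended timeout. A loose-typed sketch of the aggregation (the type names and the simplified getTextFromParts are assumptions, not the repo's exact types):

  type Part = { text?: string };
  type Chunk = { candidates?: { content?: { parts?: Part[] } }[] };

  function getTextFromParts(parts?: Part[]): string {
    return (parts ?? [])
      .map((p) => p.text ?? "")
      .filter((t) => t.length > 0)
      .join("\n\n");
  }

  function extractText(res: Chunk | Chunk[]): string {
    if (Array.isArray(res)) {
      // Streamed responses arrive as a list of chunks; join their texts in order.
      return res
        .map((item) => getTextFromParts(item.candidates?.at(0)?.content?.parts))
        .join("");
    }
    return getTextFromParts(res.candidates?.at(0)?.content?.parts);
  }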

View File

@@ -8,6 +8,7 @@ import {
   Azure,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   ChatMessageTool,
@@ -195,7 +196,9 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;

     const isDalle3 = _isDalle3(options.config.model);
-    const isO1OrO3 = options.config.model.startsWith("o1") || options.config.model.startsWith("o3");
+    const isO1OrO3 =
+      options.config.model.startsWith("o1") ||
+      options.config.model.startsWith("o3");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
@@ -359,7 +362,9 @@
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        isDalle3 || isO1OrO3 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        isDalle3 || isO1OrO3
+          ? REQUEST_TIMEOUT_MS_FOR_THINKING
+          : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
       );

       const res = await fetch(chatPath, chatPayload);
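
Note: DALL·E 3 requests (slow because images come back as b64_json) and the o1/o3 reasoning models now share the extended timeout constant instead of the previous ad-hoc REQUEST_TIMEOUT_MS * 4. As a purely hypothetical consolidation, not code in this commit, the model checks scattered across the providers could be gathered into one helper:

  const REQUEST_TIMEOUT_MS = 60000;
  const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;

  // Hypothetical helper: pick the abort window from the model name.
  function selectRequestTimeout(model: string): number {
    const needsLongWindow =
      model.startsWith("o1") ||
      model.startsWith("o3") ||
      model.startsWith("dall-e") ||
      model.endsWith("-reasoner") ||
      model.endsWith("-r1") ||
      model.includes("-thinking");
    return needsLongWindow ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS;
  }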

View File

@@ -4,7 +4,7 @@ import {
   ApiPath,
   SILICONFLOW_BASE_URL,
   SiliconFlow,
-  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   useAccessStore,
@@ -120,11 +120,11 @@ export class SiliconflowApi implements LLMApi {

       // console.log(chatPayload);

-      // make a fetch request
-      const requestTimeoutId = setTimeout(() => {
-        console.error("[Request] SiliconFlow API timeout");
-        controller.abort();
-      }, 10 * REQUEST_TIMEOUT_MS);
+      // Use extended timeout for thinking models as they typically require more processing time
+      const requestTimeoutId = setTimeout(
+        () => controller.abort(),
+        REQUEST_TIMEOUT_MS_FOR_THINKING,
+      );

       if (shouldStream) {
         const [tools, funcs] = usePluginStore
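
Note: the previous SiliconFlow-specific timer used 10 * REQUEST_TIMEOUT_MS = 600000 ms (10 minutes) and logged a console.error on expiry; it is replaced by the shared pattern used in the other providers, so the effective limit drops to the standard 5-minute thinking window and the extra log line goes away.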

View File

@@ -17,7 +17,7 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";

@@ -62,7 +62,7 @@ export class XAIApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = getMessageTextContent(v);
+      const content = await preProcessImageContent(v.content);
       messages.push({ role: v.role, content });
     }
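
Note: this mirrors the DoubaoApi change above — XAIApi.chat() now runs each message's content through preProcessImageContent, so image content is kept instead of being reduced to plain text by getMessageTextContent.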

View File

@@ -54,6 +54,8 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
     if (
       modelName.startsWith("gpt") ||
       modelName.startsWith("chatgpt") ||
+      modelName.startsWith("dall-e") ||
+      modelName.startsWith("dalle") ||
       modelName.startsWith("o1") ||
       modelName.startsWith("o3")
     ) {
@@ -80,9 +82,13 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
       LlmIcon = BotIconGrok;
     } else if (modelName.startsWith("hunyuan")) {
       LlmIcon = BotIconHunyuan;
-    } else if (modelName.startsWith("doubao")) {
+    } else if (modelName.startsWith("doubao") || modelName.startsWith("ep-")) {
       LlmIcon = BotIconDoubao;
-    } else if (modelName.startsWith("glm")) {
+    } else if (
+      modelName.startsWith("glm") ||
+      modelName.startsWith("cogview-") ||
+      modelName.startsWith("cogvideox-")
+    ) {
       LlmIcon = BotIconChatglm;
     }
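
Note: the avatar now also recognizes dall-e/dalle model names (OpenAI group), ep-* Doubao endpoint IDs, and the cogview-/cogvideox- image and video models, which share the ChatGLM icon. The growing chain of startsWith checks could also be expressed as a lookup table; a sketch of that alternative (the group labels are placeholders, and the component itself keeps the if/else chain):

  // Illustrative prefix groups taken from this diff; the string labels are
  // placeholders, not the component's actual icon identifiers.
  const ICON_BY_PREFIX: [string[], string][] = [
    [["gpt", "chatgpt", "dall-e", "dalle", "o1", "o3"], "openai"],
    [["doubao", "ep-"], "doubao"],
    [["glm", "cogview-", "cogvideox-"], "chatglm"],
  ];

  function pickIconGroup(modelName: string): string | undefined {
    // First group whose prefix matches wins, mirroring the if/else order.
    return ICON_BY_PREFIX.find(([prefixes]) =>
      prefixes.some((p) => modelName.startsWith(p)),
    )?.[1];
  }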

View File

@@ -110,6 +110,7 @@ export const UNFINISHED_INPUT = (id: string) => "unfinished-input-" + id;

 export const STORAGE_KEY = "chatgpt-next-web";

 export const REQUEST_TIMEOUT_MS = 60000;
+export const REQUEST_TIMEOUT_MS_FOR_THINKING = REQUEST_TIMEOUT_MS * 5;

 export const EXPORT_MESSAGE_CLASS_NAME = "export-markdown";
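
Note: with REQUEST_TIMEOUT_MS at 60000 ms, REQUEST_TIMEOUT_MS_FOR_THINKING evaluates to 60000 * 5 = 300000 ms, so the "thinking" code paths above abort after 5 minutes instead of the usual 1 minute.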