Merge pull request #6204 from bestsanmao/ali_bytedance_reasoning_content

Add three types of reasoning_content support (deepseek-r1 via OpenAI-compatible, Alibaba, and ByteDance APIs), and parse <think></think> tags from SSE streams.
RiverRay 2025-02-13 14:53:47 +08:00 committed by GitHub
commit 12863f5213
12 changed files with 316 additions and 292 deletions
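Note on the overall shape of the change: each provider client below now hands streamWithThink() a parseSSE callback that reduces one SSE delta to a { isThinking, content } pair, whether the reasoning arrives as a reasoning_content field or inline between <think></think> tags. A minimal sketch of that contract (StreamChunk and parseDelta are illustrative names only; the real callbacks are inline in each client in this diff):

    // Sketch of the value every parseSSE callback returns to streamWithThink().
    interface StreamChunk {
      isThinking: boolean; // true while the model is emitting reasoning text
      content: string; // text to append to the reasoning block or to the answer
    }

    // Assumed OpenAI-compatible delta carrying an optional reasoning_content field.
    function parseDelta(delta: {
      content?: string | null;
      reasoning_content?: string | null;
    }): StreamChunk {
      if (delta.reasoning_content) {
        return { isThinking: true, content: delta.reasoning_content };
      }
      return { isThinking: false, content: delta.content ?? "" };
    }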

View File

@@ -1,12 +1,13 @@
 "use client";
-import {
-  ApiPath,
-  Alibaba,
-  ALIBABA_BASE_URL,
-  REQUEST_TIMEOUT_MS,
-} from "@/app/constant";
-import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
-
+import { ApiPath, Alibaba, ALIBABA_BASE_URL } from "@/app/constant";
+import {
+  useAccessStore,
+  useAppConfig,
+  useChatStore,
+  ChatMessageTool,
+  usePluginStore,
+} from "@/app/store";
+import { streamWithThink } from "@/app/utils/chat";
 import {
   ChatOptions,
   getHeaders,
@@ -15,14 +16,12 @@ import {
   SpeechOptions,
   MultimodalContent,
 } from "../api";
-import Locale from "../../locales";
-import {
-  EventStreamContentType,
-  fetchEventSource,
-} from "@fortaine/fetch-event-source";
-import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import {
+  getMessageTextContent,
+  getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import { fetch } from "@/app/utils/stream";
 
 export interface OpenAIListModelResponse {
@@ -92,7 +91,10 @@ export class QwenApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages = options.messages.map((v) => ({
       role: v.role,
-      content: getMessageTextContent(v),
+      content:
+        v.role === "assistant"
+          ? getMessageTextContentWithoutThinking(v)
+          : getMessageTextContent(v),
     }));
 
     const modelConfig = {
@@ -122,134 +124,118 @@ export class QwenApi implements LLMApi {
     options.onController?.(controller);
 
     try {
+      const headers = {
+        ...getHeaders(),
+        "X-DashScope-SSE": shouldStream ? "enable" : "disable",
+      };
+
       const chatPath = this.path(Alibaba.ChatPath);
       const chatPayload = {
         method: "POST",
         body: JSON.stringify(requestPayload),
         signal: controller.signal,
-        headers: {
-          ...getHeaders(),
-          "X-DashScope-SSE": shouldStream ? "enable" : "disable",
-        },
+        headers: headers,
       };
 
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
+        getTimeoutMSByModel(options.config.model),
       );
 
       if (shouldStream) {
-        let responseText = "";
-        let remainText = "";
-        let finished = false;
-        let responseRes: Response;
-
-        // animate response to make it looks smooth
-        function animateResponseText() {
-          if (finished || controller.signal.aborted) {
-            responseText += remainText;
-            console.log("[Response Animation] finished");
-            if (responseText?.length === 0) {
-              options.onError?.(new Error("empty response from server"));
-            }
-            return;
-          }
-
-          if (remainText.length > 0) {
-            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
-            const fetchText = remainText.slice(0, fetchCount);
-            responseText += fetchText;
-            remainText = remainText.slice(fetchCount);
-            options.onUpdate?.(responseText, fetchText);
-          }
-
-          requestAnimationFrame(animateResponseText);
-        }
-
-        // start animaion
-        animateResponseText();
-
-        const finish = () => {
-          if (!finished) {
-            finished = true;
-            options.onFinish(responseText + remainText, responseRes);
-          }
-        };
-
-        controller.signal.onabort = finish;
-
-        fetchEventSource(chatPath, {
-          fetch: fetch as any,
-          ...chatPayload,
-          async onopen(res) {
-            clearTimeout(requestTimeoutId);
-            const contentType = res.headers.get("content-type");
-            console.log(
-              "[Alibaba] request response content type: ",
-              contentType,
-            );
-            responseRes = res;
-            if (contentType?.startsWith("text/plain")) {
-              responseText = await res.clone().text();
-              return finish();
-            }
-
-            if (
-              !res.ok ||
-              !res.headers
-                .get("content-type")
-                ?.startsWith(EventStreamContentType) ||
-              res.status !== 200
-            ) {
-              const responseTexts = [responseText];
-              let extraInfo = await res.clone().text();
-              try {
-                const resJson = await res.clone().json();
-                extraInfo = prettyObject(resJson);
-              } catch {}
-
-              if (res.status === 401) {
-                responseTexts.push(Locale.Error.Unauthorized);
-              }
-
-              if (extraInfo) {
-                responseTexts.push(extraInfo);
-              }
-
-              responseText = responseTexts.join("\n\n");
-
-              return finish();
-            }
-          },
-          onmessage(msg) {
-            if (msg.data === "[DONE]" || finished) {
-              return finish();
-            }
-            const text = msg.data;
-            try {
-              const json = JSON.parse(text);
-              const choices = json.output.choices as Array<{
-                message: { content: string };
-              }>;
-              const delta = choices[0]?.message?.content;
-              if (delta) {
-                remainText += delta;
-              }
-            } catch (e) {
-              console.error("[Request] parse error", text, msg);
-            }
-          },
-          onclose() {
-            finish();
-          },
-          onerror(e) {
-            options.onError?.(e);
-            throw e;
-          },
-          openWhenHidden: true,
-        });
+        const [tools, funcs] = usePluginStore
+          .getState()
+          .getAsTools(
+            useChatStore.getState().currentSession().mask?.plugin || [],
+          );
+        return streamWithThink(
+          chatPath,
+          requestPayload,
+          headers,
+          tools as any,
+          funcs,
+          controller,
+          // parseSSE
+          (text: string, runTools: ChatMessageTool[]) => {
+            // console.log("parseSSE", text, runTools);
+            const json = JSON.parse(text);
+            const choices = json.output.choices as Array<{
+              message: {
+                content: string | null;
+                tool_calls: ChatMessageTool[];
+                reasoning_content: string | null;
+              };
+            }>;
+
+            if (!choices?.length) return { isThinking: false, content: "" };
+
+            const tool_calls = choices[0]?.message?.tool_calls;
+            if (tool_calls?.length > 0) {
+              const index = tool_calls[0]?.index;
+              const id = tool_calls[0]?.id;
+              const args = tool_calls[0]?.function?.arguments;
+              if (id) {
+                runTools.push({
+                  id,
+                  type: tool_calls[0]?.type,
+                  function: {
+                    name: tool_calls[0]?.function?.name as string,
+                    arguments: args,
+                  },
+                });
+              } else {
+                // @ts-ignore
+                runTools[index]["function"]["arguments"] += args;
+              }
+            }
+
+            const reasoning = choices[0]?.message?.reasoning_content;
+            const content = choices[0]?.message?.content;
+
+            // Skip if both content and reasoning_content are empty or null
+            if (
+              (!reasoning || reasoning.length === 0) &&
+              (!content || content.length === 0)
+            ) {
+              return {
+                isThinking: false,
+                content: "",
+              };
+            }
+
+            if (reasoning && reasoning.length > 0) {
+              return {
+                isThinking: true,
+                content: reasoning,
+              };
+            } else if (content && content.length > 0) {
+              return {
+                isThinking: false,
+                content: content,
+              };
+            }
+
+            return {
+              isThinking: false,
+              content: "",
+            };
+          },
+          // processToolMessage, include tool_calls message and tool call results
+          (
+            requestPayload: RequestPayload,
+            toolCallMessage: any,
+            toolCallResult: any[],
+          ) => {
+            requestPayload?.input?.messages?.splice(
+              requestPayload?.input?.messages?.length,
+              0,
+              toolCallMessage,
+              ...toolCallResult,
+            );
+          },
+          options,
+        );
       } else {
         const res = await fetch(chatPath, chatPayload);
         clearTimeout(requestTimeoutId);

View File

@@ -1,10 +1,5 @@
 "use client";
-import {
-  ApiPath,
-  Baidu,
-  BAIDU_BASE_URL,
-  REQUEST_TIMEOUT_MS,
-} from "@/app/constant";
+import { ApiPath, Baidu, BAIDU_BASE_URL } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { getAccessToken } from "@/app/utils/baidu";
 
@@ -23,7 +18,7 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { getMessageTextContent, getTimeoutMSByModel } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";
 
 export interface OpenAIListModelResponse {
@@ -155,7 +150,7 @@ export class ErnieApi implements LLMApi {
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
+        getTimeoutMSByModel(options.config.model),
       );
 
       if (shouldStream) {

View File

@@ -1,11 +1,12 @@
 "use client";
-import {
-  ApiPath,
-  ByteDance,
-  BYTEDANCE_BASE_URL,
-  REQUEST_TIMEOUT_MS,
-} from "@/app/constant";
-import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
+import { ApiPath, ByteDance, BYTEDANCE_BASE_URL } from "@/app/constant";
+import {
+  useAccessStore,
+  useAppConfig,
+  useChatStore,
+  ChatMessageTool,
+  usePluginStore,
+} from "@/app/store";
 
 import {
   ChatOptions,
@@ -15,14 +16,14 @@ import {
   MultimodalContent,
   SpeechOptions,
 } from "../api";
-import Locale from "../../locales";
-import {
-  EventStreamContentType,
-  fetchEventSource,
-} from "@fortaine/fetch-event-source";
-import { prettyObject } from "@/app/utils/format";
+
+import { streamWithThink } from "@/app/utils/chat";
 import { getClientConfig } from "@/app/config/client";
 import { preProcessImageContent } from "@/app/utils/chat";
+import {
+  getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import { fetch } from "@/app/utils/stream";
 
 export interface OpenAIListModelResponse {
@@ -34,7 +35,7 @@ export interface OpenAIListModelResponse {
   }>;
 }
 
-interface RequestPayload {
+interface RequestPayloadForByteDance {
   messages: {
     role: "system" | "user" | "assistant";
     content: string | MultimodalContent[];
@@ -86,7 +87,10 @@ export class DoubaoApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = await preProcessImageContent(v.content);
+      const content =
+        v.role === "assistant"
+          ? getMessageTextContentWithoutThinking(v)
+          : await preProcessImageContent(v.content);
       messages.push({ role: v.role, content });
     }
 
@@ -99,7 +103,7 @@ export class DoubaoApi implements LLMApi {
     };
 
     const shouldStream = !!options.config.stream;
-    const requestPayload: RequestPayload = {
+    const requestPayload: RequestPayloadForByteDance = {
       messages,
       stream: shouldStream,
       model: modelConfig.model,
@@ -124,119 +128,101 @@ export class DoubaoApi implements LLMApi {
       // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
+        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
-        let responseText = "";
-        let remainText = "";
-        let finished = false;
-        let responseRes: Response;
-
-        // animate response to make it looks smooth
-        function animateResponseText() {
-          if (finished || controller.signal.aborted) {
-            responseText += remainText;
-            console.log("[Response Animation] finished");
-            if (responseText?.length === 0) {
-              options.onError?.(new Error("empty response from server"));
-            }
-            return;
-          }
-
-          if (remainText.length > 0) {
-            const fetchCount = Math.max(1, Math.round(remainText.length / 60));
-            const fetchText = remainText.slice(0, fetchCount);
-            responseText += fetchText;
-            remainText = remainText.slice(fetchCount);
-            options.onUpdate?.(responseText, fetchText);
-          }
-
-          requestAnimationFrame(animateResponseText);
-        }
-
-        // start animaion
-        animateResponseText();
-
-        const finish = () => {
-          if (!finished) {
-            finished = true;
-            options.onFinish(responseText + remainText, responseRes);
-          }
-        };
-
-        controller.signal.onabort = finish;
-
-        fetchEventSource(chatPath, {
-          fetch: fetch as any,
-          ...chatPayload,
-          async onopen(res) {
-            clearTimeout(requestTimeoutId);
-            const contentType = res.headers.get("content-type");
-            console.log(
-              "[ByteDance] request response content type: ",
-              contentType,
-            );
-            responseRes = res;
-            if (contentType?.startsWith("text/plain")) {
-              responseText = await res.clone().text();
-              return finish();
-            }
-
-            if (
-              !res.ok ||
-              !res.headers
-                .get("content-type")
-                ?.startsWith(EventStreamContentType) ||
-              res.status !== 200
-            ) {
-              const responseTexts = [responseText];
-              let extraInfo = await res.clone().text();
-              try {
-                const resJson = await res.clone().json();
-                extraInfo = prettyObject(resJson);
-              } catch {}
-
-              if (res.status === 401) {
-                responseTexts.push(Locale.Error.Unauthorized);
-              }
-
-              if (extraInfo) {
-                responseTexts.push(extraInfo);
-              }
-
-              responseText = responseTexts.join("\n\n");
-
-              return finish();
-            }
-          },
-          onmessage(msg) {
-            if (msg.data === "[DONE]" || finished) {
-              return finish();
-            }
-            const text = msg.data;
-            try {
-              const json = JSON.parse(text);
-              const choices = json.choices as Array<{
-                delta: { content: string };
-              }>;
-              const delta = choices[0]?.delta?.content;
-              if (delta) {
-                remainText += delta;
-              }
-            } catch (e) {
-              console.error("[Request] parse error", text, msg);
-            }
-          },
-          onclose() {
-            finish();
-          },
-          onerror(e) {
-            options.onError?.(e);
-            throw e;
-          },
-          openWhenHidden: true,
-        });
+        const [tools, funcs] = usePluginStore
+          .getState()
+          .getAsTools(
+            useChatStore.getState().currentSession().mask?.plugin || [],
+          );
+        return streamWithThink(
+          chatPath,
+          requestPayload,
+          getHeaders(),
+          tools as any,
+          funcs,
+          controller,
+          // parseSSE
+          (text: string, runTools: ChatMessageTool[]) => {
+            // console.log("parseSSE", text, runTools);
+            const json = JSON.parse(text);
+            const choices = json.choices as Array<{
+              delta: {
+                content: string | null;
+                tool_calls: ChatMessageTool[];
+                reasoning_content: string | null;
+              };
+            }>;
+
+            if (!choices?.length) return { isThinking: false, content: "" };
+
+            const tool_calls = choices[0]?.delta?.tool_calls;
+            if (tool_calls?.length > 0) {
+              const index = tool_calls[0]?.index;
+              const id = tool_calls[0]?.id;
+              const args = tool_calls[0]?.function?.arguments;
+              if (id) {
+                runTools.push({
+                  id,
+                  type: tool_calls[0]?.type,
+                  function: {
+                    name: tool_calls[0]?.function?.name as string,
+                    arguments: args,
+                  },
+                });
+              } else {
+                // @ts-ignore
+                runTools[index]["function"]["arguments"] += args;
+              }
+            }
+
+            const reasoning = choices[0]?.delta?.reasoning_content;
+            const content = choices[0]?.delta?.content;
+
+            // Skip if both content and reasoning_content are empty or null
+            if (
+              (!reasoning || reasoning.length === 0) &&
+              (!content || content.length === 0)
+            ) {
+              return {
+                isThinking: false,
+                content: "",
+              };
+            }
+
+            if (reasoning && reasoning.length > 0) {
+              return {
+                isThinking: true,
+                content: reasoning,
+              };
+            } else if (content && content.length > 0) {
+              return {
+                isThinking: false,
+                content: content,
+              };
+            }
+
+            return {
+              isThinking: false,
+              content: "",
+            };
+          },
+          // processToolMessage, include tool_calls message and tool call results
+          (
+            requestPayload: RequestPayloadForByteDance,
+            toolCallMessage: any,
+            toolCallResult: any[],
+          ) => {
+            requestPayload?.messages?.splice(
+              requestPayload?.messages?.length,
+              0,
+              toolCallMessage,
+              ...toolCallResult,
+            );
+          },
+          options,
+        );
      } else {
        const res = await fetch(chatPath, chatPayload);
        clearTimeout(requestTimeoutId);

View File

@@ -1,12 +1,6 @@
 "use client";
 // azure and openai, using same models. so using same LLMApi.
-import {
-  ApiPath,
-  DEEPSEEK_BASE_URL,
-  DeepSeek,
-  REQUEST_TIMEOUT_MS,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
-} from "@/app/constant";
+import { ApiPath, DEEPSEEK_BASE_URL, DeepSeek } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -26,6 +20,7 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -116,16 +111,10 @@ export class DeepSeekApi implements LLMApi {
        headers: getHeaders(),
      };

-      // console.log(chatPayload);
-
-      const isR1 =
-        options.config.model.endsWith("-reasoner") ||
-        options.config.model.endsWith("-r1");
-
      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
-        isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
+        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {
@@ -176,8 +165,8 @@ export class DeepSeekApi implements LLMApi {

            // Skip if both content and reasoning_content are empty or null
            if (
-              (!reasoning || reasoning.trim().length === 0) &&
-              (!content || content.trim().length === 0)
+              (!reasoning || reasoning.length === 0) &&
+              (!content || content.length === 0)
            ) {
              return {
                isThinking: false,
@@ -185,12 +174,12 @@ export class DeepSeekApi implements LLMApi {
              };
            }

-            if (reasoning && reasoning.trim().length > 0) {
+            if (reasoning && reasoning.length > 0) {
              return {
                isThinking: true,
                content: reasoning,
              };
-            } else if (content && content.trim().length > 0) {
+            } else if (content && content.length > 0) {
              return {
                isThinking: false,
                content: content,

View File

@@ -1,10 +1,5 @@
 "use client";
-import {
-  ApiPath,
-  CHATGLM_BASE_URL,
-  ChatGLM,
-  REQUEST_TIMEOUT_MS,
-} from "@/app/constant";
+import { ApiPath, CHATGLM_BASE_URL, ChatGLM } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -21,7 +16,11 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import {
+  getMessageTextContent,
+  isVisionModel,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
 import { preProcessImageContent } from "@/app/utils/chat";
@@ -191,7 +190,7 @@ export class ChatGLMApi implements LLMApi {
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
+        getTimeoutMSByModel(options.config.model),
      );

      if (modelType === "image" || modelType === "video") {

View File

@@ -1,9 +1,4 @@
-import {
-  ApiPath,
-  Google,
-  REQUEST_TIMEOUT_MS,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
-} from "@/app/constant";
+import { ApiPath, Google } from "@/app/constant";
 import {
   ChatOptions,
   getHeaders,
@@ -27,6 +22,7 @@ import {
   getMessageTextContent,
   getMessageImages,
   isVisionModel,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { preProcessImageContent } from "@/app/utils/chat";
 import { nanoid } from "nanoid";
@@ -206,7 +202,7 @@ export class GeminiProApi implements LLMApi {
    // make a fetch request
    const requestTimeoutId = setTimeout(
      () => controller.abort(),
-      isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
    );

    if (shouldStream) {

View File

@@ -8,7 +8,6 @@ import {
   Azure,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   ChatMessageTool,
@@ -22,7 +21,7 @@ import {
   preProcessImageContent,
   uploadImage,
   base64Image2Blob,
-  stream,
+  streamWithThink,
 } from "@/app/utils/chat";
 import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare";
 import { ModelSize, DalleQuality, DalleStyle } from "@/app/typing";
@@ -42,6 +41,7 @@ import {
   getMessageTextContent,
   isVisionModel,
   isDalle3 as _isDalle3,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";
 
@@ -294,7 +294,7 @@ export class ChatGPTApi implements LLMApi {
          useChatStore.getState().currentSession().mask?.plugin || [],
        );
      // console.log("getAsTools", tools, funcs);
-      stream(
+      streamWithThink(
        chatPath,
        requestPayload,
        getHeaders(),
@@ -309,8 +309,12 @@ export class ChatGPTApi implements LLMApi {
            delta: {
              content: string;
              tool_calls: ChatMessageTool[];
+              reasoning_content: string | null;
            };
          }>;
+
+          if (!choices?.length) return { isThinking: false, content: "" };
+
          const tool_calls = choices[0]?.delta?.tool_calls;
          if (tool_calls?.length > 0) {
            const id = tool_calls[0]?.id;
@@ -330,7 +334,37 @@ export class ChatGPTApi implements LLMApi {
              runTools[index]["function"]["arguments"] += args;
            }
          }
-          return choices[0]?.delta?.content;
+
+          const reasoning = choices[0]?.delta?.reasoning_content;
+          const content = choices[0]?.delta?.content;
+
+          // Skip if both content and reasoning_content are empty or null
+          if (
+            (!reasoning || reasoning.length === 0) &&
+            (!content || content.length === 0)
+          ) {
+            return {
+              isThinking: false,
+              content: "",
+            };
+          }
+
+          if (reasoning && reasoning.length > 0) {
+            return {
+              isThinking: true,
+              content: reasoning,
+            };
+          } else if (content && content.length > 0) {
+            return {
+              isThinking: false,
+              content: content,
+            };
+          }
+
+          return {
+            isThinking: false,
+            content: "",
+          };
        },
        // processToolMessage, include tool_calls message and tool call results
        (
@@ -362,9 +396,7 @@ export class ChatGPTApi implements LLMApi {
      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
-        isDalle3 || isO1OrO3
-          ? REQUEST_TIMEOUT_MS_FOR_THINKING
-          : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        getTimeoutMSByModel(options.config.model),
      );

      const res = await fetch(chatPath, chatPayload);

View File

@@ -4,7 +4,6 @@ import {
   ApiPath,
   SILICONFLOW_BASE_URL,
   SiliconFlow,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
   DEFAULT_MODELS,
 } from "@/app/constant";
 import {
@@ -27,6 +26,7 @@ import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
   isVisionModel,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -137,7 +137,7 @@ export class SiliconflowApi implements LLMApi {
      // Use extended timeout for thinking models as they typically require more processing time
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
-        REQUEST_TIMEOUT_MS_FOR_THINKING,
+        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {

View File

@@ -1,5 +1,5 @@
 "use client";
-import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ApiPath, TENCENT_BASE_URL } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 
 import {
@@ -17,7 +17,11 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import {
+  getMessageTextContent,
+  isVisionModel,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import mapKeys from "lodash-es/mapKeys";
 import mapValues from "lodash-es/mapValues";
 import isArray from "lodash-es/isArray";
@@ -135,7 +139,7 @@ export class HunyuanApi implements LLMApi {
      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
+        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {

View File

@@ -1,6 +1,6 @@
 "use client";
 // azure and openai, using same models. so using same LLMApi.
-import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ApiPath, XAI_BASE_URL, XAI } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -17,6 +17,7 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
+import { getTimeoutMSByModel } from "@/app/utils";
 import { preProcessImageContent } from "@/app/utils/chat";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -103,7 +104,7 @@ export class XAIApi implements LLMApi {
      // make a fetch request
      const requestTimeoutId = setTimeout(
        () => controller.abort(),
-        REQUEST_TIMEOUT_MS,
+        getTimeoutMSByModel(options.config.model),
      );

      if (shouldStream) {

View File

@@ -2,7 +2,11 @@ import { useEffect, useState } from "react";
 import { showToast } from "./components/ui-lib";
 import Locale from "./locales";
 import { RequestMessage } from "./client/api";
-import { ServiceProvider } from "./constant";
+import {
+  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
+  ServiceProvider,
+} from "./constant";
 // import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http";
 import { fetch as tauriStreamFetch } from "./utils/stream";
 import { VISION_MODEL_REGEXES, EXCLUDE_VISION_MODEL_REGEXES } from "./constant";
@@ -292,6 +296,20 @@ export function isDalle3(model: string) {
   return "dall-e-3" === model;
 }
 
+export function getTimeoutMSByModel(model: string) {
+  model = model.toLowerCase();
+  if (
+    model.startsWith("dall-e") ||
+    model.startsWith("dalle") ||
+    model.startsWith("o1") ||
+    model.startsWith("o3") ||
+    model.includes("deepseek-r") ||
+    model.includes("-thinking")
+  )
+    return REQUEST_TIMEOUT_MS_FOR_THINKING;
+  return REQUEST_TIMEOUT_MS;
+}
+
 export function getModelSizes(model: string): ModelSize[] {
   if (isDalle3(model)) {
     return ["1024x1024", "1792x1024", "1024x1792"];

View File

@@ -400,6 +400,7 @@ export function streamWithThink(
   let responseRes: Response;
   let isInThinkingMode = false;
   let lastIsThinking = false;
+  let lastIsThinkingTagged = false; // between <think> and </think> tags
 
   // animate response to make it looks smooth
   function animateResponseText() {
@@ -579,6 +580,23 @@ export function streamWithThink(
        if (!chunk?.content || chunk.content.length === 0) {
          return;
        }
+        // deal with <think> and </think> tags start
+        if (!chunk.isThinking) {
+          if (chunk.content.startsWith("<think>")) {
+            chunk.isThinking = true;
+            chunk.content = chunk.content.slice(7).trim();
+            lastIsThinkingTagged = true;
+          } else if (chunk.content.endsWith("</think>")) {
+            chunk.isThinking = false;
+            chunk.content = chunk.content.slice(0, -8).trim();
+            lastIsThinkingTagged = false;
+          } else if (lastIsThinkingTagged) {
+            chunk.isThinking = true;
+          }
+        }
+        // deal with <think> and </think> tags end
+
        // Check if thinking mode changed
        const isThinkingChanged = lastIsThinking !== chunk.isThinking;
        lastIsThinking = chunk.isThinking;
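To make the tag handling above concrete, here is a standalone sketch of the same branching (not the app's actual function), assuming a provider that wraps inline reasoning in <think></think> and that each piece arrives as its own SSE delta:

    // Mirrors the tag-stripping branch of streamWithThink(); illustrative only.
    function classify(chunks: string[]) {
      let tagged = false; // corresponds to lastIsThinkingTagged
      return chunks.map((content) => {
        let isThinking = false;
        if (content.startsWith("<think>")) {
          isThinking = true;
          content = content.slice(7).trim();
          tagged = true;
        } else if (content.endsWith("</think>")) {
          isThinking = false;
          content = content.slice(0, -8).trim();
          tagged = false;
        } else if (tagged) {
          isThinking = true;
        }
        return { isThinking, content };
      });
    }

    classify(["<think>", "Checking the docs", "</think>", "The answer is 42."]);
    // -> [ { isThinking: true,  content: "" },
    //      { isThinking: true,  content: "Checking the docs" },
    //      { isThinking: false, content: "" },
    //      { isThinking: false, content: "The answer is 42." } ]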