Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-05-19 04:00:16 +09:00)
fix bug (trim eats space or \n mistakenly), optimize timeout by model
commit 476d946f96 (parent 9714258322)
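The commit does two things. First, the streaming parsers stop calling .trim() inside their emptiness checks: a whitespace-only delta (a lone space, or the "\n\n" between paragraphs) trims to the empty string, so the old check skipped the chunk and the assembled reply silently lost that whitespace. A minimal sketch of the failure mode, with made-up deltas (not taken from the commit):

// Hypothetical streamed deltas; real providers may chunk differently.
const deltas = ["Hello", " ", "world", "\n\n", "Next paragraph."];

// Old check: `d.trim().length > 0` treats " " and "\n\n" as empty, so they are dropped.
const before = deltas.filter((d) => d.trim().length > 0).join("");
// -> "HelloworldNext paragraph."  (the space and the blank line are gone)

// New check: only genuinely empty chunks are skipped; whitespace survives.
const after = deltas.filter((d) => d.length > 0).join("");
// -> "Hello world\n\nNext paragraph."

Second, every platform client now derives its abort timeout from the model name through a shared getTimeoutMSByModel helper (added to app/utils.ts at the end of this diff), replacing the inline REQUEST_TIMEOUT_MS / REQUEST_TIMEOUT_MS_FOR_THINKING ternaries each client had grown.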
app/client/platforms/alibaba.ts
@@ -1,10 +1,5 @@
 "use client";
-import {
-  ApiPath,
-  Alibaba,
-  ALIBABA_BASE_URL,
-  REQUEST_TIMEOUT_MS,
-} from "@/app/constant";
+import { ApiPath, Alibaba, ALIBABA_BASE_URL } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -25,6 +20,7 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

@@ -144,7 +140,7 @@ export class QwenApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
@@ -199,8 +195,8 @@ export class QwenApi implements LLMApi {

         // Skip if both content and reasoning_content are empty or null
         if (
-          (!reasoning || reasoning.trim().length === 0) &&
-          (!content || content.trim().length === 0)
+          (!reasoning || reasoning.length === 0) &&
+          (!content || content.length === 0)
         ) {
           return {
             isThinking: false,
@@ -208,12 +204,12 @@ export class QwenApi implements LLMApi {
           };
         }

-        if (reasoning && reasoning.trim().length > 0) {
+        if (reasoning && reasoning.length > 0) {
           return {
             isThinking: true,
             content: reasoning,
           };
-        } else if (content && content.trim().length > 0) {
+        } else if (content && content.length > 0) {
           return {
             isThinking: false,
             content: content,
app/client/platforms/baidu.ts
@@ -1,10 +1,5 @@
 "use client";
-import {
-  ApiPath,
-  Baidu,
-  BAIDU_BASE_URL,
-  REQUEST_TIMEOUT_MS,
-} from "@/app/constant";
+import { ApiPath, Baidu, BAIDU_BASE_URL } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { getAccessToken } from "@/app/utils/baidu";

@@ -23,7 +18,7 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { getMessageTextContent, getTimeoutMSByModel } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
@@ -155,7 +150,7 @@ export class ErnieApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
app/client/platforms/bytedance.ts
@@ -1,10 +1,5 @@
 "use client";
-import {
-  ApiPath,
-  ByteDance,
-  BYTEDANCE_BASE_URL,
-  REQUEST_TIMEOUT_MS,
-} from "@/app/constant";
+import { ApiPath, ByteDance, BYTEDANCE_BASE_URL } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -25,7 +20,10 @@ import {
 import { streamWithThink } from "@/app/utils/chat";
 import { getClientConfig } from "@/app/config/client";
 import { preProcessImageContent } from "@/app/utils/chat";
-import { getMessageTextContentWithoutThinking } from "@/app/utils";
+import {
+  getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

 export interface OpenAIListModelResponse {
@@ -130,7 +128,7 @@ export class DoubaoApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
@@ -184,8 +182,8 @@ export class DoubaoApi implements LLMApi {

         // Skip if both content and reasoning_content are empty or null
         if (
-          (!reasoning || reasoning.trim().length === 0) &&
-          (!content || content.trim().length === 0)
+          (!reasoning || reasoning.length === 0) &&
+          (!content || content.length === 0)
         ) {
           return {
             isThinking: false,
@@ -193,12 +191,12 @@ export class DoubaoApi implements LLMApi {
           };
         }

-        if (reasoning && reasoning.trim().length > 0) {
+        if (reasoning && reasoning.length > 0) {
           return {
             isThinking: true,
             content: reasoning,
           };
-        } else if (content && content.trim().length > 0) {
+        } else if (content && content.length > 0) {
           return {
             isThinking: false,
             content: content,
app/client/platforms/deepseek.ts
@@ -1,12 +1,6 @@
 "use client";
 // azure and openai, using same models. so using same LLMApi.
-import {
-  ApiPath,
-  DEEPSEEK_BASE_URL,
-  DeepSeek,
-  REQUEST_TIMEOUT_MS,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
-} from "@/app/constant";
+import { ApiPath, DEEPSEEK_BASE_URL, DeepSeek } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -26,6 +20,7 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -116,16 +111,10 @@ export class DeepSeekApi implements LLMApi {
       headers: getHeaders(),
     };

-    // console.log(chatPayload);
-
-    const isR1 =
-      options.config.model.endsWith("-reasoner") ||
-      options.config.model.endsWith("-r1");
-
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      isR1 ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
@@ -176,8 +165,8 @@ export class DeepSeekApi implements LLMApi {

         // Skip if both content and reasoning_content are empty or null
         if (
-          (!reasoning || reasoning.trim().length === 0) &&
-          (!content || content.trim().length === 0)
+          (!reasoning || reasoning.length === 0) &&
+          (!content || content.length === 0)
         ) {
           return {
             isThinking: false,
@@ -185,12 +174,12 @@ export class DeepSeekApi implements LLMApi {
           };
         }

-        if (reasoning && reasoning.trim().length > 0) {
+        if (reasoning && reasoning.length > 0) {
           return {
             isThinking: true,
             content: reasoning,
           };
-        } else if (content && content.trim().length > 0) {
+        } else if (content && content.length > 0) {
           return {
             isThinking: false,
             content: content,
app/client/platforms/glm.ts
@@ -1,10 +1,5 @@
 "use client";
-import {
-  ApiPath,
-  CHATGLM_BASE_URL,
-  ChatGLM,
-  REQUEST_TIMEOUT_MS,
-} from "@/app/constant";
+import { ApiPath, CHATGLM_BASE_URL, ChatGLM } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -21,7 +16,11 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import {
+  getMessageTextContent,
+  isVisionModel,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
 import { preProcessImageContent } from "@/app/utils/chat";
@@ -191,7 +190,7 @@ export class ChatGLMApi implements LLMApi {

     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (modelType === "image" || modelType === "video") {
app/client/platforms/google.ts
@@ -1,9 +1,4 @@
-import {
-  ApiPath,
-  Google,
-  REQUEST_TIMEOUT_MS,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
-} from "@/app/constant";
+import { ApiPath, Google } from "@/app/constant";
 import {
   ChatOptions,
   getHeaders,
@@ -27,6 +22,7 @@ import {
   getMessageTextContent,
   getMessageImages,
   isVisionModel,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { preProcessImageContent } from "@/app/utils/chat";
 import { nanoid } from "nanoid";
@@ -206,7 +202,7 @@ export class GeminiProApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      isThinking ? REQUEST_TIMEOUT_MS_FOR_THINKING : REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
app/client/platforms/openai.ts
@@ -8,7 +8,6 @@ import {
   Azure,
   REQUEST_TIMEOUT_MS,
   ServiceProvider,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
 } from "@/app/constant";
 import {
   ChatMessageTool,
@@ -42,6 +41,7 @@ import {
   getMessageTextContent,
   isVisionModel,
   isDalle3 as _isDalle3,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { fetch } from "@/app/utils/stream";

@@ -340,8 +340,8 @@ export class ChatGPTApi implements LLMApi {

         // Skip if both content and reasoning_content are empty or null
         if (
-          (!reasoning || reasoning.trim().length === 0) &&
-          (!content || content.trim().length === 0)
+          (!reasoning || reasoning.length === 0) &&
+          (!content || content.length === 0)
         ) {
           return {
             isThinking: false,
@@ -349,12 +349,12 @@ export class ChatGPTApi implements LLMApi {
           };
         }

-        if (reasoning && reasoning.trim().length > 0) {
+        if (reasoning && reasoning.length > 0) {
           return {
             isThinking: true,
             content: reasoning,
           };
-        } else if (content && content.trim().length > 0) {
+        } else if (content && content.length > 0) {
           return {
             isThinking: false,
             content: content,
@@ -396,9 +396,7 @@ export class ChatGPTApi implements LLMApi {
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        isDalle3 || isO1OrO3
-          ? REQUEST_TIMEOUT_MS_FOR_THINKING
-          : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        getTimeoutMSByModel(options.config.model),
       );

       const res = await fetch(chatPath, chatPayload);
app/client/platforms/siliconflow.ts
@@ -1,11 +1,6 @@
 "use client";
 // azure and openai, using same models. so using same LLMApi.
-import {
-  ApiPath,
-  SILICONFLOW_BASE_URL,
-  SiliconFlow,
-  REQUEST_TIMEOUT_MS_FOR_THINKING,
-} from "@/app/constant";
+import { ApiPath, SILICONFLOW_BASE_URL, SiliconFlow } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -25,6 +20,7 @@ import { getClientConfig } from "@/app/config/client";
 import {
   getMessageTextContent,
   getMessageTextContentWithoutThinking,
+  getTimeoutMSByModel,
 } from "@/app/utils";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -123,7 +119,7 @@ export class SiliconflowApi implements LLMApi {
     // Use extended timeout for thinking models as they typically require more processing time
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS_FOR_THINKING,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
app/client/platforms/tencent.ts
@@ -1,5 +1,5 @@
 "use client";
-import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ApiPath, TENCENT_BASE_URL } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";

 import {
@@ -17,7 +17,11 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent, isVisionModel } from "@/app/utils";
+import {
+  getMessageTextContent,
+  isVisionModel,
+  getTimeoutMSByModel,
+} from "@/app/utils";
 import mapKeys from "lodash-es/mapKeys";
 import mapValues from "lodash-es/mapValues";
 import isArray from "lodash-es/isArray";
@@ -135,7 +139,7 @@ export class HunyuanApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
app/client/platforms/xai.ts
@@ -1,6 +1,6 @@
 "use client";
 // azure and openai, using same models. so using same LLMApi.
-import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant";
+import { ApiPath, XAI_BASE_URL, XAI } from "@/app/constant";
 import {
   useAccessStore,
   useAppConfig,
@@ -17,6 +17,7 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
+import { getTimeoutMSByModel } from "@/app/utils";
 import { preProcessImageContent } from "@/app/utils/chat";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
@@ -103,7 +104,7 @@ export class XAIApi implements LLMApi {
     // make a fetch request
     const requestTimeoutId = setTimeout(
       () => controller.abort(),
-      REQUEST_TIMEOUT_MS,
+      getTimeoutMSByModel(options.config.model),
     );

     if (shouldStream) {
app/utils.ts
@@ -2,7 +2,11 @@ import { useEffect, useState } from "react";
 import { showToast } from "./components/ui-lib";
 import Locale from "./locales";
 import { RequestMessage } from "./client/api";
-import { ServiceProvider } from "./constant";
+import {
+  REQUEST_TIMEOUT_MS,
+  REQUEST_TIMEOUT_MS_FOR_THINKING,
+  ServiceProvider,
+} from "./constant";
 // import { fetch as tauriFetch, ResponseType } from "@tauri-apps/api/http";
 import { fetch as tauriStreamFetch } from "./utils/stream";
 import { VISION_MODEL_REGEXES, EXCLUDE_VISION_MODEL_REGEXES } from "./constant";
@@ -292,6 +296,20 @@ export function isDalle3(model: string) {
   return "dall-e-3" === model;
 }

+export function getTimeoutMSByModel(model: string) {
+  model = model.toLowerCase();
+  if (
+    model.startsWith("dall-e") ||
+    model.startsWith("dalle") ||
+    model.startsWith("o1") ||
+    model.startsWith("o3") ||
+    model.includes("deepseek-r") ||
+    model.includes("-thinking")
+  )
+    return REQUEST_TIMEOUT_MS_FOR_THINKING;
+  return REQUEST_TIMEOUT_MS;
+}
+
 export function getModelSizes(model: string): ModelSize[] {
   if (isDalle3(model)) {
     return ["1024x1024", "1792x1024", "1024x1792"];
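For reference, a few illustrative calls to the new helper (model names chosen as examples only; the two constants come from app/constant.ts, where the thinking variant is the longer timeout):

// getTimeoutMSByModel lowercases the name, then pattern-matches it.
getTimeoutMSByModel("gpt-4o-mini");                   // REQUEST_TIMEOUT_MS
getTimeoutMSByModel("dall-e-3");                      // ..._FOR_THINKING (b64_json images are slow)
getTimeoutMSByModel("o1-preview");                    // ..._FOR_THINKING
getTimeoutMSByModel("DeepSeek-R1");                   // lowercased, matches "deepseek-r"
getTimeoutMSByModel("gemini-2.0-flash-thinking-exp"); // matches "-thinking"

This subsumes the per-client special cases removed above: deepseek.ts's isR1 ternary, google.ts's isThinking ternary, and openai.ts's isDalle3 || isO1OrO3 ternary.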