diff --git a/README.md b/README.md
index b9e994e50..dd2c5b1ee 100644
--- a/README.md
+++ b/README.md
@@ -31,7 +31,7 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4
[MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple
[Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu
-[
](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [
](https://zeabur.com/templates/ZBUEFA) [
](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [
](https://www.bt.cn/new/download.html)
+[
](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [
](https://zeabur.com/templates/ZBUEFA) [
](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [
](https://www.bt.cn/new/download.html) [
](https://computenest.aliyun.com/market/service-f1c9b75e59814dc49d52)
[
](https://monica.im/?utm=nxcrp)
@@ -397,6 +397,9 @@ yarn dev
> [简体中文 > 如何部署到私人服务器](./README_CN.md#部署)
+### BT Install
+> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md)
+
### Docker (Recommended)
```shell
diff --git a/README_CN.md b/README_CN.md
index 3f339ea61..ccdcf28ff 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -264,6 +264,9 @@ BASE_URL=https://b.nextweb.fun/api/proxy
## 部署
+### 宝塔面板部署
+> [简体中文 > 如何通过宝塔一键部署](./docs/bt-cn.md)
+
### 容器部署 (推荐)
> Docker 版本需要在 20 及其以上,否则会提示找不到镜像。
diff --git a/app/client/api.ts b/app/client/api.ts
index 8fecf841f..1da81e964 100644
--- a/app/client/api.ts
+++ b/app/client/api.ts
@@ -70,7 +70,7 @@ export interface ChatOptions {
config: LLMConfig;
onUpdate?: (message: string, chunk: string) => void;
- onFinish: (message: string) => void;
+ onFinish: (message: string, responseRes: Response) => void;
onError?: (err: Error) => void;
onController?: (controller: AbortController) => void;
onBeforeTool?: (tool: ChatMessageTool) => void;
diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts
index 86229a147..6fe69e87a 100644
--- a/app/client/platforms/alibaba.ts
+++ b/app/client/platforms/alibaba.ts
@@ -143,6 +143,7 @@ export class QwenApi implements LLMApi {
let responseText = "";
let remainText = "";
let finished = false;
+ let responseRes: Response;
// animate response to make it looks smooth
function animateResponseText() {
@@ -172,7 +173,7 @@ export class QwenApi implements LLMApi {
const finish = () => {
if (!finished) {
finished = true;
- options.onFinish(responseText + remainText);
+ options.onFinish(responseText + remainText, responseRes);
}
};
@@ -188,6 +189,7 @@ export class QwenApi implements LLMApi {
"[Alibaba] request response content type: ",
contentType,
);
+ responseRes = res;
if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
@@ -254,7 +256,7 @@ export class QwenApi implements LLMApi {
const resJson = await res.json();
const message = this.extractMessage(resJson);
- options.onFinish(message);
+ options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts
index 3645cbe6e..6747221a8 100644
--- a/app/client/platforms/anthropic.ts
+++ b/app/client/platforms/anthropic.ts
@@ -317,13 +317,14 @@ export class ClaudeApi implements LLMApi {
};
try {
- controller.signal.onabort = () => options.onFinish("");
+ controller.signal.onabort = () =>
+ options.onFinish("", new Response(null, { status: 400 }));
const res = await fetch(path, payload);
const resJson = await res.json();
const message = this.extractMessage(resJson);
- options.onFinish(message);
+ options.onFinish(message, res);
} catch (e) {
console.error("failed to chat", e);
options.onError?.(e as Error);
diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts
index 2511a696b..9e8c2f139 100644
--- a/app/client/platforms/baidu.ts
+++ b/app/client/platforms/baidu.ts
@@ -162,6 +162,7 @@ export class ErnieApi implements LLMApi {
let responseText = "";
let remainText = "";
let finished = false;
+ let responseRes: Response;
// animate response to make it looks smooth
function animateResponseText() {
@@ -191,7 +192,7 @@ export class ErnieApi implements LLMApi {
const finish = () => {
if (!finished) {
finished = true;
- options.onFinish(responseText + remainText);
+ options.onFinish(responseText + remainText, responseRes);
}
};
@@ -204,7 +205,7 @@ export class ErnieApi implements LLMApi {
clearTimeout(requestTimeoutId);
const contentType = res.headers.get("content-type");
console.log("[Baidu] request response content type: ", contentType);
-
+ responseRes = res;
if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
return finish();
@@ -267,7 +268,7 @@ export class ErnieApi implements LLMApi {
const resJson = await res.json();
const message = resJson?.result;
- options.onFinish(message);
+ options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts
index 000a9e278..a2f0660d8 100644
--- a/app/client/platforms/bytedance.ts
+++ b/app/client/platforms/bytedance.ts
@@ -130,6 +130,7 @@ export class DoubaoApi implements LLMApi {
let responseText = "";
let remainText = "";
let finished = false;
+ let responseRes: Response;
// animate response to make it looks smooth
function animateResponseText() {
@@ -159,7 +160,7 @@ export class DoubaoApi implements LLMApi {
const finish = () => {
if (!finished) {
finished = true;
- options.onFinish(responseText + remainText);
+ options.onFinish(responseText + remainText, responseRes);
}
};
@@ -175,7 +176,7 @@ export class DoubaoApi implements LLMApi {
"[ByteDance] request response content type: ",
contentType,
);
-
+ responseRes = res;
if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
return finish();
@@ -241,7 +242,7 @@ export class DoubaoApi implements LLMApi {
const resJson = await res.json();
const message = this.extractMessage(resJson);
- options.onFinish(message);
+ options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
diff --git a/app/client/platforms/glm.ts b/app/client/platforms/glm.ts
index 10696ee82..a7965947f 100644
--- a/app/client/platforms/glm.ts
+++ b/app/client/platforms/glm.ts
@@ -177,7 +177,7 @@ export class ChatGLMApi implements LLMApi {
const resJson = await res.json();
const message = this.extractMessage(resJson);
- options.onFinish(message);
+ options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts
index a4b594ddf..53ff00aee 100644
--- a/app/client/platforms/google.ts
+++ b/app/client/platforms/google.ts
@@ -274,7 +274,7 @@ export class GeminiProApi implements LLMApi {
);
}
const message = apiClient.extractMessage(resJson);
- options.onFinish(message);
+ options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
diff --git a/app/client/platforms/iflytek.ts b/app/client/platforms/iflytek.ts
index 55a39d0cc..cfc37b3b2 100644
--- a/app/client/platforms/iflytek.ts
+++ b/app/client/platforms/iflytek.ts
@@ -117,6 +117,7 @@ export class SparkApi implements LLMApi {
let responseText = "";
let remainText = "";
let finished = false;
+ let responseRes: Response;
// Animate response text to make it look smooth
function animateResponseText() {
@@ -143,7 +144,7 @@ export class SparkApi implements LLMApi {
const finish = () => {
if (!finished) {
finished = true;
- options.onFinish(responseText + remainText);
+ options.onFinish(responseText + remainText, responseRes);
}
};
@@ -156,7 +157,7 @@ export class SparkApi implements LLMApi {
clearTimeout(requestTimeoutId);
const contentType = res.headers.get("content-type");
console.log("[Spark] request response content type: ", contentType);
-
+ responseRes = res;
if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
return finish();
@@ -231,7 +232,7 @@ export class SparkApi implements LLMApi {
const resJson = await res.json();
const message = this.extractMessage(resJson);
- options.onFinish(message);
+ options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
diff --git a/app/client/platforms/moonshot.ts b/app/client/platforms/moonshot.ts
index 22a34b2e2..b6812c0d7 100644
--- a/app/client/platforms/moonshot.ts
+++ b/app/client/platforms/moonshot.ts
@@ -180,7 +180,7 @@ export class MoonshotApi implements LLMApi {
const resJson = await res.json();
const message = this.extractMessage(resJson);
- options.onFinish(message);
+ options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts
index 30f7415c1..6e893ed14 100644
--- a/app/client/platforms/openai.ts
+++ b/app/client/platforms/openai.ts
@@ -361,7 +361,7 @@ export class ChatGPTApi implements LLMApi {
const resJson = await res.json();
const message = await this.extractMessage(resJson);
- options.onFinish(message);
+ options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
diff --git a/app/client/platforms/tencent.ts b/app/client/platforms/tencent.ts
index 3610fac0a..580844a5b 100644
--- a/app/client/platforms/tencent.ts
+++ b/app/client/platforms/tencent.ts
@@ -142,6 +142,7 @@ export class HunyuanApi implements LLMApi {
let responseText = "";
let remainText = "";
let finished = false;
+ let responseRes: Response;
// animate response to make it looks smooth
function animateResponseText() {
@@ -171,7 +172,7 @@ export class HunyuanApi implements LLMApi {
const finish = () => {
if (!finished) {
finished = true;
- options.onFinish(responseText + remainText);
+ options.onFinish(responseText + remainText, responseRes);
}
};
@@ -187,7 +188,7 @@ export class HunyuanApi implements LLMApi {
"[Tencent] request response content type: ",
contentType,
);
-
+ responseRes = res;
if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
return finish();
@@ -253,7 +254,7 @@ export class HunyuanApi implements LLMApi {
const resJson = await res.json();
const message = this.extractMessage(resJson);
- options.onFinish(message);
+ options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
diff --git a/app/client/platforms/xai.ts b/app/client/platforms/xai.ts
index deb74e66c..06dbaaa29 100644
--- a/app/client/platforms/xai.ts
+++ b/app/client/platforms/xai.ts
@@ -173,7 +173,7 @@ export class XAIApi implements LLMApi {
const resJson = await res.json();
const message = this.extractMessage(resJson);
- options.onFinish(message);
+ options.onFinish(message, res);
}
} catch (e) {
console.log("[Request] failed to make a chat request", e);
diff --git a/app/store/chat.ts b/app/store/chat.ts
index 6900899e1..1bf2e1367 100644
--- a/app/store/chat.ts
+++ b/app/store/chat.ts
@@ -649,13 +649,14 @@ export const useChatStore = createPersistStore(
stream: false,
providerName,
},
- onFinish(message) {
- if (!isValidMessage(message)) return;
- get().updateCurrentSession(
- (session) =>
- (session.topic =
- message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
- );
+ onFinish(message, responseRes) {
+ if (responseRes?.status === 200) {
+ get().updateCurrentSession(
+ (session) =>
+ (session.topic =
+ message.length > 0 ? trimTopic(message) : DEFAULT_TOPIC),
+ );
+ }
},
});
}
@@ -669,7 +670,7 @@ export const useChatStore = createPersistStore(
const historyMsgLength = countMessages(toBeSummarizedMsgs);
- if (historyMsgLength > modelConfig?.max_tokens ?? 4000) {
+ if (historyMsgLength > (modelConfig?.max_tokens || 4000)) {
const n = toBeSummarizedMsgs.length;
toBeSummarizedMsgs = toBeSummarizedMsgs.slice(
Math.max(0, n - modelConfig.historyMessageCount),
@@ -715,22 +716,20 @@ export const useChatStore = createPersistStore(
onUpdate(message) {
session.memoryPrompt = message;
},
- onFinish(message) {
- console.log("[Memory] ", message);
- get().updateCurrentSession((session) => {
- session.lastSummarizeIndex = lastSummarizeIndex;
- session.memoryPrompt = message; // Update the memory prompt for stored it in local storage
- });
+ onFinish(message, responseRes) {
+ if (responseRes?.status === 200) {
+ console.log("[Memory] ", message);
+ get().updateCurrentSession((session) => {
+ session.lastSummarizeIndex = lastSummarizeIndex;
+ session.memoryPrompt = message; // Update the memory prompt for stored it in local storage
+ });
+ }
},
onError(err) {
console.error("[Summarize] ", err);
},
});
}
-
- function isValidMessage(message: any): boolean {
- return typeof message === "string" && !message.startsWith("```json");
- }
},
updateStat(message: ChatMessage) {
diff --git a/app/utils.ts b/app/utils.ts
index c444f8ef4..2e1f94016 100644
--- a/app/utils.ts
+++ b/app/utils.ts
@@ -266,7 +266,9 @@ export function isVisionModel(model: string) {
model.includes("gpt-4-turbo") && !model.includes("preview");
return (
- visionKeywords.some((keyword) => model.includes(keyword)) || isGpt4Turbo
+ visionKeywords.some((keyword) => model.includes(keyword)) ||
+ isGpt4Turbo ||
+ isDalle3(model)
);
}
diff --git a/app/utils/chat.ts b/app/utils/chat.ts
index ba1904625..9209b5da5 100644
--- a/app/utils/chat.ts
+++ b/app/utils/chat.ts
@@ -174,6 +174,7 @@ export function stream(
let finished = false;
let running = false;
let runTools: any[] = [];
+ let responseRes: Response;
// animate response to make it looks smooth
function animateResponseText() {
@@ -272,7 +273,7 @@ export function stream(
}
console.debug("[ChatAPI] end");
finished = true;
- options.onFinish(responseText + remainText);
+ options.onFinish(responseText + remainText, responseRes); // pass the fetch Response through to onFinish
}
};
@@ -304,6 +305,7 @@ export function stream(
clearTimeout(requestTimeoutId);
const contentType = res.headers.get("content-type");
console.log("[Request] response content type: ", contentType);
+ responseRes = res;
if (contentType?.startsWith("text/plain")) {
responseText = await res.clone().text();
diff --git a/app/utils/stream.ts b/app/utils/stream.ts
index 782634595..f186730f6 100644
--- a/app/utils/stream.ts
+++ b/app/utils/stream.ts
@@ -19,7 +19,7 @@ type StreamResponse = {
headers: Record<string, string>;
};
-export function fetch(url: string, options?: RequestInit): Promise<any> {
+export function fetch(url: string, options?: RequestInit): Promise<Response> {
if (window.__TAURI__) {
const {
signal,
diff --git a/docs/bt-cn.md b/docs/bt-cn.md
new file mode 100644
index 000000000..115fbbd70
--- /dev/null
+++ b/docs/bt-cn.md
@@ -0,0 +1,29 @@
+# 宝塔面板 的部署说明
+
+## 拥有自己的宝塔
+当你需要通过 宝塔面板 部署本项目之前,需要在服务器上先安装好 宝塔面板工具。 接下来的 部署流程 都建立在已有宝塔面板的前提下。宝塔安装请参考 ([宝塔官网](https://www.bt.cn/new/download.html))
+
+> 注意:本项目需要宝塔面板版本 9.2.0 及以上
+
+## 一键安装
+
+1. 在 宝塔面板 -> Docker -> 应用商店 页面,搜索 ChatGPT-Next-Web 找到本项目的docker应用;
+2. 点击 安装 开始部署本项目
+
+
+1. 在项目配置页,根据要求开始配置环境变量;
+2. 如勾选 允许外部访问 配置,请注意为配置的 web端口 开放安全组端口访问权限;
+3. 请确保你添加了正确的 OpenAI API Key,否则无法使用;当配置 OpenAI官方 提供的key(国内无法访问),请配置代理地址;
+4. 建议配置 访问权限密码,否则部署后所有人均可使用已配置的 OpenAI API Key(当允许外部访问时);
+5. 点击 确认 开始自动部署。
+
+## 如何访问
+
+通过根据 服务器IP地址 和配置的 web端口 http://$(host):$(port),在浏览器中打开 ChatGPT-Next-Web。
+
+
+若配置了 访问权限密码,访问大模型前需要登录,请点击 登录,获取访问权限。
+
+
+
+
diff --git a/docs/images/bt/bt-install-1.jpeg b/docs/images/bt/bt-install-1.jpeg
new file mode 100644
index 000000000..fff3406d6
Binary files /dev/null and b/docs/images/bt/bt-install-1.jpeg differ
diff --git a/docs/images/bt/bt-install-2.jpeg b/docs/images/bt/bt-install-2.jpeg
new file mode 100644
index 000000000..77256ef8d
Binary files /dev/null and b/docs/images/bt/bt-install-2.jpeg differ
diff --git a/docs/images/bt/bt-install-3.jpeg b/docs/images/bt/bt-install-3.jpeg
new file mode 100644
index 000000000..7790f89e8
Binary files /dev/null and b/docs/images/bt/bt-install-3.jpeg differ
diff --git a/docs/images/bt/bt-install-4.jpeg b/docs/images/bt/bt-install-4.jpeg
new file mode 100644
index 000000000..38d7caee4
Binary files /dev/null and b/docs/images/bt/bt-install-4.jpeg differ
diff --git a/docs/images/bt/bt-install-5.jpeg b/docs/images/bt/bt-install-5.jpeg
new file mode 100644
index 000000000..aa1a7963c
Binary files /dev/null and b/docs/images/bt/bt-install-5.jpeg differ
diff --git a/docs/images/bt/bt-install-6.jpeg b/docs/images/bt/bt-install-6.jpeg
new file mode 100644
index 000000000..42359e65b
Binary files /dev/null and b/docs/images/bt/bt-install-6.jpeg differ
diff --git a/package.json b/package.json
index 5bca3b327..a036969ac 100644
--- a/package.json
+++ b/package.json
@@ -58,7 +58,7 @@
"@tauri-apps/cli": "1.5.11",
"@testing-library/dom": "^10.4.0",
"@testing-library/jest-dom": "^6.6.2",
- "@testing-library/react": "^16.0.0",
+ "@testing-library/react": "^16.0.1",
"@types/jest": "^29.5.14",
"@types/js-yaml": "4.0.9",
"@types/lodash-es": "^4.17.12",
diff --git a/yarn.lock b/yarn.lock
index 9f8aa9f61..16b8b872e 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1201,14 +1201,7 @@
resolved "https://registry.yarnpkg.com/@babel/regjsgen/-/regjsgen-0.8.0.tgz#f0ba69b075e1f05fb2825b7fad991e7adbb18310"
integrity sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA==
-"@babel/runtime@^7.12.1", "@babel/runtime@^7.20.7", "@babel/runtime@^7.23.2", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2":
- version "7.23.6"
- resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.23.6.tgz#c05e610dc228855dc92ef1b53d07389ed8ab521d"
- integrity sha512-zHd0eUrf5GZoOWVCXp6koAKQTfZV07eit6bGPmJgnZdnSAvvZee6zniW2XMF7Cmc4ISOOnPy3QaSiIJGJkVEDQ==
- dependencies:
- regenerator-runtime "^0.14.0"
-
-"@babel/runtime@^7.12.5", "@babel/runtime@^7.21.0":
+"@babel/runtime@^7.12.1", "@babel/runtime@^7.12.5", "@babel/runtime@^7.20.7", "@babel/runtime@^7.21.0", "@babel/runtime@^7.23.2", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2":
version "7.25.0"
resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.25.0.tgz#3af9a91c1b739c569d5d80cc917280919c544ecb"
integrity sha512-7dRy4DwXwtzBrPbZflqxnvfxLF8kdZXPkhymtDeFoFqE6ldzjQFgYTtYIFARcLEYDrqfBfYcZt1WqFxRoyC9Rw==
@@ -2134,10 +2127,10 @@
lodash "^4.17.21"
redent "^3.0.0"
-"@testing-library/react@^16.0.0":
- version "16.0.0"
- resolved "https://registry.npmmirror.com/@testing-library/react/-/react-16.0.0.tgz#0a1e0c7a3de25841c3591b8cb7fb0cf0c0a27321"
- integrity sha512-guuxUKRWQ+FgNX0h0NS0FIq3Q3uLtWVpBzcLOggmfMoUpgBnzBzvLLd4fbm6yS8ydJd94cIfY4yP9qUQjM2KwQ==
+"@testing-library/react@^16.0.1":
+ version "16.0.1"
+ resolved "https://registry.yarnpkg.com/@testing-library/react/-/react-16.0.1.tgz#29c0ee878d672703f5e7579f239005e4e0faa875"
+ integrity sha512-dSmwJVtJXmku+iocRhWOUFbrERC76TX2Mnf0ATODz8brzAZrMBbzLwQixlBSanZxR6LddK3eiwpSFZgDET1URg==
dependencies:
"@babel/runtime" "^7.12.5"