mirror of
https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git
synced 2025-05-23 22:20:23 +09:00
Merge branch 'main' into main
This commit is contained in:
commit
aa1035d889
@ -50,4 +50,22 @@ DISABLE_FAST_LINK=
|
||||
# (optional)
|
||||
# Default: 1
|
||||
# If your project is not deployed on Vercel, set this value to 1.
|
||||
NEXT_PUBLIC_ENABLE_NODEJS_PLUGIN=1
|
||||
NEXT_PUBLIC_ENABLE_NODEJS_PLUGIN=1
|
||||
|
||||
# (optional)
|
||||
# Default: Empty
|
||||
# If you want to enable RAG, set this value to 1.
|
||||
ENABLE_RAG=
|
||||
|
||||
# (optional)
|
||||
# Default: Empty
|
||||
# Model used when RAG vectorizes data.
|
||||
RAG_EMBEDDING_MODEL=text-embedding-ada-002
|
||||
|
||||
# Configuration is required when turning on RAG.
|
||||
# Default: Empty
|
||||
QDRANT_URL=
|
||||
|
||||
# Configuration is required when turning on RAG.
|
||||
# Default: Empty
|
||||
QDRANT_API_KEY=
|
13
README.md
13
README.md
@ -25,7 +25,7 @@
|
||||
> [!WARNING]
|
||||
> 本项目插件功能基于 [OpenAI API 函数调用](https://platform.openai.com/docs/guides/function-calling) 功能实现,转发 GitHub Copilot 接口或类似实现的模拟接口并不能正常调用插件功能!
|
||||
|
||||

|
||||

|
||||
|
||||

|
||||
|
||||
@ -35,6 +35,9 @@
|
||||
|
||||
## 主要功能
|
||||
|
||||
- RAG 功能 (预览)
|
||||
- 配置请参考文档[RAG 功能配置说明](./docs/rag-cn.md)
|
||||
|
||||
- 除插件工具外,与原项目保持一致 [ChatGPT-Next-Web 主要功能](https://github.com/Yidadaa/ChatGPT-Next-Web#主要功能)
|
||||
|
||||
- 支持 OpenAI TTS(文本转语音)https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/208
|
||||
@ -142,7 +145,7 @@
|
||||
|
||||
- [x] 支持语音输入 https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/208
|
||||
|
||||
- [ ] 支持其他类型文件上传 https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/77
|
||||
- [x] 支持其他类型文件上传 https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/77
|
||||
|
||||
- [ ] 支持 Azure Storage https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/217
|
||||
|
||||
@ -295,11 +298,9 @@ docker run -d -p 3000:3000 \
|
||||
| [简体中文](./docs/synchronise-chat-logs-cn.md) | [English](./docs/synchronise-chat-logs-en.md) | [Italiano](./docs/synchronise-chat-logs-es.md) | [日本語](./docs/synchronise-chat-logs-ja.md) | [한국어](./docs/synchronise-chat-logs-ko.md)
|
||||
|
||||
|
||||
## 贡献者
|
||||
## Star History
|
||||
|
||||
<a href="https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=Hk-Gosuto/ChatGPT-Next-Web-LangChain" />
|
||||
</a>
|
||||
[](https://star-history.com/#Hk-Gosuto/ChatGPT-Next-Web-LangChain&Date)
|
||||
|
||||
## 捐赠
|
||||
|
||||
|
@ -13,6 +13,7 @@ const DANGER_CONFIG = {
|
||||
hideBalanceQuery: serverConfig.hideBalanceQuery,
|
||||
disableFastLink: serverConfig.disableFastLink,
|
||||
customModels: serverConfig.customModels,
|
||||
isEnableRAG: serverConfig.isEnableRAG,
|
||||
};
|
||||
|
||||
declare global {
|
||||
|
@ -2,6 +2,7 @@ import { getServerSideConfig } from "@/app/config/server";
|
||||
import LocalFileStorage from "@/app/utils/local_file_storage";
|
||||
import S3FileStorage from "@/app/utils/s3_file_storage";
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import mime from "mime";
|
||||
|
||||
async function handle(
|
||||
req: NextRequest,
|
||||
@ -13,19 +14,27 @@ async function handle(
|
||||
|
||||
try {
|
||||
const serverConfig = getServerSideConfig();
|
||||
const fileName = params.path[0];
|
||||
const contentType = mime.getType(fileName);
|
||||
|
||||
if (serverConfig.isStoreFileToLocal) {
|
||||
var fileBuffer = await LocalFileStorage.get(params.path[0]);
|
||||
var fileBuffer = await LocalFileStorage.get(fileName);
|
||||
return new Response(fileBuffer, {
|
||||
headers: {
|
||||
"Content-Type": "image/png",
|
||||
"Content-Type": contentType ?? "application/octet-stream",
|
||||
},
|
||||
});
|
||||
} else {
|
||||
var file = await S3FileStorage.get(params.path[0]);
|
||||
return new Response(file?.transformToWebStream(), {
|
||||
headers: {
|
||||
"Content-Type": "image/png",
|
||||
},
|
||||
var file = await S3FileStorage.get(fileName);
|
||||
if (file) {
|
||||
return new Response(file?.transformToWebStream(), {
|
||||
headers: {
|
||||
"Content-Type": contentType ?? "application/octet-stream",
|
||||
},
|
||||
});
|
||||
}
|
||||
return new Response("not found", {
|
||||
status: 404,
|
||||
});
|
||||
}
|
||||
} catch (e) {
|
||||
|
@ -4,6 +4,7 @@ import { auth } from "@/app/api/auth";
|
||||
import LocalFileStorage from "@/app/utils/local_file_storage";
|
||||
import { getServerSideConfig } from "@/app/config/server";
|
||||
import S3FileStorage from "@/app/utils/s3_file_storage";
|
||||
import path from "path";
|
||||
|
||||
async function handle(req: NextRequest) {
|
||||
if (req.method === "OPTIONS") {
|
||||
@ -19,20 +20,14 @@ async function handle(req: NextRequest) {
|
||||
|
||||
try {
|
||||
const formData = await req.formData();
|
||||
const image = formData.get("file") as File;
|
||||
const file = formData.get("file") as File;
|
||||
const fileData = await file.arrayBuffer();
|
||||
const originalFileName = file?.name;
|
||||
|
||||
const imageReader = image.stream().getReader();
|
||||
const imageData: number[] = [];
|
||||
|
||||
while (true) {
|
||||
const { done, value } = await imageReader.read();
|
||||
if (done) break;
|
||||
imageData.push(...value);
|
||||
}
|
||||
|
||||
const buffer = Buffer.from(imageData);
|
||||
|
||||
var fileName = `${Date.now()}.png`;
|
||||
if (!fileData) throw new Error("Get file buffer error");
|
||||
const buffer = Buffer.from(fileData);
|
||||
const fileType = path.extname(originalFileName).slice(1);
|
||||
var fileName = `${Date.now()}.${fileType}`;
|
||||
var filePath = "";
|
||||
const serverConfig = getServerSideConfig();
|
||||
if (serverConfig.isStoreFileToLocal) {
|
||||
|
@ -10,16 +10,15 @@ import { WolframAlphaTool } from "@/app/api/langchain-tools/wolframalpha";
|
||||
import { BilibiliVideoInfoTool } from "./bilibili_vid_info";
|
||||
import { BilibiliVideoSearchTool } from "./bilibili_vid_search";
|
||||
import { BilibiliMusicRecognitionTool } from "./bilibili_music_recognition";
|
||||
import { RAGSearch } from "./rag_search";
|
||||
|
||||
export class NodeJSTool {
|
||||
private apiKey: string | undefined;
|
||||
|
||||
private baseUrl: string;
|
||||
|
||||
private model: BaseLanguageModel;
|
||||
|
||||
private embeddings: Embeddings;
|
||||
|
||||
private sessionId: string;
|
||||
private ragEmbeddings: Embeddings;
|
||||
private callback?: (data: string) => Promise<void>;
|
||||
|
||||
constructor(
|
||||
@ -27,12 +26,16 @@ export class NodeJSTool {
|
||||
baseUrl: string,
|
||||
model: BaseLanguageModel,
|
||||
embeddings: Embeddings,
|
||||
sessionId: string,
|
||||
ragEmbeddings: Embeddings,
|
||||
callback?: (data: string) => Promise<void>,
|
||||
) {
|
||||
this.apiKey = apiKey;
|
||||
this.baseUrl = baseUrl;
|
||||
this.model = model;
|
||||
this.embeddings = embeddings;
|
||||
this.sessionId = sessionId;
|
||||
this.ragEmbeddings = ragEmbeddings;
|
||||
this.callback = callback;
|
||||
}
|
||||
|
||||
@ -66,6 +69,9 @@ export class NodeJSTool {
|
||||
bilibiliVideoSearchTool,
|
||||
bilibiliMusicRecognitionTool,
|
||||
];
|
||||
if (!!process.env.ENABLE_RAG) {
|
||||
tools.push(new RAGSearch(this.sessionId, this.model, this.ragEmbeddings));
|
||||
}
|
||||
return tools;
|
||||
}
|
||||
}
|
||||
|
79
app/api/langchain-tools/rag_search.ts
Normal file
79
app/api/langchain-tools/rag_search.ts
Normal file
@ -0,0 +1,79 @@
|
||||
import { Tool } from "@langchain/core/tools";
|
||||
import { CallbackManagerForToolRun } from "@langchain/core/callbacks/manager";
|
||||
import { BaseLanguageModel } from "langchain/dist/base_language";
|
||||
import { formatDocumentsAsString } from "langchain/util/document";
|
||||
import { Embeddings } from "langchain/dist/embeddings/base.js";
|
||||
import { RunnableSequence } from "@langchain/core/runnables";
|
||||
import { StringOutputParser } from "@langchain/core/output_parsers";
|
||||
import { Pinecone } from "@pinecone-database/pinecone";
|
||||
import { PineconeStore } from "@langchain/pinecone";
|
||||
import { getServerSideConfig } from "@/app/config/server";
|
||||
import { QdrantVectorStore } from "@langchain/community/vectorstores/qdrant";
|
||||
|
||||
export class RAGSearch extends Tool {
|
||||
static lc_name() {
|
||||
return "RAGSearch";
|
||||
}
|
||||
|
||||
get lc_namespace() {
|
||||
return [...super.lc_namespace, "ragsearch"];
|
||||
}
|
||||
|
||||
private sessionId: string;
|
||||
private model: BaseLanguageModel;
|
||||
private embeddings: Embeddings;
|
||||
|
||||
constructor(
|
||||
sessionId: string,
|
||||
model: BaseLanguageModel,
|
||||
embeddings: Embeddings,
|
||||
) {
|
||||
super();
|
||||
this.sessionId = sessionId;
|
||||
this.model = model;
|
||||
this.embeddings = embeddings;
|
||||
}
|
||||
|
||||
/** @ignore */
|
||||
async _call(inputs: string, runManager?: CallbackManagerForToolRun) {
|
||||
const serverConfig = getServerSideConfig();
|
||||
if (!serverConfig.isEnableRAG)
|
||||
throw new Error("env ENABLE_RAG not configured");
|
||||
// const pinecone = new Pinecone();
|
||||
// const pineconeIndex = pinecone.Index(serverConfig.pineconeIndex!);
|
||||
// const vectorStore = await PineconeStore.fromExistingIndex(this.embeddings, {
|
||||
// pineconeIndex,
|
||||
// });
|
||||
const vectorStore = await QdrantVectorStore.fromExistingCollection(
|
||||
this.embeddings,
|
||||
{
|
||||
url: process.env.QDRANT_URL,
|
||||
apiKey: process.env.QDRANT_API_KEY,
|
||||
collectionName: this.sessionId,
|
||||
},
|
||||
);
|
||||
|
||||
let context;
|
||||
const returnCunt = serverConfig.ragReturnCount
|
||||
? parseInt(serverConfig.ragReturnCount, 10)
|
||||
: 4;
|
||||
console.log("[rag-search]", { inputs, returnCunt });
|
||||
// const results = await vectorStore.similaritySearch(inputs, returnCunt, {
|
||||
// sessionId: this.sessionId,
|
||||
// });
|
||||
const results = await vectorStore.similaritySearch(inputs, returnCunt);
|
||||
context = formatDocumentsAsString(results);
|
||||
console.log("[rag-search]", { context });
|
||||
return context;
|
||||
// const input = `Text:${context}\n\nQuestion:${inputs}\n\nI need you to answer the question based on the text.`;
|
||||
|
||||
// console.log("[rag-search]", input);
|
||||
|
||||
// const chain = RunnableSequence.from([this.model, new StringOutputParser()]);
|
||||
// return chain.invoke(input, runManager?.getChild());
|
||||
}
|
||||
|
||||
name = "rag-search";
|
||||
|
||||
description = `It is used to query documents entered by the user.The input content is the keywords extracted from the user's question, and multiple keywords are separated by spaces and passed in.`;
|
||||
}
|
120
app/api/langchain/rag/search/route.ts
Normal file
120
app/api/langchain/rag/search/route.ts
Normal file
@ -0,0 +1,120 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { auth } from "@/app/api/auth";
|
||||
import { ACCESS_CODE_PREFIX, ModelProvider } from "@/app/constant";
|
||||
import { OpenAIEmbeddings } from "@langchain/openai";
|
||||
import { Pinecone } from "@pinecone-database/pinecone";
|
||||
import { PineconeStore } from "@langchain/pinecone";
|
||||
import { QdrantVectorStore } from "@langchain/community/vectorstores/qdrant";
|
||||
import { getServerSideConfig } from "@/app/config/server";
|
||||
|
||||
interface RequestBody {
|
||||
sessionId: string;
|
||||
query: string;
|
||||
baseUrl?: string;
|
||||
}
|
||||
|
||||
async function handle(req: NextRequest) {
|
||||
if (req.method === "OPTIONS") {
|
||||
return NextResponse.json({ body: "OK" }, { status: 200 });
|
||||
}
|
||||
try {
|
||||
const authResult = auth(req, ModelProvider.GPT);
|
||||
if (authResult.error) {
|
||||
return NextResponse.json(authResult, {
|
||||
status: 401,
|
||||
});
|
||||
}
|
||||
|
||||
const reqBody: RequestBody = await req.json();
|
||||
const authToken = req.headers.get("Authorization") ?? "";
|
||||
const token = authToken.trim().replaceAll("Bearer ", "").trim();
|
||||
const serverConfig = getServerSideConfig();
|
||||
// const pinecone = new Pinecone();
|
||||
// const pineconeIndex = pinecone.Index(serverConfig.pineconeIndex!);
|
||||
const apiKey = getOpenAIApiKey(token);
|
||||
const baseUrl = getOpenAIBaseUrl(reqBody.baseUrl);
|
||||
const embeddings = new OpenAIEmbeddings(
|
||||
{
|
||||
modelName: serverConfig.ragEmbeddingModel ?? "text-embedding-3-large",
|
||||
openAIApiKey: apiKey,
|
||||
},
|
||||
{ basePath: baseUrl },
|
||||
);
|
||||
// const vectorStore = await PineconeStore.fromExistingIndex(embeddings, {
|
||||
// pineconeIndex,
|
||||
// });
|
||||
// const results = await vectorStore.similaritySearch(reqBody.query, 4, {
|
||||
// sessionId: reqBody.sessionId,
|
||||
// });
|
||||
const vectorStore = await QdrantVectorStore.fromExistingCollection(
|
||||
embeddings,
|
||||
{
|
||||
url: process.env.QDRANT_URL,
|
||||
apiKey: process.env.QDRANT_API_KEY,
|
||||
collectionName: reqBody.sessionId,
|
||||
},
|
||||
);
|
||||
const returnCunt = serverConfig.ragReturnCount
|
||||
? parseInt(serverConfig.ragReturnCount, 10)
|
||||
: 4;
|
||||
const response = await vectorStore.similaritySearch(
|
||||
reqBody.query,
|
||||
returnCunt,
|
||||
);
|
||||
return NextResponse.json(response, {
|
||||
status: 200,
|
||||
});
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
return new Response(JSON.stringify({ error: (e as any).message }), {
|
||||
status: 500,
|
||||
headers: { "Content-Type": "application/json" },
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function getOpenAIApiKey(token: string) {
|
||||
const serverConfig = getServerSideConfig();
|
||||
const isApiKey = !token.startsWith(ACCESS_CODE_PREFIX);
|
||||
|
||||
let apiKey = serverConfig.apiKey;
|
||||
if (isApiKey && token) {
|
||||
apiKey = token;
|
||||
}
|
||||
return apiKey;
|
||||
}
|
||||
|
||||
function getOpenAIBaseUrl(reqBaseUrl: string | undefined) {
|
||||
const serverConfig = getServerSideConfig();
|
||||
let baseUrl = "https://api.openai.com/v1";
|
||||
if (serverConfig.baseUrl) baseUrl = serverConfig.baseUrl;
|
||||
if (reqBaseUrl?.startsWith("http://") || reqBaseUrl?.startsWith("https://"))
|
||||
baseUrl = reqBaseUrl;
|
||||
if (!baseUrl.endsWith("/v1"))
|
||||
baseUrl = baseUrl.endsWith("/") ? `${baseUrl}v1` : `${baseUrl}/v1`;
|
||||
console.log("[baseUrl]", baseUrl);
|
||||
return baseUrl;
|
||||
}
|
||||
|
||||
export const POST = handle;
|
||||
|
||||
export const runtime = "nodejs";
|
||||
export const preferredRegion = [
|
||||
"arn1",
|
||||
"bom1",
|
||||
"cdg1",
|
||||
"cle1",
|
||||
"cpt1",
|
||||
"dub1",
|
||||
"fra1",
|
||||
"gru1",
|
||||
"hnd1",
|
||||
"iad1",
|
||||
"icn1",
|
||||
"kix1",
|
||||
"lhr1",
|
||||
"pdx1",
|
||||
"sfo1",
|
||||
"sin1",
|
||||
"syd1",
|
||||
];
|
221
app/api/langchain/rag/store/route.ts
Normal file
221
app/api/langchain/rag/store/route.ts
Normal file
@ -0,0 +1,221 @@
|
||||
import { NextRequest, NextResponse } from "next/server";
|
||||
import { auth } from "@/app/api/auth";
|
||||
import { ACCESS_CODE_PREFIX, ModelProvider } from "@/app/constant";
|
||||
import { OpenAI, OpenAIEmbeddings } from "@langchain/openai";
|
||||
import { PDFLoader } from "langchain/document_loaders/fs/pdf";
|
||||
import { TextLoader } from "langchain/document_loaders/fs/text";
|
||||
import { CSVLoader } from "langchain/document_loaders/fs/csv";
|
||||
import { DocxLoader } from "langchain/document_loaders/fs/docx";
|
||||
import { EPubLoader } from "langchain/document_loaders/fs/epub";
|
||||
import { JSONLoader } from "langchain/document_loaders/fs/json";
|
||||
import { JSONLinesLoader } from "langchain/document_loaders/fs/json";
|
||||
import { OpenAIWhisperAudio } from "langchain/document_loaders/fs/openai_whisper_audio";
|
||||
// import { PPTXLoader } from "langchain/document_loaders/fs/pptx";
|
||||
import { SRTLoader } from "langchain/document_loaders/fs/srt";
|
||||
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";
|
||||
import { Pinecone } from "@pinecone-database/pinecone";
|
||||
import { PineconeStore } from "@langchain/pinecone";
|
||||
import { getServerSideConfig } from "@/app/config/server";
|
||||
import { FileInfo } from "@/app/client/platforms/utils";
|
||||
import mime from "mime";
|
||||
import LocalFileStorage from "@/app/utils/local_file_storage";
|
||||
import S3FileStorage from "@/app/utils/s3_file_storage";
|
||||
import { QdrantVectorStore } from "@langchain/community/vectorstores/qdrant";
|
||||
|
||||
interface RequestBody {
|
||||
sessionId: string;
|
||||
fileInfos: FileInfo[];
|
||||
baseUrl?: string;
|
||||
}
|
||||
|
||||
function getLoader(
|
||||
fileName: string,
|
||||
fileBlob: Blob,
|
||||
openaiApiKey: string,
|
||||
openaiBaseUrl: string,
|
||||
) {
|
||||
const extension = fileName.split(".").pop();
|
||||
switch (extension) {
|
||||
case "txt":
|
||||
case "md":
|
||||
return new TextLoader(fileBlob);
|
||||
case "pdf":
|
||||
return new PDFLoader(fileBlob);
|
||||
case "docx":
|
||||
return new DocxLoader(fileBlob);
|
||||
case "csv":
|
||||
return new CSVLoader(fileBlob);
|
||||
case "json":
|
||||
return new JSONLoader(fileBlob);
|
||||
// case 'pptx':
|
||||
// return new PPTXLoader(fileBlob);
|
||||
case "srt":
|
||||
return new SRTLoader(fileBlob);
|
||||
case "mp3":
|
||||
return new OpenAIWhisperAudio(fileBlob, {
|
||||
clientOptions: {
|
||||
apiKey: openaiApiKey,
|
||||
baseURL: openaiBaseUrl,
|
||||
},
|
||||
});
|
||||
default:
|
||||
throw new Error(`Unsupported file type: ${extension}`);
|
||||
}
|
||||
}
|
||||
|
||||
async function handle(req: NextRequest) {
|
||||
if (req.method === "OPTIONS") {
|
||||
return NextResponse.json({ body: "OK" }, { status: 200 });
|
||||
}
|
||||
try {
|
||||
const authResult = auth(req, ModelProvider.GPT);
|
||||
if (authResult.error) {
|
||||
return NextResponse.json(authResult, {
|
||||
status: 401,
|
||||
});
|
||||
}
|
||||
|
||||
const reqBody: RequestBody = await req.json();
|
||||
const authToken = req.headers.get("Authorization") ?? "";
|
||||
const token = authToken.trim().replaceAll("Bearer ", "").trim();
|
||||
const apiKey = getOpenAIApiKey(token);
|
||||
const baseUrl = getOpenAIBaseUrl(reqBody.baseUrl);
|
||||
const serverConfig = getServerSideConfig();
|
||||
// const pinecone = new Pinecone();
|
||||
// const pineconeIndex = pinecone.Index(serverConfig.pineconeIndex!);
|
||||
const embeddings = new OpenAIEmbeddings(
|
||||
{
|
||||
modelName: serverConfig.ragEmbeddingModel,
|
||||
openAIApiKey: apiKey,
|
||||
},
|
||||
{ basePath: baseUrl },
|
||||
);
|
||||
// https://js.langchain.com/docs/integrations/vectorstores/pinecone
|
||||
// https://js.langchain.com/docs/integrations/vectorstores/qdrant
|
||||
// process files
|
||||
for (let i = 0; i < reqBody.fileInfos.length; i++) {
|
||||
const fileInfo = reqBody.fileInfos[i];
|
||||
const contentType = mime.getType(fileInfo.fileName);
|
||||
// get file buffer
|
||||
var fileBuffer: Buffer | undefined;
|
||||
if (serverConfig.isStoreFileToLocal) {
|
||||
fileBuffer = await LocalFileStorage.get(fileInfo.fileName);
|
||||
} else {
|
||||
var file = await S3FileStorage.get(fileInfo.fileName);
|
||||
var fileByteArray = await file?.transformToByteArray();
|
||||
if (fileByteArray) fileBuffer = Buffer.from(fileByteArray);
|
||||
}
|
||||
if (!fileBuffer || !contentType) {
|
||||
console.error(`get ${fileInfo.fileName} buffer fail`);
|
||||
continue;
|
||||
}
|
||||
// load file to docs
|
||||
const fileBlob = bufferToBlob(fileBuffer, contentType);
|
||||
const loader = getLoader(fileInfo.fileName, fileBlob, apiKey, baseUrl);
|
||||
const docs = await loader.load();
|
||||
// modify doc meta
|
||||
docs.forEach((doc) => {
|
||||
doc.metadata = {
|
||||
...doc.metadata,
|
||||
sessionId: reqBody.sessionId,
|
||||
sourceFileName: fileInfo.originalFilename,
|
||||
fileName: fileInfo.fileName,
|
||||
};
|
||||
});
|
||||
// split
|
||||
const chunkSize = serverConfig.ragChunkSize
|
||||
? parseInt(serverConfig.ragChunkSize, 10)
|
||||
: 2000;
|
||||
const chunkOverlap = serverConfig.ragChunkOverlap
|
||||
? parseInt(serverConfig.ragChunkOverlap, 10)
|
||||
: 200;
|
||||
const textSplitter = new RecursiveCharacterTextSplitter({
|
||||
chunkSize: chunkSize,
|
||||
chunkOverlap: chunkOverlap,
|
||||
});
|
||||
const splits = await textSplitter.splitDocuments(docs);
|
||||
const vectorStore = await QdrantVectorStore.fromDocuments(
|
||||
splits,
|
||||
embeddings,
|
||||
{
|
||||
url: process.env.QDRANT_URL,
|
||||
apiKey: process.env.QDRANT_API_KEY,
|
||||
collectionName: reqBody.sessionId,
|
||||
},
|
||||
);
|
||||
// await PineconeStore.fromDocuments(splits, embeddings, {
|
||||
// pineconeIndex,
|
||||
// maxConcurrency: 5,
|
||||
// });
|
||||
// const vectorStore = await PineconeStore.fromExistingIndex(embeddings, {
|
||||
// pineconeIndex,
|
||||
// });
|
||||
}
|
||||
return NextResponse.json(
|
||||
{
|
||||
sessionId: reqBody.sessionId,
|
||||
},
|
||||
{
|
||||
status: 200,
|
||||
},
|
||||
);
|
||||
} catch (e) {
|
||||
console.error(e);
|
||||
return new Response(JSON.stringify({ error: (e as any).message }), {
|
||||
status: 500,
|
||||
headers: { "Content-Type": "application/json" },
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function bufferToBlob(buffer: Buffer, mimeType?: string): Blob {
|
||||
const arrayBuffer: ArrayBuffer = buffer.buffer.slice(
|
||||
buffer.byteOffset,
|
||||
buffer.byteOffset + buffer.byteLength,
|
||||
);
|
||||
return new Blob([arrayBuffer], { type: mimeType || "" });
|
||||
}
|
||||
function getOpenAIApiKey(token: string) {
|
||||
const serverConfig = getServerSideConfig();
|
||||
const isApiKey = !token.startsWith(ACCESS_CODE_PREFIX);
|
||||
|
||||
let apiKey = serverConfig.apiKey;
|
||||
if (isApiKey && token) {
|
||||
apiKey = token;
|
||||
}
|
||||
return apiKey;
|
||||
}
|
||||
function getOpenAIBaseUrl(reqBaseUrl: string | undefined) {
|
||||
const serverConfig = getServerSideConfig();
|
||||
let baseUrl = "https://api.openai.com/v1";
|
||||
if (serverConfig.baseUrl) baseUrl = serverConfig.baseUrl;
|
||||
if (reqBaseUrl?.startsWith("http://") || reqBaseUrl?.startsWith("https://"))
|
||||
baseUrl = reqBaseUrl;
|
||||
if (!baseUrl.endsWith("/v1"))
|
||||
baseUrl = baseUrl.endsWith("/") ? `${baseUrl}v1` : `${baseUrl}/v1`;
|
||||
console.log("[baseUrl]", baseUrl);
|
||||
return baseUrl;
|
||||
}
|
||||
|
||||
export const POST = handle;
|
||||
|
||||
export const runtime = "nodejs";
|
||||
export const preferredRegion = [
|
||||
"arn1",
|
||||
"bom1",
|
||||
"cdg1",
|
||||
"cle1",
|
||||
"cpt1",
|
||||
"dub1",
|
||||
"fra1",
|
||||
"gru1",
|
||||
"hnd1",
|
||||
"iad1",
|
||||
"icn1",
|
||||
"kix1",
|
||||
"lhr1",
|
||||
"pdx1",
|
||||
"sfo1",
|
||||
"sin1",
|
||||
"syd1",
|
||||
];
|
@ -44,6 +44,7 @@ export interface RequestMessage {
|
||||
}
|
||||
|
||||
export interface RequestBody {
|
||||
chatSessionId: string;
|
||||
messages: RequestMessage[];
|
||||
isAzure: boolean;
|
||||
azureApiVersion?: string;
|
||||
|
@ -44,6 +44,13 @@ async function handle(req: NextRequest) {
|
||||
},
|
||||
{ basePath: baseUrl },
|
||||
);
|
||||
const ragEmbeddings = new OpenAIEmbeddings(
|
||||
{
|
||||
modelName: process.env.RAG_EMBEDDING_MODEL ?? "text-embedding-3-large",
|
||||
openAIApiKey: apiKey,
|
||||
},
|
||||
{ basePath: baseUrl },
|
||||
);
|
||||
|
||||
var dalleCallback = async (data: string) => {
|
||||
var response = new ResponseBody();
|
||||
@ -62,6 +69,8 @@ async function handle(req: NextRequest) {
|
||||
baseUrl,
|
||||
model,
|
||||
embeddings,
|
||||
reqBody.chatSessionId,
|
||||
ragEmbeddings,
|
||||
dalleCallback,
|
||||
);
|
||||
var nodejsTools = await nodejsTool.getCustomTools();
|
||||
|
@ -7,7 +7,7 @@ import {
|
||||
} from "../constant";
|
||||
import { ChatMessage, ModelType, useAccessStore, useChatStore } from "../store";
|
||||
import { ChatGPTApi } from "./platforms/openai";
|
||||
import { FileApi } from "./platforms/utils";
|
||||
import { FileApi, FileInfo } from "./platforms/utils";
|
||||
import { GeminiProApi } from "./platforms/google";
|
||||
export const ROLES = ["system", "user", "assistant"] as const;
|
||||
export type MessageRole = (typeof ROLES)[number];
|
||||
@ -27,6 +27,7 @@ export interface MultimodalContent {
|
||||
export interface RequestMessage {
|
||||
role: MessageRole;
|
||||
content: string | MultimodalContent[];
|
||||
fileInfos?: FileInfo[];
|
||||
}
|
||||
|
||||
export interface LLMConfig {
|
||||
@ -74,6 +75,7 @@ export interface ChatOptions {
|
||||
}
|
||||
|
||||
export interface AgentChatOptions {
|
||||
chatSessionId?: string;
|
||||
messages: RequestMessage[];
|
||||
config: LLMConfig;
|
||||
agentConfig: LLMAgentConfig;
|
||||
@ -84,6 +86,13 @@ export interface AgentChatOptions {
|
||||
onController?: (controller: AbortController) => void;
|
||||
}
|
||||
|
||||
export interface CreateRAGStoreOptions {
|
||||
chatSessionId: string;
|
||||
fileInfos: FileInfo[];
|
||||
onError?: (err: Error) => void;
|
||||
onController?: (controller: AbortController) => void;
|
||||
}
|
||||
|
||||
export interface LLMUsage {
|
||||
used: number;
|
||||
total: number;
|
||||
@ -106,6 +115,7 @@ export abstract class LLMApi {
|
||||
abstract speech(options: SpeechOptions): Promise<ArrayBuffer>;
|
||||
abstract transcription(options: TranscriptionOptions): Promise<string>;
|
||||
abstract toolAgentChat(options: AgentChatOptions): Promise<void>;
|
||||
abstract createRAGStore(options: CreateRAGStoreOptions): Promise<void>;
|
||||
abstract usage(): Promise<LLMUsage>;
|
||||
abstract models(): Promise<LLMModel[]>;
|
||||
}
|
||||
@ -213,8 +223,8 @@ export function getHeaders(ignoreHeaders?: boolean) {
|
||||
const apiKey = isGoogle
|
||||
? accessStore.googleApiKey
|
||||
: isAzure
|
||||
? accessStore.azureApiKey
|
||||
: accessStore.openaiApiKey;
|
||||
? accessStore.azureApiKey
|
||||
: accessStore.openaiApiKey;
|
||||
|
||||
const makeBearer = (s: string) =>
|
||||
`${isGoogle || isAzure ? "" : "Bearer "}${s.trim()}`;
|
||||
|
@ -2,6 +2,7 @@ import { Google, REQUEST_TIMEOUT_MS } from "@/app/constant";
|
||||
import {
|
||||
AgentChatOptions,
|
||||
ChatOptions,
|
||||
CreateRAGStoreOptions,
|
||||
getHeaders,
|
||||
LLMApi,
|
||||
LLMModel,
|
||||
@ -19,6 +20,9 @@ import {
|
||||
} from "@/app/utils";
|
||||
|
||||
export class GeminiProApi implements LLMApi {
|
||||
createRAGStore(options: CreateRAGStoreOptions): Promise<void> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
transcription(options: TranscriptionOptions): Promise<string> {
|
||||
throw new Error("Method not implemented.");
|
||||
}
|
||||
|
@ -12,6 +12,7 @@ import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
|
||||
import {
|
||||
AgentChatOptions,
|
||||
ChatOptions,
|
||||
CreateRAGStoreOptions,
|
||||
getHeaders,
|
||||
LLMApi,
|
||||
LLMModel,
|
||||
@ -362,6 +363,34 @@ export class ChatGPTApi implements LLMApi {
|
||||
}
|
||||
}
|
||||
|
||||
async createRAGStore(options: CreateRAGStoreOptions): Promise<void> {
|
||||
try {
|
||||
const accessStore = useAccessStore.getState();
|
||||
const isAzure = accessStore.provider === ServiceProvider.Azure;
|
||||
let baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl;
|
||||
const requestPayload = {
|
||||
sessionId: options.chatSessionId,
|
||||
fileInfos: options.fileInfos,
|
||||
baseUrl: baseUrl,
|
||||
};
|
||||
console.log("[Request] rag store payload: ", requestPayload);
|
||||
const controller = new AbortController();
|
||||
options.onController?.(controller);
|
||||
let path = "/api/langchain/rag/store";
|
||||
const chatPayload = {
|
||||
method: "POST",
|
||||
body: JSON.stringify(requestPayload),
|
||||
signal: controller.signal,
|
||||
headers: getHeaders(),
|
||||
};
|
||||
const res = await fetch(path, chatPayload);
|
||||
if (res.status !== 200) throw new Error(await res.text());
|
||||
} catch (e) {
|
||||
console.log("[Request] failed to make a chat reqeust", e);
|
||||
options.onError?.(e as Error);
|
||||
}
|
||||
}
|
||||
|
||||
async toolAgentChat(options: AgentChatOptions) {
|
||||
const messages = options.messages.map((v) => ({
|
||||
role: v.role,
|
||||
@ -379,6 +408,7 @@ export class ChatGPTApi implements LLMApi {
|
||||
const isAzure = accessStore.provider === ServiceProvider.Azure;
|
||||
let baseUrl = isAzure ? accessStore.azureUrl : accessStore.openaiUrl;
|
||||
const requestPayload = {
|
||||
chatSessionId: options.chatSessionId,
|
||||
messages,
|
||||
isAzure,
|
||||
azureApiVersion: accessStore.azureApiVersion,
|
||||
|
@ -1,7 +1,16 @@
|
||||
import { getHeaders } from "../api";
|
||||
|
||||
export interface FileInfo {
|
||||
originalFilename: string;
|
||||
fileName: string;
|
||||
filePath: string;
|
||||
size: number;
|
||||
}
|
||||
|
||||
export class FileApi {
|
||||
async upload(file: any): Promise<any> {
|
||||
async upload(file: any): Promise<FileInfo> {
|
||||
const fileName = file.name;
|
||||
const fileSize = file.size;
|
||||
const formData = new FormData();
|
||||
formData.append("file", file);
|
||||
var headers = getHeaders(true);
|
||||
@ -16,6 +25,8 @@ export class FileApi {
|
||||
const resJson = await res.json();
|
||||
console.log(resJson);
|
||||
return {
|
||||
originalFilename: fileName,
|
||||
size: fileSize,
|
||||
fileName: resJson.fileName,
|
||||
filePath: resJson.filePath,
|
||||
};
|
||||
|
@ -1,5 +1,69 @@
|
||||
@import "../styles/animation.scss";
|
||||
|
||||
.attach-files {
|
||||
position: absolute;
|
||||
left: 30px;
|
||||
bottom: 32px;
|
||||
display: flex;
|
||||
}
|
||||
|
||||
.attach-file {
|
||||
cursor: default;
|
||||
width: 64px;
|
||||
height: 64px;
|
||||
border: rgba($color: #888, $alpha: 0.2) 1px solid;
|
||||
border-radius: 5px;
|
||||
margin-right: 10px;
|
||||
background-size: cover;
|
||||
background-position: center;
|
||||
background-color: var(--second);
|
||||
display: flex;
|
||||
position: relative;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
|
||||
.attach-file-info {
|
||||
top: 5px;
|
||||
width: 100%;
|
||||
position: absolute;
|
||||
font-size: 12px;
|
||||
font-weight: bolder;
|
||||
text-align: center;
|
||||
word-wrap: break-word;
|
||||
word-break: break-all;
|
||||
-webkit-line-clamp: 3;
|
||||
-webkit-box-orient: vertical;
|
||||
line-height: 1.5;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
display: -webkit-box;
|
||||
}
|
||||
|
||||
.attach-file-mask {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
opacity: 0;
|
||||
transition: all ease 0.2s;
|
||||
z-index: 999;
|
||||
}
|
||||
|
||||
.attach-file-mask:hover {
|
||||
opacity: 1;
|
||||
}
|
||||
|
||||
.delete-file {
|
||||
width: 24px;
|
||||
height: 24px;
|
||||
cursor: pointer;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
border-radius: 5px;
|
||||
float: right;
|
||||
background-color: var(--white);
|
||||
}
|
||||
}
|
||||
|
||||
.attach-images {
|
||||
position: absolute;
|
||||
left: 30px;
|
||||
@ -232,10 +296,12 @@
|
||||
|
||||
animation: slide-in ease 0.3s;
|
||||
|
||||
$linear: linear-gradient(to right,
|
||||
rgba(0, 0, 0, 0),
|
||||
rgba(0, 0, 0, 1),
|
||||
rgba(0, 0, 0, 0));
|
||||
$linear: linear-gradient(
|
||||
to right,
|
||||
rgba(0, 0, 0, 0),
|
||||
rgba(0, 0, 0, 1),
|
||||
rgba(0, 0, 0, 0)
|
||||
);
|
||||
mask-image: $linear;
|
||||
|
||||
@mixin show {
|
||||
@ -368,7 +434,7 @@
|
||||
}
|
||||
}
|
||||
|
||||
.chat-message-user>.chat-message-container {
|
||||
.chat-message-user > .chat-message-container {
|
||||
align-items: flex-end;
|
||||
}
|
||||
|
||||
@ -454,6 +520,17 @@
|
||||
transition: all ease 0.3s;
|
||||
}
|
||||
|
||||
.chat-message-item-files {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(var(--file-count), auto);
|
||||
grid-gap: 5px;
|
||||
}
|
||||
|
||||
.chat-message-item-file {
|
||||
text-decoration: none;
|
||||
color: #aaa;
|
||||
}
|
||||
|
||||
.chat-message-item-image {
|
||||
width: 100%;
|
||||
margin-top: 10px;
|
||||
@ -482,23 +559,27 @@
|
||||
border: rgba($color: #888, $alpha: 0.2) 1px solid;
|
||||
}
|
||||
|
||||
|
||||
@media only screen and (max-width: 600px) {
|
||||
$calc-image-width: calc(100vw/3*2/var(--image-count));
|
||||
$calc-image-width: calc(100vw / 3 * 2 / var(--image-count));
|
||||
|
||||
.chat-message-item-image-multi {
|
||||
width: $calc-image-width;
|
||||
height: $calc-image-width;
|
||||
}
|
||||
|
||||
|
||||
.chat-message-item-image {
|
||||
max-width: calc(100vw/3*2);
|
||||
max-width: calc(100vw / 3 * 2);
|
||||
}
|
||||
}
|
||||
|
||||
@media screen and (min-width: 600px) {
|
||||
$max-image-width: calc(calc(1200px - var(--sidebar-width))/3*2/var(--image-count));
|
||||
$image-width: calc(calc(var(--window-width) - var(--sidebar-width))/3*2/var(--image-count));
|
||||
$max-image-width: calc(
|
||||
calc(1200px - var(--sidebar-width)) / 3 * 2 / var(--image-count)
|
||||
);
|
||||
$image-width: calc(
|
||||
calc(var(--window-width) - var(--sidebar-width)) / 3 * 2 /
|
||||
var(--image-count)
|
||||
);
|
||||
|
||||
.chat-message-item-image-multi {
|
||||
width: $image-width;
|
||||
@ -508,7 +589,7 @@
|
||||
}
|
||||
|
||||
.chat-message-item-image {
|
||||
max-width: calc(calc(1200px - var(--sidebar-width))/3*2);
|
||||
max-width: calc(calc(1200px - var(--sidebar-width)) / 3 * 2);
|
||||
}
|
||||
}
|
||||
|
||||
@ -526,7 +607,7 @@
|
||||
z-index: 1;
|
||||
}
|
||||
|
||||
.chat-message-user>.chat-message-container>.chat-message-item {
|
||||
.chat-message-user > .chat-message-container > .chat-message-item {
|
||||
background-color: var(--second);
|
||||
|
||||
&:hover {
|
||||
@ -637,7 +718,8 @@
|
||||
min-height: 68px;
|
||||
}
|
||||
|
||||
.chat-input:focus {}
|
||||
.chat-input:focus {
|
||||
}
|
||||
|
||||
.chat-input-send {
|
||||
background-color: var(--primary);
|
||||
@ -656,4 +738,4 @@
|
||||
.chat-input-send {
|
||||
bottom: 30px;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -69,6 +69,7 @@ import {
|
||||
isVisionModel,
|
||||
compressImage,
|
||||
isFirefox,
|
||||
isSupportRAGModel,
|
||||
} from "../utils";
|
||||
|
||||
import dynamic from "next/dynamic";
|
||||
@ -116,6 +117,7 @@ import {
|
||||
SpeechApi,
|
||||
WebTranscriptionApi,
|
||||
} from "../utils/speech";
|
||||
import { FileInfo } from "../client/platforms/utils";
|
||||
|
||||
const ttsPlayer = createTTSPlayer();
|
||||
|
||||
@ -460,6 +462,8 @@ function useScrollToBottom(
|
||||
export function ChatActions(props: {
|
||||
uploadImage: () => void;
|
||||
setAttachImages: (images: string[]) => void;
|
||||
uploadFile: () => void;
|
||||
setAttachFiles: (files: FileInfo[]) => void;
|
||||
setUploading: (uploading: boolean) => void;
|
||||
showPromptModal: () => void;
|
||||
scrollToBottom: () => void;
|
||||
@ -502,10 +506,19 @@ export function ChatActions(props: {
|
||||
);
|
||||
const [showModelSelector, setShowModelSelector] = useState(false);
|
||||
const [showUploadImage, setShowUploadImage] = useState(false);
|
||||
const [showUploadFile, setShowUploadFile] = useState(false);
|
||||
|
||||
const accessStore = useAccessStore();
|
||||
const isEnableRAG = useMemo(
|
||||
() => accessStore.enableRAG(),
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
[],
|
||||
);
|
||||
|
||||
useEffect(() => {
|
||||
const show = isVisionModel(currentModel);
|
||||
setShowUploadImage(show);
|
||||
setShowUploadFile(isEnableRAG && !show && isSupportRAGModel(currentModel));
|
||||
if (!show) {
|
||||
props.setAttachImages([]);
|
||||
props.setUploading(false);
|
||||
@ -555,6 +568,14 @@ export function ChatActions(props: {
|
||||
icon={props.uploading ? <LoadingButtonIcon /> : <ImageIcon />}
|
||||
/>
|
||||
)}
|
||||
|
||||
{showUploadFile && (
|
||||
<ChatAction
|
||||
onClick={props.uploadFile}
|
||||
text={Locale.Chat.InputActions.UploadFle}
|
||||
icon={props.uploading ? <LoadingButtonIcon /> : <UploadIcon />}
|
||||
/>
|
||||
)}
|
||||
<ChatAction
|
||||
onClick={nextTheme}
|
||||
text={Locale.Chat.InputActions.Theme[theme]}
|
||||
@ -713,6 +734,14 @@ export function DeleteImageButton(props: { deleteImage: () => void }) {
|
||||
);
|
||||
}
|
||||
|
||||
export function DeleteFileButton(props: { deleteFile: () => void }) {
|
||||
return (
|
||||
<div className={styles["delete-file"]} onClick={props.deleteFile}>
|
||||
<DeleteIcon />
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
function _Chat() {
|
||||
type RenderMessage = ChatMessage & { preview?: boolean };
|
||||
|
||||
@ -743,6 +772,7 @@ function _Chat() {
|
||||
const navigate = useNavigate();
|
||||
const [attachImages, setAttachImages] = useState<string[]>([]);
|
||||
const [uploading, setUploading] = useState(false);
|
||||
const [attachFiles, setAttachFiles] = useState<FileInfo[]>([]);
|
||||
|
||||
// prompt hints
|
||||
const promptStore = usePromptStore();
|
||||
@ -848,9 +878,10 @@ function _Chat() {
|
||||
}
|
||||
setIsLoading(true);
|
||||
chatStore
|
||||
.onUserInput(userInput, attachImages)
|
||||
.onUserInput(userInput, attachImages, attachFiles)
|
||||
.then(() => setIsLoading(false));
|
||||
setAttachImages([]);
|
||||
setAttachFiles([]);
|
||||
localStorage.setItem(LAST_INPUT_KEY, userInput);
|
||||
setUserInput("");
|
||||
setPromptHints([]);
|
||||
@ -1010,7 +1041,9 @@ function _Chat() {
|
||||
setIsLoading(true);
|
||||
const textContent = getMessageTextContent(userMessage);
|
||||
const images = getMessageImages(userMessage);
|
||||
chatStore.onUserInput(textContent, images).then(() => setIsLoading(false));
|
||||
chatStore
|
||||
.onUserInput(textContent, images, userMessage.fileInfos)
|
||||
.then(() => setIsLoading(false));
|
||||
inputRef.current?.focus();
|
||||
};
|
||||
|
||||
@ -1077,34 +1110,36 @@ function _Chat() {
|
||||
|
||||
// preview messages
|
||||
const renderMessages = useMemo(() => {
|
||||
return context
|
||||
.concat(session.messages as RenderMessage[])
|
||||
.concat(
|
||||
isLoading
|
||||
? [
|
||||
{
|
||||
...createMessage({
|
||||
role: "assistant",
|
||||
content: "……",
|
||||
}),
|
||||
preview: true,
|
||||
},
|
||||
]
|
||||
: [],
|
||||
)
|
||||
.concat(
|
||||
userInput.length > 0 && config.sendPreviewBubble
|
||||
? [
|
||||
{
|
||||
...createMessage({
|
||||
role: "user",
|
||||
content: userInput,
|
||||
}),
|
||||
preview: true,
|
||||
},
|
||||
]
|
||||
: [],
|
||||
);
|
||||
return (
|
||||
context
|
||||
.concat(session.messages as RenderMessage[])
|
||||
// .concat(
|
||||
// isLoading
|
||||
// ? [
|
||||
// {
|
||||
// ...createMessage({
|
||||
// role: "assistant",
|
||||
// content: "……",
|
||||
// }),
|
||||
// preview: true,
|
||||
// },
|
||||
// ]
|
||||
// : [],
|
||||
// )
|
||||
.concat(
|
||||
userInput.length > 0 && config.sendPreviewBubble
|
||||
? [
|
||||
{
|
||||
...createMessage({
|
||||
role: "user",
|
||||
content: userInput,
|
||||
}),
|
||||
preview: true,
|
||||
},
|
||||
]
|
||||
: [],
|
||||
)
|
||||
);
|
||||
}, [
|
||||
config.sendPreviewBubble,
|
||||
context,
|
||||
@ -1324,6 +1359,53 @@ function _Chat() {
|
||||
setAttachImages(images);
|
||||
}
|
||||
|
||||
async function uploadFile() {
|
||||
const uploadFiles: FileInfo[] = [];
|
||||
uploadFiles.push(...attachFiles);
|
||||
|
||||
uploadFiles.push(
|
||||
...(await new Promise<FileInfo[]>((res, rej) => {
|
||||
const fileInput = document.createElement("input");
|
||||
fileInput.type = "file";
|
||||
fileInput.accept = ".pdf,.txt,.md,.json,.csv,.docx,.srt,.mp3";
|
||||
fileInput.multiple = true;
|
||||
fileInput.onchange = (event: any) => {
|
||||
setUploading(true);
|
||||
const files = event.target.files;
|
||||
const api = new ClientApi();
|
||||
const fileDatas: FileInfo[] = [];
|
||||
for (let i = 0; i < files.length; i++) {
|
||||
const file = event.target.files[i];
|
||||
api.file
|
||||
.upload(file)
|
||||
.then((fileInfo) => {
|
||||
console.log(fileInfo);
|
||||
fileDatas.push(fileInfo);
|
||||
if (
|
||||
fileDatas.length === 3 ||
|
||||
fileDatas.length === files.length
|
||||
) {
|
||||
setUploading(false);
|
||||
res(fileDatas);
|
||||
}
|
||||
})
|
||||
.catch((e) => {
|
||||
setUploading(false);
|
||||
rej(e);
|
||||
});
|
||||
}
|
||||
};
|
||||
fileInput.click();
|
||||
})),
|
||||
);
|
||||
|
||||
const filesLength = uploadFiles.length;
|
||||
if (filesLength > 5) {
|
||||
uploadFiles.splice(5, filesLength - 5);
|
||||
}
|
||||
setAttachFiles(uploadFiles);
|
||||
}
|
||||
|
||||
return (
|
||||
<div className={styles.chat} key={session.id}>
|
||||
<div className="window-header" data-tauri-drag-region>
|
||||
@ -1582,6 +1664,29 @@ function _Chat() {
|
||||
parentRef={scrollRef}
|
||||
defaultShow={i >= messages.length - 6}
|
||||
/>
|
||||
{message.fileInfos && message.fileInfos.length > 0 && (
|
||||
<nav
|
||||
className={styles["chat-message-item-files"]}
|
||||
style={
|
||||
{
|
||||
"--file-count": message.fileInfos.length,
|
||||
} as React.CSSProperties
|
||||
}
|
||||
>
|
||||
{message.fileInfos.map((fileInfo, index) => {
|
||||
return (
|
||||
<a
|
||||
key={index}
|
||||
href={fileInfo.filePath}
|
||||
className={styles["chat-message-item-file"]}
|
||||
target="_blank"
|
||||
>
|
||||
{fileInfo.originalFilename}
|
||||
</a>
|
||||
);
|
||||
})}
|
||||
</nav>
|
||||
)}
|
||||
{getMessageImages(message).length == 1 && (
|
||||
<img
|
||||
className={styles["chat-message-item-image"]}
|
||||
@ -1632,6 +1737,8 @@ function _Chat() {
|
||||
<ChatActions
|
||||
uploadImage={uploadImage}
|
||||
setAttachImages={setAttachImages}
|
||||
uploadFile={uploadFile}
|
||||
setAttachFiles={setAttachFiles}
|
||||
setUploading={setUploading}
|
||||
showPromptModal={() => setShowPromptModal(true)}
|
||||
scrollToBottom={scrollToBottom}
|
||||
@ -1651,7 +1758,7 @@ function _Chat() {
|
||||
/>
|
||||
<label
|
||||
className={`${styles["chat-input-panel-inner"]} ${
|
||||
attachImages.length != 0
|
||||
attachImages.length != 0 || attachFiles.length != 0
|
||||
? styles["chat-input-panel-inner-attach"]
|
||||
: ""
|
||||
}`}
|
||||
@ -1697,7 +1804,32 @@ function _Chat() {
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{attachFiles.length != 0 && (
|
||||
<div className={styles["attach-files"]}>
|
||||
{attachFiles.map((file, index) => {
|
||||
return (
|
||||
<div
|
||||
key={index}
|
||||
className={styles["attach-file"]}
|
||||
title={file.originalFilename}
|
||||
>
|
||||
<div className={styles["attach-file-info"]}>
|
||||
{file.originalFilename}
|
||||
</div>
|
||||
<div className={styles["attach-file-mask"]}>
|
||||
<DeleteFileButton
|
||||
deleteFile={() => {
|
||||
setAttachFiles(
|
||||
attachFiles.filter((_, i) => i !== index),
|
||||
);
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
)}
|
||||
{config.sttConfig.enable ? (
|
||||
<IconButton
|
||||
icon={<VoiceWhiteIcon />}
|
||||
|
@ -111,5 +111,12 @@ export const getServerSideConfig = () => {
|
||||
!!process.env.NEXT_PUBLIC_ENABLE_NODEJS_PLUGIN &&
|
||||
!process.env.R2_ACCOUNT_ID &&
|
||||
!process.env.S3_ENDPOINT,
|
||||
|
||||
isEnableRAG: !!process.env.ENABLE_RAG,
|
||||
ragEmbeddingModel:
|
||||
process.env.RAG_EMBEDDING_MODEL ?? "text-embedding-3-large",
|
||||
ragChunkSize: process.env.RAG_CHUNK_SIZE ?? "2000",
|
||||
ragChunkOverlap: process.env.RAG_CHUNK_OVERLAP ?? "200",
|
||||
ragReturnCount: process.env.RAG_RETURN_COUNT ?? "4",
|
||||
};
|
||||
};
|
||||
|
@ -68,6 +68,7 @@ const cn = {
|
||||
EnablePlugins: "开启插件",
|
||||
DisablePlugins: "关闭插件",
|
||||
UploadImage: "上传图片",
|
||||
UploadFle: "上传文件",
|
||||
},
|
||||
Rename: "重命名对话",
|
||||
Typing: "正在输入…",
|
||||
|
@ -70,6 +70,7 @@ const en: LocaleType = {
|
||||
EnablePlugins: "Enable Plugins",
|
||||
DisablePlugins: "Disable Plugins",
|
||||
UploadImage: "Upload Images",
|
||||
UploadFle: "Upload Files",
|
||||
},
|
||||
Rename: "Rename Chat",
|
||||
Typing: "Typing…",
|
||||
|
@ -43,6 +43,7 @@ const DEFAULT_ACCESS_STATE = {
|
||||
disableGPT4: false,
|
||||
disableFastLink: false,
|
||||
customModels: "",
|
||||
isEnableRAG: false,
|
||||
};
|
||||
|
||||
export const useAccessStore = createPersistStore(
|
||||
@ -55,6 +56,12 @@ export const useAccessStore = createPersistStore(
|
||||
return get().needCode;
|
||||
},
|
||||
|
||||
enableRAG() {
|
||||
this.fetch();
|
||||
|
||||
return get().isEnableRAG;
|
||||
},
|
||||
|
||||
isValidOpenAI() {
|
||||
return ensure(get(), ["openaiApiKey"]);
|
||||
},
|
||||
|
@ -26,6 +26,7 @@ export interface ChatToolMessage {
|
||||
toolInput?: string;
|
||||
}
|
||||
import { createPersistStore } from "../utils/store";
|
||||
import { FileInfo } from "../client/platforms/utils";
|
||||
|
||||
export type ChatMessage = RequestMessage & {
|
||||
date: string;
|
||||
@ -304,7 +305,11 @@ export const useChatStore = createPersistStore(
|
||||
get().summarizeSession();
|
||||
},
|
||||
|
||||
async onUserInput(content: string, attachImages?: string[]) {
|
||||
async onUserInput(
|
||||
content: string,
|
||||
attachImages?: string[],
|
||||
attachFiles?: FileInfo[],
|
||||
) {
|
||||
const session = get().currentSession();
|
||||
const modelConfig = session.mask.modelConfig;
|
||||
|
||||
@ -335,6 +340,7 @@ export const useChatStore = createPersistStore(
|
||||
let userMessage: ChatMessage = createMessage({
|
||||
role: "user",
|
||||
content: mContent,
|
||||
fileInfos: attachFiles,
|
||||
});
|
||||
const botMessage: ChatMessage = createMessage({
|
||||
role: "assistant",
|
||||
@ -359,7 +365,6 @@ export const useChatStore = createPersistStore(
|
||||
m.lang === (getLang() == "cn" ? getLang() : "en")) &&
|
||||
m.enable,
|
||||
);
|
||||
|
||||
// save user's and bot's message
|
||||
get().updateCurrentSession((session) => {
|
||||
const savedUserMessage = {
|
||||
@ -369,80 +374,98 @@ export const useChatStore = createPersistStore(
|
||||
session.messages.push(savedUserMessage);
|
||||
session.messages.push(botMessage);
|
||||
});
|
||||
const isEnableRAG = attachFiles && attachFiles?.length > 0;
|
||||
var api: ClientApi;
|
||||
api = new ClientApi(ModelProvider.GPT);
|
||||
if (
|
||||
config.pluginConfig.enable &&
|
||||
session.mask.usePlugins &&
|
||||
allPlugins.length > 0 &&
|
||||
(allPlugins.length > 0 || isEnableRAG) &&
|
||||
modelConfig.model.startsWith("gpt") &&
|
||||
modelConfig.model != "gpt-4-vision-preview"
|
||||
) {
|
||||
console.log("[ToolAgent] start");
|
||||
const pluginToolNames = allPlugins.map((m) => m.toolName);
|
||||
api.llm.toolAgentChat({
|
||||
messages: sendMessages,
|
||||
config: { ...modelConfig, stream: true },
|
||||
agentConfig: { ...pluginConfig, useTools: pluginToolNames },
|
||||
onUpdate(message) {
|
||||
botMessage.streaming = true;
|
||||
if (message) {
|
||||
botMessage.content = message;
|
||||
}
|
||||
get().updateCurrentSession((session) => {
|
||||
session.messages = session.messages.concat();
|
||||
});
|
||||
},
|
||||
onToolUpdate(toolName, toolInput) {
|
||||
botMessage.streaming = true;
|
||||
if (toolName && toolInput) {
|
||||
botMessage.toolMessages!.push({
|
||||
toolName,
|
||||
toolInput,
|
||||
if (isEnableRAG) pluginToolNames.push("rag-search");
|
||||
const agentCall = () => {
|
||||
api.llm.toolAgentChat({
|
||||
chatSessionId: session.id,
|
||||
messages: sendMessages,
|
||||
config: { ...modelConfig, stream: true },
|
||||
agentConfig: { ...pluginConfig, useTools: pluginToolNames },
|
||||
onUpdate(message) {
|
||||
botMessage.streaming = true;
|
||||
if (message) {
|
||||
botMessage.content = message;
|
||||
}
|
||||
get().updateCurrentSession((session) => {
|
||||
session.messages = session.messages.concat();
|
||||
});
|
||||
}
|
||||
get().updateCurrentSession((session) => {
|
||||
session.messages = session.messages.concat();
|
||||
});
|
||||
},
|
||||
onFinish(message) {
|
||||
botMessage.streaming = false;
|
||||
if (message) {
|
||||
botMessage.content = message;
|
||||
get().onNewMessage(botMessage);
|
||||
}
|
||||
ChatControllerPool.remove(session.id, botMessage.id);
|
||||
},
|
||||
onError(error) {
|
||||
const isAborted = error.message.includes("aborted");
|
||||
botMessage.content +=
|
||||
"\n\n" +
|
||||
prettyObject({
|
||||
error: true,
|
||||
message: error.message,
|
||||
},
|
||||
onToolUpdate(toolName, toolInput) {
|
||||
botMessage.streaming = true;
|
||||
if (toolName && toolInput) {
|
||||
botMessage.toolMessages!.push({
|
||||
toolName,
|
||||
toolInput,
|
||||
});
|
||||
}
|
||||
get().updateCurrentSession((session) => {
|
||||
session.messages = session.messages.concat();
|
||||
});
|
||||
botMessage.streaming = false;
|
||||
userMessage.isError = !isAborted;
|
||||
botMessage.isError = !isAborted;
|
||||
get().updateCurrentSession((session) => {
|
||||
session.messages = session.messages.concat();
|
||||
});
|
||||
ChatControllerPool.remove(
|
||||
session.id,
|
||||
botMessage.id ?? messageIndex,
|
||||
);
|
||||
},
|
||||
onFinish(message) {
|
||||
botMessage.streaming = false;
|
||||
if (message) {
|
||||
botMessage.content = message;
|
||||
get().onNewMessage(botMessage);
|
||||
}
|
||||
ChatControllerPool.remove(session.id, botMessage.id);
|
||||
},
|
||||
onError(error) {
|
||||
const isAborted = error.message.includes("aborted");
|
||||
botMessage.content +=
|
||||
"\n\n" +
|
||||
prettyObject({
|
||||
error: true,
|
||||
message: error.message,
|
||||
});
|
||||
botMessage.streaming = false;
|
||||
userMessage.isError = !isAborted;
|
||||
botMessage.isError = !isAborted;
|
||||
get().updateCurrentSession((session) => {
|
||||
session.messages = session.messages.concat();
|
||||
});
|
||||
ChatControllerPool.remove(
|
||||
session.id,
|
||||
botMessage.id ?? messageIndex,
|
||||
);
|
||||
|
||||
console.error("[Chat] failed ", error);
|
||||
},
|
||||
onController(controller) {
|
||||
// collect controller for stop/retry
|
||||
ChatControllerPool.addController(
|
||||
session.id,
|
||||
botMessage.id ?? messageIndex,
|
||||
controller,
|
||||
);
|
||||
},
|
||||
});
|
||||
console.error("[Chat] failed ", error);
|
||||
},
|
||||
onController(controller) {
|
||||
// collect controller for stop/retry
|
||||
ChatControllerPool.addController(
|
||||
session.id,
|
||||
botMessage.id ?? messageIndex,
|
||||
controller,
|
||||
);
|
||||
},
|
||||
});
|
||||
};
|
||||
if (attachFiles && attachFiles.length > 0) {
|
||||
await api.llm
|
||||
.createRAGStore({
|
||||
chatSessionId: session.id,
|
||||
fileInfos: attachFiles,
|
||||
})
|
||||
.then(() => {
|
||||
console.log("[RAG]", "Vector db created");
|
||||
agentCall();
|
||||
});
|
||||
} else {
|
||||
agentCall();
|
||||
}
|
||||
} else {
|
||||
if (modelConfig.model.startsWith("gemini")) {
|
||||
api = new ClientApi(ModelProvider.GeminiPro);
|
||||
|
@ -3,6 +3,7 @@ import { showToast } from "./components/ui-lib";
|
||||
import Locale from "./locales";
|
||||
import { RequestMessage } from "./client/api";
|
||||
import { DEFAULT_MODELS } from "./constant";
|
||||
import { useAccessStore } from "./store";
|
||||
|
||||
export function trimTopic(topic: string) {
|
||||
// Fix an issue where double quotes still show in the Indonesian language
|
||||
@ -296,3 +297,9 @@ export function isVisionModel(model: string) {
|
||||
|
||||
return visionKeywords.some((keyword) => model.includes(keyword));
|
||||
}
|
||||
|
||||
export function isSupportRAGModel(modelName: string) {
|
||||
return DEFAULT_MODELS.filter((model) => model.provider.id === "openai").some(
|
||||
(model) => model.name === modelName,
|
||||
);
|
||||
}
|
||||
|
BIN
docs/images/rag-example.jpg
Normal file
BIN
docs/images/rag-example.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 112 KiB |
BIN
docs/images/rag.png
Normal file
BIN
docs/images/rag.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 251 KiB |
74
docs/rag-cn.md
Normal file
74
docs/rag-cn.md
Normal file
@ -0,0 +1,74 @@
|
||||
# RAG 功能配置说明
|
||||
|
||||
> [!WARNING]
|
||||
> 该功能目前在预览阶段,可能会有较多的问题,请在仔细阅读本文档后再使用。
|
||||
|
||||
## 效果图
|
||||
|
||||

|
||||
|
||||
## 原理
|
||||
|
||||

|
||||
|
||||
## 已知问题
|
||||
|
||||
- 由于接口中使用 nodejs 运行时,在 vercel 环境下接口可能会超时,建议使用 docker 部署
|
||||
- 已开启的插件可能会影响到数据检索,可以关闭部分插件后再使用
|
||||
- 已创建的向量数据不会删除
|
||||
- 同一聊天窗口内即使“清除聊天”也可以访问已经上传的文件内容
|
||||
- RAG 插件需要一定的话术来让模型触发查询
|
||||
- 上传文件部分的 UI 交互可能会变更
|
||||
- 暂不支持文档总结
|
||||
|
||||
## 支持的文件类型
|
||||
|
||||
- txt
|
||||
- md
|
||||
- pdf
|
||||
- docx
|
||||
- csv
|
||||
- json
|
||||
- srt
|
||||
- mp3 (基于OpenAIWhisper)
|
||||
|
||||
## 配置
|
||||
|
||||
1. 登录 https://cloud.qdrant.io 并创建一个账户
|
||||
2. 在控制面板中创建一个 Cluster
|
||||
3. 获取 Cluster 的 Cluster URL 和 API Key
|
||||
4. 完善下面的环境变量配置后即可使用
|
||||
|
||||
## 环境变量
|
||||
|
||||
### `ENABLE_RAG`
|
||||
|
||||
如果你想启用 RAG 功能,将此环境变量设置为 1 即可。
|
||||
|
||||
### `QDRANT_URL`
|
||||
|
||||
qdrant 服务的 Cluster URL。
|
||||
|
||||
### `QDRANT_API_KEY`
|
||||
|
||||
qdrant 服务的 ApiKey。
|
||||
|
||||
### `RAG_CHUNK_SIZE` (可选)
|
||||
|
||||
分割后文档的最大大小(按字符数计算),默认:2000。
|
||||
|
||||
### `RAG_CHUNK_OVERLAP` (可选)
|
||||
|
||||
分割文档时块重叠数量,默认:200。
|
||||
|
||||
### `RAG_RETURN_COUNT` (可选)
|
||||
|
||||
检索时返回的文档数量,默认:4。
|
||||
|
||||
### `RAG_EMBEDDING_MODEL` (可选)
|
||||
|
||||
向量化时使用的向量模型,默认:text-embedding-3-large。
|
||||
可选项:
|
||||
- text-embedding-3-small
|
||||
- text-embedding-3-large
|
||||
- text-embedding-ada-002
|
14
package.json
14
package.json
@ -21,28 +21,37 @@
|
||||
"@aws-sdk/s3-request-presigner": "^3.414.0",
|
||||
"@fortaine/fetch-event-source": "^3.0.6",
|
||||
"@hello-pangea/dnd": "^16.5.0",
|
||||
"@langchain/cohere": "^0.0.6",
|
||||
"@langchain/community": "0.0.30",
|
||||
"@langchain/openai": "0.0.14",
|
||||
"@langchain/pinecone": "^0.0.4",
|
||||
"@next/third-parties": "^14.1.0",
|
||||
"@pinecone-database/pinecone": "^2.2.0",
|
||||
"@qdrant/js-client-rest": "^1.8.2",
|
||||
"@svgr/webpack": "^6.5.1",
|
||||
"@vercel/analytics": "^0.1.11",
|
||||
"@vercel/speed-insights": "^1.0.2",
|
||||
"axios": "^0.26.0",
|
||||
"cheerio": "^1.0.0-rc.12",
|
||||
"d3-dsv": "2",
|
||||
"duck-duck-scrape": "^2.2.4",
|
||||
"emoji-picker-react": "^4.9.2",
|
||||
"encoding": "^0.1.13",
|
||||
"epub2": "^3.0.2",
|
||||
"fuse.js": "^7.0.0",
|
||||
"html-entities": "^2.4.0",
|
||||
"html-to-image": "^1.11.11",
|
||||
"html-to-text": "^9.0.5",
|
||||
"https-proxy-agent": "^7.0.2",
|
||||
"langchain": "0.1.20",
|
||||
"md5": "^2.3.0",
|
||||
"langchain": "0.1.30",
|
||||
"mammoth": "^1.7.1",
|
||||
"mermaid": "^10.6.1",
|
||||
"mime": "^4.0.1",
|
||||
"nanoid": "^5.0.3",
|
||||
"next": "^13.4.9",
|
||||
"node-fetch": "^3.3.1",
|
||||
"officeparser": "^4.0.8",
|
||||
"openai": "^4.28.4",
|
||||
"pdf-parse": "^1.1.1",
|
||||
"react": "^18.2.0",
|
||||
@ -57,6 +66,7 @@
|
||||
"sass": "^1.59.2",
|
||||
"sharp": "^0.33.3",
|
||||
"spark-md5": "^3.0.2",
|
||||
"srt-parser-2": "^1.2.3",
|
||||
"use-debounce": "^9.0.4",
|
||||
"zustand": "^4.3.8"
|
||||
},
|
||||
@ -82,7 +92,7 @@
|
||||
},
|
||||
"resolutions": {
|
||||
"lint-staged/yaml": "^2.2.2",
|
||||
"@langchain/core": "0.1.30",
|
||||
"@langchain/core": "0.1.53",
|
||||
"openai": "4.28.4"
|
||||
},
|
||||
"packageManager": "yarn@1.22.19"
|
||||
|
Loading…
Reference in New Issue
Block a user