This commit is contained in:
Hk-Gosuto 2024-04-07 18:00:21 +08:00
parent 7382ce48bb
commit b00e9f0c79
17 changed files with 307 additions and 122 deletions

View File

@ -50,4 +50,22 @@ DISABLE_FAST_LINK=
# (optional)
# Default: 1
# If your project is not deployed on Vercel, set this value to 1.
NEXT_PUBLIC_ENABLE_NODEJS_PLUGIN=1
# (optional)
# Default: Empty
# If you want to enable RAG, set this value to 1.
NEXT_PUBLIC_ENABLE_RAG=
# (optional)
# Default: Empty
# Embedding model used when vectorizing data for RAG.
RAG_EMBEDDING_MODEL=text-embedding-ada-002
# Configuration is required when turning on RAG.
# Default: Empty
QDRANT_URL=
# Configuration is required when turning on RAG.
# Default: Empty
QDRANT_API_KEY=
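To verify these Qdrant settings before enabling RAG, a minimal connectivity check is possible with the `@qdrant/js-client-rest` package this commit adds (a sketch; this script is not part of the project):

```ts
import { QdrantClient } from "@qdrant/js-client-rest";

// Listing collections is a cheap way to confirm QDRANT_URL and QDRANT_API_KEY
// are valid before turning on NEXT_PUBLIC_ENABLE_RAG.
async function checkQdrant() {
  const client = new QdrantClient({
    url: process.env.QDRANT_URL,
    apiKey: process.env.QDRANT_API_KEY,
  });
  const { collections } = await client.getCollections();
  console.log("[qdrant] reachable; collections:", collections.length);
}

checkQdrant().catch((e) => console.error("[qdrant] connection failed:", e));
```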

View File

@ -25,7 +25,7 @@
> [!WARNING]
> The plugin feature of this project is built on [OpenAI API function calling](https://platform.openai.com/docs/guides/function-calling); forwarding the GitHub Copilot API or similar mock implementations cannot invoke the plugin feature correctly!
![cover](./docs/images/gpt-vision-example.jpg)
![cover](./docs/images/rag-example.jpg)
![plugin-example](./docs/images/plugin-example.png)
@ -35,6 +35,9 @@
## Main Features
- RAG support (preview)
- For configuration, see the [RAG Setup Guide](./docs/rag-cn.md)
- Everything except the plugin tools stays consistent with the upstream project: [ChatGPT-Next-Web main features](https://github.com/Yidadaa/ChatGPT-Next-Web#主要功能)
- Supports OpenAI TTS (text-to-speech) https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/208
@ -142,7 +145,7 @@
- [x] Voice input support https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/208
- [ ] Support for uploading other file types https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/77
- [x] Support for uploading other file types https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/77
- [ ] Azure Storage support https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/issues/217
@ -295,11 +298,9 @@ docker run -d -p 3000:3000 \
| [简体中文](./docs/synchronise-chat-logs-cn.md) | [English](./docs/synchronise-chat-logs-en.md) | [Italiano](./docs/synchronise-chat-logs-es.md) | [日本語](./docs/synchronise-chat-logs-ja.md) | [한국어](./docs/synchronise-chat-logs-ko.md)
## Contributors
## Star History
<a href="https://github.com/Hk-Gosuto/ChatGPT-Next-Web-LangChain/graphs/contributors">
<img src="https://contrib.rocks/image?repo=Hk-Gosuto/ChatGPT-Next-Web-LangChain" />
</a>
[![Star History Chart](https://api.star-history.com/svg?repos=Hk-Gosuto/ChatGPT-Next-Web-LangChain&type=Date)](https://star-history.com/#Hk-Gosuto/ChatGPT-Next-Web-LangChain&Date)
## Donate

View File

@ -7,6 +7,8 @@ import { RunnableSequence } from "@langchain/core/runnables";
import { StringOutputParser } from "@langchain/core/output_parsers";
import { Pinecone } from "@pinecone-database/pinecone";
import { PineconeStore } from "@langchain/pinecone";
import { getServerSideConfig } from "@/app/config/server";
import { QdrantVectorStore } from "@langchain/community/vectorstores/qdrant";
export class RAGSearch extends Tool {
static lc_name() {
@ -34,21 +36,32 @@ export class RAGSearch extends Tool {
/** @ignore */
async _call(inputs: string, runManager?: CallbackManagerForToolRun) {
const pinecone = new Pinecone();
const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX!);
const vectorStore = await PineconeStore.fromExistingIndex(this.embeddings, {
pineconeIndex,
});
const serverConfig = getServerSideConfig();
// const pinecone = new Pinecone();
// const pineconeIndex = pinecone.Index(serverConfig.pineconeIndex!);
// const vectorStore = await PineconeStore.fromExistingIndex(this.embeddings, {
// pineconeIndex,
// });
const vectorStore = await QdrantVectorStore.fromExistingCollection(
this.embeddings,
{
url: process.env.QDRANT_URL,
apiKey: process.env.QDRANT_API_KEY,
collectionName: this.sessionId,
},
);
let context;
const returnCount = process.env.RAG_RETURN_COUNT
? parseInt(process.env.RAG_RETURN_COUNT, 10)
const returnCount = serverConfig.ragReturnCount
? parseInt(serverConfig.ragReturnCount, 10)
: 4;
const results = await vectorStore.similaritySearch(inputs, returnCount, {
sessionId: this.sessionId,
});
console.log("[rag-search]", { inputs, returnCount });
// const results = await vectorStore.similaritySearch(inputs, returnCount, {
// sessionId: this.sessionId,
// });
const results = await vectorStore.similaritySearch(inputs, returnCount);
context = formatDocumentsAsString(results);
console.log("[rag-search]", context);
console.log("[rag-search]", { context });
return context;
// const input = `Text:${context}\n\nQuestion:${inputs}\n\nI need you to answer the question based on the text.`;
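Condensed, the new retrieval path in this tool looks like the following sketch: each chat session gets its own Qdrant collection, which is why the per-document `sessionId` filter from the Pinecone version is no longer needed (function name and defaults are illustrative):

```ts
import { QdrantVectorStore } from "@langchain/community/vectorstores/qdrant";
import { OpenAIEmbeddings } from "@langchain/openai";
import { formatDocumentsAsString } from "langchain/util/document";

// The collection name is the session id, so similaritySearch needs no filter.
async function ragSearch(sessionId: string, query: string, returnCount = 4) {
  const vectorStore = await QdrantVectorStore.fromExistingCollection(
    new OpenAIEmbeddings(),
    {
      url: process.env.QDRANT_URL,
      apiKey: process.env.QDRANT_API_KEY,
      collectionName: sessionId,
    },
  );
  const docs = await vectorStore.similaritySearch(query, returnCount);
  return formatDocumentsAsString(docs);
}
```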

View File

@ -4,6 +4,7 @@ import { ACCESS_CODE_PREFIX, ModelProvider } from "@/app/constant";
import { OpenAIEmbeddings } from "@langchain/openai";
import { Pinecone } from "@pinecone-database/pinecone";
import { PineconeStore } from "@langchain/pinecone";
import { QdrantVectorStore } from "@langchain/community/vectorstores/qdrant";
import { getServerSideConfig } from "@/app/config/server";
interface RequestBody {
@ -27,26 +28,40 @@ async function handle(req: NextRequest) {
const reqBody: RequestBody = await req.json();
const authToken = req.headers.get("Authorization") ?? "";
const token = authToken.trim().replaceAll("Bearer ", "").trim();
const pinecone = new Pinecone();
const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX!);
const serverConfig = getServerSideConfig();
// const pinecone = new Pinecone();
// const pineconeIndex = pinecone.Index(serverConfig.pineconeIndex!);
const apiKey = getOpenAIApiKey(token);
const baseUrl = getOpenAIBaseUrl(reqBody.baseUrl);
const embeddings = new OpenAIEmbeddings(
{
modelName: process.env.RAG_EMBEDDING_MODEL ?? "text-embedding-3-large",
modelName: serverConfig.ragEmbeddingModel ?? "text-embedding-3-large",
openAIApiKey: apiKey,
},
{ basePath: baseUrl },
);
const vectorStore = await PineconeStore.fromExistingIndex(embeddings, {
pineconeIndex,
});
const results = await vectorStore.similaritySearch(reqBody.query, 1, {
sessionId: reqBody.sessionId,
});
console.log(results);
return NextResponse.json(results, {
// const vectorStore = await PineconeStore.fromExistingIndex(embeddings, {
// pineconeIndex,
// });
// const results = await vectorStore.similaritySearch(reqBody.query, 4, {
// sessionId: reqBody.sessionId,
// });
const vectorStore = await QdrantVectorStore.fromExistingCollection(
embeddings,
{
url: process.env.QDRANT_URL,
apiKey: process.env.QDRANT_API_KEY,
collectionName: reqBody.sessionId,
},
);
const returnCount = serverConfig.ragReturnCount
? parseInt(serverConfig.ragReturnCount, 10)
: 4;
const response = await vectorStore.similaritySearch(
reqBody.query,
returnCount,
);
return NextResponse.json(response, {
status: 200,
});
} catch (e) {
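A hypothetical client call to this search route (the path is an assumption mirroring the store route `/api/langchain/rag/store`; the body fields match the handler above):

```ts
// Sketch only: POST the session id and query, get back the matched documents.
async function ragSearchRequest(sessionId: string, query: string) {
  const res = await fetch("/api/langchain/rag/search", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: "Bearer <access-code-or-api-key>",
    },
    body: JSON.stringify({ sessionId, query, baseUrl: "" }),
  });
  return res.json(); // array of documents with pageContent/metadata
}
```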

View File

@ -20,6 +20,7 @@ import { FileInfo } from "@/app/client/platforms/utils";
import mime from "mime";
import LocalFileStorage from "@/app/utils/local_file_storage";
import S3FileStorage from "@/app/utils/s3_file_storage";
import { QdrantVectorStore } from "@langchain/community/vectorstores/qdrant";
interface RequestBody {
sessionId: string;
@ -80,16 +81,17 @@ async function handle(req: NextRequest) {
const apiKey = getOpenAIApiKey(token);
const baseUrl = getOpenAIBaseUrl(reqBody.baseUrl);
const serverConfig = getServerSideConfig();
const pinecone = new Pinecone();
const pineconeIndex = pinecone.Index(process.env.PINECONE_INDEX!);
// const pinecone = new Pinecone();
// const pineconeIndex = pinecone.Index(serverConfig.pineconeIndex!);
const embeddings = new OpenAIEmbeddings(
{
modelName: process.env.RAG_EMBEDDING_MODEL ?? "text-embedding-3-large",
modelName: serverConfig.ragEmbeddingModel,
openAIApiKey: apiKey,
},
{ basePath: baseUrl },
);
//https://js.langchain.com/docs/integrations/vectorstores/pinecone
// https://js.langchain.com/docs/integrations/vectorstores/pinecone
// https://js.langchain.com/docs/integrations/vectorstores/qdrant
// process files
for (let i = 0; i < reqBody.fileInfos.length; i++) {
const fileInfo = reqBody.fileInfos[i];
@ -121,22 +123,33 @@ async function handle(req: NextRequest) {
};
});
// split
const chunkSize = process.env.RAG_CHUNK_SIZE
? parseInt(process.env.RAG_CHUNK_SIZE, 10)
const chunkSize = serverConfig.ragChunkSize
? parseInt(serverConfig.ragChunkSize, 10)
: 2000;
const chunkOverlap = process.env.RAG_CHUNK_OVERLAP
? parseInt(process.env.RAG_CHUNK_OVERLAP, 10)
const chunkOverlap = serverConfig.ragChunkOverlap
? parseInt(serverConfig.ragChunkOverlap, 10)
: 200;
const textSplitter = new RecursiveCharacterTextSplitter({
chunkSize: chunkSize,
chunkOverlap: chunkOverlap,
});
const splits = await textSplitter.splitDocuments(docs);
// remove history
await PineconeStore.fromDocuments(splits, embeddings, {
pineconeIndex,
maxConcurrency: 5,
});
const vectorStore = await QdrantVectorStore.fromDocuments(
splits,
embeddings,
{
url: process.env.QDRANT_URL,
apiKey: process.env.QDRANT_API_KEY,
collectionName: reqBody.sessionId,
},
);
// await PineconeStore.fromDocuments(splits, embeddings, {
// pineconeIndex,
// maxConcurrency: 5,
// });
// const vectorStore = await PineconeStore.fromExistingIndex(embeddings, {
// pineconeIndex,
// });
}
return NextResponse.json(
{
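A standalone sketch of the chunking step above, using the same defaults (RAG_CHUNK_SIZE=2000, RAG_CHUNK_OVERLAP=200; the helper name is illustrative):

```ts
import { Document } from "@langchain/core/documents";
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";

// Split one file's text into overlapping chunks ready for embedding.
async function splitForRag(text: string, fileName: string) {
  const textSplitter = new RecursiveCharacterTextSplitter({
    chunkSize: 2000,
    chunkOverlap: 200,
  });
  return textSplitter.splitDocuments([
    new Document({ pageContent: text, metadata: { fileName } }),
  ]);
}
```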

View File

@ -115,7 +115,7 @@ export abstract class LLMApi {
abstract speech(options: SpeechOptions): Promise<ArrayBuffer>;
abstract transcription(options: TranscriptionOptions): Promise<string>;
abstract toolAgentChat(options: AgentChatOptions): Promise<void>;
abstract createRAGSore(options: CreateRAGStoreOptions): Promise<void>;
abstract createRAGStore(options: CreateRAGStoreOptions): Promise<void>;
abstract usage(): Promise<LLMUsage>;
abstract models(): Promise<LLMModel[]>;
}

View File

@ -20,7 +20,7 @@ import {
} from "@/app/utils";
export class GeminiProApi implements LLMApi {
createRAGSore(options: CreateRAGStoreOptions): Promise<void> {
createRAGStore(options: CreateRAGStoreOptions): Promise<void> {
throw new Error("Method not implemented.");
}
transcription(options: TranscriptionOptions): Promise<string> {

View File

@ -363,7 +363,7 @@ export class ChatGPTApi implements LLMApi {
}
}
async createRAGSore(options: CreateRAGStoreOptions): Promise<void> {
async createRAGStore(options: CreateRAGStoreOptions): Promise<void> {
try {
const accessStore = useAccessStore.getState();
const isAzure = accessStore.provider === ServiceProvider.Azure;
@ -373,7 +373,7 @@ export class ChatGPTApi implements LLMApi {
fileInfos: options.fileInfos,
baseUrl: baseUrl,
};
console.log("[Request] openai payload: ", requestPayload);
console.log("[Request] rag store payload: ", requestPayload);
const controller = new AbortController();
options.onController?.(controller);
let path = "/api/langchain/rag/store";

View File

@ -509,14 +509,13 @@ export function ChatActions(props: {
const [showUploadImage, setShowUploadImage] = useState(false);
const [showUploadFile, setShowUploadFile] = useState(false);
const accessStore = useAccessStore();
useEffect(() => {
const show = isVisionModel(currentModel);
setShowUploadImage(show);
const serverConfig = getServerSideConfig();
setShowUploadFile(
serverConfig.isEnableRAG && !show && isSupportRAGModel(currentModel),
);
const isEnableRAG = !!process.env.NEXT_PUBLIC_ENABLE_RAG;
setShowUploadFile(isEnableRAG && !show && isSupportRAGModel(currentModel));
if (!show) {
props.setAttachImages([]);
props.setUploading(false);
@ -1039,7 +1038,9 @@ function _Chat() {
setIsLoading(true);
const textContent = getMessageTextContent(userMessage);
const images = getMessageImages(userMessage);
chatStore.onUserInput(textContent, images).then(() => setIsLoading(false));
chatStore
.onUserInput(textContent, images, userMessage.fileInfos)
.then(() => setIsLoading(false));
inputRef.current?.focus();
};
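The switch from `getServerSideConfig()` to `process.env.NEXT_PUBLIC_ENABLE_RAG` matters here because this component runs in the browser: Next.js inlines `NEXT_PUBLIC_`-prefixed variables into the client bundle at build time, while server-only config is unavailable there. A minimal sketch of the pattern:

```ts
// In a client component, only NEXT_PUBLIC_ variables are usable; they are
// replaced with literal values at build time, so the check is truthiness on
// a string (the variable is either "1" or undefined).
const isEnableRAG = !!process.env.NEXT_PUBLIC_ENABLE_RAG;
```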

View File

@ -113,5 +113,10 @@ export const getServerSideConfig = () => {
!process.env.S3_ENDPOINT,
isEnableRAG: !!process.env.NEXT_PUBLIC_ENABLE_RAG,
ragEmbeddingModel:
process.env.RAG_EMBEDDING_MODEL ?? "text-embedding-3-large",
ragChunkSize: process.env.RAG_CHUNK_SIZE ?? "2000",
ragChunkOverlap: process.env.RAG_CHUNK_OVERLAP ?? "200",
ragReturnCount: process.env.RAG_RETURN_COUNT ?? "4",
};
};
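Since these values are stored as strings and re-parsed with `parseInt` at each call site, a small helper (hypothetical, not in this commit) could centralize the conversion:

```ts
// Parse a numeric env string, falling back on a default when the variable is
// unset or not a number.
function envInt(value: string | undefined, fallback: number): number {
  const parsed = parseInt(value ?? "", 10);
  return Number.isNaN(parsed) ? fallback : parsed;
}

const ragChunkSize = envInt(process.env.RAG_CHUNK_SIZE, 2000);
const ragReturnCount = envInt(process.env.RAG_RETURN_COUNT, 4);
```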

View File

@ -43,6 +43,7 @@ const DEFAULT_ACCESS_STATE = {
disableGPT4: false,
disableFastLink: false,
customModels: "",
isEnableRAG: false,
};
export const useAccessStore = createPersistStore(
@ -55,6 +56,10 @@ export const useAccessStore = createPersistStore(
return get().needCode;
},
isEnableRAG() {
return ensure(get(), ["isEnableRAG"]);
},
isValidOpenAI() {
return ensure(get(), ["openaiApiKey"]);
},

View File

@ -376,88 +376,96 @@ export const useChatStore = createPersistStore(
});
var api: ClientApi;
api = new ClientApi(ModelProvider.GPT);
const isEnableRAG = !!process.env.NEXT_PUBLIC_ENABLE_RAG;
if (
config.pluginConfig.enable &&
session.mask.usePlugins &&
(allPlugins.length > 0 || !!process.env.NEXT_PUBLIC_ENABLE_RAG) &&
(allPlugins.length > 0 || isEnableRAG) &&
modelConfig.model.startsWith("gpt") &&
modelConfig.model != "gpt-4-vision-preview"
) {
console.log("[ToolAgent] start");
const pluginToolNames = allPlugins.map((m) => m.toolName);
if (!!process.env.NEXT_PUBLIC_ENABLE_RAG)
pluginToolNames.push("rag-search");
if (attachFiles && attachFiles.length > 0) {
console.log("crete rag store");
await api.llm.createRAGSore({
if (isEnableRAG) pluginToolNames.push("rag-search");
const agentCall = () => {
api.llm.toolAgentChat({
chatSessionId: session.id,
fileInfos: attachFiles,
});
}
api.llm.toolAgentChat({
chatSessionId: session.id,
messages: sendMessages,
config: { ...modelConfig, stream: true },
agentConfig: { ...pluginConfig, useTools: pluginToolNames },
onUpdate(message) {
botMessage.streaming = true;
if (message) {
botMessage.content = message;
}
get().updateCurrentSession((session) => {
session.messages = session.messages.concat();
});
},
onToolUpdate(toolName, toolInput) {
botMessage.streaming = true;
if (toolName && toolInput) {
botMessage.toolMessages!.push({
toolName,
toolInput,
messages: sendMessages,
config: { ...modelConfig, stream: true },
agentConfig: { ...pluginConfig, useTools: pluginToolNames },
onUpdate(message) {
botMessage.streaming = true;
if (message) {
botMessage.content = message;
}
get().updateCurrentSession((session) => {
session.messages = session.messages.concat();
});
}
get().updateCurrentSession((session) => {
session.messages = session.messages.concat();
});
},
onFinish(message) {
botMessage.streaming = false;
if (message) {
botMessage.content = message;
get().onNewMessage(botMessage);
}
ChatControllerPool.remove(session.id, botMessage.id);
},
onError(error) {
const isAborted = error.message.includes("aborted");
botMessage.content +=
"\n\n" +
prettyObject({
error: true,
message: error.message,
},
onToolUpdate(toolName, toolInput) {
botMessage.streaming = true;
if (toolName && toolInput) {
botMessage.toolMessages!.push({
toolName,
toolInput,
});
}
get().updateCurrentSession((session) => {
session.messages = session.messages.concat();
});
botMessage.streaming = false;
userMessage.isError = !isAborted;
botMessage.isError = !isAborted;
get().updateCurrentSession((session) => {
session.messages = session.messages.concat();
});
ChatControllerPool.remove(
session.id,
botMessage.id ?? messageIndex,
);
},
onFinish(message) {
botMessage.streaming = false;
if (message) {
botMessage.content = message;
get().onNewMessage(botMessage);
}
ChatControllerPool.remove(session.id, botMessage.id);
},
onError(error) {
const isAborted = error.message.includes("aborted");
botMessage.content +=
"\n\n" +
prettyObject({
error: true,
message: error.message,
});
botMessage.streaming = false;
userMessage.isError = !isAborted;
botMessage.isError = !isAborted;
get().updateCurrentSession((session) => {
session.messages = session.messages.concat();
});
ChatControllerPool.remove(
session.id,
botMessage.id ?? messageIndex,
);
console.error("[Chat] failed ", error);
},
onController(controller) {
// collect controller for stop/retry
ChatControllerPool.addController(
session.id,
botMessage.id ?? messageIndex,
controller,
);
},
});
console.error("[Chat] failed ", error);
},
onController(controller) {
// collect controller for stop/retry
ChatControllerPool.addController(
session.id,
botMessage.id ?? messageIndex,
controller,
);
},
});
};
if (attachFiles && attachFiles.length > 0) {
await api.llm
.createRAGStore({
chatSessionId: session.id,
fileInfos: attachFiles,
})
.then(() => {
console.log("[RAG]", "Vector db created");
agentCall();
});
} else {
agentCall();
}
} else {
if (modelConfig.model.startsWith("gemini")) {
api = new ClientApi(ModelProvider.GeminiPro);
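The `.then()` chain above that gates `agentCall()` on `createRAGStore()` could equivalently be written with `await` (a sketch of the same control flow, not part of this commit):

```ts
// Build the vector store from the attached files first, then start the
// agent chat either way.
if (attachFiles && attachFiles.length > 0) {
  await api.llm.createRAGStore({
    chatSessionId: session.id,
    fileInfos: attachFiles,
  });
  console.log("[RAG]", "Vector db created");
}
agentCall();
```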

BIN
docs/images/rag-example.jpg Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 112 KiB

BIN
docs/images/rag.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 251 KiB

74
docs/rag-cn.md Normal file
View File

@ -0,0 +1,74 @@
# RAG Setup Guide
> [!WARNING]
> This feature is currently in preview and may have quite a few issues. Please read this document carefully before using it.
## Screenshot
![example](./images/rag-example.jpg)
## How It Works
![example](./images/rag.png)
## Known Issues
- Because the API routes use the Node.js runtime, requests may time out in the Vercel environment; Docker deployment is recommended
- Enabled plugins may interfere with retrieval; disabling some plugins can help
- Vector data that has already been created is never deleted
- Within the same chat window, previously uploaded file content remains accessible even after "Clear Chat"
- The RAG plugin needs suitable phrasing in the prompt for the model to trigger a query
- The UI for the file upload flow may change
- Document summarization is not supported yet
## Supported File Types
- txt
- md
- pdf
- docx
- csv
- json
- srt
- mp3 (via OpenAI Whisper)
## Setup
1. Sign in at https://cloud.qdrant.io and create an account
2. Create a Cluster in the dashboard
3. Obtain the Cluster URL and API Key for the Cluster
4. Fill in the environment variables below and the feature is ready to use
## Environment Variables
### `NEXT_PUBLIC_ENABLE_RAG`
Set this environment variable to 1 to enable the RAG feature.
### `QDRANT_URL`
Cluster URL of the Qdrant service.
### `QDRANT_API_KEY`
API key of the Qdrant service.
### `RAG_CHUNK_SIZE` (optional)
Maximum size of each chunk after splitting, measured in characters. Default: 2000.
### `RAG_CHUNK_OVERLAP` (optional)
Number of characters of overlap between chunks when splitting. Default: 200.
### `RAG_RETURN_COUNT` (optional)
Number of documents returned by retrieval. Default: 4.
### `RAG_EMBEDDING_MODEL` (optional)
Embedding model used for vectorization. Default: text-embedding-3-large.
Options:
- text-embedding-3-small
- text-embedding-3-large
- text-embedding-ada-002
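For reference, this is roughly how `RAG_EMBEDDING_MODEL` is consumed server-side in this commit (a condensed sketch; in the real code the API key is taken from the request rather than from the environment):

```ts
import { OpenAIEmbeddings } from "@langchain/openai";

// "text-embedding-3-large" is the fallback when RAG_EMBEDDING_MODEL is unset.
const embeddings = new OpenAIEmbeddings({
  modelName: process.env.RAG_EMBEDDING_MODEL ?? "text-embedding-3-large",
  openAIApiKey: process.env.OPENAI_API_KEY,
});
```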

View File

@ -27,6 +27,7 @@
"@langchain/pinecone": "^0.0.4",
"@next/third-parties": "^14.1.0",
"@pinecone-database/pinecone": "^2.2.0",
"@qdrant/js-client-rest": "^1.8.2",
"@svgr/webpack": "^6.5.1",
"@vercel/analytics": "^0.1.11",
"@vercel/speed-insights": "^1.0.2",

View File

@ -1717,6 +1717,11 @@
resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.57.0.tgz#a5417ae8427873f1dd08b70b3574b453e67b5f7f"
integrity sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==
"@fastify/busboy@^2.0.0":
version "2.1.1"
resolved "https://registry.yarnpkg.com/@fastify/busboy/-/busboy-2.1.1.tgz#b9da6a878a371829a0502c9b6c1c143ef6663f4d"
integrity sha512-vBZP4NlzfOlerQTnba4aqZoMhE/a9HY7HRqoOPaETQcSQuWEIyZMHGfVu6w9wGtGK5fED5qRs2DteVCjOH60sA==
"@fortaine/fetch-event-source@^3.0.6":
version "3.0.6"
resolved "https://registry.yarnpkg.com/@fortaine/fetch-event-source/-/fetch-event-source-3.0.6.tgz#b8552a2ca2c5202f5699b93a92be0188d422b06e"
@ -1977,6 +1982,20 @@
resolved "https://registry.yarnpkg.com/@pkgr/core/-/core-0.1.1.tgz#1ec17e2edbec25c8306d424ecfbf13c7de1aaa31"
integrity sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA==
"@qdrant/js-client-rest@^1.8.2":
version "1.8.2"
resolved "https://registry.yarnpkg.com/@qdrant/js-client-rest/-/js-client-rest-1.8.2.tgz#7ea149c791e6c89da931c5a8fd043f61a97aca56"
integrity sha512-BCGC4YRcqjRxXVo500CxjhluPpGO0XpOwojauT8675Duv24YTlkhvDRmc1c9k/df2+yH/typtkecK3VOi3CD7A==
dependencies:
"@qdrant/openapi-typescript-fetch" "1.2.6"
"@sevinf/maybe" "0.5.0"
undici "~5.28.4"
"@qdrant/openapi-typescript-fetch@1.2.6":
version "1.2.6"
resolved "https://registry.yarnpkg.com/@qdrant/openapi-typescript-fetch/-/openapi-typescript-fetch-1.2.6.tgz#c2682a9fa26ded86384f421c991f6c461785af7e"
integrity sha512-oQG/FejNpItrxRHoyctYvT3rwGZOnK4jr3JdppO/c78ktDvkWiPXPHNsrDf33K9sZdRb6PR7gi4noIapu5q4HA==
"@remix-run/router@1.15.3":
version "1.15.3"
resolved "https://registry.yarnpkg.com/@remix-run/router/-/router-1.15.3.tgz#d2509048d69dbb72d5389a14945339f1430b2d3c"
@ -1995,6 +2014,11 @@
domhandler "^5.0.3"
selderee "^0.11.0"
"@sevinf/maybe@0.5.0":
version "0.5.0"
resolved "https://registry.yarnpkg.com/@sevinf/maybe/-/maybe-0.5.0.tgz#e59fcea028df615fe87d708bb30e1f338e46bb44"
integrity sha512-ARhyoYDnY1LES3vYI0fiG6e9esWfTNcXcO6+MPJJXcnyMV3bim4lnFt45VXouV7y82F4x3YH8nOQ6VztuvUiWg==
"@sinclair/typebox@^0.29.0":
version "0.29.6"
resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.29.6.tgz#4cd8372f9247372edd5fc5af44f67e2032c46e2f"
@ -8263,6 +8287,13 @@ undici-types@~5.26.4:
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==
undici@~5.28.4:
version "5.28.4"
resolved "https://registry.yarnpkg.com/undici/-/undici-5.28.4.tgz#6b280408edb6a1a604a9b20340f45b422e373068"
integrity sha512-72RFADWFqKmUb2hmmvNODKL3p9hcB6Gt2DOQMis1SEBaV6a4MH8soBvzg+95CYhCKPFedut2JY9bMfrDl9D23g==
dependencies:
"@fastify/busboy" "^2.0.0"
unicode-canonical-property-names-ecmascript@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz#301acdc525631670d39f6146e0e77ff6bbdebddc"