From 2a59a38c23bdede664dc79691431177a1949ec3f Mon Sep 17 00:00:00 2001
From: hycqwq
Date: Sat, 3 May 2025 19:55:15 +0800
Subject: [PATCH] feat: Add service provider OpenRouter

---
 .env.template                         |   6 +
 app/api/[provider]/[...path]/route.ts |   3 +
 app/api/auth.ts                       |   3 +
 app/api/openrouter.ts                 | 128 ++++++++++
 app/client/api.ts                     |  12 +
 app/client/platforms/openrouter.ts    | 287 +++++++++++++++++++++
 app/components/settings.tsx           |  42 ++++
 app/config/server.ts                  |   9 +
 app/constant.ts                       | 349 ++++++++++++++++++++++++++
 app/locales/cn.ts                     |  11 +
 app/locales/da.ts                     |  11 +
 app/locales/en.ts                     |  11 +
 app/store/access.ts                   |  12 +
 13 files changed, 884 insertions(+)
 create mode 100644 app/api/openrouter.ts
 create mode 100644 app/client/platforms/openrouter.ts

diff --git a/.env.template b/.env.template
index 4efaa2ff8..4df01a650 100644
--- a/.env.template
+++ b/.env.template
@@ -81,3 +81,9 @@ SILICONFLOW_API_KEY=
 
 ### siliconflow Api url (optional)
 SILICONFLOW_URL=
+
+### openrouter Api key (optional)
+OPENROUTER_API_KEY=
+
+### openrouter Api url (optional)
+OPENROUTER_URL=
diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts
index 8975bf971..15e7be12b 100644
--- a/app/api/[provider]/[...path]/route.ts
+++ b/app/api/[provider]/[...path]/route.ts
@@ -15,6 +15,7 @@ import { handle as siliconflowHandler } from "../../siliconflow";
 import { handle as xaiHandler } from "../../xai";
 import { handle as chatglmHandler } from "../../glm";
 import { handle as proxyHandler } from "../../proxy";
+import { handle as openrouterHandler } from "../../openrouter";
 
 async function handle(
   req: NextRequest,
@@ -50,6 +51,8 @@ async function handle(
       return chatglmHandler(req, { params });
     case ApiPath.SiliconFlow:
       return siliconflowHandler(req, { params });
+    case ApiPath.OpenRouter:
+      return openrouterHandler(req, { params });
     case ApiPath.OpenAI:
       return openaiHandler(req, { params });
     default:
diff --git a/app/api/auth.ts b/app/api/auth.ts
index 8c78c70c8..1cbf135c9 100644
--- a/app/api/auth.ts
+++ b/app/api/auth.ts
@@ -104,6 +104,9 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) {
     case ModelProvider.SiliconFlow:
       systemApiKey = serverConfig.siliconFlowApiKey;
       break;
+    case ModelProvider.OpenRouter:
+      systemApiKey = serverConfig.openrouterApiKey;
+      break;
     case ModelProvider.GPT:
     default:
       if (req.nextUrl.pathname.includes("azure/deployments")) {
diff --git a/app/api/openrouter.ts b/app/api/openrouter.ts
new file mode 100644
index 000000000..26fa42c6a
--- /dev/null
+++ b/app/api/openrouter.ts
@@ -0,0 +1,128 @@
+import { getServerSideConfig } from "@/app/config/server";
+import {
+  OPENROUTER_BASE_URL,
+  ApiPath,
+  ModelProvider,
+  ServiceProvider,
+} from "@/app/constant";
+import { prettyObject } from "@/app/utils/format";
+import { NextRequest, NextResponse } from "next/server";
+import { auth } from "@/app/api/auth";
+import { isModelNotavailableInServer } from "@/app/utils/model";
+
+const serverConfig = getServerSideConfig();
+
+export async function handle(
+  req: NextRequest,
+  { params }: { params: { path: string[] } },
+) {
+  console.log("[OpenRouter Route] params ", params);
+
+  if (req.method === "OPTIONS") {
+    return NextResponse.json({ body: "OK" }, { status: 200 });
+  }
+
+  const authResult = auth(req, ModelProvider.OpenRouter);
+  if (authResult.error) {
+    return NextResponse.json(authResult, {
+      status: 401,
+    });
+  }
+
+  try {
+    const response = await request(req);
+    return response;
+  } catch (e) {
+    console.error("[OpenRouter] ", e);
+    return NextResponse.json(prettyObject(e));
+  }
+}
+
+async function request(req: NextRequest) {
+  const controller = new AbortController();
+
+  // use the configured base url, or just remove the ApiPath.OpenRouter prefix
+  let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.OpenRouter, "");
+
+  let baseUrl = serverConfig.openrouterUrl || OPENROUTER_BASE_URL;
+
+  if (!baseUrl.startsWith("http")) {
+    baseUrl = `https://${baseUrl}`;
+  }
+
+  if (baseUrl.endsWith("/")) {
+    baseUrl = baseUrl.slice(0, -1);
+  }
+
+  console.log("[Proxy] ", path);
+  console.log("[Base Url]", baseUrl);
+
+  const timeoutId = setTimeout(
+    () => {
+      controller.abort();
+    },
+    10 * 60 * 1000,
+  );
+
+  const fetchUrl = `${baseUrl}${path}`;
+  const fetchOptions: RequestInit = {
+    headers: {
+      "Content-Type": "application/json",
+      Authorization: req.headers.get("Authorization") ?? "",
+    },
+    method: req.method,
+    body: req.body,
+    redirect: "manual",
+    // @ts-ignore
+    duplex: "half",
+    signal: controller.signal,
+  };
+
+  // #1815 try to refuse requests to models that are not allowed on this server
+  if (serverConfig.customModels && req.body) {
+    try {
+      const clonedBody = await req.text();
+      fetchOptions.body = clonedBody;
+
+      const jsonBody = JSON.parse(clonedBody) as { model?: string };
+
+      // block the request if the requested model is not available on this server
+      if (
+        isModelNotavailableInServer(
+          serverConfig.customModels,
+          jsonBody?.model as string,
+          ServiceProvider.OpenRouter as string,
+        )
+      ) {
+        return NextResponse.json(
+          {
+            error: true,
+            message: `you are not allowed to use ${jsonBody?.model} model`,
+          },
+          {
+            status: 403,
+          },
+        );
+      }
+    } catch (e) {
+      console.error(`[OpenRouter] filter`, e);
+    }
+  }
+  try {
+    const res = await fetch(fetchUrl, fetchOptions);
+
+    // to prevent browser prompt for credentials
+    const newHeaders = new Headers(res.headers);
+    newHeaders.delete("www-authenticate");
+    // to disable nginx buffering
+    newHeaders.set("X-Accel-Buffering", "no");
+
+    return new Response(res.body, {
+      status: res.status,
+      statusText: res.statusText,
+      headers: newHeaders,
+    });
+  } finally {
+    clearTimeout(timeoutId);
+  }
+}
diff --git a/app/client/api.ts b/app/client/api.ts
index f5288593d..eb43e1c79 100644
--- a/app/client/api.ts
+++ b/app/client/api.ts
@@ -24,6 +24,7 @@ import { DeepSeekApi } from "./platforms/deepseek";
 import { XAIApi } from "./platforms/xai";
 import { ChatGLMApi } from "./platforms/glm";
 import { SiliconflowApi } from "./platforms/siliconflow";
+import { OpenRouterApi } from "./platforms/openrouter";
 
 export const ROLES = ["system", "user", "assistant"] as const;
 export type MessageRole = (typeof ROLES)[number];
@@ -173,6 +174,9 @@
       case ModelProvider.SiliconFlow:
         this.llm = new SiliconflowApi();
         break;
+      case ModelProvider.OpenRouter:
+        this.llm = new OpenRouterApi();
+        break;
       default:
         this.llm = new ChatGPTApi();
     }
@@ -265,6 +269,8 @@ export function getHeaders(ignoreHeaders: boolean = false) {
     const isChatGLM = modelConfig.providerName === ServiceProvider.ChatGLM;
     const isSiliconFlow =
       modelConfig.providerName === ServiceProvider.SiliconFlow;
+    const isOpenRouter =
+      modelConfig.providerName === ServiceProvider.OpenRouter;
     const isEnabledAccessControl = accessStore.enabledAccessControl();
     const apiKey = isGoogle
       ? accessStore.googleApiKey
@@ -286,6 +292,8 @@ export function getHeaders(ignoreHeaders: boolean = false) {
         ? accessStore.chatglmApiKey
         : isSiliconFlow
           ? accessStore.siliconflowApiKey
+          : isOpenRouter
+            ? accessStore.openrouterApiKey
           : isIflytek
             ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret
accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret @@ -304,6 +312,7 @@ export function getHeaders(ignoreHeaders: boolean = false) { isXAI, isChatGLM, isSiliconFlow, + isOpenRouter, apiKey, isEnabledAccessControl, }; @@ -332,6 +341,7 @@ export function getHeaders(ignoreHeaders: boolean = false) { isXAI, isChatGLM, isSiliconFlow, + isOpenRouter, apiKey, isEnabledAccessControl, } = getConfig(); @@ -382,6 +392,8 @@ export function getClientApi(provider: ServiceProvider): ClientApi { return new ClientApi(ModelProvider.ChatGLM); case ServiceProvider.SiliconFlow: return new ClientApi(ModelProvider.SiliconFlow); + case ServiceProvider.OpenRouter: + return new ClientApi(ModelProvider.OpenRouter); default: return new ClientApi(ModelProvider.GPT); } diff --git a/app/client/platforms/openrouter.ts b/app/client/platforms/openrouter.ts new file mode 100644 index 000000000..52ab16e01 --- /dev/null +++ b/app/client/platforms/openrouter.ts @@ -0,0 +1,287 @@ +"use client"; +// azure and openai, using same models. so using same LLMApi. +import { + ApiPath, + OPENROUTER_BASE_URL, + OpenRouter, + DEFAULT_MODELS, +} from "@/app/constant"; +import { + useAccessStore, + useAppConfig, + useChatStore, + ChatMessageTool, + usePluginStore, +} from "@/app/store"; +import { preProcessImageContent, streamWithThink } from "@/app/utils/chat"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + SpeechOptions, +} from "../api"; +import { getClientConfig } from "@/app/config/client"; +import { + getMessageTextContent, + getMessageTextContentWithoutThinking, + isVisionModel, + getTimeoutMSByModel, +} from "@/app/utils"; +import { RequestPayload } from "./openai"; + +import { fetch } from "@/app/utils/stream"; +export interface OpenRouterListModelResponse { + object: string; + data: Array<{ + id: string; + object: string; + root: string; + }>; +} + +export class OpenRouterApi implements LLMApi { + private disableListModels = false; + + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.openrouterUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = ApiPath.OpenRouter; + baseUrl = isApp ? OPENROUTER_BASE_URL : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if ( + !baseUrl.startsWith("http") && + !baseUrl.startsWith(ApiPath.OpenRouter) + ) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? ""; + } + + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + + async chat(options: ChatOptions) { + const visionModel = isVisionModel(options.config.model); + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + if (v.role === "assistant") { + const content = getMessageTextContentWithoutThinking(v); + messages.push({ role: v.role, content }); + } else { + const content = visionModel + ? 
+          ? await preProcessImageContent(v.content)
+          : getMessageTextContent(v);
+        messages.push({ role: v.role, content });
+      }
+    }
+
+    const modelConfig = {
+      ...useAppConfig.getState().modelConfig,
+      ...useChatStore.getState().currentSession().mask.modelConfig,
+      ...{
+        model: options.config.model,
+        providerName: options.config.providerName,
+      },
+    };
+
+    const requestPayload: RequestPayload = {
+      messages,
+      stream: options.config.stream,
+      model: modelConfig.model,
+      temperature: modelConfig.temperature,
+      presence_penalty: modelConfig.presence_penalty,
+      frequency_penalty: modelConfig.frequency_penalty,
+      top_p: modelConfig.top_p,
+      // max_tokens: Math.max(modelConfig.max_tokens, 1024),
+      // max_tokens is intentionally not sent; see the matching note in ./openai.ts.
+    };
+
+    console.log("[Request] openrouter payload: ", requestPayload);
+
+    const shouldStream = !!options.config.stream;
+    const controller = new AbortController();
+    options.onController?.(controller);
+
+    try {
+      const chatPath = this.path(OpenRouter.ChatPath);
+      const chatPayload = {
+        method: "POST",
+        body: JSON.stringify(requestPayload),
+        signal: controller.signal,
+        headers: getHeaders(),
+      };
+
+      // console.log(chatPayload);
+
+      // Use extended timeout for thinking models as they typically require more processing time
+      const requestTimeoutId = setTimeout(
+        () => controller.abort(),
+        getTimeoutMSByModel(options.config.model),
+      );
+
+      if (shouldStream) {
+        const [tools, funcs] = usePluginStore
+          .getState()
+          .getAsTools(
+            useChatStore.getState().currentSession().mask?.plugin || [],
+          );
+        return streamWithThink(
+          chatPath,
+          requestPayload,
+          getHeaders(),
+          tools as any,
+          funcs,
+          controller,
+          // parseSSE
+          (text: string, runTools: ChatMessageTool[]) => {
+            // console.log("parseSSE", text, runTools);
+            const json = JSON.parse(text);
+            const choices = json.choices as Array<{
+              delta: {
+                content: string | null;
+                tool_calls: ChatMessageTool[];
+                reasoning_content: string | null;
+              };
+            }>;
+            const tool_calls = choices[0]?.delta?.tool_calls;
+            if (tool_calls?.length > 0) {
+              const index = tool_calls[0]?.index;
+              const id = tool_calls[0]?.id;
+              const args = tool_calls[0]?.function?.arguments;
+              if (id) {
+                runTools.push({
+                  id,
+                  type: tool_calls[0]?.type,
+                  function: {
+                    name: tool_calls[0]?.function?.name as string,
+                    arguments: args,
+                  },
+                });
+              } else {
+                // @ts-ignore
+                runTools[index]["function"]["arguments"] += args;
+              }
+            }
+            const reasoning = choices[0]?.delta?.reasoning_content;
+            const content = choices[0]?.delta?.content;
+
+            // Skip if both content and reasoning_content are empty or null
+            if (
+              (!reasoning || reasoning.length === 0) &&
+              (!content || content.length === 0)
+            ) {
+              return {
+                isThinking: false,
+                content: "",
+              };
+            }
+
+            if (reasoning && reasoning.length > 0) {
+              return {
+                isThinking: true,
+                content: reasoning,
+              };
+            } else if (content && content.length > 0) {
+              return {
+                isThinking: false,
+                content: content,
+              };
+            }
+
+            return {
+              isThinking: false,
+              content: "",
+            };
+          },
+          // processToolMessage, include tool_calls message and tool call results
+          (
+            requestPayload: RequestPayload,
+            toolCallMessage: any,
+            toolCallResult: any[],
+          ) => {
+            // @ts-ignore
+            requestPayload?.messages?.splice(
+              // @ts-ignore
+              requestPayload?.messages?.length,
+              0,
+              toolCallMessage,
+              ...toolCallResult,
+            );
+          },
+          options,
+        );
+      } else {
+        const res = await fetch(chatPath, chatPayload);
+        clearTimeout(requestTimeoutId);
+
+        const resJson = await res.json();
+        const message = this.extractMessage(resJson);
+        options.onFinish(message, res);
+      }
+    } catch (e) {
+      console.log("[Request] failed to make a chat request", e);
+      options.onError?.(e as Error);
+    }
+  }
+
+  async usage() {
+    return {
+      used: 0,
+      total: 0,
+    };
+  }
+
+  async models(): Promise<LLMModel[]> {
+    if (this.disableListModels) {
+      return DEFAULT_MODELS.slice();
+    }
+
+    const res = await fetch(this.path(OpenRouter.ListModelPath), {
+      method: "GET",
+      headers: {
+        ...getHeaders(),
+      },
+    });
+
+    const resJson = (await res.json()) as OpenRouterListModelResponse;
+    const chatModels = resJson.data;
+    console.log("[Models]", chatModels);
+
+    if (!chatModels) {
+      return [];
+    }
+
+    let seq = 1000; // keep the ordering consistent with constant.ts
+    return chatModels.map((m) => ({
+      name: m.id,
+      available: true,
+      sorted: seq++,
+      provider: {
+        id: "openrouter",
+        providerName: "OpenRouter",
+        providerType: "openrouter",
+        sorted: 15,
+      },
+    }));
+  }
+}
diff --git a/app/components/settings.tsx b/app/components/settings.tsx
index 68ebcf084..2d3182dbd 100644
--- a/app/components/settings.tsx
+++ b/app/components/settings.tsx
@@ -75,6 +75,7 @@ import {
   ChatGLM,
   DeepSeek,
   SiliconFlow,
+  OpenRouter,
 } from "../constant";
 import { Prompt, SearchService, usePromptStore } from "../store/prompt";
 import { ErrorBoundary } from "./error";
@@ -1359,6 +1360,46 @@ export function Settings() {
     </>
   );
 
+  const openrouterConfigComponent = accessStore.provider ===
+    ServiceProvider.OpenRouter && (
+    <>
+      <ListItem
+        title={Locale.Settings.Access.OpenRouter.Endpoint.Title}
+        subTitle={
+          Locale.Settings.Access.OpenRouter.Endpoint.SubTitle +
+          OpenRouter.ExampleEndpoint
+        }
+      >
+        <input
+          aria-label={Locale.Settings.Access.OpenRouter.Endpoint.Title}
+          type="text"
+          value={accessStore.openrouterUrl}
+          placeholder={OpenRouter.ExampleEndpoint}
+          onChange={(e) =>
+            accessStore.update(
+              (access) => (access.openrouterUrl = e.currentTarget.value),
+            )
+          }
+        ></input>
+      </ListItem>
+      <ListItem
+        title={Locale.Settings.Access.OpenRouter.ApiKey.Title}
+        subTitle={Locale.Settings.Access.OpenRouter.ApiKey.SubTitle}
+      >
+        <PasswordInput
+          aria-label={Locale.Settings.Access.OpenRouter.ApiKey.Title}
+          value={accessStore.openrouterApiKey}
+          type="text"
+          placeholder={Locale.Settings.Access.OpenRouter.ApiKey.Placeholder}
+          onChange={(e) => {
+            accessStore.update(
+              (access) => (access.openrouterApiKey = e.currentTarget.value),
+            );
+          }}
+        />
+      </ListItem>
+    </>
+  );
+
   const stabilityConfigComponent = accessStore.provider ===
     ServiceProvider.Stability && (
@@ -1822,6 +1863,7 @@ export function Settings() {
                   {XAIConfigComponent}
                   {chatglmConfigComponent}
                   {siliconflowConfigComponent}
+                  {openrouterConfigComponent}
                 </>
               )}
diff --git a/app/config/server.ts b/app/config/server.ts
index 43d4ff833..e4d16510c 100644
--- a/app/config/server.ts
+++ b/app/config/server.ts
@@ -88,6 +88,10 @@ declare global {
       SILICONFLOW_URL?: string;
       SILICONFLOW_API_KEY?: string;
 
+      // openrouter only
+      OPENROUTER_URL?: string;
+      OPENROUTER_API_KEY?: string;
+
       // custom template for preprocessing user input
       DEFAULT_INPUT_TEMPLATE?: string;
 
@@ -163,6 +167,7 @@ export const getServerSideConfig = () => {
   const isXAI = !!process.env.XAI_API_KEY;
   const isChatGLM = !!process.env.CHATGLM_API_KEY;
   const isSiliconFlow = !!process.env.SILICONFLOW_API_KEY;
+  const isOpenRouter = !!process.env.OPENROUTER_API_KEY;
 
   // const apiKeyEnvVar = process.env.OPENAI_API_KEY ?? "";
""; // const apiKeys = apiKeyEnvVar.split(",").map((v) => v.trim()); // const randomIndex = Math.floor(Math.random() * apiKeys.length); @@ -246,6 +251,10 @@ export const getServerSideConfig = () => { siliconFlowUrl: process.env.SILICONFLOW_URL, siliconFlowApiKey: getApiKey(process.env.SILICONFLOW_API_KEY), + isOpenRouter, + openrouterUrl: process.env.OPENROUTER_URL, + openrouterApiKey: getApiKey(process.env.OPENROUTER_API_KEY), + gtmId: process.env.GTM_ID, gaId: process.env.GA_ID || DEFAULT_GA_ID, diff --git a/app/constant.ts b/app/constant.ts index 9fcea1187..940ca8a2e 100644 --- a/app/constant.ts +++ b/app/constant.ts @@ -36,6 +36,8 @@ export const CHATGLM_BASE_URL = "https://open.bigmodel.cn"; export const SILICONFLOW_BASE_URL = "https://api.siliconflow.cn"; +export const OPENROUTER_BASE_URL = "https://openrouter.ai/api"; + export const CACHE_URL_PREFIX = "/api/cache"; export const UPLOAD_URL = `${CACHE_URL_PREFIX}/upload`; @@ -72,6 +74,7 @@ export enum ApiPath { ChatGLM = "/api/chatglm", DeepSeek = "/api/deepseek", SiliconFlow = "/api/siliconflow", + OpenRouter = "/api/openrouter", } export enum SlotID { @@ -130,6 +133,7 @@ export enum ServiceProvider { ChatGLM = "ChatGLM", DeepSeek = "DeepSeek", SiliconFlow = "SiliconFlow", + OpenRouter = "OpenRouter", } // Google API safety settings, see https://ai.google.dev/gemini-api/docs/safety-settings @@ -156,6 +160,7 @@ export enum ModelProvider { ChatGLM = "ChatGLM", DeepSeek = "DeepSeek", SiliconFlow = "SiliconFlow", + OpenRouter = "OpenRouter", } export const Stability = { @@ -266,6 +271,12 @@ export const SiliconFlow = { ListModelPath: "v1/models?&sub_type=chat", }; +export const OpenRouter = { + ExampleEndpoint: OPENROUTER_BASE_URL, + ChatPath: "v1/chat/completions", + ListModelPath: "v1/models", +}; + export const DEFAULT_INPUT_TEMPLATE = `{{input}}`; // input / time / model / lang // export const DEFAULT_SYSTEM_TEMPLATE = ` // You are ChatGPT, a large language model trained by {{ServiceProvider}}. 
@@ -671,6 +682,333 @@ const siliconflowModels = [
   "Pro/deepseek-ai/DeepSeek-V3",
 ];
 
+const openrouterModels = [
+  "microsoft/phi-4-reasoning-plus:free",
+  "microsoft/phi-4-reasoning-plus",
+  "microsoft/phi-4-reasoning:free",
+  "qwen/qwen3-0.6b-04-28:free",
+  "inception/mercury-coder-small-beta",
+  "qwen/qwen3-1.7b:free",
+  "qwen/qwen3-4b:free",
+  "opengvlab/internvl3-14b:free",
+  "opengvlab/internvl3-2b:free",
+  "deepseek/deepseek-prover-v2:free",
+  "deepseek/deepseek-prover-v2",
+  "meta-llama/llama-guard-4-12b",
+  "qwen/qwen3-30b-a3b:free",
+  "qwen/qwen3-30b-a3b",
+  "qwen/qwen3-8b:free",
+  "qwen/qwen3-8b",
+  "qwen/qwen3-14b:free",
+  "qwen/qwen3-14b",
+  "qwen/qwen3-32b:free",
+  "qwen/qwen3-32b",
+  "qwen/qwen3-235b-a22b:free",
+  "qwen/qwen3-235b-a22b",
+  "tngtech/deepseek-r1t-chimera:free",
+  "thudm/glm-z1-rumination-32b",
+  "thudm/glm-z1-9b:free",
+  "thudm/glm-4-9b:free",
+  "microsoft/mai-ds-r1:free",
+  "google/gemini-2.5-pro-preview-03-25",
+  "thudm/glm-z1-32b:free",
+  "thudm/glm-z1-32b",
+  "thudm/glm-4-32b:free",
+  "thudm/glm-4-32b",
+  "google/gemini-2.5-flash-preview",
+  "google/gemini-2.5-flash-preview:thinking",
+  "openai/o4-mini-high",
+  "openai/o3",
+  "openai/o4-mini",
+  "shisa-ai/shisa-v2-llama3.3-70b:free",
+  "qwen/qwen2.5-coder-7b-instruct",
+  "openai/gpt-4.1",
+  "openai/gpt-4.1-mini",
+  "openai/gpt-4.1-nano",
+  "eleutherai/llemma_7b",
+  "alfredpros/codellama-7b-instruct-solidity",
+  "arliai/qwq-32b-arliai-rpr-v1:free",
+  "agentica-org/deepcoder-14b-preview:free",
+  "moonshotai/kimi-vl-a3b-thinking:free",
+  "x-ai/grok-3-mini-beta",
+  "x-ai/grok-3-beta",
+  "nvidia/llama-3.3-nemotron-super-49b-v1:free",
+  "nvidia/llama-3.3-nemotron-super-49b-v1",
+  "nvidia/llama-3.1-nemotron-ultra-253b-v1:free",
+  "meta-llama/llama-4-maverick:free",
+  "meta-llama/llama-4-maverick",
+  "meta-llama/llama-4-scout:free",
+  "meta-llama/llama-4-scout",
+  "all-hands/openhands-lm-32b-v0.1",
+  "mistral/ministral-8b",
+  "deepseek/deepseek-v3-base:free",
+  "scb10x/llama3.1-typhoon2-8b-instruct",
+  "scb10x/llama3.1-typhoon2-70b-instruct",
+  "allenai/molmo-7b-d:free",
+  "bytedance-research/ui-tars-72b:free",
+  "qwen/qwen2.5-vl-3b-instruct:free",
+  "google/gemini-2.5-pro-exp-03-25",
+  "qwen/qwen2.5-vl-32b-instruct:free",
+  "qwen/qwen2.5-vl-32b-instruct",
+  "deepseek/deepseek-chat-v3-0324:free",
+  "deepseek/deepseek-chat-v3-0324",
+  "featherless/qwerky-72b:free",
+  "openai/o1-pro",
+  "mistralai/mistral-small-3.1-24b-instruct:free",
+  "mistralai/mistral-small-3.1-24b-instruct",
+  "open-r1/olympiccoder-32b:free",
+  "steelskull/l3.3-electra-r1-70b",
+  "google/gemma-3-1b-it:free",
+  "google/gemma-3-4b-it:free",
+  "google/gemma-3-4b-it",
+  "ai21/jamba-1.6-large",
+  "ai21/jamba-1.6-mini",
+  "google/gemma-3-12b-it:free",
+  "google/gemma-3-12b-it",
+  "cohere/command-a",
+  "openai/gpt-4o-mini-search-preview",
+  "openai/gpt-4o-search-preview",
+  "rekaai/reka-flash-3:free",
+  "google/gemma-3-27b-it:free",
+  "google/gemma-3-27b-it",
+  "thedrummer/anubis-pro-105b-v1",
+  "latitudegames/wayfarer-large-70b-llama-3.3",
+  "thedrummer/skyfall-36b-v2",
+  "microsoft/phi-4-multimodal-instruct",
+  "perplexity/sonar-reasoning-pro",
+  "perplexity/sonar-pro",
+  "perplexity/sonar-deep-research",
+  "deepseek/deepseek-r1-zero:free",
+  "qwen/qwq-32b:free",
+  "qwen/qwq-32b",
+  "moonshotai/moonlight-16b-a3b-instruct:free",
+  "nousresearch/deephermes-3-llama-3-8b-preview:free",
+  "openai/gpt-4.5-preview",
+  "google/gemini-2.0-flash-lite-001",
+  "anthropic/claude-3.7-sonnet",
+  "anthropic/claude-3.7-sonnet:thinking",
"anthropic/claude-3.7-sonnet:beta", + "perplexity/r1-1776", + "mistralai/mistral-saba", + "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", + "cognitivecomputations/dolphin3.0-mistral-24b:free", + "meta-llama/llama-guard-3-8b", + "openai/o3-mini-high", + "deepseek/deepseek-r1-distill-llama-8b", + "google/gemini-2.0-flash-001", + "qwen/qwen-vl-plus", + "aion-labs/aion-1.0", + "aion-labs/aion-1.0-mini", + "aion-labs/aion-rp-llama-3.1-8b", + "qwen/qwen-vl-max", + "qwen/qwen-turbo", + "qwen/qwen2.5-vl-72b-instruct:free", + "qwen/qwen2.5-vl-72b-instruct", + "qwen/qwen-plus", + "qwen/qwen-max", + "openai/o3-mini", + "deepseek/deepseek-r1-distill-qwen-1.5b", + "mistralai/mistral-small-24b-instruct-2501:free", + "mistralai/mistral-small-24b-instruct-2501", + "deepseek/deepseek-r1-distill-qwen-32b:free", + "deepseek/deepseek-r1-distill-qwen-32b", + "deepseek/deepseek-r1-distill-qwen-14b:free", + "deepseek/deepseek-r1-distill-qwen-14b", + "perplexity/sonar-reasoning", + "perplexity/sonar", + "liquid/lfm-7b", + "liquid/lfm-3b", + "deepseek/deepseek-r1-distill-llama-70b:free", + "deepseek/deepseek-r1-distill-llama-70b", + "deepseek/deepseek-r1:free", + "deepseek/deepseek-r1", + "minimax/minimax-01", + "mistralai/codestral-2501", + "microsoft/phi-4", + "deepseek/deepseek-chat:free", + "deepseek/deepseek-chat", + "sao10k/l3.3-euryale-70b", + "openai/o1", + "eva-unit-01/eva-llama-3.33-70b", + "x-ai/grok-2-vision-1212", + "x-ai/grok-2-1212", + "cohere/command-r7b-12-2024", + "google/gemini-2.0-flash-exp:free", + "meta-llama/llama-3.3-70b-instruct:free", + "meta-llama/llama-3.3-70b-instruct", + "amazon/nova-lite-v1", + "amazon/nova-micro-v1", + "amazon/nova-pro-v1", + "qwen/qwq-32b-preview:free", + "qwen/qwq-32b-preview", + "google/learnlm-1.5-pro-experimental:free", + "eva-unit-01/eva-qwen-2.5-72b", + "openai/gpt-4o-2024-11-20", + "mistralai/mistral-large-2411", + "mistralai/mistral-large-2407", + "mistralai/pixtral-large-2411", + "x-ai/grok-vision-beta", + "infermatic/mn-inferor-12b", + "qwen/qwen-2.5-coder-32b-instruct:free", + "qwen/qwen-2.5-coder-32b-instruct", + "raifle/sorcererlm-8x22b", + "eva-unit-01/eva-qwen-2.5-32b", + "thedrummer/unslopnemo-12b", + "anthropic/claude-3.5-haiku:beta", + "anthropic/claude-3.5-haiku", + "anthropic/claude-3.5-haiku-20241022:beta", + "anthropic/claude-3.5-haiku-20241022", + "neversleep/llama-3.1-lumimaid-70b", + "anthracite-org/magnum-v4-72b", + "anthropic/claude-3.5-sonnet:beta", + "anthropic/claude-3.5-sonnet", + "x-ai/grok-beta", + "mistralai/ministral-8b", + "mistralai/ministral-3b", + "qwen/qwen-2.5-7b-instruct:free", + "qwen/qwen-2.5-7b-instruct", + "nvidia/llama-3.1-nemotron-70b-instruct", + "inflection/inflection-3-productivity", + "inflection/inflection-3-pi", + "google/gemini-flash-1.5-8b", + "thedrummer/rocinante-12b", + "anthracite-org/magnum-v2-72b", + "liquid/lfm-40b", + "meta-llama/llama-3.2-3b-instruct:free", + "meta-llama/llama-3.2-3b-instruct", + "meta-llama/llama-3.2-1b-instruct:free", + "meta-llama/llama-3.2-1b-instruct", + "meta-llama/llama-3.2-90b-vision-instruct", + "meta-llama/llama-3.2-11b-vision-instruct:free", + "meta-llama/llama-3.2-11b-vision-instruct", + "qwen/qwen-2.5-72b-instruct:free", + "qwen/qwen-2.5-72b-instruct", + "qwen/qwen-2.5-vl-72b-instruct", + "neversleep/llama-3.1-lumimaid-8b", + "openai/o1-preview", + "openai/o1-preview-2024-09-12", + "openai/o1-mini", + "openai/o1-mini-2024-09-12", + "mistralai/pixtral-12b", + "cohere/command-r-plus-08-2024", + "cohere/command-r-08-2024", + "qwen/qwen-2.5-vl-7b-instruct:free", + 
"qwen/qwen-2.5-vl-7b-instruct", + "sao10k/l3.1-euryale-70b", + "google/gemini-flash-1.5-8b-exp", + "ai21/jamba-1-5-mini", + "ai21/jamba-1-5-large", + "microsoft/phi-3.5-mini-128k-instruct", + "nousresearch/hermes-3-llama-3.1-70b", + "nousresearch/hermes-3-llama-3.1-405b", + "openai/chatgpt-4o-latest", + "sao10k/l3-lunaris-8b", + "aetherwiing/mn-starcannon-12b", + "openai/gpt-4o-2024-08-06", + "meta-llama/llama-3.1-405b:free", + "meta-llama/llama-3.1-405b", + "nothingiisreal/mn-celeste-12b", + "perplexity/llama-3.1-sonar-small-128k-online", + "perplexity/llama-3.1-sonar-large-128k-online", + "meta-llama/llama-3.1-8b-instruct:free", + "meta-llama/llama-3.1-8b-instruct", + "meta-llama/llama-3.1-405b-instruct", + "meta-llama/llama-3.1-70b-instruct", + "mistralai/codestral-mamba", + "mistralai/mistral-nemo:free", + "mistralai/mistral-nemo", + "openai/gpt-4o-mini", + "openai/gpt-4o-mini-2024-07-18", + "google/gemma-2-27b-it", + "alpindale/magnum-72b", + "google/gemma-2-9b-it:free", + "google/gemma-2-9b-it", + "01-ai/yi-large", + "ai21/jamba-instruct", + "anthropic/claude-3.5-sonnet-20240620:beta", + "anthropic/claude-3.5-sonnet-20240620", + "sao10k/l3-euryale-70b", + "cognitivecomputations/dolphin-mixtral-8x22b", + "qwen/qwen-2-72b-instruct", + "mistralai/mistral-7b-instruct:free", + "mistralai/mistral-7b-instruct", + "nousresearch/hermes-2-pro-llama-3-8b", + "mistralai/mistral-7b-instruct-v0.3", + "microsoft/phi-3-mini-128k-instruct", + "microsoft/phi-3-medium-128k-instruct", + "neversleep/llama-3-lumimaid-70b", + "deepseek/deepseek-coder", + "google/gemini-flash-1.5", + "openai/gpt-4o", + "openai/gpt-4o:extended", + "meta-llama/llama-guard-2-8b", + "openai/gpt-4o-2024-05-13", + "allenai/olmo-7b-instruct", + "neversleep/llama-3-lumimaid-8b:extended", + "neversleep/llama-3-lumimaid-8b", + "sao10k/fimbulvetr-11b-v2", + "meta-llama/llama-3-8b-instruct", + "meta-llama/llama-3-70b-instruct", + "mistralai/mixtral-8x22b-instruct", + "microsoft/wizardlm-2-8x22b", + "google/gemini-pro-1.5", + "openai/gpt-4-turbo", + "cohere/command-r-plus", + "cohere/command-r-plus-04-2024", + "sophosympatheia/midnight-rose-70b", + "cohere/command", + "cohere/command-r", + "anthropic/claude-3-haiku:beta", + "anthropic/claude-3-haiku", + "anthropic/claude-3-opus:beta", + "anthropic/claude-3-opus", + "anthropic/claude-3-sonnet:beta", + "anthropic/claude-3-sonnet", + "cohere/command-r-03-2024", + "mistralai/mistral-large", + "openai/gpt-3.5-turbo-0613", + "openai/gpt-4-turbo-preview", + "nousresearch/nous-hermes-2-mixtral-8x7b-dpo", + "mistralai/mistral-medium", + "mistralai/mistral-small", + "mistralai/mistral-tiny", + "mistralai/mistral-7b-instruct-v0.2", + "google/gemini-pro-vision", + "mistralai/mixtral-8x7b-instruct", + "neversleep/noromaid-20b", + "anthropic/claude-2.1:beta", + "anthropic/claude-2.1", + "anthropic/claude-2:beta", + "anthropic/claude-2", + "undi95/toppy-m-7b", + "alpindale/goliath-120b", + "openrouter/auto", + "openai/gpt-3.5-turbo-1106", + "openai/gpt-4-1106-preview", + "google/palm-2-chat-bison-32k", + "google/palm-2-codechat-bison-32k", + "jondurbin/airoboros-l2-70b", + "openai/gpt-3.5-turbo-instruct", + "mistralai/mistral-7b-instruct-v0.1", + "pygmalionai/mythalion-13b", + "openai/gpt-3.5-turbo-16k", + "openai/gpt-4-32k", + "openai/gpt-4-32k-0314", + "mancer/weaver", + "huggingfaceh4/zephyr-7b-beta:free", + "anthropic/claude-2.0:beta", + "anthropic/claude-2.0", + "undi95/remm-slerp-l2-13b", + "google/palm-2-chat-bison", + "google/palm-2-codechat-bison", + "gryphe/mythomax-l2-13b", + 
"meta-llama/llama-2-70b-chat", + "openai/gpt-3.5-turbo", + "openai/gpt-3.5-turbo-0125", + "openai/gpt-4", + "openai/gpt-4-0314", +]; + let seq = 1000; // 内置的模型序号生成器从1000开始 export const DEFAULT_MODELS = [ ...openaiModels.map((name) => ({ @@ -827,6 +1165,17 @@ export const DEFAULT_MODELS = [ sorted: 14, }, })), + ...openrouterModels.map((name) => ({ + name, + available: true, + sorted: seq++, + provider: { + id: "openrouter", + providerName: "OpenRouter", + providerType: "openrouter", + sorted: 14, + }, + })), ] as const; export const CHAT_PAGE_SIZE = 15; diff --git a/app/locales/cn.ts b/app/locales/cn.ts index 81b609cde..a1f2377e0 100644 --- a/app/locales/cn.ts +++ b/app/locales/cn.ts @@ -507,6 +507,17 @@ const cn = { SubTitle: "样例:", }, }, + OpenRouter: { + ApiKey: { + Title: "接口密钥", + SubTitle: "使用自定义 OpenRouter API Key", + Placeholder: "OpenRouter API Key", + }, + Endpoint: { + Title: "接口地址", + SubTitle: "样例:", + }, + }, Stability: { ApiKey: { Title: "接口密钥", diff --git a/app/locales/da.ts b/app/locales/da.ts index 7090b062b..9f25a277d 100644 --- a/app/locales/da.ts +++ b/app/locales/da.ts @@ -467,6 +467,17 @@ const da: PartialLocaleType = { SubTitle: "F.eks.: ", }, }, + OpenRouter: { + ApiKey: { + Title: "OpenRouter-nøgle", + SubTitle: "Din egen OpenRouter-nøgle", + Placeholder: "OpenRouter API Key", + }, + Endpoint: { + Title: "Adresse", + SubTitle: "F.eks.: ", + }, + }, Stability: { ApiKey: { Title: "Stability-nøgle", diff --git a/app/locales/en.ts b/app/locales/en.ts index 8fecf8bf7..7b7475420 100644 --- a/app/locales/en.ts +++ b/app/locales/en.ts @@ -491,6 +491,17 @@ const en: LocaleType = { SubTitle: "Example: ", }, }, + OpenRouter: { + ApiKey: { + Title: "OpenRouter API Key", + SubTitle: "Use a custom OpenRouter API Key", + Placeholder: "OpenRouter API Key", + }, + Endpoint: { + Title: "Endpoint Address", + SubTitle: "Example: ", + }, + }, Stability: { ApiKey: { Title: "Stability API Key", diff --git a/app/store/access.ts b/app/store/access.ts index 7025a1814..cf4eac022 100644 --- a/app/store/access.ts +++ b/app/store/access.ts @@ -17,6 +17,7 @@ import { XAI_BASE_URL, CHATGLM_BASE_URL, SILICONFLOW_BASE_URL, + OPENROUTER_BASE_URL, } from "../constant"; import { getHeaders } from "../client/api"; import { getClientConfig } from "../config/client"; @@ -59,6 +60,8 @@ const DEFAULT_SILICONFLOW_URL = isApp ? SILICONFLOW_BASE_URL : ApiPath.SiliconFlow; +const DEFAULT_OPENROUTER_URL = isApp ? OPENROUTER_BASE_URL : ApiPath.OpenRouter; + const DEFAULT_ACCESS_STATE = { accessCode: "", useCustomConfig: false, @@ -132,6 +135,10 @@ const DEFAULT_ACCESS_STATE = { siliconflowUrl: DEFAULT_SILICONFLOW_URL, siliconflowApiKey: "", + // openrouter + openrouterUrl: DEFAULT_OPENROUTER_URL, + openrouterApiKey: "", + // server config needCode: true, hideUserApiKey: false, @@ -219,6 +226,10 @@ export const useAccessStore = createPersistStore( return ensure(get(), ["siliconflowApiKey"]); }, + isValidOpenRouter() { + return ensure(get(), ["openrouterApiKey"]); + }, + isAuthorized() { this.fetch(); @@ -238,6 +249,7 @@ export const useAccessStore = createPersistStore( this.isValidXAI() || this.isValidChatGLM() || this.isValidSiliconFlow() || + this.isValidOpenRouter() || !this.enabledAccessControl() || (this.enabledAccessControl() && ensure(get(), ["accessCode"])) );