Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git (synced 2025-05-19 12:10:17 +09:00)
feat: Remove OpenRouter model list & improve icon detection

- Removed the (very long) model list from OpenRouter.
- Added support for icon detection for OpenRouter models.
This commit is contained in:
parent 47e9c2d3b7
commit e6278f7f07
@@ -57,18 +57,32 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
       modelName.startsWith("dall-e") ||
       modelName.startsWith("dalle") ||
       modelName.startsWith("o1") ||
-      modelName.startsWith("o3")
+      modelName.startsWith("o3") ||
+      modelName.startsWith("openai/")
     ) {
       LlmIcon = BotIconOpenAI;
-    } else if (modelName.startsWith("gemini")) {
+    } else if (
+      modelName.startsWith("gemini") ||
+      modelName.startsWith("google/gemini")
+    ) {
       LlmIcon = BotIconGemini;
-    } else if (modelName.startsWith("gemma")) {
+    } else if (
+      modelName.startsWith("gemma") ||
+      modelName.startsWith("google/gemma")
+    ) {
       LlmIcon = BotIconGemma;
-    } else if (modelName.startsWith("claude")) {
+    } else if (
+      modelName.startsWith("claude") ||
+      modelName.startsWith("anthropic/claude")
+    ) {
       LlmIcon = BotIconClaude;
     } else if (modelName.includes("llama")) {
       LlmIcon = BotIconMeta;
-    } else if (modelName.startsWith("mixtral") || modelName.startsWith("codestral")) {
+    } else if (
+      modelName.startsWith("mixtral") ||
+      modelName.startsWith("codestral") ||
+      modelName.startsWith("mistralai/")
+    ) {
       LlmIcon = BotIconMistral;
     } else if (modelName.includes("deepseek")) {
       LlmIcon = BotIconDeepseek;
@@ -78,7 +92,10 @@ export function Avatar(props: { model?: ModelType; avatar?: string }) {
       LlmIcon = BotIconQwen;
     } else if (modelName.startsWith("ernie")) {
       LlmIcon = BotIconWenxin;
-    } else if (modelName.startsWith("grok")) {
+    } else if (
+      modelName.startsWith("grok") ||
+      modelName.startsWith("x-ai/grok")
+    ) {
       LlmIcon = BotIconGrok;
     } else if (modelName.startsWith("hunyuan")) {
       LlmIcon = BotIconHunyuan;
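Below is a minimal standalone sketch of the prefix matching this diff adds: OpenRouter exposes models as "vendor/model" IDs, so the icon detection now also checks vendor-prefixed forms such as "openai/", "google/gemini", "anthropic/claude", "mistralai/", and "x-ai/grok". The function name, string return values, and the reduced set of branches are illustrative only; the real Avatar component assigns icon components (BotIconOpenAI, BotIconGemini, ...) rather than strings.

// Sketch: prefix-based icon-family detection for OpenRouter-style model IDs.
function resolveIconFamily(model: string): string {
  const modelName = model.toLowerCase();
  if (modelName.startsWith("gpt") || modelName.startsWith("openai/")) {
    return "openai";
  } else if (
    modelName.startsWith("gemini") ||
    modelName.startsWith("google/gemini")
  ) {
    return "gemini";
  } else if (
    modelName.startsWith("claude") ||
    modelName.startsWith("anthropic/claude")
  ) {
    return "claude";
  } else if (
    modelName.startsWith("grok") ||
    modelName.startsWith("x-ai/grok")
  ) {
    return "grok";
  }
  return "default";
}

// OpenRouter "vendor/model" IDs now resolve to the same icon family as the
// provider-native names:
console.log(resolveIconFamily("openai/gpt-4.1")); // "openai"
console.log(resolveIconFamily("anthropic/claude-3.7-sonnet")); // "claude"
console.log(resolveIconFamily("x-ai/grok-3-beta")); // "grok"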
app/constant.ts (327 changed lines)
@@ -682,333 +682,10 @@ const siliconflowModels = [
   "Pro/deepseek-ai/DeepSeek-V3",
 ];

-// Use this to generate newest model list -> https://gist.github.com/hyc1230/d4b271d161ffcda485f1fa1a27e08096
-// Current list is generated on 2025/05/03
+// Use this to generate a full model list -> https://gist.github.com/hyc1230/d4b271d161ffcda485f1fa1a27e08096
 const openrouterModels = [
-  "microsoft/phi-4-reasoning-plus:free",
-  "microsoft/phi-4-reasoning-plus",
-  "microsoft/phi-4-reasoning:free",
-  "qwen/qwen3-0.6b-04-28:free",
-  "inception/mercury-coder-small-beta",
-  "qwen/qwen3-1.7b:free",
-  "qwen/qwen3-4b:free",
-  "opengvlab/internvl3-14b:free",
-  "opengvlab/internvl3-2b:free",
-  "deepseek/deepseek-prover-v2:free",
-  "deepseek/deepseek-prover-v2",
-  "meta-llama/llama-guard-4-12b",
-  "qwen/qwen3-30b-a3b:free",
-  "qwen/qwen3-30b-a3b",
-  "qwen/qwen3-8b:free",
-  "qwen/qwen3-8b",
-  "qwen/qwen3-14b:free",
-  "qwen/qwen3-14b",
-  "qwen/qwen3-32b:free",
-  "qwen/qwen3-32b",
-  "qwen/qwen3-235b-a22b:free",
-  "qwen/qwen3-235b-a22b",
-  "tngtech/deepseek-r1t-chimera:free",
-  "thudm/glm-z1-rumination-32b",
-  "thudm/glm-z1-9b:free",
-  "thudm/glm-4-9b:free",
-  "microsoft/mai-ds-r1:free",
-  "google/gemini-2.5-pro-preview-03-25",
-  "thudm/glm-z1-32b:free",
-  "thudm/glm-z1-32b",
-  "thudm/glm-4-32b:free",
-  "thudm/glm-4-32b",
-  "google/gemini-2.5-flash-preview",
-  "google/gemini-2.5-flash-preview:thinking",
-  "openai/o4-mini-high",
-  "openai/o3",
-  "openai/o4-mini",
-  "shisa-ai/shisa-v2-llama3.3-70b:free",
-  "qwen/qwen2.5-coder-7b-instruct",
-  "openai/gpt-4.1",
-  "openai/gpt-4.1-mini",
-  "openai/gpt-4.1-nano",
-  "eleutherai/llemma_7b",
-  "alfredpros/codellama-7b-instruct-solidity",
-  "arliai/qwq-32b-arliai-rpr-v1:free",
-  "agentica-org/deepcoder-14b-preview:free",
-  "moonshotai/kimi-vl-a3b-thinking:free",
-  "x-ai/grok-3-mini-beta",
-  "x-ai/grok-3-beta",
-  "nvidia/llama-3.3-nemotron-super-49b-v1:free",
-  "nvidia/llama-3.3-nemotron-super-49b-v1",
-  "nvidia/llama-3.1-nemotron-ultra-253b-v1:free",
-  "meta-llama/llama-4-maverick:free",
-  "meta-llama/llama-4-maverick",
-  "meta-llama/llama-4-scout:free",
-  "meta-llama/llama-4-scout",
-  "all-hands/openhands-lm-32b-v0.1",
-  "mistral/ministral-8b",
-  "deepseek/deepseek-v3-base:free",
-  "scb10x/llama3.1-typhoon2-8b-instruct",
-  "scb10x/llama3.1-typhoon2-70b-instruct",
-  "allenai/molmo-7b-d:free",
-  "bytedance-research/ui-tars-72b:free",
-  "qwen/qwen2.5-vl-3b-instruct:free",
-  "google/gemini-2.5-pro-exp-03-25",
-  "qwen/qwen2.5-vl-32b-instruct:free",
-  "qwen/qwen2.5-vl-32b-instruct",
-  "deepseek/deepseek-chat-v3-0324:free",
-  "deepseek/deepseek-chat-v3-0324",
-  "featherless/qwerky-72b:free",
-  "openai/o1-pro",
-  "mistralai/mistral-small-3.1-24b-instruct:free",
-  "mistralai/mistral-small-3.1-24b-instruct",
-  "open-r1/olympiccoder-32b:free",
-  "steelskull/l3.3-electra-r1-70b",
-  "google/gemma-3-1b-it:free",
-  "google/gemma-3-4b-it:free",
-  "google/gemma-3-4b-it",
-  "ai21/jamba-1.6-large",
-  "ai21/jamba-1.6-mini",
-  "google/gemma-3-12b-it:free",
-  "google/gemma-3-12b-it",
-  "cohere/command-a",
-  "openai/gpt-4o-mini-search-preview",
-  "openai/gpt-4o-search-preview",
-  "rekaai/reka-flash-3:free",
-  "google/gemma-3-27b-it:free",
-  "google/gemma-3-27b-it",
-  "thedrummer/anubis-pro-105b-v1",
-  "latitudegames/wayfarer-large-70b-llama-3.3",
-  "thedrummer/skyfall-36b-v2",
-  "microsoft/phi-4-multimodal-instruct",
-  "perplexity/sonar-reasoning-pro",
-  "perplexity/sonar-pro",
-  "perplexity/sonar-deep-research",
-  "deepseek/deepseek-r1-zero:free",
-  "qwen/qwq-32b:free",
-  "qwen/qwq-32b",
-  "moonshotai/moonlight-16b-a3b-instruct:free",
-  "nousresearch/deephermes-3-llama-3-8b-preview:free",
-  "openai/gpt-4.5-preview",
-  "google/gemini-2.0-flash-lite-001",
-  "anthropic/claude-3.7-sonnet",
-  "anthropic/claude-3.7-sonnet:thinking",
-  "anthropic/claude-3.7-sonnet:beta",
-  "perplexity/r1-1776",
-  "mistralai/mistral-saba",
-  "cognitivecomputations/dolphin3.0-r1-mistral-24b:free",
-  "cognitivecomputations/dolphin3.0-mistral-24b:free",
-  "meta-llama/llama-guard-3-8b",
-  "openai/o3-mini-high",
-  "deepseek/deepseek-r1-distill-llama-8b",
-  "google/gemini-2.0-flash-001",
-  "qwen/qwen-vl-plus",
-  "aion-labs/aion-1.0",
-  "aion-labs/aion-1.0-mini",
-  "aion-labs/aion-rp-llama-3.1-8b",
-  "qwen/qwen-vl-max",
-  "qwen/qwen-turbo",
-  "qwen/qwen2.5-vl-72b-instruct:free",
-  "qwen/qwen2.5-vl-72b-instruct",
-  "qwen/qwen-plus",
-  "qwen/qwen-max",
-  "openai/o3-mini",
-  "deepseek/deepseek-r1-distill-qwen-1.5b",
-  "mistralai/mistral-small-24b-instruct-2501:free",
-  "mistralai/mistral-small-24b-instruct-2501",
-  "deepseek/deepseek-r1-distill-qwen-32b:free",
-  "deepseek/deepseek-r1-distill-qwen-32b",
-  "deepseek/deepseek-r1-distill-qwen-14b:free",
-  "deepseek/deepseek-r1-distill-qwen-14b",
-  "perplexity/sonar-reasoning",
-  "perplexity/sonar",
-  "liquid/lfm-7b",
-  "liquid/lfm-3b",
-  "deepseek/deepseek-r1-distill-llama-70b:free",
-  "deepseek/deepseek-r1-distill-llama-70b",
-  "deepseek/deepseek-r1:free",
-  "deepseek/deepseek-r1",
-  "minimax/minimax-01",
-  "mistralai/codestral-2501",
-  "microsoft/phi-4",
-  "deepseek/deepseek-chat:free",
-  "deepseek/deepseek-chat",
-  "sao10k/l3.3-euryale-70b",
-  "openai/o1",
-  "eva-unit-01/eva-llama-3.33-70b",
-  "x-ai/grok-2-vision-1212",
-  "x-ai/grok-2-1212",
-  "cohere/command-r7b-12-2024",
-  "google/gemini-2.0-flash-exp:free",
-  "meta-llama/llama-3.3-70b-instruct:free",
-  "meta-llama/llama-3.3-70b-instruct",
-  "amazon/nova-lite-v1",
-  "amazon/nova-micro-v1",
-  "amazon/nova-pro-v1",
-  "qwen/qwq-32b-preview:free",
-  "qwen/qwq-32b-preview",
-  "google/learnlm-1.5-pro-experimental:free",
-  "eva-unit-01/eva-qwen-2.5-72b",
-  "openai/gpt-4o-2024-11-20",
-  "mistralai/mistral-large-2411",
-  "mistralai/mistral-large-2407",
-  "mistralai/pixtral-large-2411",
-  "x-ai/grok-vision-beta",
-  "infermatic/mn-inferor-12b",
-  "qwen/qwen-2.5-coder-32b-instruct:free",
-  "qwen/qwen-2.5-coder-32b-instruct",
-  "raifle/sorcererlm-8x22b",
-  "eva-unit-01/eva-qwen-2.5-32b",
-  "thedrummer/unslopnemo-12b",
-  "anthropic/claude-3.5-haiku:beta",
-  "anthropic/claude-3.5-haiku",
-  "anthropic/claude-3.5-haiku-20241022:beta",
-  "anthropic/claude-3.5-haiku-20241022",
-  "neversleep/llama-3.1-lumimaid-70b",
-  "anthracite-org/magnum-v4-72b",
-  "anthropic/claude-3.5-sonnet:beta",
-  "anthropic/claude-3.5-sonnet",
-  "x-ai/grok-beta",
-  "mistralai/ministral-8b",
-  "mistralai/ministral-3b",
-  "qwen/qwen-2.5-7b-instruct:free",
-  "qwen/qwen-2.5-7b-instruct",
-  "nvidia/llama-3.1-nemotron-70b-instruct",
-  "inflection/inflection-3-productivity",
-  "inflection/inflection-3-pi",
-  "google/gemini-flash-1.5-8b",
-  "thedrummer/rocinante-12b",
-  "anthracite-org/magnum-v2-72b",
-  "liquid/lfm-40b",
-  "meta-llama/llama-3.2-3b-instruct:free",
-  "meta-llama/llama-3.2-3b-instruct",
-  "meta-llama/llama-3.2-1b-instruct:free",
-  "meta-llama/llama-3.2-1b-instruct",
-  "meta-llama/llama-3.2-90b-vision-instruct",
-  "meta-llama/llama-3.2-11b-vision-instruct:free",
-  "meta-llama/llama-3.2-11b-vision-instruct",
-  "qwen/qwen-2.5-72b-instruct:free",
-  "qwen/qwen-2.5-72b-instruct",
-  "qwen/qwen-2.5-vl-72b-instruct",
-  "neversleep/llama-3.1-lumimaid-8b",
-  "openai/o1-preview",
-  "openai/o1-preview-2024-09-12",
-  "openai/o1-mini",
-  "openai/o1-mini-2024-09-12",
-  "mistralai/pixtral-12b",
-  "cohere/command-r-plus-08-2024",
-  "cohere/command-r-08-2024",
-  "qwen/qwen-2.5-vl-7b-instruct:free",
-  "qwen/qwen-2.5-vl-7b-instruct",
-  "sao10k/l3.1-euryale-70b",
-  "google/gemini-flash-1.5-8b-exp",
-  "ai21/jamba-1-5-mini",
-  "ai21/jamba-1-5-large",
-  "microsoft/phi-3.5-mini-128k-instruct",
-  "nousresearch/hermes-3-llama-3.1-70b",
-  "nousresearch/hermes-3-llama-3.1-405b",
-  "openai/chatgpt-4o-latest",
-  "sao10k/l3-lunaris-8b",
-  "aetherwiing/mn-starcannon-12b",
-  "openai/gpt-4o-2024-08-06",
-  "meta-llama/llama-3.1-405b:free",
-  "meta-llama/llama-3.1-405b",
-  "nothingiisreal/mn-celeste-12b",
-  "perplexity/llama-3.1-sonar-small-128k-online",
-  "perplexity/llama-3.1-sonar-large-128k-online",
-  "meta-llama/llama-3.1-8b-instruct:free",
-  "meta-llama/llama-3.1-8b-instruct",
-  "meta-llama/llama-3.1-405b-instruct",
-  "meta-llama/llama-3.1-70b-instruct",
-  "mistralai/codestral-mamba",
-  "mistralai/mistral-nemo:free",
-  "mistralai/mistral-nemo",
-  "openai/gpt-4o-mini",
-  "openai/gpt-4o-mini-2024-07-18",
-  "google/gemma-2-27b-it",
-  "alpindale/magnum-72b",
-  "google/gemma-2-9b-it:free",
-  "google/gemma-2-9b-it",
-  "01-ai/yi-large",
-  "ai21/jamba-instruct",
-  "anthropic/claude-3.5-sonnet-20240620:beta",
-  "anthropic/claude-3.5-sonnet-20240620",
-  "sao10k/l3-euryale-70b",
-  "cognitivecomputations/dolphin-mixtral-8x22b",
-  "qwen/qwen-2-72b-instruct",
-  "mistralai/mistral-7b-instruct:free",
-  "mistralai/mistral-7b-instruct",
-  "nousresearch/hermes-2-pro-llama-3-8b",
-  "mistralai/mistral-7b-instruct-v0.3",
-  "microsoft/phi-3-mini-128k-instruct",
-  "microsoft/phi-3-medium-128k-instruct",
-  "neversleep/llama-3-lumimaid-70b",
-  "deepseek/deepseek-coder",
-  "google/gemini-flash-1.5",
-  "openai/gpt-4o",
-  "openai/gpt-4o:extended",
-  "meta-llama/llama-guard-2-8b",
-  "openai/gpt-4o-2024-05-13",
-  "allenai/olmo-7b-instruct",
-  "neversleep/llama-3-lumimaid-8b:extended",
-  "neversleep/llama-3-lumimaid-8b",
-  "sao10k/fimbulvetr-11b-v2",
-  "meta-llama/llama-3-8b-instruct",
-  "meta-llama/llama-3-70b-instruct",
-  "mistralai/mixtral-8x22b-instruct",
-  "microsoft/wizardlm-2-8x22b",
-  "google/gemini-pro-1.5",
-  "openai/gpt-4-turbo",
-  "cohere/command-r-plus",
-  "cohere/command-r-plus-04-2024",
-  "sophosympatheia/midnight-rose-70b",
-  "cohere/command",
-  "cohere/command-r",
-  "anthropic/claude-3-haiku:beta",
-  "anthropic/claude-3-haiku",
-  "anthropic/claude-3-opus:beta",
-  "anthropic/claude-3-opus",
-  "anthropic/claude-3-sonnet:beta",
-  "anthropic/claude-3-sonnet",
-  "cohere/command-r-03-2024",
-  "mistralai/mistral-large",
-  "openai/gpt-3.5-turbo-0613",
-  "openai/gpt-4-turbo-preview",
-  "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
-  "mistralai/mistral-medium",
-  "mistralai/mistral-small",
-  "mistralai/mistral-tiny",
-  "mistralai/mistral-7b-instruct-v0.2",
-  "google/gemini-pro-vision",
-  "mistralai/mixtral-8x7b-instruct",
-  "neversleep/noromaid-20b",
-  "anthropic/claude-2.1:beta",
-  "anthropic/claude-2.1",
-  "anthropic/claude-2:beta",
-  "anthropic/claude-2",
-  "undi95/toppy-m-7b",
-  "alpindale/goliath-120b",
+  // Requires user to customize models
   "openrouter/auto",
-  "openai/gpt-3.5-turbo-1106",
-  "openai/gpt-4-1106-preview",
-  "google/palm-2-chat-bison-32k",
-  "google/palm-2-codechat-bison-32k",
-  "jondurbin/airoboros-l2-70b",
-  "openai/gpt-3.5-turbo-instruct",
-  "mistralai/mistral-7b-instruct-v0.1",
-  "pygmalionai/mythalion-13b",
-  "openai/gpt-3.5-turbo-16k",
-  "openai/gpt-4-32k",
-  "openai/gpt-4-32k-0314",
-  "mancer/weaver",
-  "huggingfaceh4/zephyr-7b-beta:free",
-  "anthropic/claude-2.0:beta",
-  "anthropic/claude-2.0",
-  "undi95/remm-slerp-l2-13b",
-  "google/palm-2-chat-bison",
-  "google/palm-2-codechat-bison",
-  "gryphe/mythomax-l2-13b",
-  "meta-llama/llama-2-70b-chat",
-  "openai/gpt-3.5-turbo",
-  "openai/gpt-3.5-turbo-0125",
-  "openai/gpt-4",
-  "openai/gpt-4-0314",
 ];

 let seq = 1000; // 内置的模型序号生成器从1000开始
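With the generated list gone, the checked-in array keeps only "openrouter/auto", and users are expected to add the OpenRouter models they actually use themselves (for example via the app's custom-model settings). The gist linked in the comment can regenerate a full list; purely as an illustration (this is not the gist's code, and the output formatting is an assumption), a list in the same shape can be produced from OpenRouter's public model index, GET https://openrouter.ai/api/v1/models, which returns { data: [{ id: string, ... }] }:

// Illustrative sketch: print a TypeScript array of all current OpenRouter
// model IDs, in the same shape as the removed openrouterModels constant.
async function printOpenRouterModelList(): Promise<void> {
  const res = await fetch("https://openrouter.ai/api/v1/models");
  if (!res.ok) {
    throw new Error(`OpenRouter API request failed: ${res.status}`);
  }
  const body = (await res.json()) as { data: Array<{ id: string }> };
  const lines = body.data.map((m) => `  "${m.id}",`);
  console.log(`const openrouterModels = [\n${lines.join("\n")}\n];`);
}

printOpenRouterModelList().catch(console.error);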