add chatglm turbo model

Zhang Minghan 2023-11-10 20:28:46 +08:00
parent 9dc85bf860
commit daded5d549
7 changed files with 39 additions and 26 deletions

View File

@@ -18,6 +18,8 @@ func (c *ChatInstance) GetChatEndpoint(model string) string {
 func (c *ChatInstance) GetModel(model string) string {
     switch model {
+    case globals.ZhiPuChatGLMTurbo:
+        return ChatGLMTurbo
     case globals.ZhiPuChatGLMPro:
         return ChatGLMPro
     case globals.ZhiPuChatGLMStd:
         return ChatGLMStd
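The adapter hunk above maps the new public id to ZhipuAI's upstream model name before the request is built. Below is a minimal standalone sketch of that mapping in Go; the ChatInstance/GetChatEndpoint plumbing, the Lite branch, and the default fallback are not shown in the hunk, so the fallback here is an assumption for illustration only.

package main

import "fmt"

// Public ids (globals) and upstream names (zhipuai adapter constants) as added in this commit.
const (
    ZhiPuChatGLMTurbo = "zhipu-chatglm-turbo"
    ZhiPuChatGLMPro   = "zhipu-chatglm-pro"
    ZhiPuChatGLMStd   = "zhipu-chatglm-std"

    ChatGLMTurbo = "chatglm_turbo"
    ChatGLMPro   = "chatglm_pro"
    ChatGLMStd   = "chatglm_std"
)

// getUpstreamModel mirrors the switch added to GetModel: public id -> upstream name.
// The default branch is a placeholder assumption, not the repo's actual behaviour.
func getUpstreamModel(model string) string {
    switch model {
    case ZhiPuChatGLMTurbo:
        return ChatGLMTurbo
    case ZhiPuChatGLMPro:
        return ChatGLMPro
    case ZhiPuChatGLMStd:
        return ChatGLMStd
    default:
        return ChatGLMTurbo // assumption: fall back to the turbo model
    }
}

func main() {
    fmt.Println(getUpstreamModel("zhipu-chatglm-turbo")) // chatglm_turbo
}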

View File

@@ -3,9 +3,10 @@ package zhipuai
 import "chat/globals"
 
 const (
-    ChatGLMPro  = "chatglm_pro"
-    ChatGLMStd  = "chatglm_std"
-    ChatGLMLite = "chatglm_lite"
+    ChatGLMTurbo = "chatglm_turbo"
+    ChatGLMPro   = "chatglm_pro"
+    ChatGLMStd   = "chatglm_std"
+    ChatGLMLite  = "chatglm_lite"
 )
 
 type Payload struct {

View File

@@ -38,6 +38,7 @@ export const modelColorMapper: Record<string, string> = {
   "bing-balance": "#2673e7",
   "bing-precise": "#2673e7",
+  "zhipu-chatglm-turbo": "#008272",
   "zhipu-chatglm-pro": "#008272",
   "zhipu-chatglm-std": "#008272",
   "zhipu-chatglm-lite": "#008272",

View File

@@ -8,7 +8,7 @@ import {
 } from "@/utils/env.ts";
 import { getMemory } from "@/utils/memory.ts";
 
-export const version = "3.6.16";
+export const version = "3.6.17";
 export const dev: boolean = getDev();
 export const deploy: boolean = true;
 export let rest_api: string = getRestApi(deploy);
@@ -23,13 +23,10 @@ export const supportModels: Model[] = [
   { id: "gpt-3.5-turbo-1106", name: "GPT-3.5 1106", free: true, auth: false },
   { id: "gpt-4-0613", name: "GPT-4", free: false, auth: true },
   { id: "gpt-4-1106-preview", name: "GPT-4 Turbo", free: false, auth: true },
-  { id: "gpt-4-v", name: "GPT-4 Vision", free: false, auth: true },
-  { id: "gpt-4-dalle", name: "GPT-4 DALLE", free: false, auth: true },
 
   // anthropic models
-  { id: "claude-2", name: "Claude-2", free: true, auth: true },
-  { id: "claude-1-100k", name: "Claude-1-100k", free: false, auth: true },
-  { id: "claude-2-100k", name: "Claude-2-100k", free: false, auth: true },
+  { id: "claude-1-100k", name: "Claude-2", free: true, auth: true },
+  { id: "claude-2", name: "Claude-2-100k", free: false, auth: true },
 
   // spark desk
   { id: "spark-desk-v3", name: "讯飞星火 V3", free: true, auth: true },
@@ -42,18 +39,21 @@ export const supportModels: Model[] = [
   { id: "qwen-turbo-net", name: "通义千问 Turbo X", free: false, auth: true },
   { id: "qwen-turbo", name: "通义千问 Turbo", free: false, auth: true },
 
-  // google palm2
-  { id: "chat-bison-001", name: "Palm2", free: true, auth: true },
-
   // new bing
   { id: "bing-creative", name: "New Bing", free: true, auth: true },
 
+  // google palm2
+  { id: "chat-bison-001", name: "Palm2", free: true, auth: true },
+
   // zhipu models
-  { id: "zhipu-chatglm-pro", name: "ChatGLM Pro", free: false, auth: true },
-  { id: "zhipu-chatglm-std", name: "ChatGLM Std", free: false, auth: true },
-  { id: "zhipu-chatglm-lite", name: "ChatGLM Lite", free: true, auth: true },
+  { id: "zhipu-chatglm-turbo", name: "ChatGLM Turbo 32k", free: false, auth: true },
+
   { id: "dalle", name: "DALLE2", free: true, auth: true },
 
+  // reverse models
+  { id: "gpt-4-v", name: "GPT-4 Vision", free: false, auth: true },
+  { id: "gpt-4-dalle", name: "GPT-4 DALLE", free: false, auth: true },
+
   // high price models
   { id: "gpt-4-32k-0613", name: "GPT-4-32k", free: false, auth: true },
 ];
@@ -67,9 +67,10 @@ export const largeContextModels = [
   "claude-1-100k",
   "claude-2",
   "claude-2-100k",
+  "zhipu-chatglm-turbo",
 ];
 
-export const studentModels = ["claude-1-100k", "claude-2-100k"];
+export const studentModels = ["claude-1-100k", "claude-2-100k", "claude-2"];
 
 export const planModels = [
   "gpt-4-0613",
@@ -77,6 +78,7 @@ export const planModels = [
   "gpt-4-v",
   "gpt-4-all",
   "gpt-4-dalle",
+  "claude-2",
   "claude-1-100k",
   "claude-2-100k",
 ];

View File

@@ -19,7 +19,7 @@ func CanEnableModel(db *sql.DB, user *User, model string) bool {
         return user != nil // && user.GetQuota(db) >= 1 free now
     case globals.Claude1100k, globals.Claude2100k:
         return user != nil && user.GetQuota(db) >= 1
-    case globals.ZhiPuChatGLMPro, globals.ZhiPuChatGLMStd:
+    case globals.ZhiPuChatGLMTurbo, globals.ZhiPuChatGLMPro, globals.ZhiPuChatGLMStd:
         return user != nil && user.GetQuota(db) >= 1
     case globals.QwenTurbo, globals.QwenPlus, globals.QwenPlusNet, globals.QwenTurboNet:
         return user != nil && user.GetQuota(db) >= 1
@@ -48,7 +48,7 @@ func HandleSubscriptionUsage(db *sql.DB, cache *redis.Client, user *User, model
 func RevertSubscriptionUsage(cache *redis.Client, user *User, model string, plan bool) {
     if globals.IsGPT4NativeModel(model) && plan {
         DecreaseSubscriptionUsage(cache, user, globals.GPT4)
-    } else if globals.IsClaude100KModel(model) && !plan {
+    } else if globals.IsClaude100KModel(model) && plan {
         DecreaseSubscriptionUsage(cache, user, globals.Claude2100k)
     }
 }
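Besides gating the new ChatGLM models on quota, the second hunk flips `!plan` to `plan`, so Claude 100k subscription usage is now only rolled back for users who are actually on a plan. A minimal sketch of that intended behaviour, with the Redis cache, user, and globals helpers replaced by stubs that are assumptions for illustration only:

package main

import "fmt"

// Stubs standing in for the real globals helpers and cache-backed counters.
func isGPT4NativeModel(model string) bool { return model == "gpt-4-0613" }
func isClaude100KModel(model string) bool { return model == "claude-2-100k" }

var usage = map[string]int{"gpt-4": 3, "claude-2-100k": 2}

func decreaseSubscriptionUsage(group string) { usage[group]-- }

// revertSubscriptionUsage mirrors the fixed logic: only plan subscribers get
// their per-model usage counter rolled back when a request fails.
func revertSubscriptionUsage(model string, plan bool) {
    if isGPT4NativeModel(model) && plan {
        decreaseSubscriptionUsage("gpt-4")
    } else if isClaude100KModel(model) && plan { // was `!plan` before this commit
        decreaseSubscriptionUsage("claude-2-100k")
    }
}

func main() {
    revertSubscriptionUsage("claude-2-100k", true)  // counter decremented
    revertSubscriptionUsage("claude-2-100k", false) // no-op for non-plan users
    fmt.Println(usage) // map[claude-2-100k:1 gpt-4:3]
}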

View File

@@ -58,9 +58,9 @@ const (
     GPT432k0613 = "gpt-4-32k-0613"
     Dalle2      = "dalle"
     Claude1     = "claude-1"
-    Claude1100k = "claude-1-100k"
-    Claude2     = "claude-2"
-    Claude2100k = "claude-2-100k"
+    Claude1100k = "claude-1.3"
+    Claude2     = "claude-1-100k"
+    Claude2100k = "claude-2"
     ClaudeSlack = "claude-slack"
     SparkDesk   = "spark-desk-v1.5"
     SparkDeskV2 = "spark-desk-v2"
@@ -69,6 +69,7 @@ const (
     BingCreative      = "bing-creative"
     BingBalanced      = "bing-balanced"
     BingPrecise       = "bing-precise"
+    ZhiPuChatGLMTurbo = "zhipu-chatglm-turbo"
     ZhiPuChatGLMPro   = "zhipu-chatglm-pro"
     ZhiPuChatGLMStd   = "zhipu-chatglm-std"
     ZhiPuChatGLMLite  = "zhipu-chatglm-lite"
@@ -115,6 +116,7 @@ var BingModelArray = []string{
 }
 
 var ZhiPuModelArray = []string{
+    ZhiPuChatGLMTurbo,
     ZhiPuChatGLMPro,
     ZhiPuChatGLMStd,
     ZhiPuChatGLMLite,
@@ -196,6 +198,7 @@ var AllModels = []string{
     BingCreative,
     BingBalanced,
     BingPrecise,
+    ZhiPuChatGLMTurbo,
     ZhiPuChatGLMPro,
     ZhiPuChatGLMStd,
     ZhiPuChatGLMLite,
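Registering the new id in ZhiPuModelArray and AllModels is what lets the rest of the backend recognise it as a known ZhipuAI model. A standalone sketch of that membership check is below; the contains helper is a generic stand-in, not the utility the repo actually uses.

package main

import "fmt"

const ZhiPuChatGLMTurbo = "zhipu-chatglm-turbo"

// Mirrors the array extended in this commit.
var ZhiPuModelArray = []string{
    ZhiPuChatGLMTurbo,
    "zhipu-chatglm-pro",
    "zhipu-chatglm-std",
    "zhipu-chatglm-lite",
}

// contains is an illustrative membership helper.
func contains(list []string, model string) bool {
    for _, item := range list {
        if item == model {
            return true
        }
    }
    return false
}

func main() {
    // Routing code can now dispatch the new public id to the zhipuai adapter.
    fmt.Println(contains(ZhiPuModelArray, "zhipu-chatglm-turbo")) // true
    fmt.Println(contains(ZhiPuModelArray, "chatglm_turbo"))       // false: upstream name, not the public id
}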

View File

@@ -26,7 +26,7 @@ func GetWeightByModel(model string) int {
         globals.BingPrecise, globals.BingCreative, globals.BingBalanced:
         return 3
     case globals.GPT3Turbo0301, globals.GPT3Turbo16k0301,
-        globals.ZhiPuChatGLMLite, globals.ZhiPuChatGLMStd, globals.ZhiPuChatGLMPro:
+        globals.ZhiPuChatGLMTurbo, globals.ZhiPuChatGLMLite, globals.ZhiPuChatGLMStd, globals.ZhiPuChatGLMPro:
         return 4 // every message follows <|start|>{role/name}\n{content}<|end|>\n
     default:
         if strings.Contains(model, globals.GPT3Turbo) {
@@ -77,7 +77,9 @@ func CountInputToken(model string, v []globals.Message) float32 {
     case globals.GPT3Turbo, globals.GPT3Turbo0613, globals.GPT3Turbo0301, globals.GPT3TurboInstruct, globals.GPT3Turbo1106,
         globals.GPT3Turbo16k, globals.GPT3Turbo16k0613, globals.GPT3Turbo16k0301:
         return 0
-    case globals.GPT4, globals.GPT4Vision, globals.GPT4All, globals.GPT4Dalle, globals.GPT40314, globals.GPT40613, globals.GPT41106Preview:
+    case globals.GPT41106Preview:
+        return float32(CountTokenPrice(v, model)) / 1000 * 0.7 * 0.6
+    case globals.GPT4, globals.GPT4Vision, globals.GPT4All, globals.GPT4Dalle, globals.GPT40314, globals.GPT40613:
         return float32(CountTokenPrice(v, model)) / 1000 * 2.1 * 0.6
     case globals.GPT432k, globals.GPT432k0613, globals.GPT432k0314:
         return float32(CountTokenPrice(v, model)) / 1000 * 4.2
@@ -91,7 +93,7 @@ func CountInputToken(model string, v []globals.Message) float32 {
         return float32(CountTokenPrice(v, model)) / 1000 * 0.8 * 0.6
     case globals.ZhiPuChatGLMPro:
         return float32(CountTokenPrice(v, model)) / 1000 * 0.1
-    case globals.ZhiPuChatGLMStd:
+    case globals.ZhiPuChatGLMTurbo, globals.ZhiPuChatGLMStd:
         return float32(CountTokenPrice(v, model)) / 1000 * 0.05
     case globals.QwenTurbo, globals.QwenTurboNet:
         return float32(CountTokenPrice(v, model)) / 1000 * 0.08
@@ -107,7 +109,9 @@ func CountOutputToken(model string, t int) float32 {
     case globals.GPT3Turbo, globals.GPT3Turbo0613, globals.GPT3Turbo0301, globals.GPT3TurboInstruct, globals.GPT3Turbo1106,
         globals.GPT3Turbo16k, globals.GPT3Turbo16k0613, globals.GPT3Turbo16k0301:
         return 0
-    case globals.GPT4, globals.GPT4Vision, globals.GPT4All, globals.GPT4Dalle, globals.GPT40314, globals.GPT40613, globals.GPT41106Preview:
+    case globals.GPT41106Preview:
+        return float32(t*GetWeightByModel(model)) / 1000 * 2.1 * 0.6
+    case globals.GPT4, globals.GPT4Vision, globals.GPT4All, globals.GPT4Dalle, globals.GPT40314, globals.GPT40613:
         return float32(t*GetWeightByModel(model)) / 1000 * 4.3 * 0.6
     case globals.GPT432k, globals.GPT432k0613, globals.GPT432k0314:
         return float32(t*GetWeightByModel(model)) / 1000 * 8.6
@@ -121,7 +125,7 @@ func CountOutputToken(model string, t int) float32 {
         return float32(t*GetWeightByModel(model)) / 1000 * 2.4 * 0.6
     case globals.ZhiPuChatGLMPro:
         return float32(t*GetWeightByModel(model)) / 1000 * 0.1
-    case globals.ZhiPuChatGLMStd:
+    case globals.ZhiPuChatGLMTurbo, globals.ZhiPuChatGLMStd:
         return float32(t*GetWeightByModel(model)) / 1000 * 0.05
     case globals.QwenTurbo, globals.QwenTurboNet:
         return float32(t*GetWeightByModel(model)) / 1000 * 0.08
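With these hunks, ChatGLM Turbo is billed at the ChatGLM Std rate: roughly 0.05 quota per 1K weighted tokens for both input and output, where GetWeightByModel returns 4 for the ZhiPu models. Below is a small self-contained sketch of that arithmetic; CountTokenPrice is approximated by an assumed flat token count per message, so the figures are illustrative only.

package main

import "fmt"

// weightForZhiPu mirrors GetWeightByModel's return value for the ZhiPu models.
const weightForZhiPu = 4

// countInputQuota sketches the ChatGLM Turbo/Std branch of CountInputToken:
// (token price of the prompt) / 1000 * 0.05. The prompt's token price is
// approximated here as messages * tokensPerMessage (an assumption).
func countInputQuota(messages, tokensPerMessage int) float32 {
    tokenPrice := messages * tokensPerMessage
    return float32(tokenPrice) / 1000 * 0.05
}

// countOutputQuota sketches the matching branch of CountOutputToken:
// (completion tokens * model weight) / 1000 * 0.05.
func countOutputQuota(completionTokens int) float32 {
    return float32(completionTokens*weightForZhiPu) / 1000 * 0.05
}

func main() {
    // e.g. a 10-message prompt averaging 200 tokens, and a 500-token reply.
    in := countInputQuota(10, 200)
    out := countOutputQuota(500)
    fmt.Printf("input: %.3f quota, output: %.3f quota, total: %.3f\n", in, out, in+out)
    // input: 0.100 quota, output: 0.100 quota, total: 0.200
}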