add 360gpt model

This commit is contained in:
Zhang Minghan 2023-11-15 17:04:13 +08:00
parent 7b43cb230b
commit e7fae44268
16 changed files with 396 additions and 56 deletions

View File

@ -74,6 +74,7 @@
- [x] LLaMa 2 (70b, 13b, 7b) - [x] LLaMa 2 (70b, 13b, 7b)
- [x] Code LLaMa (34b, 13b, 7b) - [x] Code LLaMa (34b, 13b, 7b)
- [x] Tencent Hunyuan - [x] Tencent Hunyuan
- [x] 360 GPT
- [ ] RWKV - [ ] RWKV
- [ ] Azure OpenAI - [ ] Azure OpenAI
- [ ] Baidu Qianfan - [ ] Baidu Qianfan

View File

@ -9,6 +9,7 @@ import (
"chat/adapter/oneapi" "chat/adapter/oneapi"
"chat/adapter/palm2" "chat/adapter/palm2"
"chat/adapter/slack" "chat/adapter/slack"
"chat/adapter/zhinao"
"chat/adapter/zhipuai" "chat/adapter/zhipuai"
"chat/globals" "chat/globals"
"chat/utils" "chat/utils"
@ -73,6 +74,12 @@ func NewChatRequest(props *ChatProps, hook globals.Hook) error {
Model: props.Model, Model: props.Model,
Messages: props.Message, Messages: props.Message,
}, hook) }, hook)
} else if globals.Is360Model(props.Model) {
return zhinao.NewChatInstanceFromConfig().CreateStreamChatRequest(&zhinao.ChatProps{
Model: props.Model,
Message: props.Message,
Token: utils.Multi(props.Token == 0, 2048, props.Token),
}, hook)
} }
return hook("Sorry, we cannot find the model you are looking for. Please try another model.") return hook("Sorry, we cannot find the model you are looking for. Please try another model.")

View File

@ -1,6 +1,5 @@
package chatgpt package chatgpt
import "C"
import ( import (
"chat/globals" "chat/globals"
"chat/utils" "chat/utils"

View File

@ -125,7 +125,7 @@ func (c *ChatInstance) ProcessLine(instruct bool, buf, data string) (string, err
return c.ProcessLine(instruct, "", buf+item) return c.ProcessLine(instruct, "", buf+item)
} }
if err := processChatErrorResponse(item); err == nil { if err := processChatErrorResponse(item); err == nil || err.Data.Error.Message == "" {
globals.Warn(fmt.Sprintf("chatgpt error: cannot parse response: %s", item)) globals.Warn(fmt.Sprintf("chatgpt error: cannot parse response: %s", item))
return data, errors.New("parser error: cannot parse response") return data, errors.New("parser error: cannot parse response")
} else { } else {

View File

@ -55,11 +55,17 @@ func (c *ChatInstance) CreateStreamChatRequest(props *ChatProps, callback global
slice := strings.TrimSpace(strings.TrimPrefix(data, "data:")) slice := strings.TrimSpace(strings.TrimPrefix(data, "data:"))
if form := utils.UnmarshalForm[ChatResponse](slice); form != nil { if form := utils.UnmarshalForm[ChatResponse](slice); form != nil {
if form.Output.Text == "" && form.Message != "" {
return fmt.Errorf("dashscope error: %s", form.Message)
}
if err := callback(form.Output.Text); err != nil { if err := callback(form.Output.Text); err != nil {
return err return err
} }
return nil
} }
fmt.Println(slice)
globals.Debug(fmt.Sprintf("dashscope error: cannot unmarshal data %s", slice)) globals.Debug(fmt.Sprintf("dashscope error: cannot unmarshal data %s", slice))
return nil return nil

View File

@ -30,4 +30,5 @@ type ChatResponse struct {
InputTokens int `json:"input_tokens"` InputTokens int `json:"input_tokens"`
OutputTokens int `json:"output_tokens"` OutputTokens int `json:"output_tokens"`
} `json:"usage"` } `json:"usage"`
Message string `json:"message"`
} }

View File

@ -1,6 +1,5 @@
package oneapi package oneapi
import "C"
import ( import (
"chat/globals" "chat/globals"
"chat/utils" "chat/utils"

115
adapter/zhinao/chat.go Normal file
View File

@ -0,0 +1,115 @@
package zhinao
import (
"chat/globals"
"chat/utils"
"fmt"
"strings"
)
// ChatProps bundles the inputs for a single zhinao chat request.
type ChatProps struct {
	Model   string            // public model id (e.g. globals.GPT360V9)
	Message []globals.Message // conversation history to send upstream
	Token   int               // max token budget; -1 omits the limit entirely
}
// GetChatEndpoint returns the full chat-completion URL for the 360 ZhiNao API.
func (c *ChatInstance) GetChatEndpoint() string {
	return c.GetEndpoint() + "/v1/chat/completions"
}
// GetModel maps the public model id to the upstream 360 ZhiNao model name;
// unknown ids are passed through unchanged.
func (c *ChatInstance) GetModel(model string) string {
	if model == globals.GPT360V9 {
		return "360GPT_S2_V9"
	}
	return model
}
// GetChatBody builds the request payload for a chat completion call.
// A token budget above 2048 is clamped to 2048 (360GPT's maximum), and a
// value of -1 selects the request variant without a max_tokens field.
func (c *ChatInstance) GetChatBody(props *ChatProps, stream bool) interface{} {
	// clamp to the 2048-token ceiling enforced by 360GPT
	if props.Token > 2048 {
		props.Token = 2048
	}

	model := c.GetModel(props.Model)
	if props.Token == -1 {
		// -1 means "no limit": omit max_tokens from the payload
		return ChatRequestWithInfinity{
			Model:    model,
			Messages: props.Message,
			Stream:   stream,
		}
	}
	return ChatRequest{
		Model:    model,
		Messages: props.Message,
		MaxToken: props.Token,
		Stream:   stream,
	}
}
// CreateChatRequest performs a non-streaming chat completion against zhinao
// and returns the content of the first choice.
func (c *ChatInstance) CreateChatRequest(props *ChatProps) (string, error) {
	res, err := utils.Post(
		c.GetChatEndpoint(),
		c.GetHeader(),
		c.GetChatBody(props, false),
	)
	if err != nil {
		return "", fmt.Errorf("zhinao error: %s", err.Error())
	}
	if res == nil {
		// fix: the original called err.Error() here even when err was nil,
		// which panics on a nil response with no transport error
		return "", fmt.Errorf("zhinao error: empty response")
	}

	data := utils.MapToStruct[ChatResponse](res)
	if data == nil {
		return "", fmt.Errorf("zhinao error: cannot parse response")
	} else if data.Error.Message != "" {
		return "", fmt.Errorf("zhinao error: %s", data.Error.Message)
	}
	if len(data.Choices) == 0 {
		// fix: guard against an index-out-of-range panic on an empty choices array
		return "", fmt.Errorf("zhinao error: no choices in response")
	}
	return data.Choices[0].Message.Content, nil
}
// CreateStreamChatRequest streams a chat completion from zhinao, invoking
// callback once per content delta. Lines that fail to parse are buffered in
// buf and retried together with the next event; an upstream "zhinao error"
// aborts the stream, and a stream that produced no content at all is
// reported as an error.
func (c *ChatInstance) CreateStreamChatRequest(props *ChatProps, callback globals.Hook) error {
	buf := ""
	chunk := "" // everything emitted so far, used to detect an empty stream
	err := utils.EventSource(
		"POST",
		c.GetChatEndpoint(),
		c.GetHeader(),
		c.GetChatBody(props, true),
		func(data string) error {
			data, err := c.ProcessLine(buf, data)
			chunk += data
			if err != nil {
				if strings.HasPrefix(err.Error(), "zhinao error") {
					// upstream reported a real error: abort the stream
					return err
				}

				// parse error: assume a line broken mid-event, keep it
				// buffered and retry when the rest arrives
				buf = buf + data
				return nil
			}

			buf = ""
			if data != "" {
				if err := callback(data); err != nil {
					return err
				}
			}
			return nil
		},
	)

	if err != nil {
		return err
	} else if len(chunk) == 0 {
		return fmt.Errorf("empty response")
	}

	return nil
}

View File

@ -0,0 +1,97 @@
package zhinao
import (
"chat/globals"
"chat/utils"
"errors"
"fmt"
"strings"
)
// processFormat normalizes one raw SSE line into a JSON object string:
// every `data: {` prefix becomes a quoted "data" key, and braces are
// balanced at both ends when missing.
func processFormat(data string) string {
	item := strings.ReplaceAll(data, "data: {", "\"data\": {")
	if !strings.HasPrefix(item, "{") {
		item = "{" + item
	}
	if !strings.HasSuffix(item, "}}") {
		item += "}"
	}
	return item
}
// processChatResponse tries to unmarshal a formatted line into a
// ChatStreamResponse, retrying with one trailing brace removed and then one
// appended to tolerate the brace-balancing done by processFormat.
func processChatResponse(data string) *ChatStreamResponse {
	if !strings.HasPrefix(data, "{") {
		return nil
	}
	for _, candidate := range []string{data, data[:len(data)-1], data + "}"} {
		if form := utils.UnmarshalForm[ChatStreamResponse](candidate); form != nil {
			return form
		}
	}
	return nil
}
// processChatErrorResponse tries to unmarshal a formatted line into a
// ChatStreamErrorResponse, retrying once with a trailing brace appended.
func processChatErrorResponse(data string) *ChatStreamErrorResponse {
	if !strings.HasPrefix(data, "{") {
		return nil
	}
	for _, candidate := range []string{data, data + "}"} {
		if form := utils.UnmarshalForm[ChatStreamErrorResponse](candidate); form != nil {
			return form
		}
	}
	return nil
}
// isDone reports whether the formatted line is one of the terminator shapes
// that processFormat can produce from the upstream "[DONE]" / null events.
func isDone(data string) bool {
	// NOTE(review): assumes utils.Contains is an exact-membership test of
	// data against the slice — confirm against the utils package.
	return utils.Contains[string](data, []string{
		"{data: [DONE]}", "{data: [DONE]}}", "null}}", "{null}",
		"{[DONE]}", "{data:}", "{data:}}", "data: [DONE]}}",
	})
}
// getChoices extracts the first choice's delta content, or "" when the
// response carries no choices.
func getChoices(form *ChatStreamResponse) string {
	choices := form.Data.Choices
	if len(choices) > 0 {
		return choices[0].Delta.Content
	}
	return ""
}
// ProcessLine parses one streamed line (prefixed with any buffered partial
// data from a previous event) and returns the extracted delta content.
// A non-"zhinao error" error return signals the caller to buffer data and
// retry with the next event.
func (c *ChatInstance) ProcessLine(buf, data string) (string, error) {
	item := processFormat(buf + data)
	if isDone(item) {
		// terminator event: nothing to emit, no error
		return "", nil
	}
	if form := processChatResponse(item); form == nil {
		// recursive call
		// NOTE(review): buf is prepended again here even though item was
		// already built from buf+data, so buf appears twice — confirm this
		// duplication is intended (the chatgpt adapter uses the same pattern).
		if len(buf) > 0 {
			return c.ProcessLine("", buf+item)
		}
		// err is an upstream error payload, not a Go error; the || short-
		// circuits so the nil case never dereferences it
		if err := processChatErrorResponse(item); err == nil || err.Data.Error.Message == "" {
			globals.Warn(fmt.Sprintf("zhinao error: cannot parse response: %s", item))
			return data, errors.New("parser error: cannot parse response")
		} else {
			return "", fmt.Errorf("zhinao error: %s (type: %s)", err.Data.Error.Message, err.Data.Error.Type)
		}
	} else {
		return getChoices(form), nil
	}
}

40
adapter/zhinao/struct.go Normal file
View File

@ -0,0 +1,40 @@
package zhinao
import (
"fmt"
"github.com/spf13/viper"
)
// ChatInstance holds the connection settings for the 360 ZhiNao API.
type ChatInstance struct {
	Endpoint string // base URL of the zhinao API
	ApiKey   string // bearer token used for authorization
}
// GetEndpoint returns the configured base URL of the zhinao API.
func (c *ChatInstance) GetEndpoint() string {
	return c.Endpoint
}
// GetApiKey returns the configured API key for the zhinao API.
func (c *ChatInstance) GetApiKey() string {
	return c.ApiKey
}
// GetHeader builds the HTTP headers for a zhinao request: JSON content type
// plus bearer authorization from the configured API key.
func (c *ChatInstance) GetHeader() map[string]string {
	headers := make(map[string]string, 2)
	headers["Content-Type"] = "application/json"
	headers["Authorization"] = "Bearer " + c.GetApiKey()
	return headers
}
// NewChatInstance constructs a ChatInstance for the given endpoint and key.
func NewChatInstance(endpoint, apiKey string) *ChatInstance {
	instance := ChatInstance{
		Endpoint: endpoint,
		ApiKey:   apiKey,
	}
	return &instance
}
// NewChatInstanceFromConfig constructs a ChatInstance from the viper
// configuration keys "zhinao.endpoint" and "zhinao.apikey".
func NewChatInstanceFromConfig() *ChatInstance {
	return NewChatInstance(
		viper.GetString("zhinao.endpoint"),
		viper.GetString("zhinao.apikey"),
	)
}

60
adapter/zhinao/types.go Normal file
View File

@ -0,0 +1,60 @@
package zhinao
import "chat/globals"
// 360 ZhiNao API is similar to OpenAI API

// ChatRequest is the request body for zhinao with an explicit token limit.
type ChatRequest struct {
	Model    string            `json:"model"`
	Messages []globals.Message `json:"messages"`
	MaxToken int               `json:"max_tokens"`
	Stream   bool              `json:"stream"`
}
// ChatRequestWithInfinity is the request body variant that omits max_tokens,
// used when the caller passes Token == -1 (no limit).
type ChatRequestWithInfinity struct {
	Model    string            `json:"model"`
	Messages []globals.Message `json:"messages"`
	Stream   bool              `json:"stream"`
}
// ChatResponse is the non-streaming response body returned by zhinao.
// Error is populated instead of Choices when the request fails upstream.
type ChatResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int64  `json:"created"`
	Model   string `json:"model"`
	Choices []struct {
		Message struct {
			Content string `json:"content"`
		}
	} `json:"choices"`
	Error struct {
		Message string `json:"message"`
	} `json:"error"`
}
// ChatStreamResponse is one stream event after processFormat has wrapped the
// SSE payload under a top-level "data" key.
type ChatStreamResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int64  `json:"created"`
	Model   string `json:"model"`
	Data    struct {
		Choices []struct {
			Delta struct {
				Content string `json:"content"`
			}
			Index int `json:"index"`
		} `json:"choices"`
	} `json:"data"`
}
// ChatStreamErrorResponse is the error shape of a stream event, also wrapped
// under the "data" key by processFormat.
type ChatStreamErrorResponse struct {
	Data struct {
		Error struct {
			Message string `json:"message"`
			Type    string `json:"type"`
		} `json:"error"`
	} `json:"data"`
}

BIN
app/public/icons/360gpt.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 6.9 KiB

View File

@ -8,7 +8,7 @@ import {
} from "@/utils/env.ts"; } from "@/utils/env.ts";
import { getMemory } from "@/utils/memory.ts"; import { getMemory } from "@/utils/memory.ts";
export const version = "3.6.24"; export const version = "3.6.25";
export const dev: boolean = getDev(); export const dev: boolean = getDev();
export const deploy: boolean = true; export const deploy: boolean = true;
export let rest_api: string = getRestApi(deploy); export let rest_api: string = getRestApi(deploy);
@ -53,8 +53,21 @@ export const supportModels: Model[] = [
auth: true, auth: true,
tag: ["official", "high-context"], tag: ["official", "high-context"],
}, },
{
id: "gpt-4-v",
name: "GPT-4 Vision",
free: false,
auth: true,
tag: ["official", "unstable", "multi-modal"],
},
{
id: "gpt-4-dalle",
name: "GPT-4 DALLE",
free: false,
auth: true,
tag: ["official", "unstable", "image-generation"],
},
// anthropic models
{ {
id: "claude-1-100k", id: "claude-1-100k",
name: "Claude", name: "Claude",
@ -141,6 +154,15 @@ export const supportModels: Model[] = [
tag: ["official", "high-context"], tag: ["official", "high-context"],
}, },
// 360 models
{
id: "360-gpt-v9",
name: "360 智脑",
free: false,
auth: true,
tag: ["official"],
},
// llama models // llama models
{ {
id: "llama-2-70b", id: "llama-2-70b",
@ -186,15 +208,6 @@ export const supportModels: Model[] = [
tag: ["open-source", "unstable"], tag: ["open-source", "unstable"],
}, },
// drawing models
{
id: "stable-diffusion",
name: "Stable Diffusion XL",
free: false,
auth: true,
tag: ["open-source", "unstable", "image-generation"],
},
// new bing // new bing
{ {
id: "bing-creative", id: "bing-creative",
@ -213,7 +226,35 @@ export const supportModels: Model[] = [
tag: ["free", "english-model"], tag: ["free", "english-model"],
}, },
// dalle models // drawing models
{
id: "midjourney",
name: "Midjourney",
free: false,
auth: true,
tag: ["official", "image-generation"],
},
{
id: "midjourney-fast",
name: "Midjourney Fast",
free: false,
auth: true,
tag: ["official", "fast", "image-generation"],
},
{
id: "midjourney-turbo",
name: "Midjourney Turbo",
free: false,
auth: true,
tag: ["official", "fast", "image-generation"],
},
{
id: "stable-diffusion",
name: "Stable Diffusion XL",
free: false,
auth: true,
tag: ["open-source", "unstable", "image-generation"],
},
{ {
id: "dall-e-3", id: "dall-e-3",
name: "DALLE 3", name: "DALLE 3",
@ -229,44 +270,6 @@ export const supportModels: Model[] = [
tag: ["free", "official", "image-generation"], tag: ["free", "official", "image-generation"],
}, },
{
id: "midjourney",
name: "Midjourney Queue",
free: false,
auth: true,
tag: ["official", "image-generation"],
},
{
id: "midjourney-fast",
name: "Midjourney",
free: false,
auth: true,
tag: ["official", "fast", "image-generation"],
},
{
id: "midjourney-turbo",
name: "Midjourney Turbo",
free: false,
auth: true,
tag: ["official", "fast", "image-generation"],
},
// reverse models
{
id: "gpt-4-v",
name: "GPT-4 Vision",
free: false,
auth: true,
tag: ["official", "unstable", "multi-modal"],
},
{
id: "gpt-4-dalle",
name: "GPT-4 DALLE",
free: false,
auth: true,
tag: ["official", "unstable", "image-generation"],
},
{ {
id: "gpt-4-32k-0613", id: "gpt-4-32k-0613",
name: "GPT-4-32k", name: "GPT-4-32k",
@ -292,6 +295,7 @@ export const defaultModels = [
"qwen-plus", "qwen-plus",
"hunyuan", "hunyuan",
"zhipu-chatglm-turbo", "zhipu-chatglm-turbo",
"360-gpt-v9",
"dall-e-2", "dall-e-2",
"midjourney-fast", "midjourney-fast",
@ -363,6 +367,7 @@ export const modelAvatars: Record<string, string> = {
"spark-desk-v2": "sparkdesk.jpg", "spark-desk-v2": "sparkdesk.jpg",
"spark-desk-v1.5": "sparkdesk.jpg", "spark-desk-v1.5": "sparkdesk.jpg",
hunyuan: "hunyuan.png", hunyuan: "hunyuan.png",
"360-gpt-v9": "360gpt.png",
}; };
export const modelPricingLink = "https://docs.chatnio.net/ai-mo-xing-ji-ji-fei"; export const modelPricingLink = "https://docs.chatnio.net/ai-mo-xing-ji-ji-fei";

View File

@ -29,7 +29,7 @@ func CanEnableModel(db *sql.DB, user *User, model string) bool {
case globals.LLaMa27B, globals.LLaMa213B, globals.LLaMa270B, case globals.LLaMa27B, globals.LLaMa213B, globals.LLaMa270B,
globals.CodeLLaMa34B, globals.CodeLLaMa13B, globals.CodeLLaMa7B: globals.CodeLLaMa34B, globals.CodeLLaMa13B, globals.CodeLLaMa7B:
return user != nil && user.GetQuota(db) >= 1 return user != nil && user.GetQuota(db) >= 1
case globals.Hunyuan: case globals.Hunyuan, globals.GPT360V9:
return user != nil && user.GetQuota(db) >= 1 return user != nil && user.GetQuota(db) >= 1
default: default:
return user != nil return user != nil

View File

@ -89,6 +89,7 @@ const (
CodeLLaMa13B = "code-llama-13b" CodeLLaMa13B = "code-llama-13b"
CodeLLaMa7B = "code-llama-7b" CodeLLaMa7B = "code-llama-7b"
Hunyuan = "hunyuan" Hunyuan = "hunyuan"
GPT360V9 = "360-gpt-v9"
) )
var GPT3TurboArray = []string{ var GPT3TurboArray = []string{
@ -199,7 +200,7 @@ var AllModels = []string{
StableDiffusion, Midjourney, MidjourneyFast, MidjourneyTurbo, StableDiffusion, Midjourney, MidjourneyFast, MidjourneyTurbo,
LLaMa270B, LLaMa213B, LLaMa27B, LLaMa270B, LLaMa213B, LLaMa27B,
CodeLLaMa34B, CodeLLaMa13B, CodeLLaMa7B, CodeLLaMa34B, CodeLLaMa13B, CodeLLaMa7B,
Hunyuan, Hunyuan, GPT360V9,
} }
func in(value string, slice []string) bool { func in(value string, slice []string) bool {
@ -275,6 +276,10 @@ func IsHunyuanModel(model string) bool {
return model == Hunyuan return model == Hunyuan
} }
func Is360Model(model string) bool {
return model == GPT360V9
}
func IsLongContextModel(model string) bool { func IsLongContextModel(model string) bool {
return in(model, LongContextModelArray) return in(model, LongContextModelArray)
} }

View File

@ -25,7 +25,8 @@ func GetWeightByModel(model string) int {
globals.SparkDesk, globals.SparkDeskV2, globals.SparkDeskV3, globals.SparkDesk, globals.SparkDeskV2, globals.SparkDeskV3,
globals.QwenTurbo, globals.QwenPlus, globals.QwenTurboNet, globals.QwenPlusNet, globals.QwenTurbo, globals.QwenPlus, globals.QwenTurboNet, globals.QwenPlusNet,
globals.BingPrecise, globals.BingCreative, globals.BingBalanced, globals.Hunyuan: globals.BingPrecise, globals.BingCreative, globals.BingBalanced,
globals.Hunyuan, globals.GPT360V9:
return 3 return 3
case globals.GPT3Turbo0301, globals.GPT3Turbo16k0301, case globals.GPT3Turbo0301, globals.GPT3Turbo16k0301,
globals.ZhiPuChatGLMTurbo, globals.ZhiPuChatGLMLite, globals.ZhiPuChatGLMStd, globals.ZhiPuChatGLMPro: globals.ZhiPuChatGLMTurbo, globals.ZhiPuChatGLMLite, globals.ZhiPuChatGLMStd, globals.ZhiPuChatGLMPro:
@ -107,6 +108,8 @@ func CountInputToken(model string, v []globals.Message) float32 {
return float32(CountTokenPrice(v, model)) / 1000 * 0.2 return float32(CountTokenPrice(v, model)) / 1000 * 0.2
case globals.Hunyuan: case globals.Hunyuan:
return float32(CountTokenPrice(v, model)) / 1000 * 1 return float32(CountTokenPrice(v, model)) / 1000 * 1
case globals.GPT360V9:
return float32(CountTokenPrice(v, model)) / 1000 * 0.12
default: default:
return 0 return 0
} }
@ -145,6 +148,8 @@ func CountOutputToken(model string, t int) float32 {
return float32(t*GetWeightByModel(model)) / 1000 * 0.2 return float32(t*GetWeightByModel(model)) / 1000 * 0.2
case globals.Hunyuan: case globals.Hunyuan:
return float32(t*GetWeightByModel(model)) / 1000 * 1 return float32(t*GetWeightByModel(model)) / 1000 * 1
case globals.GPT360V9:
return float32(t*GetWeightByModel(model)) / 1000 * 0.12
case globals.StableDiffusion: case globals.StableDiffusion:
return 0.25 return 0.25
case globals.Midjourney: case globals.Midjourney: