fix subscription usage increasing when a request fails; update Claude models

Zhang Minghan 2023-11-03 17:37:57 +08:00
parent ebd8a6f1c6
commit a22f14a123
26 changed files with 183 additions and 287 deletions

View File

@@ -64,8 +64,8 @@
 - GPT-3.5-Turbo-Instruct
 - GPT-4 (_0314_, _0613_)
 - GPT-4-32k (_0314_, _0613_)
-- GPT-4-Reverse (_gpt-4_, _**gpt-4v**_)
-- DALL-E
+- GPT-4-Reverse (_gpt-4_, _**gpt-4v**_, _**dalle3**_)
+- DALL-E 2
 - Claude
 - Slack-Claude (unstable)
 - Claude-2
@@ -83,6 +83,13 @@
   - Creative
   - Balanced
   - Precise
+- ChatGLM
+  - Pro
+  - Std
+  - Lite
+- DashScope Tongyi
+  - Qwen Plus (net)
+  - Qwen Turbo (net)
 - More models are under development...

View File

@@ -4,6 +4,7 @@ import (
 	"chat/adapter/bing"
 	"chat/adapter/claude"
 	"chat/adapter/dashscope"
+	"chat/adapter/oneapi"
 	"chat/adapter/palm2"
 	"chat/adapter/slack"
 	"chat/adapter/zhipuai"
@@ -20,7 +21,10 @@ type ChatProps struct {
 }
 
 func NewChatRequest(props *ChatProps, hook globals.Hook) error {
-	if globals.IsChatGPTModel(props.Model) {
+	if oneapi.IsHit(props.Model) {
+		return oneapi.Handle(props, hook)
+	} else if globals.IsChatGPTModel(props.Model) {
 		return createRetryChatGPTPool(props, hook)
 	} else if globals.IsClaudeModel(props.Model) {
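The dispatch change above gives the oneapi adapter first claim on a request: any model in its hit list short-circuits the vendor-specific branches. A minimal, self-contained sketch of that membership check (names are illustrative, not the project's actual API):

```go
package main

import "fmt"

// hitModels mirrors the idea of oneapi.HitModels: the set of models
// that should be routed through the aggregated oneapi endpoint.
var hitModels = []string{"claude-2", "claude-2-100k"}

func isHit(model string) bool {
	for _, m := range hitModels {
		if m == model {
			return true
		}
	}
	return false
}

func main() {
	for _, model := range []string{"claude-2", "gpt-3.5-turbo"} {
		if isHit(model) {
			fmt.Printf("%s -> oneapi adapter\n", model)
		} else {
			fmt.Printf("%s -> vendor-specific adapter\n", model)
		}
	}
}
```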

View File

@@ -5,7 +5,6 @@ import (
 	"chat/globals"
 	"chat/utils"
 	"fmt"
-	"github.com/spf13/viper"
 	"strings"
 )
@@ -135,41 +134,3 @@ func (c *ChatInstance) CreateStreamChatRequest(props *ChatProps, callback global
 		},
 	)
 }
-
-func (c *ChatInstance) Test() bool {
-	result, err := c.CreateChatRequest(&ChatProps{
-		Model:   globals.GPT3Turbo,
-		Message: []globals.Message{{Role: "user", Content: "hi"}},
-		Token:   1,
-	})
-	if err != nil {
-		fmt.Println(fmt.Sprintf("%s: test failed (%s)", c.GetApiKey(), err.Error()))
-	}
-	return err == nil && len(result) > 0
-}
-
-func FilterKeys(v string) []string {
-	endpoint := viper.GetString(fmt.Sprintf("openai.%s.endpoint", v))
-	keys := strings.Split(viper.GetString(fmt.Sprintf("openai.%s.apikey", v)), "|")
-	return FilterKeysNative(endpoint, keys)
-}
-
-func FilterKeysNative(endpoint string, keys []string) []string {
-	stack := make(chan string, len(keys))
-	for _, key := range keys {
-		go func(key string) {
-			instance := NewChatInstance(endpoint, key)
-			stack <- utils.Multi[string](instance.Test(), key, "")
-		}(key)
-	}
-
-	var result []string
-	for i := 0; i < len(keys); i++ {
-		if res := <-stack; res != "" {
-			result = append(result, res)
-		}
-	}
-	return result
-}

View File

@@ -48,12 +48,10 @@ func NewChatInstanceFromConfig(v string) *ChatInstance {
 func NewChatInstanceFromModel(props *InstanceProps) *ChatInstance {
 	switch props.Model {
-	case globals.GPT4, globals.GPT40314, globals.GPT40613:
+	case globals.GPT4, globals.GPT40314, globals.GPT40613,
+		globals.GPT432k, globals.GPT432k0613, globals.GPT432k0314:
 		return NewChatInstanceFromConfig("gpt4")
-	case globals.GPT432k, globals.GPT432k0613, globals.GPT432k0314:
-		return NewChatInstanceFromConfig("32k")
 	case globals.GPT4Vision, globals.Dalle3:
 		return NewChatInstanceFromConfig("reverse")

adapter/chatgpt/test.go (new file, 47 lines added)
View File

@@ -0,0 +1,47 @@
+package chatgpt
+
+import (
+	"chat/globals"
+	"chat/utils"
+	"fmt"
+	"github.com/spf13/viper"
+	"strings"
+)
+
+func (c *ChatInstance) Test() bool {
+	result, err := c.CreateChatRequest(&ChatProps{
+		Model:   globals.GPT3Turbo,
+		Message: []globals.Message{{Role: "user", Content: "hi"}},
+		Token:   1,
+	})
+	if err != nil {
+		fmt.Println(fmt.Sprintf("%s: test failed (%s)", c.GetApiKey(), err.Error()))
+	}
+	return err == nil && len(result) > 0
+}
+
+func FilterKeys(v string) []string {
+	endpoint := viper.GetString(fmt.Sprintf("openai.%s.endpoint", v))
+	keys := strings.Split(viper.GetString(fmt.Sprintf("openai.%s.apikey", v)), "|")
+	return FilterKeysNative(endpoint, keys)
+}
+
+func FilterKeysNative(endpoint string, keys []string) []string {
+	stack := make(chan string, len(keys))
+	for _, key := range keys {
+		go func(key string) {
+			instance := NewChatInstance(endpoint, key)
+			stack <- utils.Multi[string](instance.Test(), key, "")
+		}(key)
+	}
+
+	var result []string
+	for i := 0; i < len(keys); i++ {
+		if res := <-stack; res != "" {
+			result = append(result, res)
+		}
+	}
+	return result
+}
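FilterKeysNative probes every configured API key concurrently and keeps only the ones that answer; the buffered channel lets all goroutines report without blocking. A standalone sketch of the same fan-out/fan-in shape, with a stand-in validate function in place of the real Test() request:

```go
package main

import (
	"fmt"
	"strings"
)

// filterKeys probes each key in its own goroutine and collects results
// through a buffered channel, so no worker ever blocks on send.
func filterKeys(keys []string, validate func(string) bool) []string {
	stack := make(chan string, len(keys))
	for _, key := range keys {
		go func(key string) {
			if validate(key) {
				stack <- key
			} else {
				stack <- "" // sentinel for a dead key
			}
		}(key)
	}

	var result []string
	for range keys {
		if res := <-stack; res != "" {
			result = append(result, res)
		}
	}
	return result
}

func main() {
	keys := []string{"sk-live-1", "sk-dead-2", "sk-live-3"}
	ok := filterKeys(keys, func(k string) bool { return strings.Contains(k, "live") })
	fmt.Println(ok) // order may vary: goroutines finish in any order
}
```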

View File

@@ -1,11 +1,10 @@
-package chatgpt
+package oneapi
 
 import "C"
 import (
 	"chat/globals"
 	"chat/utils"
 	"fmt"
-	"github.com/spf13/viper"
 	"strings"
 )
@@ -15,36 +14,11 @@ type ChatProps struct {
 	Token   int
 }
 
-func (c *ChatInstance) GetChatEndpoint(props *ChatProps) string {
-	if props.Model == globals.GPT3TurboInstruct {
-		return fmt.Sprintf("%s/v1/completions", c.GetEndpoint())
-	}
+func (c *ChatInstance) GetChatEndpoint() string {
 	return fmt.Sprintf("%s/v1/chat/completions", c.GetEndpoint())
 }
 
-func (c *ChatInstance) GetCompletionPrompt(messages []globals.Message) string {
-	result := ""
-	for _, message := range messages {
-		result += fmt.Sprintf("%s: %s\n", message.Role, message.Content)
-	}
-	return result
-}
-
 func (c *ChatInstance) GetChatBody(props *ChatProps, stream bool) interface{} {
-	if props.Model == globals.GPT3TurboInstruct {
-		// for completions
-		return utils.Multi[interface{}](props.Token != -1, CompletionRequest{
-			Model:    props.Model,
-			Prompt:   c.GetCompletionPrompt(props.Message),
-			MaxToken: props.Token,
-			Stream:   stream,
-		}, CompletionWithInfinity{
-			Model:  props.Model,
-			Prompt: c.GetCompletionPrompt(props.Message),
-			Stream: stream,
-		})
-	}
-
 	if props.Token != -1 {
 		return ChatRequest{
 			Model: props.Model,
@@ -61,42 +35,41 @@ func (c *ChatInstance) GetChatBody(props *ChatProps, stream bool) interface{} {
 	}
 }
 
-// CreateChatRequest is the native http request body for chatgpt
+// CreateChatRequest is the native http request body for oneapi
 func (c *ChatInstance) CreateChatRequest(props *ChatProps) (string, error) {
 	res, err := utils.Post(
-		c.GetChatEndpoint(props),
+		c.GetChatEndpoint(),
 		c.GetHeader(),
 		c.GetChatBody(props, false),
 	)
 
 	if err != nil || res == nil {
-		return "", fmt.Errorf("chatgpt error: %s", err.Error())
+		return "", fmt.Errorf("oneapi error: %s", err.Error())
 	}
 
 	data := utils.MapToStruct[ChatResponse](res)
 	if data == nil {
-		return "", fmt.Errorf("chatgpt error: cannot parse response")
+		return "", fmt.Errorf("oneapi error: cannot parse response")
 	} else if data.Error.Message != "" {
-		return "", fmt.Errorf("chatgpt error: %s", data.Error.Message)
+		return "", fmt.Errorf("oneapi error: %s", data.Error.Message)
 	}
 	return data.Choices[0].Message.Content, nil
 }
 
-// CreateStreamChatRequest is the stream response body for chatgpt
+// CreateStreamChatRequest is the stream response body for oneapi
 func (c *ChatInstance) CreateStreamChatRequest(props *ChatProps, callback globals.Hook) error {
 	buf := ""
-	instruct := props.Model == globals.GPT3TurboInstruct
 
 	return utils.EventSource(
 		"POST",
-		c.GetChatEndpoint(props),
+		c.GetChatEndpoint(),
 		c.GetHeader(),
 		c.GetChatBody(props, true),
 		func(data string) error {
-			data, err := c.ProcessLine(instruct, buf, data)
+			data, err := c.ProcessLine(buf, data)
 			if err != nil {
-				if strings.HasPrefix(err.Error(), "chatgpt error") {
+				if strings.HasPrefix(err.Error(), "oneapi error") {
 					return err
 				}
@@ -115,41 +88,3 @@ func (c *ChatInstance) CreateStreamChatRequest(props *ChatProps, callback global
 		},
 	)
 }
-
-func (c *ChatInstance) Test() bool {
-	result, err := c.CreateChatRequest(&ChatProps{
-		Model:   globals.GPT3Turbo,
-		Message: []globals.Message{{Role: "user", Content: "hi"}},
-		Token:   1,
-	})
-	if err != nil {
-		fmt.Println(fmt.Sprintf("%s: test failed (%s)", c.GetApiKey(), err.Error()))
-	}
-	return err == nil && len(result) > 0
-}
-
-func FilterKeys(v string) []string {
-	endpoint := viper.GetString(fmt.Sprintf("openai.%s.endpoint", v))
-	keys := strings.Split(viper.GetString(fmt.Sprintf("openai.%s.apikey", v)), "|")
-	return FilterKeysNative(endpoint, keys)
-}
-
-func FilterKeysNative(endpoint string, keys []string) []string {
-	stack := make(chan string, len(keys))
-	for _, key := range keys {
-		go func(key string) {
-			instance := NewChatInstance(endpoint, key)
-			stack <- utils.Multi[string](instance.Test(), key, "")
-		}(key)
-	}
-
-	var result []string
-	for i := 0; i < len(keys); i++ {
-		if res := <-stack; res != "" {
-			result = append(result, res)
-		}
-	}
-	return result
-}

adapter/oneapi/globals.go (new file, 32 lines added)
View File

@@ -0,0 +1,32 @@
+package oneapi
+
+import (
+	"chat/globals"
+)
+
+var HitModels = []string{
+	globals.Claude2, globals.Claude2100k,
+}
+
+func (c *ChatInstance) Process(data string) string {
+	return data
+}
+
+func (c *ChatInstance) FormatMessage(message []globals.Message) []globals.Message {
+	return message
+}
+
+func (c *ChatInstance) FormatModel(model string) string {
+	return model
+}
+
+func (c *ChatInstance) GetToken(model string) int {
+	switch model {
+	case globals.Claude2:
+		return 5000
+	case globals.Claude2100k:
+		return 50000
+	default:
+		return 2500
+	}
+}

adapter/oneapi/handler.go (new file, 30 lines added)
View File

@@ -0,0 +1,30 @@
+package oneapi
+
+import (
+	"chat/globals"
+	"chat/utils"
+)
+
+type AdapterProps struct {
+	Model    string
+	Plan     bool
+	Infinity bool
+	Message  []globals.Message
+	Token    int
+}
+
+func HandleRequest(props *AdapterProps, hook globals.Hook) error {
+	instance := NewChatInstanceFromConfig()
+	return instance.CreateStreamChatRequest(&ChatProps{
+		Model:   instance.FormatModel(props.Model),
+		Message: instance.FormatMessage(props.Message),
+		Token:   utils.Multi(props.Token == 0, instance.GetToken(props.Model), props.Token),
+	}, func(data string) error {
+		return hook(instance.Process(data))
+	})
+}
+
+func Handle(props interface{}, hook globals.Hook) error {
+	conv := utils.MapToStruct[AdapterProps](props)
+	return HandleRequest(conv, hook)
+}
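HandleRequest leans on utils.Multi to fall back to the model's default token budget when the caller passes Token == 0. The helper presumably behaves like a generic ternary; a sketch under that assumption:

```go
package main

import "fmt"

// multi is a sketch of the generic ternary helper the handler relies on:
// it returns a when the condition holds, otherwise b.
func multi[T any](condition bool, a T, b T) T {
	if condition {
		return a
	}
	return b
}

func main() {
	requested := 0
	// Token == 0 means "no explicit budget": fall back to the model default.
	token := multi(requested == 0, 50000, requested)
	fmt.Println(token) // 50000
}
```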

View File

@@ -1,4 +1,4 @@
-package chatgpt
+package oneapi
 
 import (
 	"chat/globals"
@@ -54,21 +54,6 @@ func processChatResponse(data string) *ChatStreamResponse {
 	return nil
 }
 
-func processCompletionResponse(data string) *CompletionResponse {
-	if strings.HasPrefix(data, "{") {
-		var form *CompletionResponse
-		if form = utils.UnmarshalForm[CompletionResponse](data); form != nil {
-			return form
-		}
-		if form = utils.UnmarshalForm[CompletionResponse](data[:len(data)-1]); form != nil {
-			return form
-		}
-	}
-	return nil
-}
-
 func processChatErrorResponse(data string) *ChatStreamErrorResponse {
 	if strings.HasPrefix(data, "{") {
 		var form *ChatStreamErrorResponse
@@ -98,38 +83,23 @@ func getChoices(form *ChatStreamResponse) string {
 	return form.Data.Choices[0].Delta.Content
 }
 
-func getCompletionChoices(form *CompletionResponse) string {
-	if len(form.Data.Choices) == 0 {
-		return ""
-	}
-	return form.Data.Choices[0].Text
-}
-
-func (c *ChatInstance) ProcessLine(instruct bool, buf, data string) (string, error) {
+func (c *ChatInstance) ProcessLine(buf, data string) (string, error) {
 	item := processFormat(buf + data)
 	if isDone(item) {
 		return "", nil
 	}
 
 	if form := processChatResponse(item); form == nil {
-		if instruct {
-			// legacy support
-			if completion := processCompletionResponse(item); completion != nil {
-				return getCompletionChoices(completion), nil
-			}
-		}
-
 		// recursive call
 		if len(buf) > 0 {
-			return c.ProcessLine(instruct, "", buf+item)
+			return c.ProcessLine("", buf+item)
 		}
 
 		if err := processChatErrorResponse(item); err == nil {
-			globals.Warn(fmt.Sprintf("chatgpt error: cannot parse response: %s", item))
+			globals.Warn(fmt.Sprintf("oneapi error: cannot parse response: %s", item))
 			return data, errors.New("parser error: cannot parse response")
 		} else {
-			return "", fmt.Errorf("chatgpt error: %s (type: %s)", err.Data.Error.Message, err.Data.Error.Type)
+			return "", fmt.Errorf("oneapi error: %s (type: %s)", err.Data.Error.Message, err.Data.Error.Type)
 		}
 	} else {
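ProcessLine keeps a buffer of undecodable partial chunks and re-parses once more data arrives, since SSE framing can split a JSON object across events. A simplified, runnable model of that buffering trick (the real parser handles more formats; names here are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type delta struct {
	Content string `json:"content"`
}

// processLine models the parser's approach: if a chunk does not decode
// on its own, stash it and retry when the next chunk arrives.
func processLine(buf *string, data string) (string, bool) {
	var d delta
	if err := json.Unmarshal([]byte(*buf+data), &d); err != nil {
		*buf += data // incomplete frame: keep it for the next chunk
		return "", false
	}
	*buf = ""
	return d.Content, true
}

func main() {
	buf := ""
	// a JSON frame split across two SSE chunks
	for _, chunk := range []string{`{"content":"hel`, `lo"}`} {
		if out, ok := processLine(&buf, chunk); ok {
			fmt.Println(out) // hello
		}
	}
}
```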

View File

@@ -1,7 +1,6 @@
-package chatgpt
+package oneapi
 
 import (
-	"chat/globals"
 	"chat/utils"
 	"fmt"
 	"github.com/spf13/viper"
@@ -39,31 +38,13 @@ func NewChatInstance(endpoint, apiKey string) *ChatInstance {
 	}
 }
 
-func NewChatInstanceFromConfig(v string) *ChatInstance {
+func NewChatInstanceFromConfig() *ChatInstance {
 	return NewChatInstance(
-		viper.GetString(fmt.Sprintf("openai.%s.endpoint", v)),
-		utils.GetRandomKey(viper.GetString(fmt.Sprintf("openai.%s.apikey", v))),
+		viper.GetString("oneapi.endpoint"),
+		viper.GetString("oneapi.apikey"),
 	)
 }
 
-func NewChatInstanceFromModel(props *InstanceProps) *ChatInstance {
-	switch props.Model {
-	case globals.GPT4, globals.GPT40314, globals.GPT40613:
-		return NewChatInstanceFromConfig("gpt4")
-	case globals.GPT432k, globals.GPT432k0613, globals.GPT432k0314:
-		return NewChatInstanceFromConfig("32k")
-	case globals.GPT4Vision, globals.Dalle3:
-		return NewChatInstanceFromConfig("reverse")
-	case globals.GPT3Turbo, globals.GPT3TurboInstruct, globals.GPT3Turbo0613, globals.GPT3Turbo0301,
-		globals.GPT3Turbo16k, globals.GPT3Turbo16k0301, globals.GPT3Turbo16k0613:
-		if props.Plan {
-			return NewChatInstanceFromConfig("subscribe")
-		}
-		return NewChatInstanceFromConfig("gpt3")
-	default:
-		return NewChatInstanceFromConfig("gpt3")
-	}
-}
+func IsHit(model string) bool {
+	return utils.Contains[string](model, HitModels)
+}

View File

@@ -1,8 +1,8 @@
-package chatgpt
+package oneapi
 
 import "chat/globals"
 
-// ChatRequest is the request body for chatgpt
+// ChatRequest is the request body for oneapi
 type ChatRequest struct {
 	Model    string            `json:"model"`
 	Messages []globals.Message `json:"messages"`
@@ -16,21 +16,7 @@ type ChatRequestWithInfinity struct {
 	Stream bool `json:"stream"`
 }
 
-// CompletionRequest ChatRequest is the request body for chatgpt completion
-type CompletionRequest struct {
-	Model    string `json:"model"`
-	Prompt   string `json:"prompt"`
-	MaxToken int    `json:"max_tokens"`
-	Stream   bool   `json:"stream"`
-}
-
-type CompletionWithInfinity struct {
-	Model  string `json:"model"`
-	Prompt string `json:"prompt"`
-	Stream bool   `json:"stream"`
-}
-
-// ChatResponse is the native http request body for chatgpt
+// ChatResponse is the native http request body for oneapi
 type ChatResponse struct {
 	ID      string `json:"id"`
 	Object  string `json:"object"`
@@ -46,7 +32,7 @@ type ChatResponse struct {
 	} `json:"error"`
 }
 
-// ChatStreamResponse is the stream response body for chatgpt
+// ChatStreamResponse is the stream response body for oneapi
 type ChatStreamResponse struct {
 	ID      string `json:"id"`
 	Object  string `json:"object"`
@@ -62,20 +48,6 @@ type ChatStreamResponse struct {
 	} `json:"data"`
 }
 
-// CompletionResponse is the native http request body / stream response body for chatgpt completion
-type CompletionResponse struct {
-	ID      string `json:"id"`
-	Object  string `json:"object"`
-	Created int64  `json:"created"`
-	Model   string `json:"model"`
-	Data    struct {
-		Choices []struct {
-			Text  string `json:"text"`
-			Index int    `json:"index"`
-		} `json:"choices"`
-	} `json:"data"`
-}
-
 type ChatStreamErrorResponse struct {
 	Data struct {
 		Error struct {
@@ -84,27 +56,3 @@ type ChatStreamErrorResponse struct {
 		} `json:"error"`
 	} `json:"data"`
 }
-
-type ImageSize string
-
-// ImageRequest is the request body for chatgpt dalle image generation
-type ImageRequest struct {
-	Prompt string    `json:"prompt"`
-	Size   ImageSize `json:"size"`
-	N      int       `json:"n"`
-}
-
-type ImageResponse struct {
-	Data []struct {
-		Url string `json:"url"`
-	} `json:"data"`
-	Error struct {
-		Message string `json:"message"`
-	} `json:"error"`
-}
-
-var (
-	ImageSize256  ImageSize = "256x256"
-	ImageSize512  ImageSize = "512x512"
-	ImageSize1024 ImageSize = "1024x1024"
-)

View File

@@ -32,21 +32,6 @@ func retryChatGPTPool(props *ChatProps, hook globals.Hook, retry int) error {
 		),
 	}, hook)
 
-	if globals.IsGPT4NativeModel(props.Model) && IsAvailableError(err) {
-		if !strings.Contains(err.Error(), "429") {
-			// not rate limited
-			return chatgpt.NewChatInstanceFromConfig("32k").CreateStreamChatRequest(&chatgpt.ChatProps{
-				Model:   props.Model,
-				Message: props.Message,
-				Token: utils.Multi(
-					props.Token == 0,
-					utils.Multi(globals.IsGPT4Model(props.Model) || props.Plan || props.Infinity, -1, 2500),
-					props.Token,
-				),
-			}, hook)
-		}
-	}
-
 	if IsAvailableError(err) && retry < MaxRetries {
 		fmt.Println(fmt.Sprintf("retrying chatgpt pool (times: %d, error: %s)", retry+1, err.Error()))
 		return retryChatGPTPool(props, hook, retry+1)
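With the 32k fallback gone, retryChatGPTPool is a plain bounded-retry recursion: re-issue on a retryable error, stop after MaxRetries. The shape, reduced to a standalone sketch (names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

const maxRetries = 3

// retry re-issues the request on a retryable error and gives up
// after maxRetries attempts, returning the last error.
func retry(attempt int, do func() error) error {
	err := do()
	if err != nil && attempt < maxRetries {
		fmt.Printf("retrying (times: %d, error: %s)\n", attempt+1, err)
		return retry(attempt+1, do)
	}
	return err
}

func main() {
	calls := 0
	err := retry(0, func() error {
		calls++
		if calls < 3 {
			return errors.New("upstream 429")
		}
		return nil
	})
	fmt.Println(err, calls) // <nil> 3
}
```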

View File

@@ -86,6 +86,7 @@ func GenerateAPI(c *gin.Context) {
 	}
 
 	if err != nil {
+		auth.RevertSubscriptionUsage(cache, user, form.Model, plan)
 		conn.Send(globals.GenerationSegmentResponse{
 			End:   true,
 			Error: err.Error(),

View File

@@ -38,14 +38,14 @@ function ModelSelector(props: ModelSelectorProps) {
 	});
 
 	const list = supportModels.map((model: Model): SelectItemProps => {
-		const array = ["gpt-4-0613", "gpt-4v", "gpt-4-dalle", "claude-2"];
+		const array = ["gpt-4-0613", "gpt-4v", "gpt-4-dalle", "claude-2-100k"];
 		if (subscription && array.includes(model.id)) {
 			return {
 				name: model.id,
 				value: model.name,
 				badge: { variant: "gold", name: "plus" },
 			} as SelectItemProps;
-		} else if (student && model.id === "claude-2") {
+		} else if (student && model.id === "claude-2-100k") {
 			return {
 				name: model.id,
 				value: model.name,

View File

@@ -8,7 +8,7 @@ import {
 } from "@/utils/env.ts";
 import { getMemory } from "@/utils/memory.ts";
 
-export const version = "3.6.9";
+export const version = "3.6.9rc";
 export const dev: boolean = getDev();
 export const deploy: boolean = true;
 export let rest_api: string = getRestApi(deploy);
@@ -24,15 +24,15 @@ export const supportModels: Model[] = [
 	{ id: "gpt-4v", name: "GPT-4V", free: false, auth: true },
 	{ id: "gpt-4-dalle", name: "DALLE3", free: false, auth: true },
 
+	// anthropic models
+	{ id: "claude-2", name: "Claude-2", free: true, auth: false },
+	{ id: "claude-2-100k", name: "Claude-2-100k", free: false, auth: true },
+
 	// spark desk
 	{ id: "spark-desk-v3", name: "讯飞星火 V3", free: true, auth: true },
 	{ id: "spark-desk-v2", name: "讯飞星火 V2", free: true, auth: true },
 	{ id: "spark-desk-v2", name: "讯飞星火 V1.5", free: true, auth: true },
 
-	// anthropic models
-	{ id: "claude-1", name: "Claude-2", free: true, auth: false },
-	{ id: "claude-2", name: "Claude-2-100k", free: false, auth: true }, // not claude-2-100k
-
 	// dashscope models
 	{ id: "qwen-plus-net", name: "通义千问 Plus X", free: false, auth: true },
 	{ id: "qwen-plus", name: "通义千问 Plus", free: false, auth: true },

View File

@@ -18,7 +18,6 @@ type SubscriptionResponse = {
 	enterprise?: boolean;
 	usage: {
 		gpt4: number;
-		dalle: number;
 	};
 };
@@ -67,7 +66,7 @@ export async function getSubscription(): Promise<SubscriptionResponse> {
 			status: false,
 			is_subscribed: false,
 			expired: 0,
-			usage: { gpt4: 0, dalle: 0 },
+			usage: { gpt4: 0 },
 		};
 	}
 	return resp.data as SubscriptionResponse;
@@ -77,7 +76,7 @@ export async function getSubscription(): Promise<SubscriptionResponse> {
 		status: false,
 		is_subscribed: false,
 		expired: 0,
-		usage: { gpt4: 0, dalle: 0 },
+		usage: { gpt4: 0 },
 	};
 }

View File

@@ -31,7 +31,6 @@ import {
 	FolderGit2,
 	Globe,
 	Image,
-	ImagePlus,
 	LifeBuoy,
 	MessageSquare,
 	MessagesSquare,
@@ -200,14 +199,6 @@ function Subscription() {
 					<p>{usage?.gpt4}</p> / <p> 50 </p>
 				</div>
 			</div>
-			<div className={`sub-column`}>
-				<ImagePlus className={`h-4 w-4 mr-1`} />
-				DALL-E
-				<div className={`grow`} />
-				<div className={`sub-value`}>
-					<p>{usage?.dalle}</p> / <p> 2000 </p>
-				</div>
-			</div>
 		</>
 	)}
 </div>

View File

@@ -11,7 +11,6 @@ export const subscriptionSlice = createSlice({
 	expired: 0,
 	usage: {
 		gpt4: 0,
-		dalle: 0,
 	},
 },
 reducers: {

View File

@@ -50,8 +50,15 @@ func HandleSubscriptionUsage(db *sql.DB, cache *redis.Client, user *User, model
 	return false
 }
 
-// CanEnableModelWithSubscription returns (canEnable, usePlan)
-func CanEnableModelWithSubscription(db *sql.DB, cache *redis.Client, user *User, model string) (bool, bool) {
+func RevertSubscriptionUsage(cache *redis.Client, user *User, model string, plan bool) {
+	if globals.IsGPT4NativeModel(model) && plan {
+		DecreaseSubscriptionUsage(cache, user, globals.GPT4)
+	} else if model == globals.Claude2100k && !plan {
+		DecreaseSubscriptionUsage(cache, user, globals.Claude2100k)
+	}
+}
+
+func CanEnableModelWithSubscription(db *sql.DB, cache *redis.Client, user *User, model string) (canEnable bool, usePlan bool) {
 	// use subscription quota first
 	if user != nil && HandleSubscriptionUsage(db, cache, user, model) {
 		return true, true
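RevertSubscriptionUsage is the heart of the fix: subscription quota is taken up-front when the request is admitted, so a failed request must hand the unit back or errors silently burn usage. A toy model of that take/revert lifecycle (types and names are illustrative):

```go
package main

import "fmt"

// quota is a minimal stand-in for the per-user subscription counter.
type quota struct{ used, limit int }

func (q *quota) take() bool {
	if q.used >= q.limit {
		return false
	}
	q.used++ // counted at admission time, before the upstream call
	return true
}

func (q *quota) revert() { q.used-- }

func main() {
	q := &quota{limit: 50}
	if q.take() {
		err := fmt.Errorf("upstream error") // the chat request fails
		if err != nil {
			q.revert() // the fix: failed requests no longer consume quota
		}
	}
	fmt.Println(q.used) // 0
}
```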

View File

@@ -34,3 +34,7 @@ func BuySubscription(db *sql.DB, user *User, month int) bool {
 func IncreaseSubscriptionUsage(cache *redis.Client, user *User, t string, limit int64) bool {
 	return utils.IncrWithLimit(cache, globals.GetSubscriptionLimitFormat(t, user.ID), 1, limit, 60*60*24) // 1 day
 }
+
+func DecreaseSubscriptionUsage(cache *redis.Client, user *User, t string) bool {
+	return utils.DecrInt(cache, globals.GetSubscriptionLimitFormat(t, user.ID), 1)
+}
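DecreaseSubscriptionUsage undoes one unit of what IncreaseSubscriptionUsage counted. Assuming utils.IncrWithLimit and utils.DecrInt wrap the usual Redis counter pattern, an illustrative reconstruction follows (go-redis v8 shown; the project's actual client, helpers, and key layout may differ):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

// incrWithLimit bumps the per-user counter, opens the 24h window on
// first use, and refuses (undoing the bump) once the limit is reached.
func incrWithLimit(ctx context.Context, rdb *redis.Client, key string, limit int64, ttl time.Duration) (bool, error) {
	n, err := rdb.IncrBy(ctx, key, 1).Result()
	if err != nil {
		return false, err
	}
	if n == 1 {
		rdb.Expire(ctx, key, ttl) // first hit starts the window
	}
	if n > limit {
		rdb.Decr(ctx, key) // over quota: roll the bump back
		return false, nil
	}
	return true, nil
}

// decrInt hands one unit back, e.g. after a failed request.
func decrInt(ctx context.Context, rdb *redis.Client, key string) error {
	return rdb.Decr(ctx, key).Err()
}

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	key := "nio:sub-usage:gpt4:42" // hypothetical key layout

	ok, err := incrWithLimit(ctx, rdb, key, 50, 24*time.Hour)
	fmt.Println(ok, err)
	_ = decrInt(ctx, rdb, key) // the request later failed: revert
}
```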

View File

@@ -4,7 +4,6 @@ import (
 	"chat/globals"
 	"chat/utils"
 	"database/sql"
-	"errors"
 	"fmt"
 	"github.com/dgrijalva/jwt-go"
 	"github.com/gin-gonic/gin"
@@ -219,11 +218,8 @@ func (u *User) CreateApiKey(db *sql.DB) string {
 func (u *User) GetApiKey(db *sql.DB) string {
 	var key string
 	if err := db.QueryRow("SELECT api_key FROM apikey WHERE user_id = ?", u.GetID(db)).Scan(&key); err != nil {
-		if errors.Is(err, sql.ErrNoRows) {
-			return u.CreateApiKey(db)
-		}
-		return ""
+		return u.CreateApiKey(db)
 	}
 	return key
 }
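GetApiKey now treats any scan failure as "no key yet" and lazily creates one, where the removed code created only on sql.ErrNoRows and returned an empty string for other errors. The pattern, extracted into a sketch with hypothetical read/create callbacks:

```go
package main

import (
	"database/sql"
	"fmt"
)

// getOrCreate mirrors the simplified control flow: any failure to read
// an existing key falls through to creating one.
func getOrCreate(read func() (string, error), create func() string) string {
	key, err := read()
	if err != nil {
		return create()
	}
	return key
}

func main() {
	read := func() (string, error) { return "", sql.ErrNoRows } // no row yet
	create := func() string { return "sk-new-123" }             // hypothetical new key
	fmt.Println(getOrCreate(read, create))                      // sk-new-123
}
```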

View File

@@ -54,8 +54,8 @@ const (
 	GPT432k0613 = "gpt-4-32k-0613"
 	Dalle2      = "dalle"
 	Dalle3      = "gpt-4-dalle"
-	Claude2     = "claude-1" // claude v1.3
-	Claude2100k = "claude-2"
+	Claude2     = "claude-2"
+	Claude2100k = "claude-2-100k"
 	ClaudeSlack = "claude-slack"
 	SparkDesk   = "spark-desk-v1.5"
 	SparkDeskV2 = "spark-desk-v2"

View File

@@ -106,6 +106,7 @@ func ChatHandler(conn *Connection, user *auth.User, instance *conversation.Conve
 	if err != nil && err.Error() != "signal" {
 		globals.Warn(fmt.Sprintf("caught error from chat handler: %s (instance: %s, client: %s)", err, model, conn.GetCtx().ClientIP()))
 
+		auth.RevertSubscriptionUsage(cache, user, model, plan)
 		CollectQuota(conn.GetCtx(), user, buffer, plan)
 		conn.Send(globals.ChatSegmentResponse{
 			Message: err.Error(),

View File

@@ -46,6 +46,7 @@ func NativeChatHandler(c *gin.Context, user *auth.User, model string, message []
 		buffer.Write(resp)
 		return nil
 	}); err != nil {
+		auth.RevertSubscriptionUsage(cache, user, model, plan)
 		CollectQuota(c, user, buffer, plan)
 		return keyword, err.Error(), GetErrorQuota(model)
 	}

View File

@@ -28,9 +28,7 @@ func (l *Limiter) RateLimit(ctx *gin.Context, rds *redis.Client, ip string, path
 var limits = map[string]Limiter{
 	"/login":     {Duration: 10, Count: 5},
-	"/anonymous": {Duration: 60, Count: 15},
-	"/card":      {Duration: 1, Count: 5},
-	"/user":      {Duration: 1, Count: 1},
+	"/apikey":    {Duration: 1, Count: 2},
 	"/package":   {Duration: 1, Count: 2},
 	"/quota":     {Duration: 1, Count: 2},
 	"/buy":       {Duration: 1, Count: 2},
@@ -41,6 +39,7 @@ var limits = map[string]Limiter{
 	"/invite":     {Duration: 7200, Count: 20},
 	"/v1":         {Duration: 1, Count: 600},
+	"/card":       {Duration: 1, Count: 5},
 	"/generation": {Duration: 1, Count: 5},
 	"/article":    {Duration: 1, Count: 5},
 }
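Each entry maps a route prefix to a per-IP window (Duration, in seconds) and a request cap (Count). A sketch of how such a path-keyed table might be consulted (illustrative values and matching; the project's actual lookup may differ):

```go
package main

import (
	"fmt"
	"strings"
)

type limiter struct {
	Duration int // window length in seconds
	Count    int // max requests per window per IP
}

// limits mirrors the shape of the table in the diff (values illustrative).
var limits = map[string]limiter{
	"/login":  {Duration: 10, Count: 5},
	"/apikey": {Duration: 1, Count: 2},
	"/v1":     {Duration: 1, Count: 600},
}

// match finds a limiter whose registered prefix matches the request path.
func match(path string) (limiter, bool) {
	for prefix, l := range limits {
		if strings.HasPrefix(path, prefix) {
			return l, true
		}
	}
	return limiter{}, false
}

func main() {
	if l, ok := match("/v1/chat/completions"); ok {
		fmt.Printf("allow %d requests per %ds\n", l.Count, l.Duration)
	}
}
```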

View File

@@ -88,7 +88,7 @@ func CountInputToken(model string, v []globals.Message) float32 {
 	case globals.Claude2:
 		return 0
 	case globals.Claude2100k:
-		return float32(CountTokenPrice(v, model)) / 1000 * 0.008
+		return float32(CountTokenPrice(v, model)) / 1000 * 0.05
 	case globals.ZhiPuChatGLMPro:
 		return float32(CountTokenPrice(v, model)) / 1000 * 0.1
 	case globals.ZhiPuChatGLMStd:
@@ -118,7 +118,7 @@ func CountOutputToken(model string, t int) float32 {
 	case globals.Claude2:
 		return 0
 	case globals.Claude2100k:
-		return float32(t*GetWeightByModel(model)) / 1000 * 0.008
+		return float32(t*GetWeightByModel(model)) / 1000 * 0.05
 	case globals.ZhiPuChatGLMPro:
 		return float32(t*GetWeightByModel(model)) / 1000 * 0.1
 	case globals.ZhiPuChatGLMStd:
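The claude-2-100k rate moves from 0.008 to 0.05 quota units per 1000 weighted tokens, a 6.25x increase. A worked instance of the formula in the diff (cost = weightedTokens / 1000 * unitPrice), with an assumed token count for illustration:

```go
package main

import "fmt"

func main() {
	weightedTokens := float32(4000) // assumed request size
	oldPrice := weightedTokens / 1000 * 0.008 // before this commit
	newPrice := weightedTokens / 1000 * 0.05  // after this commit
	fmt.Printf("old: %.3f, new: %.3f quota units\n", oldPrice, newPrice)
	// old: 0.032, new: 0.200 (roughly 6x higher)
}
```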