mirror of https://github.com/coaidev/coai.git (synced 2025-05-19 13:00:14 +09:00)

commit a1f48451e7 (parent 261e500840)
feat: add azure openai format
@@ -69,7 +69,7 @@
 - [x] Anthropic Claude (claude-2, claude-instant)
 - [x] Slack Claude (deprecated)
 - [x] Sparkdesk (v1.5, v2, v3)
-- [x] Google PaLM2
+- [x] Google Gemini (PaLM2)
 - [x] New Bing (creative, balanced, precise)
 - [x] ChatGLM (turbo, pro, std, lite)
 - [x] DashScope Tongyi (plus, turbo)
@@ -1,6 +1,7 @@
 package adapter
 
 import (
+	"chat/adapter/azure"
 	"chat/adapter/baichuan"
 	"chat/adapter/bing"
 	"chat/adapter/chatgpt"
@@ -67,6 +68,24 @@ func createChatRequest(conf globals.ChannelConfig, props *ChatProps, hook global
 			Buffer: props.Buffer,
 		}, hook)
 
+	case globals.AzureOpenAIChannelType:
+		return azure.NewChatInstanceFromConfig(conf).CreateStreamChatRequest(&azure.ChatProps{
+			Model:   model,
+			Message: props.Message,
+			Token: utils.Multi(
+				props.Token == 0,
+				utils.Multi(props.Infinity || props.Plan, nil, utils.ToPtr(2500)),
+				&props.Token,
+			),
+			PresencePenalty:  props.PresencePenalty,
+			FrequencyPenalty: props.FrequencyPenalty,
+			Temperature:      props.Temperature,
+			TopP:             props.TopP,
+			Tools:            props.Tools,
+			ToolChoice:       props.ToolChoice,
+			Buffer:           props.Buffer,
+		}, hook)
+
 	case globals.ClaudeChannelType:
 		return claude.NewChatInstanceFromConfig(conf).CreateStreamChatRequest(&claude.ChatProps{
 			Model: model,
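Note on the Token field above: the nested utils.Multi calls choose the max_tokens value. If props.Token is 0 (unset), infinity/plan users get nil (no cap) and everyone else gets a pointer to 2500; otherwise the user-supplied value is passed through. For readers unfamiliar with these helpers, a minimal sketch of what generics like utils.Multi and utils.ToPtr presumably look like (the actual chat/utils implementations may differ):

	// Multi is a generic ternary: it returns tv when the condition holds, otherwise fv.
	func Multi[T any](condition bool, tv T, fv T) T {
		if condition {
			return tv
		}
		return fv
	}

	// ToPtr returns a pointer to the given value, handy for optional request fields.
	func ToPtr[T any](value T) *T {
		return &value
	}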
|
147
adapter/azure/chat.go
Normal file
147
adapter/azure/chat.go
Normal file
@ -0,0 +1,147 @@
package azure

import (
	"chat/globals"
	"chat/utils"
	"fmt"
	"strings"
)

type ChatProps struct {
	Model            string
	Message          []globals.Message
	Token            *int
	PresencePenalty  *float32
	FrequencyPenalty *float32
	Temperature      *float32
	TopP             *float32
	Tools            *globals.FunctionTools
	ToolChoice       *interface{}
	Buffer           utils.Buffer
}

func (c *ChatInstance) GetChatEndpoint(props *ChatProps) string {
	model := strings.ReplaceAll(props.Model, ".", "")
	if props.Model == globals.GPT3TurboInstruct {
		return fmt.Sprintf("%s/openai/deployments/%s/completions?api-version=%s", c.GetResource(), model, c.GetEndpoint())
	}
	return fmt.Sprintf("%s/openai/deployments/%s/chat/completions?api-version=%s", c.GetResource(), model, c.GetEndpoint())
}

func (c *ChatInstance) GetCompletionPrompt(messages []globals.Message) string {
	result := ""
	for _, message := range messages {
		result += fmt.Sprintf("%s: %s\n", message.Role, message.Content)
	}
	return result
}

func (c *ChatInstance) GetLatestPrompt(props *ChatProps) string {
	if len(props.Message) == 0 {
		return ""
	}

	return props.Message[len(props.Message)-1].Content
}

func (c *ChatInstance) GetChatBody(props *ChatProps, stream bool) interface{} {
	if props.Model == globals.GPT3TurboInstruct {
		// for completions
		return CompletionRequest{
			Prompt:   c.GetCompletionPrompt(props.Message),
			MaxToken: props.Token,
			Stream:   stream,
		}
	}

	return ChatRequest{
		Messages:         formatMessages(props),
		MaxToken:         props.Token,
		Stream:           stream,
		PresencePenalty:  props.PresencePenalty,
		FrequencyPenalty: props.FrequencyPenalty,
		Temperature:      props.Temperature,
		TopP:             props.TopP,
		Tools:            props.Tools,
		ToolChoice:       props.ToolChoice,
	}
}

// CreateChatRequest sends a non-stream chat request (for dalle models it generates an image instead) and returns the response content
func (c *ChatInstance) CreateChatRequest(props *ChatProps) (string, error) {
	if globals.IsDalleModel(props.Model) {
		return c.CreateImage(props)
	}

	res, err := utils.Post(
		c.GetChatEndpoint(props),
		c.GetHeader(),
		c.GetChatBody(props, false),
	)

	if err != nil || res == nil {
		return "", fmt.Errorf("chatgpt error: %s", err.Error())
	}

	data := utils.MapToStruct[ChatResponse](res)
	if data == nil {
		return "", fmt.Errorf("chatgpt error: cannot parse response")
	} else if data.Error.Message != "" {
		return "", fmt.Errorf("chatgpt error: %s", data.Error.Message)
	}
	return data.Choices[0].Message.Content, nil
}

// CreateStreamChatRequest sends a streaming chat request and emits each parsed chunk through the callback hook
func (c *ChatInstance) CreateStreamChatRequest(props *ChatProps, callback globals.Hook) error {
	if globals.IsDalleModel(props.Model) {
		if url, err := c.CreateImage(props); err != nil {
			return err
		} else {
			return callback(url)
		}
	}

	buf := ""
	cursor := 0
	chunk := ""
	instruct := props.Model == globals.GPT3TurboInstruct

	err := utils.EventSource(
		"POST",
		c.GetChatEndpoint(props),
		c.GetHeader(),
		c.GetChatBody(props, true),
		func(data string) error {
			data, err := c.ProcessLine(props.Buffer, instruct, buf, data)
			chunk += data

			if err != nil {
				if strings.HasPrefix(err.Error(), "chatgpt error") {
					return err
				}

				// parse failed, likely an SSE line split mid-JSON: buffer it and retry with the next chunk
				buf = buf + data
				return nil
			}

			buf = ""
			if data != "" {
				cursor += 1
				if err := callback(data); err != nil {
					return err
				}
			}
			return nil
		},
	)

	if err != nil {
		return err
	} else if len(chunk) == 0 {
		return fmt.Errorf("empty response")
	}

	return nil
}
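A note on GetChatEndpoint above: despite the getter names, GetResource() returns the Azure resource endpoint while GetEndpoint() returns the API version (see adapter/azure/struct.go below), and dots are stripped from the model name because Azure deployment names do not allow them. A worked example with hypothetical values:

	// hypothetical resource and key, for illustration only
	c := azure.NewChatInstance("2023-12-01-preview", "sk-xxx", "https://my-resource.openai.azure.com")

	// "gpt-3.5-turbo" maps to deployment "gpt-35-turbo", yielding:
	// https://my-resource.openai.azure.com/openai/deployments/gpt-35-turbo/chat/completions?api-version=2023-12-01-preview
	url := c.GetChatEndpoint(&azure.ChatProps{Model: "gpt-3.5-turbo"})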
adapter/azure/image.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package azure

import (
	"chat/globals"
	"chat/utils"
	"fmt"
	"strings"
)

type ImageProps struct {
	Model  string
	Prompt string
	Size   ImageSize
}

func (c *ChatInstance) GetImageEndpoint(model string) string {
	model = strings.ReplaceAll(model, ".", "")
	return fmt.Sprintf("%s/openai/deployments/%s/images/generations?api-version=%s", c.GetResource(), model, c.GetEndpoint())
}

// CreateImageRequest creates a dalle image from the prompt and returns the image url, or an error
func (c *ChatInstance) CreateImageRequest(props ImageProps) (string, error) {
	res, err := utils.Post(
		c.GetImageEndpoint(props.Model),
		c.GetHeader(), ImageRequest{
			Prompt: props.Prompt,
			Size: utils.Multi[ImageSize](
				props.Model == globals.Dalle3,
				ImageSize1024,
				ImageSize512,
			),
			N: 1,
		})
	if err != nil || res == nil {
		return "", fmt.Errorf("chatgpt error: %s", err.Error())
	}

	data := utils.MapToStruct[ImageResponse](res)
	if data == nil {
		return "", fmt.Errorf("chatgpt error: cannot parse response")
	} else if data.Error.Message != "" {
		return "", fmt.Errorf("chatgpt error: %s", data.Error.Message)
	}

	return data.Data[0].Url, nil
}

// CreateImage creates a dalle image from the latest prompt and returns it as markdown
func (c *ChatInstance) CreateImage(props *ChatProps) (string, error) {
	url, err := c.CreateImageRequest(ImageProps{
		Model:  props.Model,
		Prompt: c.GetLatestPrompt(props),
	})
	if err != nil {
		if strings.Contains(err.Error(), "safety") {
			return err.Error(), nil
		}
		return "", err
	}

	return utils.GetImageMarkdown(url), nil
}
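Note that content-safety rejections are deliberately returned as the chat reply (err.Error(), nil) rather than failing the request, and the resulting URL is wrapped as markdown for the client. A plausible sketch of a helper like utils.GetImageMarkdown (the actual chat/utils implementation may differ):

	// GetImageMarkdown wraps an image URL in markdown image syntax (sketch only).
	func GetImageMarkdown(url string) string {
		return fmt.Sprintf("![image](%s)", url)
	}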
adapter/azure/processor.go (new file, 216 lines)
@@ -0,0 +1,216 @@
package azure

import (
	"chat/globals"
	"chat/utils"
	"errors"
	"fmt"
	"regexp"
	"strings"
)

func processFormat(data string) string {
	rep := strings.NewReplacer(
		"data: {",
		"\"data\": {",
	)
	item := rep.Replace(data)
	if !strings.HasPrefix(item, "{") {
		item = "{" + item
	}
	if !strings.HasSuffix(item, "}}") {
		item = item + "}"
	}

	return item
}

func formatMessages(props *ChatProps) interface{} {
	if props.Model == globals.GPT4Vision {
		base := props.Message[len(props.Message)-1].Content
		urls := utils.ExtractImageUrls(base)

		if len(urls) > 0 {
			base = fmt.Sprintf("%s %s", strings.Join(urls, " "), base)
		}
		props.Message[len(props.Message)-1].Content = base
		return props.Message
	} else if props.Model == globals.GPT41106VisionPreview {
		return utils.Each[globals.Message, Message](props.Message, func(message globals.Message) Message {
			if message.Role == globals.User {
				urls := utils.ExtractImageUrls(message.Content)
				images := utils.EachNotNil[string, MessageContent](urls, func(url string) *MessageContent {
					obj, err := utils.NewImage(url)
					if err != nil {
						return nil
					}

					props.Buffer.AddImage(obj)

					return &MessageContent{
						Type: "image_url",
						ImageUrl: &ImageUrl{
							Url: url,
						},
					}
				})

				return Message{
					Role: message.Role,
					Content: utils.Prepend(images, MessageContent{
						Type: "text",
						Text: &message.Content,
					}),
					ToolCalls:  message.ToolCalls,
					ToolCallId: message.ToolCallId,
				}
			}

			return Message{
				Role: message.Role,
				Content: MessageContents{
					MessageContent{
						Type: "text",
						Text: &message.Content,
					},
				},
				ToolCalls:  message.ToolCalls,
				ToolCallId: message.ToolCallId,
			}
		})
	}

	return props.Message
}

func processChatResponse(data string) *ChatStreamResponse {
	if strings.HasPrefix(data, "{") {
		var form *ChatStreamResponse
		if form = utils.UnmarshalForm[ChatStreamResponse](data); form != nil {
			return form
		}

		if form = utils.UnmarshalForm[ChatStreamResponse](data[:len(data)-1]); form != nil {
			return form
		}
	}

	return nil
}

func processCompletionResponse(data string) *CompletionResponse {
	if strings.HasPrefix(data, "{") {
		var form *CompletionResponse
		if form = utils.UnmarshalForm[CompletionResponse](data); form != nil {
			return form
		}

		if form = utils.UnmarshalForm[CompletionResponse](data[:len(data)-1]); form != nil {
			return form
		}
	}

	return nil
}

func processChatErrorResponse(data string) *ChatStreamErrorResponse {
	if strings.HasPrefix(data, "{") {
		var form *ChatStreamErrorResponse
		if form = utils.UnmarshalForm[ChatStreamErrorResponse](data); form != nil {
			return form
		}
		if form = utils.UnmarshalForm[ChatStreamErrorResponse](data + "}"); form != nil {
			return form
		}
	}

	return nil
}

func isDone(data string) bool {
	return utils.Contains[string](data, []string{
		"{data: [DONE]}", "{data: [DONE]}}",
		"{[DONE]}", "{data:}", "{data:}}",
	})
}

func getChoices(form *ChatStreamResponse) string {
	if len(form.Data.Choices) == 0 {
		return ""
	}

	return form.Data.Choices[0].Delta.Content
}

func getCompletionChoices(form *CompletionResponse) string {
	if len(form.Data.Choices) == 0 {
		return ""
	}

	return form.Data.Choices[0].Text
}

func getToolCalls(form *ChatStreamResponse) *globals.ToolCalls {
	if len(form.Data.Choices) == 0 {
		return nil
	}

	return form.Data.Choices[0].Delta.ToolCalls
}

func getRobustnessResult(chunk string) string {
	exp := `\"content\":\"(.*?)\"`
	compile, err := regexp.Compile(exp)
	if err != nil {
		return ""
	}

	matches := compile.FindStringSubmatch(chunk)
	if len(matches) > 1 {
		partial := matches[1]
		// if a unicode escape appears in the string, like `hi\u2019s`, decode it to the literal character (`hi's`)
		if utils.ContainUnicode(partial) {
			partial = utils.DecodeUnicode(partial)
		}

		return partial
	} else {
		return ""
	}
}

func (c *ChatInstance) ProcessLine(obj utils.Buffer, instruct bool, buf, data string) (string, error) {
	item := processFormat(buf + data)
	if isDone(item) {
		return "", nil
	}

	if form := processChatResponse(item); form == nil {
		if instruct {
			// legacy support
			if completion := processCompletionResponse(item); completion != nil {
				return getCompletionChoices(completion), nil
			}
		}

		// recursive call
		if len(buf) > 0 {
			return c.ProcessLine(obj, instruct, "", buf+item)
		}

		if err := processChatErrorResponse(item); err == nil || err.Data.Error.Message == "" {
			if res := getRobustnessResult(item); res != "" {
				return res, nil
			}

			globals.Warn(fmt.Sprintf("chatgpt error: cannot parse response: %s", item))
			return data, errors.New("parser error: cannot parse response")
		} else {
			return "", fmt.Errorf("chatgpt error: %s (type: %s)", err.Data.Error.Message, err.Data.Error.Type)
		}

	} else {
		obj.SetToolCalls(getToolCalls(form))
		return getChoices(form), nil
	}
}
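Notes on the stream parsing above: Azure replies as server-sent events, and a single JSON payload can be split across network chunks, so ProcessLine is deliberately forgiving. processFormat rewrites an SSE line into a JSON object matching the ChatStreamResponse shape; a worked example with an invented payload:

	// input SSE line (illustrative):
	//   data: {"choices":[{"delta":{"content":"Hi"},"index":0}]}
	// after processFormat, wrapped so it unmarshals into ChatStreamResponse:
	//   {"data": {"choices":[{"delta":{"content":"Hi"},"index":0}]}}

When a line still fails to parse, the caller in chat.go keeps the fragment in buf and retries once the next chunk arrives (the "recursive call" branch re-parses buf plus the new data); getRobustnessResult is the last resort, regex-extracting a content field from otherwise unparsable text.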
adapter/azure/struct.go (new file, 52 lines)
@@ -0,0 +1,52 @@
package azure

import (
	"chat/globals"
)

type ChatInstance struct {
	Endpoint string
	ApiKey   string
	Resource string
}

type InstanceProps struct {
	Model string
	Plan  bool
}

func (c *ChatInstance) GetEndpoint() string {
	return c.Endpoint
}

func (c *ChatInstance) GetApiKey() string {
	return c.ApiKey
}

func (c *ChatInstance) GetResource() string {
	return c.Resource
}

func (c *ChatInstance) GetHeader() map[string]string {
	return map[string]string{
		"Content-Type": "application/json",
		"api-key":      c.GetApiKey(),
	}
}

func NewChatInstance(endpoint, apiKey string, resource string) *ChatInstance {
	return &ChatInstance{
		Endpoint: endpoint,
		ApiKey:   apiKey,
		Resource: resource,
	}
}

func NewChatInstanceFromConfig(conf globals.ChannelConfig) *ChatInstance {
	param := conf.SplitRandomSecret(2)
	return NewChatInstance(
		conf.GetEndpoint(),
		param[0],
		param[1],
	)
}
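Naming caveat: for this channel the admin panel stores the API version in the channel's endpoint field and packs the real Azure endpoint into the secret as <api-key>|<api-endpoint> (see the channel.ts change below). SplitRandomSecret(2) presumably splits that secret on the separator, so Endpoint ends up holding an API version such as 2023-12-01-preview while Resource holds the resource URL. A hedged example with invented values:

	// secret "sk-xxx|https://my-resource.openai.azure.com" split into two parts:
	c := NewChatInstance(
		"2023-12-01-preview",                   // conf.GetEndpoint(): the API version
		"sk-xxx",                               // param[0]: api-key
		"https://my-resource.openai.azure.com", // param[1]: Azure API endpoint
	)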
adapter/azure/types.go (new file, 124 lines)
@@ -0,0 +1,124 @@
package azure

import "chat/globals"

type ImageUrl struct {
	Url    string  `json:"url"`
	Detail *string `json:"detail,omitempty"`
}

type MessageContent struct {
	Type     string    `json:"type"`
	Text     *string   `json:"text,omitempty"`
	ImageUrl *ImageUrl `json:"image_url,omitempty"`
}

type MessageContents []MessageContent

type Message struct {
	Role       string             `json:"role"`
	Content    MessageContents    `json:"content"`
	ToolCallId *string            `json:"tool_call_id,omitempty"` // only `tool` role
	ToolCalls  *globals.ToolCalls `json:"tool_calls,omitempty"`   // only `assistant` role
}

// ChatRequest is the request body for chatgpt
type ChatRequest struct {
	Model            string                 `json:"model"`
	Messages         interface{}            `json:"messages"`
	MaxToken         *int                   `json:"max_tokens,omitempty"`
	Stream           bool                   `json:"stream"`
	PresencePenalty  *float32               `json:"presence_penalty,omitempty"`
	FrequencyPenalty *float32               `json:"frequency_penalty,omitempty"`
	Temperature      *float32               `json:"temperature,omitempty"`
	TopP             *float32               `json:"top_p,omitempty"`
	Tools            *globals.FunctionTools `json:"tools,omitempty"`
	ToolChoice       *interface{}           `json:"tool_choice,omitempty"` // string or object
}

// CompletionRequest is the request body for chatgpt completion
type CompletionRequest struct {
	Model    string `json:"model"`
	Prompt   string `json:"prompt"`
	MaxToken *int   `json:"max_tokens,omitempty"`
	Stream   bool   `json:"stream"`
}

// ChatResponse is the native (non-stream) response body for chatgpt
type ChatResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int64  `json:"created"`
	Model   string `json:"model"`
	Choices []struct {
		Index        int             `json:"index"`
		Message      globals.Message `json:"message"`
		FinishReason string          `json:"finish_reason"`
	} `json:"choices"`
	Error struct {
		Message string `json:"message"`
	} `json:"error"`
}

// ChatStreamResponse is the stream response body for chatgpt
type ChatStreamResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int64  `json:"created"`
	Model   string `json:"model"`
	Data    struct {
		Choices []struct {
			Delta        globals.Message `json:"delta"`
			Index        int             `json:"index"`
			FinishReason string          `json:"finish_reason"`
		} `json:"choices"`
	} `json:"data"`
}

// CompletionResponse is the native / stream response body for chatgpt completion
type CompletionResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Created int64  `json:"created"`
	Model   string `json:"model"`
	Data    struct {
		Choices []struct {
			Text  string `json:"text"`
			Index int    `json:"index"`
		} `json:"choices"`
	} `json:"data"`
}

type ChatStreamErrorResponse struct {
	Data struct {
		Error struct {
			Message string `json:"message"`
			Type    string `json:"type"`
		} `json:"error"`
	} `json:"data"`
}

type ImageSize string

// ImageRequest is the request body for chatgpt dalle image generation
type ImageRequest struct {
	Model  string    `json:"model"`
	Prompt string    `json:"prompt"`
	Size   ImageSize `json:"size"`
	N      int       `json:"n"`
}

type ImageResponse struct {
	Data []struct {
		Url string `json:"url"`
	} `json:"data"`
	Error struct {
		Message string `json:"message"`
	} `json:"error"`
}

var (
	ImageSize256  ImageSize = "256x256"
	ImageSize512  ImageSize = "512x512"
	ImageSize1024 ImageSize = "1024x1024"
)
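The Data wrapper on the stream types above exists because processFormat (processor.go) rewrites each SSE "data: {...}" line into {"data": {...}} before unmarshalling, so the upstream choices array lands under the data key:

	{"data": {"choices": [{"delta": {"content": "Hi"}, "index": 0}]}}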
@@ -87,7 +87,11 @@ func (c *ChatInstance) GetGeminiContents(model string, message []globals.Message
 
 		if len(result) == 0 && getGeminiRole(item.Role) == GeminiModelType {
 			// gemini model: first message must be user
-			continue
+			result = append(result, GeminiContent{
+				Role:  GeminiUserType,
+				Parts: getGeminiContent(make([]GeminiChatPart, 0), "", model),
+			})
 		}
 
 		if len(result) > 0 && role == result[len(result)-1].Role {
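This change affects histories that begin with a model-role message: instead of dropping such messages with continue, the adapter now prepends an empty user turn, since Gemini requires the first content entry to come from the user. Roughly (assuming the loop appends the current item afterwards):

	// before: [model: "hi", user: "q"] -> [user: "q"]
	// after:  [model: "hi", user: "q"] -> [user: "", model: "hi", user: "q"]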
@@ -14,7 +14,6 @@ export type Channel = {
 };
 
 export type ChannelInfo = {
-  id: number;
   description?: string;
   endpoint: string;
   format: string;
@@ -23,6 +22,7 @@ export type ChannelInfo = {
 
 export const ChannelTypes: Record<string, string> = {
   openai: "OpenAI",
+  azure: "Azure OpenAI",
   claude: "Claude",
   slack: "Slack",
   sparkdesk: "讯飞星火",
@@ -40,7 +40,6 @@ export const ChannelTypes: Record<string, string> = {
 
 export const ChannelInfos: Record<string, ChannelInfo> = {
   openai: {
-    id: 0,
     endpoint: "https://api.openai.com",
     format: "<api-key>",
     models: [
@@ -64,26 +63,50 @@ export const ChannelInfos: Record<string, ChannelInfo> = {
       "dall-e-3",
     ],
   },
+  azure: {
+    endpoint: "2023-12-01-preview",
+    format: "<api-key>|<api-endpoint>",
+    description:
+      "> Azure 密钥 API Key 1 和 API Key 2 任填一个即可,密钥格式为 **<api-key>|<api-endpoint>**, api-endpoint 为 Azure 的 **API 端点**。\n" +
+      "> 接入点填 **API Version**,如 2023-12-01-preview。\n" +
+      "Azure 模型名称忽略点号等问题内部已经进行适配,无需额外任何设置。",
+    models: [
+      "gpt-3.5-turbo",
+      "gpt-3.5-turbo-instruct",
+      "gpt-3.5-turbo-0613",
+      "gpt-3.5-turbo-0301",
+      "gpt-3.5-turbo-1106",
+      "gpt-3.5-turbo-16k",
+      "gpt-3.5-turbo-16k-0613",
+      "gpt-3.5-turbo-16k-0301",
+      "gpt-4",
+      "gpt-4-0314",
+      "gpt-4-0613",
+      "gpt-4-1106-preview",
+      "gpt-4-vision-preview",
+      "gpt-4-32k",
+      "gpt-4-32k-0314",
+      "gpt-4-32k-0613",
+      "dall-e-2",
+      "dall-e-3",
+    ],
+  },
   claude: {
-    id: 1,
     endpoint: "https://api.anthropic.com",
     format: "<x-api-key>",
     models: ["claude-instant-1", "claude-2", "claude-2.1"],
   },
   slack: {
-    id: 2,
     endpoint: "your-channel",
     format: "<bot-id>|<xoxp-token>",
     models: ["claude-slack"],
   },
   sparkdesk: {
-    id: 3,
     endpoint: "wss://spark-api.xf-yun.com",
     format: "<app-id>|<api-secret>|<api-key>",
     models: ["spark-desk-v1.5", "spark-desk-v2", "spark-desk-v3"],
   },
   chatglm: {
-    id: 4,
     endpoint: "https://open.bigmodel.cn",
     format: "<api-key>",
     models: [
@@ -94,32 +117,27 @@ export const ChannelInfos: Record<string, ChannelInfo> = {
     ],
   },
   qwen: {
-    id: 5,
     endpoint: "https://dashscope.aliyuncs.com",
     format: "<api-key>",
     models: ["qwen-turbo", "qwen-plus", "qwen-turbo-net", "qwen-plus-net"],
   },
   hunyuan: {
-    id: 6,
     endpoint: "https://hunyuan.cloud.tencent.com",
     format: "<app-id>|<secret-id>|<secret-key>",
     models: ["hunyuan"],
     // endpoint
   },
   zhinao: {
-    id: 7,
     endpoint: "https://api.360.cn",
     format: "<api-key>",
     models: ["360-gpt-v9"],
   },
   baichuan: {
-    id: 8,
     endpoint: "https://api.baichuan-ai.com",
     format: "<api-key>",
     models: ["baichuan-53b"],
   },
   skylark: {
-    id: 9,
     endpoint: "https://maas-api.ml-platform-cn-beijing.volces.com",
     format: "<access-key>|<secret-key>",
     models: [
@@ -130,7 +148,6 @@ export const ChannelInfos: Record<string, ChannelInfo> = {
     ],
   },
   bing: {
-    id: 10,
     endpoint: "wss://your.bing.service",
     format: "<secret>",
     models: ["bing-creative", "bing-balanced", "bing-precise"],
@@ -138,13 +155,11 @@ export const ChannelInfos: Record<string, ChannelInfo> = {
       "> Bing 服务需要自行搭建,详情请参考 [chatnio-bing-service](https://github.com/Deeptrain-Community/chatnio-bing-service) (如为 bing2api 可直接使用 OpenAI 格式映射)",
   },
   palm: {
-    id: 11,
     endpoint: "https://generativelanguage.googleapis.com",
     format: "<api-key>",
     models: ["chat-bison-001", "gemini-pro", "gemini-pro-vision"],
   },
   midjourney: {
-    id: 12,
     endpoint: "https://your.midjourney.proxy",
     format: "<mj-api-secret>|<white-list>",
     models: ["midjourney", "midjourney-fast", "midjourney-turbo"],
@@ -154,7 +169,6 @@ export const ChannelInfos: Record<string, ChannelInfo> = {
       "> 注意:**请在系统设置中设置后端的公网 IP / 域名,否则无法接收回调**",
   },
   oneapi: {
-    id: 13,
     endpoint: "https://openai.justsong.cn/api",
     format: "<api-key>",
     models: [],
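For non-Chinese readers, the azure description added above says roughly: fill in either Azure API Key 1 or API Key 2; the secret format is <api-key>|<api-endpoint>, where api-endpoint is the Azure API endpoint; put the API Version (e.g. 2023-12-01-preview) in the endpoint field; model-name quirks such as the dot in gpt-3.5 are adapted internally, so no extra setup is needed. Note also that this file drops the per-entry id field from ChannelInfo throughout.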
@@ -179,13 +179,15 @@ func (c *Channel) ProcessError(err error) error {
 		return nil
 	}
 	content := err.Error()
 
+	fmt.Println(content)
 	if strings.Contains(content, c.GetEndpoint()) {
 		// hide the endpoint
 		replacer := fmt.Sprintf("channel://%d", c.GetId())
 		content = strings.Replace(content, c.GetEndpoint(), replacer, -1)
 	}
 
-	if domain := c.GetDomain(); strings.Contains(content, domain) {
+	if domain := c.GetDomain(); len(strings.TrimSpace(domain)) > 0 && strings.Contains(content, domain) {
 		content = strings.Replace(content, domain, "channel", -1)
 	}
 
@@ -8,20 +8,21 @@ const (
 )
 
 const (
-	OpenAIChannelType     = "openai"
-	ClaudeChannelType     = "claude"
-	SlackChannelType      = "slack"
-	SparkdeskChannelType  = "sparkdesk"
-	ChatGLMChannelType    = "chatglm"
-	HunyuanChannelType    = "hunyuan"
-	QwenChannelType       = "qwen"
-	ZhinaoChannelType     = "zhinao"
-	BaichuanChannelType   = "baichuan"
-	SkylarkChannelType    = "skylark"
-	BingChannelType       = "bing"
-	PalmChannelType       = "palm"
-	MidjourneyChannelType = "midjourney"
-	OneAPIChannelType     = "oneapi"
+	OpenAIChannelType      = "openai"
+	AzureOpenAIChannelType = "azure"
+	ClaudeChannelType      = "claude"
+	SlackChannelType       = "slack"
+	SparkdeskChannelType   = "sparkdesk"
+	ChatGLMChannelType     = "chatglm"
+	HunyuanChannelType     = "hunyuan"
+	QwenChannelType        = "qwen"
+	ZhinaoChannelType      = "zhinao"
+	BaichuanChannelType    = "baichuan"
+	SkylarkChannelType     = "skylark"
+	BingChannelType        = "bing"
+	PalmChannelType        = "palm"
+	MidjourneyChannelType  = "midjourney"
+	OneAPIChannelType      = "oneapi"
 )
 
 const (