Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git, synced 2025-05-20 04:30:17 +09:00
fix doubao and grok not upload image

parent add9ca200c
commit f30c6a4348
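Background for the diff below: both the Doubao and the XAI (Grok) clients built their message list with getMessageTextContent, which returns only the textual part of a message, so any attached image was dropped before the request body was assembled. The fix switches to preProcessImageContent from @/app/utils/chat, which keeps multimodal content. A minimal sketch of why text-only extraction loses the image, assuming the OpenAI-style content shape (the Part type and the sample data are illustrative, not taken from this commit):

// Sketch only: assumed OpenAI-style multimodal content shape.
type Part =
  | { type: "text"; text: string }
  | { type: "image_url"; image_url: { url: string } };

const content: Part[] = [
  { type: "text", text: "Describe this image" },
  { type: "image_url", image_url: { url: "data:image/png;base64,AAAA" } },
];

// Text-only extraction keeps just the text parts, so the image never
// reaches the provider; preserving the full part list is what the fix does.
const textOnly = content
  .filter((p): p is Extract<Part, { type: "text" }> => p.type === "text")
  .map((p) => p.text)
  .join("\n");

console.log(textOnly);       // "Describe this image"
console.log(content.length); // 2 (text part plus image part)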
@@ -22,7 +22,7 @@ import {
 } from "@fortaine/fetch-event-source";
 import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { fetch } from "@/app/utils/stream";
 
 export interface OpenAIListModelResponse {
@@ -84,10 +84,11 @@ export class DoubaoApi implements LLMApi {
   }
 
   async chat(options: ChatOptions) {
-    const messages = options.messages.map((v) => ({
-      role: v.role,
-      content: getMessageTextContent(v),
-    }));
+    const messages: ChatOptions["messages"] = [];
+    for (const v of options.messages) {
+      const content = await preProcessImageContent(v.content);
+      messages.push({ role: v.role, content });
+    }
 
     const modelConfig = {
       ...useAppConfig.getState().modelConfig,
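A note on the shape of the DoubaoApi change above: the old builder was a synchronous options.messages.map(...), but preProcessImageContent is awaited, so the new code collects messages with a for...of loop instead (an async callback inside .map would produce an array of pending Promises unless wrapped in Promise.all). A standalone sketch of the same pattern, with preProcess standing in for the real helper:

// Sketch only: awaited per-message preprocessing; preProcess is a
// hypothetical stand-in for preProcessImageContent.
type Msg = { role: "system" | "user" | "assistant"; content: unknown };

async function buildMessages(
  input: Msg[],
  preProcess: (content: unknown) => Promise<unknown>,
): Promise<Msg[]> {
  const messages: Msg[] = [];
  for (const v of input) {
    // Each message's content is awaited before being pushed, mirroring
    // the loop introduced in the diff above.
    const content = await preProcess(v.content);
    messages.push({ role: v.role, content });
  }
  return messages;
}

// Usage: an identity preprocessor keeps the content untouched.
buildMessages([{ role: "user", content: "hello" }], async (c) => c).then((m) =>
  console.log(m),
);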
@@ -17,7 +17,7 @@ import {
   SpeechOptions,
 } from "../api";
 import { getClientConfig } from "@/app/config/client";
-import { getMessageTextContent } from "@/app/utils";
+import { preProcessImageContent } from "@/app/utils/chat";
 import { RequestPayload } from "./openai";
 import { fetch } from "@/app/utils/stream";
 
@@ -62,7 +62,7 @@ export class XAIApi implements LLMApi {
   async chat(options: ChatOptions) {
     const messages: ChatOptions["messages"] = [];
     for (const v of options.messages) {
-      const content = getMessageTextContent(v);
+      const content = await preProcessImageContent(v.content);
       messages.push({ role: v.role, content });
     }
 
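The XAIApi hunk above is the same one-line swap on the Grok client: the already-awaited loop now calls preProcessImageContent instead of getMessageTextContent. The helper's exact behaviour is not shown in this diff; a hedged sketch of what such preprocessing typically does (resolving image references into self-contained data URLs) is below, where toDataUrl and the pass-through rules are assumptions:

// Sketch only: assumed preprocessing contract: plain strings pass through,
// image parts are resolved to data URLs so the provider gets embedded data.
type Part =
  | { type: "text"; text: string }
  | { type: "image_url"; image_url: { url: string } };

async function preProcessSketch(
  content: string | Part[],
): Promise<string | Part[]> {
  if (typeof content === "string") return content;
  return Promise.all(
    content.map(async (part) =>
      part.type === "image_url"
        ? { ...part, image_url: { url: await toDataUrl(part.image_url.url) } }
        : part,
    ),
  );
}

// Hypothetical helper: fetch the image and re-encode it as a base64 data URL.
async function toDataUrl(url: string): Promise<string> {
  const blob = await (await fetch(url)).blob();
  return new Promise((resolve) => {
    const reader = new FileReader();
    reader.onloadend = () => resolve(reader.result as string);
    reader.readAsDataURL(blob);
  });
}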