Mirror of https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web.git
Merge branch 'ChatGPTNextWeb:main' into main

commit 0e09697274
@@ -65,6 +65,7 @@ export interface RequestPayload {
   frequency_penalty: number;
   top_p: number;
   max_tokens?: number;
+  max_completion_tokens?: number;
 }
 
 export interface DalleRequestPayload {
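Per the OpenAI reasoning guide linked in the hunk below, o1-class models budget their output with max_completion_tokens rather than max_tokens, which is why the field is added here as optional. A minimal sketch of the two payload shapes this allows, with the non-token fields filled with assumed example values:

const standardPayload: Partial<RequestPayload> = {
  frequency_penalty: 0,
  top_p: 1,
  max_tokens: 4000, // non-reasoning models: output budget via max_tokens
};

const o1Payload: Partial<RequestPayload> = {
  frequency_penalty: 0,
  top_p: 1,
  max_completion_tokens: 4000, // o1 models: output budget via max_completion_tokens
};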
@@ -233,6 +234,11 @@ export class ChatGPTApi implements LLMApi {
       // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
     };
 
+    // O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
+    if (isO1) {
+      requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
+    }
+
     // add max_tokens to vision model
     if (visionModel) {
       requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
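Taken together, the two token-limit branches in this hunk amount to the logic below. This is a minimal sketch, not the actual method: applyTokenLimits is a hypothetical helper, and it assumes isO1, visionModel, and modelConfig mean the same thing they do in the surrounding ChatGPTApi.chat code, with RequestPayload as declared in the first hunk.

function applyTokenLimits(
  requestPayload: RequestPayload,
  modelConfig: { max_tokens: number },
  isO1: boolean,
  visionModel: boolean,
): RequestPayload {
  if (isO1) {
    // Reasoning (o1) models take max_completion_tokens instead of max_tokens.
    requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
  }
  if (visionModel) {
    // Vision models get an explicit max_tokens with a floor of 4000.
    requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
  }
  return requestPayload;
}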