diff --git a/app/client/platforms/siliconflow.ts b/app/client/platforms/siliconflow.ts
index d6d51fe93..1ad316a61 100644
--- a/app/client/platforms/siliconflow.ts
+++ b/app/client/platforms/siliconflow.ts
@@ -120,7 +120,7 @@ export class SiliconflowApi implements LLMApi {
       // console.log(chatPayload);
 
-      // make a fetch request
+      // Use extended timeout for thinking models as they typically require more processing time
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
         REQUEST_TIMEOUT_MS_FOR_THINKING,
@@ -174,8 +174,8 @@ export class SiliconflowApi implements LLMApi {
           // Skip if both content and reasoning_content are empty or null
           if (
-            (!reasoning || reasoning.trim().length === 0) &&
-            (!content || content.trim().length === 0)
+            (!reasoning || reasoning.length === 0) &&
+            (!content || content.length === 0)
           ) {
             return {
               isThinking: false,
             };
           }
 
-          if (reasoning && reasoning.trim().length > 0) {
+          if (reasoning && reasoning.length > 0) {
             return {
               isThinking: true,
               content: reasoning,
             };
-          } else if (content && content.trim().length > 0) {
+          } else if (content && content.length > 0) {
             return {
               isThinking: false,
               content: content,
diff --git a/app/utils/chat.ts b/app/utils/chat.ts
index c04d33cbf..b77955e6e 100644
--- a/app/utils/chat.ts
+++ b/app/utils/chat.ts
@@ -576,7 +576,7 @@ export function streamWithThink(
         try {
           const chunk = parseSSE(text, runTools);
           // Skip if content is empty
-          if (!chunk?.content || chunk.content.trim().length === 0) {
+          if (!chunk?.content || chunk.content.length === 0) {
             return;
           }
           // Check if thinking mode changed
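The common thread in both files is dropping `.trim()` from the emptiness checks, so a streamed delta that contains only whitespace (for example a bare newline) is no longer treated as empty and discarded. Below is a minimal sketch of that behavioral difference; the helper names are illustrative only and do not come from the codebase.

```ts
// Sketch only: contrasts the old trim-based check with the new length-based
// check used in the diff. Not part of the PR itself.

// Old-style check: a whitespace-only delta such as "\n" counts as empty,
// so the chunk is skipped and newlines can be lost from the streamed text.
function isEmptyWithTrim(delta: string | null | undefined): boolean {
  return !delta || delta.trim().length === 0;
}

// New-style check (as in the diff): only null/undefined or truly empty
// strings are skipped, so whitespace-only chunks pass through.
function isEmpty(delta: string | null | undefined): boolean {
  return !delta || delta.length === 0;
}

console.log(isEmptyWithTrim("\n")); // true  -> chunk would be dropped
console.log(isEmpty("\n"));         // false -> chunk is kept
console.log(isEmpty(""));           // true  -> still skipped
console.log(isEmpty(null));         // true  -> still skipped
```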