Merge pull request #3205 from H0llyW00dzZ/summarizelogic

Refactor Summarize Logic
This commit is contained in:
DeanYao 2024-03-28 15:19:32 +08:00 committed by GitHub
commit e38b527ac2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -557,6 +557,7 @@ export const useChatStore = createPersistStore(
messages: topicMessages, messages: topicMessages,
config: { config: {
model: getSummarizeModel(session.mask.modelConfig.model), model: getSummarizeModel(session.mask.modelConfig.model),
stream: false,
}, },
onFinish(message) { onFinish(message) {
get().updateCurrentSession( get().updateCurrentSession(
@ -600,6 +601,10 @@ export const useChatStore = createPersistStore(
historyMsgLength > modelConfig.compressMessageLengthThreshold && historyMsgLength > modelConfig.compressMessageLengthThreshold &&
modelConfig.sendMemory modelConfig.sendMemory
) { ) {
/** Destructure max_tokens out of the config while summarizing,
 * since this parameter is not needed for summary generation.
 **/
const { max_tokens, ...modelcfg } = modelConfig;
api.llm.chat({ api.llm.chat({
messages: toBeSummarizedMsgs.concat( messages: toBeSummarizedMsgs.concat(
createMessage({ createMessage({
@ -609,7 +614,7 @@ export const useChatStore = createPersistStore(
}), }),
), ),
config: { config: {
...modelConfig, ...modelcfg,
stream: true, stream: true,
model: getSummarizeModel(session.mask.modelConfig.model), model: getSummarizeModel(session.mask.modelConfig.model),
}, },