From dbd7485c50b68681641abf4aa1a3c6f65f8323fb Mon Sep 17 00:00:00 2001
From: Bo26fhmC5M <88071760+Bo26fhmC5M@users.noreply.github.com>
Date: Tue, 3 Dec 2024 22:12:53 +0900
Subject: [PATCH 1/2] Fix hypav2 issue where the 'search_document: ' string
 is not truncated correctly.

---
 src/ts/process/memory/hypav2.ts | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts
index ba68e160..8137fe1e 100644
--- a/src/ts/process/memory/hypav2.ts
+++ b/src/ts/process/memory/hypav2.ts
@@ -230,6 +230,7 @@ export async function hypaMemoryV2(
     }
 
     // Fetch additional memory from chunks
+    const searchDocumentPrefix = "search_document: ";
     const processor = new HypaProcesser(db.hypaModel);
     processor.oaikey = db.supaMemoryKey;
 
@@ -249,7 +250,7 @@
     console.log("Older Chunks:", olderChunks);
 
     // Add older chunks to processor for similarity search
-    await processor.addText(olderChunks.filter(v => v.text.trim().length > 0).map(v => "search_document: " + v.text.trim()));
+    await processor.addText(olderChunks.filter(v => v.text.trim().length > 0).map(v => searchDocumentPrefix + v.text.trim()));
 
     let scoredResults: { [key: string]: number } = {};
     for (let i = 0; i < 3; i++) {
@@ -267,9 +268,10 @@
     let chunkResultTokens = 0;
     while (allocatedTokens - mainPromptTokens - chunkResultTokens > 0 && scoredArray.length > 0) {
         const [text] = scoredArray.shift();
-        const tokenized = await tokenizer.tokenizeChat({ role: 'system', content: text.substring(14) });
+        const tokenized = await tokenizer.tokenizeChat({ role: 'system', content: text.substring(searchDocumentPrefix.length) });
         if (tokenized > allocatedTokens - mainPromptTokens - chunkResultTokens) break;
-        chunkResultPrompts += text.substring(14) + '\n\n';
+        // Ensure strings are truncated correctly using searchDocumentPrefix.length
+        chunkResultPrompts += text.substring(searchDocumentPrefix.length) + '\n\n';
         chunkResultTokens += tokenized;
     }
 

From ecc2817a7b874ab6d8b37ac9d1a4523b0f6a011b Mon Sep 17 00:00:00 2001
From: Bo26fhmC5M <88071760+Bo26fhmC5M@users.noreply.github.com>
Date: Wed, 4 Dec 2024 13:46:35 +0900
Subject: [PATCH 2/2] Add memo to detect supaMemory model requests in the
 plugin

---
 src/ts/process/memory/hypav2.ts | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts
index 8137fe1e..c186232a 100644
--- a/src/ts/process/memory/hypav2.ts
+++ b/src/ts/process/memory/hypav2.ts
@@ -87,7 +87,7 @@ async function summary(stringlizedChat: string): Promise<{ success: boolean; dat
 
     let parsedPrompt = parseChatML(supaPrompt.replaceAll('{{slot}}', stringlizedChat))
 
-    const promptbody: OpenAIChat[] = parsedPrompt ?? [
+    const promptbody: OpenAIChat[] = (parsedPrompt ?? [
         {
             role: "user",
             content: stringlizedChat
@@ -96,7 +96,10 @@
             role: "system",
             content: supaPrompt
         }
-    ];
+    ]).map(message => ({
+        ...message,
+        memo: "supaPrompt"
+    }));
     console.log("Using submodel: ", db.subModel, "for supaMemory model");
     const da = await requestChatData({
         formated: promptbody,