From e4e63dc2373ba6cd6db6967f0660e324fee0a09c Mon Sep 17 00:00:00 2001
From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com>
Date: Thu, 5 Dec 2024 16:09:06 -0800
Subject: [PATCH 1/2] force pushing desktop updates; add log soon

---
 src/ts/process/memory/hypav2.ts | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts
index ff827ac5..7b96a5ce 100644
--- a/src/ts/process/memory/hypav2.ts
+++ b/src/ts/process/memory/hypav2.ts
@@ -141,7 +141,7 @@ async function summary(
   return { success: true, data: result };
 }
 
 // No, I am not going to touch any http API calls.
-function isSubset(subset: Set<string>, superset: Set<string>): boolean { // simple helper function. Check if subset IS a subset of superset given.
+function isSubset(subset: Set<string>, superset: Set<string>): boolean {
   for (const item of subset) {
     if (!superset.has(item)) {
       return false;
     }
@@ -154,20 +154,27 @@ function cleanInvalidChunks(
   chats: OpenAIChat[],
   data: HypaV2Data,
 ): void {
-  const currentChatMemos = new Set(chats.map((chat) => chat.memo)); // if chunk's memo set is not subset of this, the chunk's content -> delete
+  const currentChatMemos = new Set(chats.map((chat) => chat.memo));
 
   // mainChunks filtering
   data.mainChunks = data.mainChunks.filter((mainChunk) => {
     return isSubset(mainChunk.chatMemos, currentChatMemos);
   });
 
+  // chunk filtering based on mainChunk's id
   const validMainChunkIds = new Set(data.mainChunks.map((mainChunk) => mainChunk.id));
   data.chunks = data.chunks.filter((chunk) =>
     validMainChunkIds.has(chunk.mainChunkID)
   );
 
-  data.lastMainChunkId = data.mainChunks[-1].id; // Quite literally the definition of lastMainChunkId. Didn't use .length, since middle chat context can be partially deleted.
+  // Update lastMainChunkId
+  if (data.mainChunks.length > 0) {
+    data.lastMainChunkId = data.mainChunks[data.mainChunks.length - 1].id;
+  } else {
+    data.lastMainChunkId = 0;
+  }
 }
 
+
 export async function regenerateSummary(
   chats: OpenAIChat[],
   data: HypaV2Data,

From 56646809a1e262312ecfc0f3e90a947c8cbe4566 Mon Sep 17 00:00:00 2001
From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com>
Date: Fri, 6 Dec 2024 08:49:00 -0800
Subject: [PATCH 2/2] add logging

---
 src/ts/process/index.svelte.ts  | 6 +++---
 src/ts/process/memory/hypav2.ts | 8 ++++++--
 2 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/src/ts/process/index.svelte.ts b/src/ts/process/index.svelte.ts
index 4d22d63b..37863e8a 100644
--- a/src/ts/process/index.svelte.ts
+++ b/src/ts/process/index.svelte.ts
@@ -776,9 +776,9 @@ export async function sendChat(chatProcessIndex = -1,arg:{
         chats = hn.chats
         currentTokens = hn.tokens
     }
-    else if(DBState.db.hypav2){ //HypaV2 support needs to be changed like this.
+    else if(DBState.db.hypav2){
+        console.log("Current chat's HypaV2 data: ", currentChat.hypaV2Data)
         const sp = await hypaMemoryV2(chats, currentTokens, maxContextTokens, currentChat, nowChatroom, tokenizer)
-        console.log("All chats: ", chats)
         if(sp.error){
             console.log(sp)
             alertError(sp.error)
@@ -788,7 +788,7 @@
         currentTokens = sp.currentTokens
         currentChat.hypaV2Data = sp.memory ?? currentChat.hypaV2Data
         DBState.db.characters[selectedChar].chats[selectedChat].hypaV2Data = currentChat.hypaV2Data
-        console.log(currentChat.hypaV2Data)
+        console.log("Current chat's HypaV2 data: ", currentChat.hypaV2Data)
     }
     else{
         const sp = await supaMemory(chats, currentTokens, maxContextTokens, currentChat, nowChatroom, tokenizer, {

diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts
index 7b96a5ce..cb271a8a 100644
--- a/src/ts/process/memory/hypav2.ts
+++ b/src/ts/process/memory/hypav2.ts
@@ -226,11 +226,14 @@ export async function hypaMemoryV2(
   }
 
   // Starting chat index of new mainChunk to be generated
-  // Token management loop(where using of )
+  // Token management loop (runs while the current token count exceeds the allowed maximum)
   while (currentTokens >= maxContextTokens) {
+    console.log("The current token count exceeded maxContextTokens. Current tokens: ", currentTokens, "\nMax context tokens: ", maxContextTokens)
     const halfData: OpenAIChat[] = [];
     let halfDataTokens = 0;
 
+    const startIdx = idx;
+
     // Accumulate chats to summarize
     while (
       halfDataTokens < chunkSize &&
@@ -241,7 +244,8 @@ export async function hypaMemoryV2(
       halfDataTokens += await tokenizer.tokenizeChat(chat);
      halfData.push(chat);
     }
-
+    const endIdx = idx - 1;
+    console.log(`Summarizing chats from index ${startIdx} to ${endIdx}.`);
    if (halfData.length === 0) break;
 
     const stringlizedChat = halfData
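
A note on the lastMainChunkId change in PATCH 1/2: in JavaScript/TypeScript, data.mainChunks[-1] is not negative indexing; it looks up a property literally named "-1", which is undefined, so the subsequent .id access throws a TypeError. The guarded length check in the patch fixes that. Array.prototype.at (ES2022) is an equivalent, more compact alternative; here is a minimal sketch of it, where the helper name and the MainChunk interface (reduced to just the id field used here) are illustrative and not part of the patch:

    interface MainChunk {
      id: number;
    }

    // Returns the id of the last remaining main chunk, or 0 when none remain.
    // .at(-1) yields the last element, or undefined for an empty array, so the
    // ?? fallback mirrors the else-branch of the patched code.
    function lastMainChunkId(mainChunks: MainChunk[]): number {
      return mainChunks.at(-1)?.id ?? 0;
    }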