From a8d52decd0f9ee8a142d1f9b91a7db4af1d9efb8 Mon Sep 17 00:00:00 2001 From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com> Date: Wed, 25 Sep 2024 21:19:54 -0700 Subject: [PATCH 01/18] refactor: few hypa potential issues/bad prompt --- src/ts/process/memory/hypav2.ts | 2 +- src/ts/storage/database.ts | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts index c7295267..239332fd 100644 --- a/src/ts/process/memory/hypav2.ts +++ b/src/ts/process/memory/hypav2.ts @@ -36,7 +36,7 @@ async function summary(stringlizedChat: string): Promise<{ success: boolean; dat } const supaPrompt = db.supaMemoryPrompt === '' ? - "[Summarize the ongoing role story, It must also remove redundancy and unnecessary text and content from the output to reduce tokens for gpt3 and other sublanguage models]\n" + "[Summarize the ongoing role story, It must also remove redundancy and unnecessary text and content from the output.]\n" : db.supaMemoryPrompt; let result = ''; diff --git a/src/ts/storage/database.ts b/src/ts/storage/database.ts index 550f69ef..cce3ea63 100644 --- a/src/ts/storage/database.ts +++ b/src/ts/storage/database.ts @@ -230,6 +230,9 @@ export function setDatabase(data:Database){ if(checkNullish(data.supaMemoryKey)){ data.supaMemoryKey = "" } + if(checkNullish(data.hypaMemoryKey)){ + data.hypaMemoryKey = "" + } if(checkNullish(data.supaModelType)){ data.supaModelType = "none" } @@ -551,6 +554,7 @@ export interface Database{ useStreaming:boolean palmAPI:string, supaMemoryKey:string + hypaMemoryKey:string supaModelType:string textScreenColor?:string textBorder?:boolean From 60d4e338932f2a584adfdf8434232dfab54fb4fb Mon Sep 17 00:00:00 2001 From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com> Date: Sun, 1 Dec 2024 13:00:00 -0800 Subject: [PATCH 02/18] feat: add validation Also revoked potentially problematic feature(add hypav2data chunk) TODO: 1. On mid-context editing, currently that is not considered as deletion. Do have optional editedChatIndex to latter dive in more. 2. re-roll mainChunks(re-summarization) functionalities added, but not able to access it. --- src/lib/ChatScreens/Chat.svelte | 1 + src/lib/Others/AlertComp.svelte | 10 +- src/ts/process/memory/hypav2.ts | 342 ++++++++++++++++++++------------ src/ts/tokenizer.ts | 19 +- 4 files changed, 230 insertions(+), 142 deletions(-) diff --git a/src/lib/ChatScreens/Chat.svelte b/src/lib/ChatScreens/Chat.svelte index bb646480..95335e45 100644 --- a/src/lib/ChatScreens/Chat.svelte +++ b/src/lib/ChatScreens/Chat.svelte @@ -465,6 +465,7 @@ }}> + diff --git a/src/lib/Others/AlertComp.svelte b/src/lib/Others/AlertComp.svelte index 5584509d..b532562f 100644 --- a/src/lib/Others/AlertComp.svelte +++ b/src/lib/Others/AlertComp.svelte @@ -291,16 +291,10 @@ {/each} - + {:else} - {#each DBState.db.characters[$selectedCharID].chats[DBState.db.characters[$selectedCharID].chatPage].hypaV2Data.chunks as chunk, i} + {#each DBState.db.characters[$selectedCharID].chats[DBState.db.characters[$selectedCharID].chatPage].hypaV2Data.mainChunks as chunk, i} // Summarized -> mainChunks
{#if i === 0} Active diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts index 32041286..ee6a151b 100644 --- a/src/ts/process/memory/hypav2.ts +++ b/src/ts/process/memory/hypav2.ts @@ -1,4 +1,9 @@ -import { getDatabase, type Chat, type character, type groupChat } from "src/ts/storage/database.svelte"; +import { + getDatabase, + type Chat, + type character, + type groupChat, +} from "src/ts/storage/database.svelte"; import type { OpenAIChat } from "../index.svelte"; import type { ChatTokenizer } from "src/ts/tokenizer"; import { requestChatData } from "../request"; @@ -11,59 +16,67 @@ export interface HypaV2Data { chunks: { text: string; targetId: string; + chatRange: [number, number]; // Start and end indices of chats summarized }[]; mainChunks: { text: string; targetId: string; + chatRange: [number, number]; // Start and end indices of chats summarized }[]; } -async function summary(stringlizedChat: string): Promise<{ success: boolean; data: string }> { +async function summary( + stringlizedChat: string +): Promise<{ success: boolean; data: string }> { const db = getDatabase(); console.log("Summarizing"); - if (db.supaModelType === 'distilbart') { + if (db.supaModelType === "distilbart") { try { const sum = await runSummarizer(stringlizedChat); return { success: true, data: sum }; } catch (error) { return { success: false, - data: "SupaMemory: Summarizer: " + `${error}` + data: "SupaMemory: Summarizer: " + `${error}`, }; } } - const supaPrompt = db.supaMemoryPrompt === '' ? - "[Summarize the ongoing role story, It must also remove redundancy and unnecessary text and content from the output.]\n" - : db.supaMemoryPrompt; - let result = ''; + const supaPrompt = + db.supaMemoryPrompt === "" + ? "[Summarize the ongoing role story, It must also remove redundancy and unnecessary text and content from the output.]\n" + : db.supaMemoryPrompt; + let result = ""; - if (db.supaModelType !== 'subModel') { - const promptbody = stringlizedChat + '\n\n' + supaPrompt + "\n\nOutput:"; + if (db.supaModelType !== "subModel") { + const promptbody = stringlizedChat + "\n\n" + supaPrompt + "\n\nOutput:"; const da = await globalFetch("https://api.openai.com/v1/completions", { headers: { "Content-Type": "application/json", - "Authorization": "Bearer " + db.supaMemoryKey + Authorization: "Bearer " + db.supaMemoryKey, }, method: "POST", body: { - "model": db.supaModelType === 'curie' ? "text-curie-001" - : db.supaModelType === 'instruct35' ? 'gpt-3.5-turbo-instruct' - : "text-davinci-003", - "prompt": promptbody, - "max_tokens": 600, - "temperature": 0 - } - }) + model: + db.supaModelType === "curie" + ? "text-curie-001" + : db.supaModelType === "instruct35" + ? 
"gpt-3.5-turbo-instruct" + : "text-davinci-003", + prompt: promptbody, + max_tokens: 600, + temperature: 0, + }, + }); console.log("Using openAI instruct 3.5 for SupaMemory"); try { if (!da.ok) { return { success: false, - data: "SupaMemory: HTTP: " + JSON.stringify(da) + data: "SupaMemory: HTTP: " + JSON.stringify(da), }; } @@ -72,7 +85,7 @@ async function summary(stringlizedChat: string): Promise<{ success: boolean; dat if (!result) { return { success: false, - data: "SupaMemory: HTTP: " + JSON.stringify(da) + data: "SupaMemory: HTTP: " + JSON.stringify(da), }; } @@ -80,34 +93,46 @@ async function summary(stringlizedChat: string): Promise<{ success: boolean; dat } catch (error) { return { success: false, - data: "SupaMemory: HTTP: " + error + data: "SupaMemory: HTTP: " + error, }; } } else { - - let parsedPrompt = parseChatML(supaPrompt.replaceAll('{{slot}}', stringlizedChat)) + let parsedPrompt = parseChatML( + supaPrompt.replaceAll("{{slot}}", stringlizedChat) + ); const promptbody: OpenAIChat[] = parsedPrompt ?? [ { role: "user", - content: stringlizedChat + content: stringlizedChat, }, { role: "system", - content: supaPrompt - } + content: supaPrompt, + }, ]; - console.log("Using submodel: ", db.subModel, "for supaMemory model"); - const da = await requestChatData({ - formated: promptbody, - bias: {}, - useStreaming: false, - noMultiGen: true - }, 'memory'); - if (da.type === 'fail' || da.type === 'streaming' || da.type === 'multiline') { + console.log( + "Using submodel: ", + db.subModel, + "for supaMemory model" + ); + const da = await requestChatData( + { + formated: promptbody, + bias: {}, + useStreaming: false, + noMultiGen: true, + }, + "memory" + ); + if ( + da.type === "fail" || + da.type === "streaming" || + da.type === "multiline" + ) { return { success: false, - data: "SupaMemory: HTTP: " + da.result + data: "SupaMemory: HTTP: " + da.result, }; } result = da.result; @@ -115,6 +140,43 @@ async function summary(stringlizedChat: string): Promise<{ success: boolean; dat return { success: true, data: result }; } +function cleanInvalidChunks( + chats: OpenAIChat[], + data: HypaV2Data, + editedChatIndex?: number +): void { + // If editedChatIndex is provided, remove chunks and mainChunks that summarize chats from that index onwards + if (editedChatIndex !== undefined) { + data.mainChunks = data.mainChunks.filter( + (chunk) => chunk.chatRange[1] < editedChatIndex + ); + data.chunks = data.chunks.filter( + (chunk) => chunk.chatRange[1] < editedChatIndex + ); + } else { + // Build a set of current chat memo IDs + const currentChatIds = new Set(chats.map((chat) => chat.memo)); + + // Filter mainChunks + data.mainChunks = data.mainChunks.filter((chunk) => { + // Check if all chat memos in the range exist + const [startIdx, endIdx] = chunk.chatRange; + for (let i = startIdx; i <= endIdx; i++) { + if (!currentChatIds.has(chats[i]?.memo)) { + return false; // Chat no longer exists, remove this mainChunk + } + } + return true; + }); + + // Similarly for chunks + data.chunks = data.chunks.filter(() => { + // Since chunks are associated with mainChunks, they have been filtered already + return true; + }); + } +} + export async function hypaMemoryV2( chats: OpenAIChat[], currentTokens: number, @@ -122,12 +184,19 @@ export async function hypaMemoryV2( room: Chat, char: character | groupChat, tokenizer: ChatTokenizer, - arg: { asHyper?: boolean, summaryModel?: string, summaryPrompt?: string, hypaModel?: string } = {} -): Promise<{ currentTokens: number; chats: OpenAIChat[]; error?: string; 
memory?: HypaV2Data; }> { - + editedChatIndex?: number +): Promise<{ + currentTokens: number; + chats: OpenAIChat[]; + error?: string; + memory?: HypaV2Data; +}> { const db = getDatabase(); const data: HypaV2Data = room.hypaV2Data ?? { chunks: [], mainChunks: [] }; + // Clean invalid chunks based on the edited chat index + cleanInvalidChunks(chats, data, editedChatIndex); + let allocatedTokens = db.hypaAllocatedTokens; let chunkSize = db.hypaChunkSize; currentTokens += allocatedTokens + 50; @@ -136,49 +205,40 @@ export async function hypaMemoryV2( // Error handling for infinite summarization attempts let summarizationFailures = 0; const maxSummarizationFailures = 3; - let lastMainChunkTargetId = ''; - - // Ensure correct targetId matching - const getValidChatIndex = (targetId: string) => { - return chats.findIndex(chat => chat.memo === targetId); - }; - - // Processing mainChunks - if (data.mainChunks.length > 0) { - const chunk = data.mainChunks[0]; - const ind = getValidChatIndex(chunk.targetId); - if (ind !== -1) { - const removedChats = chats.splice(0, ind + 1); - console.log("removed chats", removedChats); - for (const chat of removedChats) { - currentTokens -= await tokenizer.tokenizeChat(chat); - } - mainPrompt = chunk.text; - const mpToken = await tokenizer.tokenizeChat({ role: 'system', content: mainPrompt }); - allocatedTokens -= mpToken; - } - } + const summarizedIndices = new Set(); // Token management loop while (currentTokens >= maxContextTokens) { let idx = 0; - let targetId = ''; + let targetId = ""; const halfData: OpenAIChat[] = []; let halfDataTokens = 0; - while (halfDataTokens < chunkSize && (idx <= chats.length - 4)) { // Ensure latest two chats are not added to summarization. - const chat = chats[idx]; - halfDataTokens += await tokenizer.tokenizeChat(chat); - halfData.push(chat); + let startIdx = -1; + + // Find the next batch of chats to summarize + while ( + halfDataTokens < chunkSize && + idx < chats.length - 2 // Ensure latest two chats are not added to summarization. + ) { + if (!summarizedIndices.has(idx)) { + const chat = chats[idx]; + if (startIdx === -1) startIdx = idx; + halfDataTokens += await tokenizer.tokenizeChat(chat); + halfData.push(chat); + targetId = chat.memo; + } idx++; - targetId = chat.memo; - console.log("current target chat: ", chat); } + const endIdx = idx - 1; // End index of the chats being summarized + // Avoid summarizing the last two chats if (halfData.length < 3) break; - const stringlizedChat = halfData.map(e => `${e.role}: ${e.content}`).join('\n'); + const stringlizedChat = halfData + .map((e) => `${e.role}: ${e.content}`) + .join("\n"); const summaryData = await summary(stringlizedChat); if (!summaryData.success) { @@ -187,7 +247,8 @@ export async function hypaMemoryV2( return { currentTokens: currentTokens, chats: chats, - error: "Summarization failed multiple times. Aborting to prevent infinite loop." + error: + "Summarization failed multiple times. 
Aborting to prevent infinite loop.", }; } continue; @@ -195,117 +256,142 @@ export async function hypaMemoryV2( summarizationFailures = 0; // Reset failure counter on success - const summaryDataToken = await tokenizer.tokenizeChat({ role: 'system', content: summaryData.data }); + const summaryDataToken = await tokenizer.tokenizeChat({ + role: "system", + content: summaryData.data, + }); mainPrompt += `\n\n${summaryData.data}`; currentTokens -= halfDataTokens; allocatedTokens -= summaryDataToken; data.mainChunks.unshift({ text: summaryData.data, - targetId: targetId + targetId: targetId, + chatRange: [startIdx, endIdx], }); // Split the summary into chunks based on double line breaks - const splitted = summaryData.data.split('\n\n').map(e => e.trim()).filter(e => e.length > 0); + const splitted = summaryData.data + .split("\n\n") + .map((e) => e.trim()) + .filter((e) => e.length > 0); // Update chunks with the new summary - data.chunks.push(...splitted.map(e => ({ - text: e, - targetId: targetId - }))); + data.chunks.push( + ...splitted.map((e) => ({ + text: e, + targetId: targetId, + chatRange: [startIdx, endIdx] as [number, number], + })) + ); - // Remove summarized chats - chats.splice(0, idx); + // Mark the chats as summarized + for (let i = startIdx; i <= endIdx; i++) { + summarizedIndices.add(i); + } } - // Construct the mainPrompt from mainChunks until half of the allocatedTokens are used + // Construct the mainPrompt from mainChunks mainPrompt = ""; let mainPromptTokens = 0; for (const chunk of data.mainChunks) { - const chunkTokens = await tokenizer.tokenizeChat({ role: 'system', content: chunk.text }); + const chunkTokens = await tokenizer.tokenizeChat({ + role: "system", + content: chunk.text, + }); if (mainPromptTokens + chunkTokens > allocatedTokens / 2) break; mainPrompt += `\n\n${chunk.text}`; mainPromptTokens += chunkTokens; - lastMainChunkTargetId = chunk.targetId; } // Fetch additional memory from chunks const processor = new HypaProcesser(db.hypaModel); processor.oaikey = db.supaMemoryKey; - // Find the smallest index of chunks with the same targetId as lastMainChunkTargetId - const lastMainChunkIndex = data.chunks.reduce((minIndex, chunk, index) => { - if (chunk.targetId === lastMainChunkTargetId) { - return Math.min(minIndex, index); - } - return minIndex; - }, data.chunks.length); - - // Filter chunks to only include those older than the last mainChunk's targetId - const olderChunks = lastMainChunkIndex !== data.chunks.length - ? 
data.chunks.slice(0, lastMainChunkIndex) - : data.chunks; - - console.log("Older Chunks:", olderChunks); - - // Add older chunks to processor for similarity search - await processor.addText(olderChunks.filter(v => v.text.trim().length > 0).map(v => "search_document: " + v.text.trim())); + // Add chunks to processor for similarity search + await processor.addText( + data.chunks + .filter((v) => v.text.trim().length > 0) + .map((v) => "search_document: " + v.text.trim()) + ); let scoredResults: { [key: string]: number } = {}; for (let i = 0; i < 3; i++) { const pop = chats[chats.length - i - 1]; if (!pop) break; - const searched = await processor.similaritySearchScored(`search_query: ${pop.content}`); + const searched = await processor.similaritySearchScored( + `search_query: ${pop.content}` + ); for (const result of searched) { const score = result[1] / (i + 1); scoredResults[result[0]] = (scoredResults[result[0]] || 0) + score; } } - const scoredArray = Object.entries(scoredResults).sort((a, b) => b[1] - a[1]); + const scoredArray = Object.entries(scoredResults).sort( + (a, b) => b[1] - a[1] + ); let chunkResultPrompts = ""; let chunkResultTokens = 0; - while (allocatedTokens - mainPromptTokens - chunkResultTokens > 0 && scoredArray.length > 0) { + while ( + allocatedTokens - mainPromptTokens - chunkResultTokens > 0 && + scoredArray.length > 0 + ) { const [text] = scoredArray.shift(); - const tokenized = await tokenizer.tokenizeChat({ role: 'system', content: text.substring(14) }); - if (tokenized > allocatedTokens - mainPromptTokens - chunkResultTokens) break; - chunkResultPrompts += text.substring(14) + '\n\n'; + const tokenized = await tokenizer.tokenizeChat({ + role: "system", + content: text.substring(14), + }); + if ( + tokenized > + allocatedTokens - mainPromptTokens - chunkResultTokens + ) + break; + chunkResultPrompts += text.substring(14) + "\n\n"; chunkResultTokens += tokenized; } const fullResult = `${mainPrompt}\n${chunkResultPrompts}`; - chats.unshift({ + // Filter out summarized chats + const unsummarizedChats = chats.filter( + (_, idx) => !summarizedIndices.has(idx) + ); + + // Insert the memory system prompt at the beginning + unsummarizedChats.unshift({ role: "system", content: fullResult, - memo: "supaMemory" + memo: "supaMemory", }); - // Add the remaining chats after the last mainChunk's targetId - const lastTargetId = data.mainChunks.length > 0 ? data.mainChunks[0].targetId : null; - if (lastTargetId) { - const lastIndex = getValidChatIndex(lastTargetId); - if (lastIndex !== -1) { - const remainingChats = chats.slice(lastIndex + 1); - chats = [chats[0], ...remainingChats]; - } - } - - // Add last two chats if they exist and are not duplicates - if (lastTwoChats.length === 2) { - const [lastChat1, lastChat2] = lastTwoChats; - if (!chats.some(chat => chat.memo === lastChat1.memo)) { - chats.push(lastChat1); - } - if (!chats.some(chat => chat.memo === lastChat2.memo)) { - chats.push(lastChat2); + // Add the last two chats back if they were removed + const lastTwoChatsSet = new Set(lastTwoChats.map((chat) => chat.memo)); + console.log(lastTwoChatsSet) // Not so sure if chat.memo is unique id. 
+ for (const chat of lastTwoChats) { + if (!unsummarizedChats.find((c) => c.memo === chat.memo)) { + unsummarizedChats.push(chat); } } - console.log("model being used: ", db.hypaModel, db.supaModelType, "\nCurrent session tokens: ", currentTokens, "\nAll chats, including memory system prompt: ", chats, "\nMemory data, with all the chunks: ", data); + // Recalculate currentTokens + currentTokens = await tokenizer.tokenizeChats(unsummarizedChats); + + console.log( + "Model being used: ", + db.hypaModel, + db.supaModelType, + "\nCurrent session tokens: ", + currentTokens, + "\nAll chats, including memory system prompt: ", + unsummarizedChats, + "\nMemory data, with all the chunks: ", + data + ); + return { currentTokens: currentTokens, - chats: chats, - memory: data + chats: unsummarizedChats, + memory: data, }; } diff --git a/src/ts/tokenizer.ts b/src/ts/tokenizer.ts index d767481d..a7fe4623 100644 --- a/src/ts/tokenizer.ts +++ b/src/ts/tokenizer.ts @@ -222,15 +222,15 @@ export async function tokenizeAccurate(data:string, consistantChar?:boolean) { export class ChatTokenizer { - private chatAdditonalTokens:number + private chatAdditionalTokens:number private useName:'name'|'noName' - constructor(chatAdditonalTokens:number, useName:'name'|'noName'){ - this.chatAdditonalTokens = chatAdditonalTokens + constructor(chatAdditionalTokens:number, useName:'name'|'noName'){ + this.chatAdditionalTokens = chatAdditionalTokens this.useName = useName } async tokenizeChat(data:OpenAIChat) { - let encoded = (await encode(data.content)).length + this.chatAdditonalTokens + let encoded = (await encode(data.content)).length + this.chatAdditionalTokens if(data.name && this.useName ==='name'){ encoded += (await encode(data.name)).length + 1 } @@ -241,17 +241,24 @@ export class ChatTokenizer { } return encoded } + async tokenizeChats(data:OpenAIChat[]){ + let encoded = 0 + for(const chat of data){ + encoded += await this.tokenizeChat(chat) + } + return encoded + } async tokenizeMultiModal(data:MultiModal){ const db = getDatabase() if(!supportsInlayImage()){ - return this.chatAdditonalTokens + return this.chatAdditionalTokens } if(db.gptVisionQuality === 'low'){ return 87 } - let encoded = this.chatAdditonalTokens + let encoded = this.chatAdditionalTokens let height = data.height ?? 0 let width = data.width ?? 0 From 2a35b1f4b2452d528aaac2dd20c0af55809b56fc Mon Sep 17 00:00:00 2001 From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com> Date: Sun, 1 Dec 2024 13:06:33 -0800 Subject: [PATCH 03/18] refactor: accidental usage of wrong comment --- src/lib/Others/AlertComp.svelte | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib/Others/AlertComp.svelte b/src/lib/Others/AlertComp.svelte index b532562f..0bc63e02 100644 --- a/src/lib/Others/AlertComp.svelte +++ b/src/lib/Others/AlertComp.svelte @@ -294,7 +294,7 @@
{:else} - {#each DBState.db.characters[$selectedCharID].chats[DBState.db.characters[$selectedCharID].chatPage].hypaV2Data.mainChunks as chunk, i} // Summarized -> mainChunks + {#each DBState.db.characters[$selectedCharID].chats[DBState.db.characters[$selectedCharID].chatPage].hypaV2Data.mainChunks as chunk, i}
{#if i === 0}
 Active

From 83b79fa48dfe305b28395e11c060ee21d3b58e64 Mon Sep 17 00:00:00 2001
From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com>
Date: Sun, 1 Dec 2024 17:40:21 -0800
Subject: [PATCH 04/18] fix: chunks not being filtered correctly

Need to check whether chat.memo is actually a unique UUIDv4/UUIDv5; otherwise this filtering will break.
---
 src/ts/process/memory/hypav2.ts | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts
index ee6a151b..b437681c 100644
--- a/src/ts/process/memory/hypav2.ts
+++ b/src/ts/process/memory/hypav2.ts
@@ -156,27 +156,33 @@ function cleanInvalidChunks(
     } else {
         // Build a set of current chat memo IDs
         const currentChatIds = new Set(chats.map((chat) => chat.memo));
+        console.log("OpenAI Chat IDs? ", currentChatIds)

-        // Filter mainChunks
+        // Remove summaries whose chats no longer exist
         data.mainChunks = data.mainChunks.filter((chunk) => {
-            // Check if all chat memos in the range exist
             const [startIdx, endIdx] = chunk.chatRange;
+            // Check if all chats in the range exist
             for (let i = startIdx; i <= endIdx; i++) {
                 if (!currentChatIds.has(chats[i]?.memo)) {
-                    return false; // Chat no longer exists, remove this mainChunk
+                    return false; // filter out by returning false
                 }
             }
             return true;
         });

-        // Similarly for chunks
-        data.chunks = data.chunks.filter(() => {
-            // Since chunks are associated with mainChunks, they have been filtered already
+        // Same check, but performed on the split chunks
+        data.chunks = data.chunks.filter((chunk) => {
+            const [startIdx, endIdx] = chunk.chatRange;
+            // Generated chunks are no longer tied to mainChunks, so the same check has to be repeated here.
+            for (let i = startIdx; i <= endIdx; i++) {
+                if (!currentChatIds.has(chats[i]?.memo)) {
+                    return false;
+                }
+            }
             return true;
         });
     }
 }
-
 export async function hypaMemoryV2(
     chats: OpenAIChat[],
     currentTokens: number,

From 4ea365a14105a704bc608d321faa56202a0dea4f Mon Sep 17 00:00:00 2001
From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com>
Date: Sun, 1 Dec 2024 19:31:23 -0800
Subject: [PATCH 05/18] refactor: logging

---
 src/ts/process/memory/hypav2.ts | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts
index b437681c..b6cde9ba 100644
--- a/src/ts/process/memory/hypav2.ts
+++ b/src/ts/process/memory/hypav2.ts
@@ -154,9 +154,8 @@ function cleanInvalidChunks(
             (chunk) => chunk.chatRange[1] < editedChatIndex
         );
     } else {
-        // Build a set of current chat memo IDs
+        // Confirmed that chat.memo is indeed a unique UUID
         const currentChatIds = new Set(chats.map((chat) => chat.memo));
-        console.log("OpenAI Chat IDs? ", currentChatIds)

         // Remove summaries whose chats no longer exist
         data.mainChunks = data.mainChunks.filter((chunk) => {
@@ -164,6 +163,7 @@ function cleanInvalidChunks(
             // Check if all chats in the range exist
             for (let i = startIdx; i <= endIdx; i++) {
                 if (!currentChatIds.has(chats[i]?.memo)) {
+                    console.log(`Removing this mainChunk(summary) due to chat context change: ${chunk}`);
                     return false; // filter out by returning false
                 }
             }
@@ -176,6 +176,7 @@ function cleanInvalidChunks(
             // Generated chunks are no longer tied to mainChunks, so the same check has to be repeated here.
             for (let i = startIdx; i <= endIdx; i++) {
                 if (!currentChatIds.has(chats[i]?.memo)) {
+                    console.log(`Removing this chunk(split) due to chat context change: ${chunk}`);
                     return false;
                 }
             }
             return true;
         });
     }

From 46502e762baac62312ecfc0f3e90a947c8cbe4566 Mon Sep 17 00:00:00 2001
From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com>
Date: Sun, 1 Dec 2024 22:15:19 -0800
Subject: [PATCH 06/18] add: regenerateSummary stub and per-chunk index in
 memory viewer

---
 src/lib/Others/AlertComp.svelte | 2 +-
 src/ts/process/memory/hypav2.ts | 10 ++++++++++
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/src/lib/Others/AlertComp.svelte b/src/lib/Others/AlertComp.svelte
index 0bc63e02..7ef80344 100644
--- a/src/lib/Others/AlertComp.svelte
+++ b/src/lib/Others/AlertComp.svelte
@@ -287,7 +287,7 @@
{#if generationInfoMenuIndex === 0}
-                        {#each DBState.db.characters[$selectedCharID].chats[DBState.db.characters[$selectedCharID].chatPage].hypaV2Data.chunks as chunk}
+                        {#each DBState.db.characters[$selectedCharID].chats[DBState.db.characters[$selectedCharID].chatPage].hypaV2Data.chunks as chunk, i}
                         {/each}

diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts
index b6cde9ba..d2e46126 100644
--- a/src/ts/process/memory/hypav2.ts
+++ b/src/ts/process/memory/hypav2.ts
@@ -183,6 +183,16 @@ function cleanInvalidChunks(
         return true;
     });
 }
+}
+export async function regenerateSummary(
+    chats: OpenAIChat[],
+    data: HypaV2Data,
+    mainChunkIndex: number
+) : Promise<void> {
+    // Should re-summarize a specific mainChunk, selected by index, and then replace the original. How much chat history to summarize is already defined in the mainChunk's chatRange field.
+    // After updating the mainChunk, it should also update the chunks that share the same chatRange, since they should be regenerated from the new summary. Follow the same splitting principles.
+
+}
 export async function hypaMemoryV2(
     chats: OpenAIChat[],
     currentTokens: number,

From b8bb2330ccd323a0e97a5101050d1ba1d181c713 Mon Sep 17 00:00:00 2001
From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com>
Date: Tue, 3 Dec 2024 23:28:18 -0800
Subject: [PATCH 07/18] feat: validate hypav2 data

# Changelist:

## 1. Types

### MainChunks
Added id (int) and chatMemos (Set).
id: incremental int starting from 0.
chatMemos: a set of UUIDs recording which chats were summarized into this mainChunk.

### Chunks
mainChunkID: a link back to the mainChunk this chunk was split from.
text: the split text data.

## 2. Features

### CleanInvalidChunks
Called every time the chat is updated and HypaMemory is used. Gets all the memos (UUIDs) of the current chats and builds a set, then checks whether each mainChunk's chatMemos set is a subset of the entire memo set. If it is not, a chat covered by that summary has been deleted or edited, so the mainChunk is filtered out. Concurrently, the chunks split from that mainChunk are also deleted.
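To make the invalidation rule concrete, a minimal hedged sketch of the subset check (the memo values below are placeholders, not real UUIDs):

    // Memos of the chats that still exist in the room
    const liveMemos = new Set(["memo-a", "memo-c"]);
    // A mainChunk that summarized chats "memo-a" and "memo-b"
    const mainChunk = { id: 1, text: "summary...", chatMemos: new Set(["memo-a", "memo-b"]) };
    // "memo-b" was deleted or edited, so chatMemos is no longer a subset of
    // the live memo set; this mainChunk is filtered out, and every chunk
    // whose mainChunkID === 1 is dropped along with it.
    isSubset(mainChunk.chatMemos, liveMemos); // false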
--- src/ts/process/memory/hypamemory.ts | 10 +- src/ts/process/memory/hypav2.ts | 145 +++++++++++++--------------- 2 files changed, 69 insertions(+), 86 deletions(-) diff --git a/src/ts/process/memory/hypamemory.ts b/src/ts/process/memory/hypamemory.ts index 760ebd83..1bc81732 100644 --- a/src/ts/process/memory/hypamemory.ts +++ b/src/ts/process/memory/hypamemory.ts @@ -1,8 +1,7 @@ import localforage from "localforage"; -import { globalFetch } from "src/ts/globalApi.svelte"; -import { runEmbedding } from "../transformers"; -import { alertError } from "src/ts/alert"; -import { appendLastPath } from "src/ts/util"; +import {globalFetch} from "src/ts/globalApi.svelte"; +import {runEmbedding} from "../transformers"; +import {appendLastPath} from "src/ts/util"; export class HypaProcesser{ @@ -139,8 +138,7 @@ export class HypaProcesser{ } async similaritySearchScored(query: string) { - const results = await this.similaritySearchVectorWithScore((await this.getEmbeds(query))[0],); - return results + return await this.similaritySearchVectorWithScore((await this.getEmbeds(query))[0],) } private async similaritySearchVectorWithScore( diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts index d2e46126..0fb6275f 100644 --- a/src/ts/process/memory/hypav2.ts +++ b/src/ts/process/memory/hypav2.ts @@ -13,15 +13,15 @@ import { runSummarizer } from "../transformers"; import { parseChatML } from "src/ts/parser.svelte"; export interface HypaV2Data { - chunks: { + lastMainChunkId: number; // can be removed, but exists to more readability of the code. + mainChunks: { // summary itself + id: number; text: string; - targetId: string; - chatRange: [number, number]; // Start and end indices of chats summarized + chatMemos: Set; // UUIDs of summarized chats }[]; - mainChunks: { - text: string; - targetId: string; - chatRange: [number, number]; // Start and end indices of chats summarized + chunks: { // split mainChunks for retrieval or something. Although quite uncomfortable logic, so maybe I will delete it soon or later. + mainChunkID: number; + text:string; }[]; } @@ -138,61 +138,42 @@ async function summary( result = da.result; } return { success: true, data: result }; +} // No, I am not going to touch any http API calls. + +function isSubset(subset: Set, superset: Set): boolean { // simple helper function. Check if subset IS a subset of superset given. 
+ for (const item of subset) { + if (!superset.has(item)) { + return false; + } + } + return true; } function cleanInvalidChunks( chats: OpenAIChat[], data: HypaV2Data, - editedChatIndex?: number ): void { - // If editedChatIndex is provided, remove chunks and mainChunks that summarize chats from that index onwards - if (editedChatIndex !== undefined) { - data.mainChunks = data.mainChunks.filter( - (chunk) => chunk.chatRange[1] < editedChatIndex - ); - data.chunks = data.chunks.filter( - (chunk) => chunk.chatRange[1] < editedChatIndex - ); - } else { - // Confirmed that chat.memo is indeed unique uuid - const currentChatIds = new Set(chats.map((chat) => chat.memo)); + const currentChatMemos = new Set(chats.map((chat) => chat.memo)); // if chunk's memo set is not subset of this, the chunk's content -> delete - // 존재하지 않는 챗의 요약본 삭제 - data.mainChunks = data.mainChunks.filter((chunk) => { - const [startIdx, endIdx] = chunk.chatRange; - // Check if all chats in the range exist - for (let i = startIdx; i <= endIdx; i++) { - if (!currentChatIds.has(chats[i]?.memo)) { - console.log(`Removing this mainChunk(summary) due to chat context change: ${chunk}`); - return false; // false로 filtering - } - } - return true; - }); + // mainChunks filtering + data.mainChunks = data.mainChunks.filter((mainChunk) => { + return isSubset(mainChunk.chatMemos, currentChatMemos); + }); + // chunk filtering based on mainChunk's id + const validMainChunkIds = new Set(data.mainChunks.map((mainChunk) => mainChunk.id)); + data.chunks = data.chunks.filter((chunk) => + validMainChunkIds.has(chunk.mainChunkID) + ); + data.lastMainChunkId = data.mainChunks[-1].id; // Quite literally the definition of lastMainChunkId. Didn't use .length, since middle chat context can be partially deleted. - // 같은거, 근데 이건 쪼개진 chunk들에 대하여 수행 - data.chunks = data.chunks.filter((chunk) => { - const [startIdx, endIdx] = chunk.chatRange; - // 생성된 chunks는 더이상 mainChunks와 연결되지 않음. 따라서 같은 작업을 진행해야 한다. - for (let i = startIdx; i <= endIdx; i++) { - if (!currentChatIds.has(chats[i]?.memo)) { - console.log(`Removing this chunk(split) due to chat context change: ${chunk}`); - return false; - } - } - return true; - }); - } } export async function regenerateSummary( chats: OpenAIChat[], data: HypaV2Data, mainChunkIndex: number ) : Promise { -// Should re-summarize a certain main chunk, based on index. It will then replace the original one. How much chat needs to be summarized is already defined in the mainChunk's chatRange field. - // After the update on mainChunks, it should also update chunks that have the same ChatRange, as they should be updated with the newly generated summary. Follow the same principles of splitting them. - - + const targetMainChunk = data.mainChunks[mainChunkIndex]; + } export async function hypaMemoryV2( chats: OpenAIChat[], @@ -200,8 +181,7 @@ export async function hypaMemoryV2( maxContextTokens: number, room: Chat, char: character | groupChat, - tokenizer: ChatTokenizer, - editedChatIndex?: number + tokenizer: ChatTokenizer ): Promise<{ currentTokens: number; chats: OpenAIChat[]; @@ -209,51 +189,48 @@ export async function hypaMemoryV2( memory?: HypaV2Data; }> { const db = getDatabase(); - const data: HypaV2Data = room.hypaV2Data ?? { chunks: [], mainChunks: [] }; + const data: HypaV2Data = room.hypaV2Data ?? 
{ + lastMainChunkId: 0, + chunks: [], + mainChunks: [] + }; - // Clean invalid chunks based on the edited chat index - cleanInvalidChunks(chats, data, editedChatIndex); + // Clean invalid HypaV2 data + cleanInvalidChunks(chats, data); let allocatedTokens = db.hypaAllocatedTokens; let chunkSize = db.hypaChunkSize; - currentTokens += allocatedTokens + 50; + currentTokens += allocatedTokens + chats.length * 4; // ChatML token counting from official openai documentation let mainPrompt = ""; const lastTwoChats = chats.slice(-2); - // Error handling for infinite summarization attempts let summarizationFailures = 0; const maxSummarizationFailures = 3; - const summarizedIndices = new Set(); + const summarizedMemos = new Set(); // Token management loop while (currentTokens >= maxContextTokens) { let idx = 0; - let targetId = ""; const halfData: OpenAIChat[] = []; - let halfDataTokens = 0; - let startIdx = -1; - // Find the next batch of chats to summarize + // Accumulate chats to summarize while ( halfDataTokens < chunkSize && idx < chats.length - 2 // Ensure latest two chats are not added to summarization. ) { - if (!summarizedIndices.has(idx)) { - const chat = chats[idx]; - if (startIdx === -1) startIdx = idx; + const chat = chats[idx]; + if (!summarizedMemos.has(chat.memo)) { halfDataTokens += await tokenizer.tokenizeChat(chat); halfData.push(chat); - targetId = chat.memo; } idx++; } + // End index gone due to using UUID sets + // Last two chats must not be summarized, else request will be broken - const endIdx = idx - 1; // End index of the chats being summarized - - // Avoid summarizing the last two chats if (halfData.length < 3) break; - const stringlizedChat = halfData + const stringlizedChat = halfData // please change this name to something else .map((e) => `${e.role}: ${e.content}`) .join("\n"); const summaryData = await summary(stringlizedChat); @@ -281,10 +258,15 @@ export async function hypaMemoryV2( currentTokens -= halfDataTokens; allocatedTokens -= summaryDataToken; - data.mainChunks.unshift({ + // lastMainChunkId updating(increment) + data.lastMainChunkId++; + const newMainChunkId = data.lastMainChunkId; + + const chatMemos = new Set(halfData.map((chat) => chat.memo)); + data.mainChunks.push({ + id: newMainChunkId, text: summaryData.data, - targetId: targetId, - chatRange: [startIdx, endIdx], + chatMemos: chatMemos, }); // Split the summary into chunks based on double line breaks @@ -296,15 +278,14 @@ export async function hypaMemoryV2( // Update chunks with the new summary data.chunks.push( ...splitted.map((e) => ({ + mainChunkID: newMainChunkId, text: e, - targetId: targetId, - chatRange: [startIdx, endIdx] as [number, number], })) ); // Mark the chats as summarized - for (let i = startIdx; i <= endIdx; i++) { - summarizedIndices.add(i); + for (const memo of chatMemos) { + summarizedMemos.add(memo); } } @@ -325,15 +306,18 @@ export async function hypaMemoryV2( const processor = new HypaProcesser(db.hypaModel); processor.oaikey = db.supaMemoryKey; + const searchDocumentPrefix = "search_document: "; + const prefixLength = searchDocumentPrefix.length; + // Add chunks to processor for similarity search await processor.addText( data.chunks .filter((v) => v.text.trim().length > 0) - .map((v) => "search_document: " + v.text.trim()) + .map((v) => searchDocumentPrefix + v.text.trim()) // sometimes this should not be used at all. RisuAI does not support embedding model that this is meaningful, isn't it? 
); let scoredResults: { [key: string]: number } = {}; - for (let i = 0; i < 3; i++) { + for (let i = 0; i < 3; i++) { // Should parameterize this, fixed length 3 is a magic number without explanation const pop = chats[chats.length - i - 1]; if (!pop) break; const searched = await processor.similaritySearchScored( @@ -355,16 +339,17 @@ export async function hypaMemoryV2( scoredArray.length > 0 ) { const [text] = scoredArray.shift(); + const content = text.substring(prefixLength); const tokenized = await tokenizer.tokenizeChat({ role: "system", - content: text.substring(14), + content: content, }); if ( tokenized > allocatedTokens - mainPromptTokens - chunkResultTokens ) break; - chunkResultPrompts += text.substring(14) + "\n\n"; + chunkResultPrompts += content + "\n\n"; chunkResultTokens += tokenized; } @@ -372,7 +357,7 @@ export async function hypaMemoryV2( // Filter out summarized chats const unsummarizedChats = chats.filter( - (_, idx) => !summarizedIndices.has(idx) + (chat) => !summarizedMemos.has(chat.memo) ); // Insert the memory system prompt at the beginning From ea6d7dada18b6426fa28146f1b2977d6073948d1 Mon Sep 17 00:00:00 2001 From: HyperBlaze <73149145+LightningHyperBlaze45654@users.noreply.github.com> Date: Wed, 4 Dec 2024 22:10:16 -0800 Subject: [PATCH 08/18] bugfix/deleted feature restored Resolved bug when entire chat context is deleted and hypaV2Data becomes empty, throwing a undefined index error. Also changed it to typescript index. Hopefully resolved issue where same chat is summarized over and over again by adding another field to mainChunks. --- src/ts/process/memory/hypav2.ts | 50 ++++++++++++++++----------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts index 0fb6275f..ff827ac5 100644 --- a/src/ts/process/memory/hypav2.ts +++ b/src/ts/process/memory/hypav2.ts @@ -18,6 +18,7 @@ export interface HypaV2Data { id: number; text: string; chatMemos: Set; // UUIDs of summarized chats + lastChatMemo: string; }[]; chunks: { // split mainChunks for retrieval or something. Although quite uncomfortable logic, so maybe I will delete it soon or later. mainChunkID: number; @@ -205,11 +206,21 @@ export async function hypaMemoryV2( const lastTwoChats = chats.slice(-2); let summarizationFailures = 0; const maxSummarizationFailures = 3; - const summarizedMemos = new Set(); + + // Find the index to start summarizing from + let idx = 0; + if (data.mainChunks.length > 0) { + const lastMainChunk = data.mainChunks[data.mainChunks.length - 1]; + const lastChatMemo = lastMainChunk.lastChatMemo; + const lastChatIndex = chats.findIndex(chat => chat.memo === lastChatMemo); + if (lastChatIndex !== -1) { + idx = lastChatIndex + 1; + } + } + // Starting chat index of new mainChunk to be generated - // Token management loop + // Token management loop(where using of ) while (currentTokens >= maxContextTokens) { - let idx = 0; const halfData: OpenAIChat[] = []; let halfDataTokens = 0; @@ -217,20 +228,16 @@ export async function hypaMemoryV2( while ( halfDataTokens < chunkSize && idx < chats.length - 2 // Ensure latest two chats are not added to summarization. 
- ) { + ) { const chat = chats[idx]; - if (!summarizedMemos.has(chat.memo)) { - halfDataTokens += await tokenizer.tokenizeChat(chat); - halfData.push(chat); - } idx++; + halfDataTokens += await tokenizer.tokenizeChat(chat); + halfData.push(chat); } - // End index gone due to using UUID sets - // Last two chats must not be summarized, else request will be broken - if (halfData.length < 3) break; + if (halfData.length === 0) break; - const stringlizedChat = halfData // please change this name to something else + const stringlizedChat = halfData .map((e) => `${e.role}: ${e.content}`) .join("\n"); const summaryData = await summary(stringlizedChat); @@ -258,18 +265,21 @@ export async function hypaMemoryV2( currentTokens -= halfDataTokens; allocatedTokens -= summaryDataToken; - // lastMainChunkId updating(increment) + // Update lastMainChunkId and create a new mainChunk data.lastMainChunkId++; const newMainChunkId = data.lastMainChunkId; const chatMemos = new Set(halfData.map((chat) => chat.memo)); + const lastChatMemo = halfData[halfData.length - 1].memo; + data.mainChunks.push({ id: newMainChunkId, text: summaryData.data, chatMemos: chatMemos, + lastChatMemo: lastChatMemo, }); - // Split the summary into chunks based on double line breaks + // Split the summary into chunks const splitted = summaryData.data .split("\n\n") .map((e) => e.trim()) @@ -282,11 +292,6 @@ export async function hypaMemoryV2( text: e, })) ); - - // Mark the chats as summarized - for (const memo of chatMemos) { - summarizedMemos.add(memo); - } } // Construct the mainPrompt from mainChunks @@ -356,9 +361,7 @@ export async function hypaMemoryV2( const fullResult = `${mainPrompt}\n${chunkResultPrompts}`; // Filter out summarized chats - const unsummarizedChats = chats.filter( - (chat) => !summarizedMemos.has(chat.memo) - ); + const unsummarizedChats = chats.slice(idx); // Insert the memory system prompt at the beginning unsummarizedChats.unshift({ @@ -367,9 +370,6 @@ export async function hypaMemoryV2( memo: "supaMemory", }); - // Add the last two chats back if they were removed - const lastTwoChatsSet = new Set(lastTwoChats.map((chat) => chat.memo)); - console.log(lastTwoChatsSet) // Not so sure if chat.memo is unique id. for (const chat of lastTwoChats) { if (!unsummarizedChats.find((c) => c.memo === chat.memo)) { unsummarizedChats.push(chat); From b283b4a1267d4e0bfad4c13e828ee9dcc246c7f4 Mon Sep 17 00:00:00 2001 From: HyperBlaze <73149145+LightningHyperBlaze45654@users.noreply.github.com> Date: Thu, 5 Dec 2024 12:21:58 -0800 Subject: [PATCH 09/18] fix: index issues forgot to commit on my pc, so doing it on laptop --- src/ts/process/memory/hypav2.ts | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts index ff827ac5..ab5c9d42 100644 --- a/src/ts/process/memory/hypav2.ts +++ b/src/ts/process/memory/hypav2.ts @@ -160,14 +160,20 @@ function cleanInvalidChunks( data.mainChunks = data.mainChunks.filter((mainChunk) => { return isSubset(mainChunk.chatMemos, currentChatMemos); }); + // chunk filtering based on mainChunk's id const validMainChunkIds = new Set(data.mainChunks.map((mainChunk) => mainChunk.id)); data.chunks = data.chunks.filter((chunk) => validMainChunkIds.has(chunk.mainChunkID) ); - data.lastMainChunkId = data.mainChunks[-1].id; // Quite literally the definition of lastMainChunkId. Didn't use .length, since middle chat context can be partially deleted. 
- + // Update lastMainChunkId + if (data.mainChunks.length > 0) { + data.lastMainChunkId = data.mainChunks[data.mainChunks.length - 1].id; + } else { + data.lastMainChunkId = 0; + } } + export async function regenerateSummary( chats: OpenAIChat[], data: HypaV2Data, @@ -206,7 +212,7 @@ export async function hypaMemoryV2( const lastTwoChats = chats.slice(-2); let summarizationFailures = 0; const maxSummarizationFailures = 3; - + // Find the index to start summarizing from let idx = 0; if (data.mainChunks.length > 0) { @@ -216,7 +222,7 @@ export async function hypaMemoryV2( if (lastChatIndex !== -1) { idx = lastChatIndex + 1; } - } + } // Starting chat index of new mainChunk to be generated // Token management loop(where using of ) @@ -396,4 +402,4 @@ export async function hypaMemoryV2( chats: unsummarizedChats, memory: data, }; -} +} \ No newline at end of file From e4e63dc2373ba6cd6db6967f0660e324fee0a09c Mon Sep 17 00:00:00 2001 From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com> Date: Thu, 5 Dec 2024 16:09:06 -0800 Subject: [PATCH 10/18] force pushing desktop updates add log soon --- src/ts/process/memory/hypav2.ts | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts index ff827ac5..7b96a5ce 100644 --- a/src/ts/process/memory/hypav2.ts +++ b/src/ts/process/memory/hypav2.ts @@ -141,7 +141,7 @@ async function summary( return { success: true, data: result }; } // No, I am not going to touch any http API calls. -function isSubset(subset: Set, superset: Set): boolean { // simple helper function. Check if subset IS a subset of superset given. +function isSubset(subset: Set, superset: Set): boolean { for (const item of subset) { if (!superset.has(item)) { return false; @@ -154,20 +154,27 @@ function cleanInvalidChunks( chats: OpenAIChat[], data: HypaV2Data, ): void { - const currentChatMemos = new Set(chats.map((chat) => chat.memo)); // if chunk's memo set is not subset of this, the chunk's content -> delete + const currentChatMemos = new Set(chats.map((chat) => chat.memo)); // mainChunks filtering data.mainChunks = data.mainChunks.filter((mainChunk) => { return isSubset(mainChunk.chatMemos, currentChatMemos); }); + // chunk filtering based on mainChunk's id const validMainChunkIds = new Set(data.mainChunks.map((mainChunk) => mainChunk.id)); data.chunks = data.chunks.filter((chunk) => validMainChunkIds.has(chunk.mainChunkID) ); - data.lastMainChunkId = data.mainChunks[-1].id; // Quite literally the definition of lastMainChunkId. Didn't use .length, since middle chat context can be partially deleted. 
+ // Update lastMainChunkId + if (data.mainChunks.length > 0) { + data.lastMainChunkId = data.mainChunks[data.mainChunks.length - 1].id; + } else { + data.lastMainChunkId = 0; + } } + export async function regenerateSummary( chats: OpenAIChat[], data: HypaV2Data, From 56646809a1e262312ecfc0f3e90a947c8cbe4566 Mon Sep 17 00:00:00 2001 From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com> Date: Fri, 6 Dec 2024 08:49:00 -0800 Subject: [PATCH 11/18] add logging --- src/ts/process/index.svelte.ts | 6 +++--- src/ts/process/memory/hypav2.ts | 8 ++++++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/ts/process/index.svelte.ts b/src/ts/process/index.svelte.ts index 4d22d63b..37863e8a 100644 --- a/src/ts/process/index.svelte.ts +++ b/src/ts/process/index.svelte.ts @@ -776,9 +776,9 @@ export async function sendChat(chatProcessIndex = -1,arg:{ chats = hn.chats currentTokens = hn.tokens } - else if(DBState.db.hypav2){ //HypaV2 support needs to be changed like this. + else if(DBState.db.hypav2){ + console.log("Current chat's hypaV2 Data: ", currentChat.hypaV2Data) const sp = await hypaMemoryV2(chats, currentTokens, maxContextTokens, currentChat, nowChatroom, tokenizer) - console.log("All chats: ", chats) if(sp.error){ console.log(sp) alertError(sp.error) @@ -788,7 +788,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{ currentTokens = sp.currentTokens currentChat.hypaV2Data = sp.memory ?? currentChat.hypaV2Data DBState.db.characters[selectedChar].chats[selectedChat].hypaV2Data = currentChat.hypaV2Data - console.log(currentChat.hypaV2Data) + console.log("Current chat's HypaV2Data: ", currentChat.hypaV2Data) } else{ const sp = await supaMemory(chats, currentTokens, maxContextTokens, currentChat, nowChatroom, tokenizer, { diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts index 7b96a5ce..cb271a8a 100644 --- a/src/ts/process/memory/hypav2.ts +++ b/src/ts/process/memory/hypav2.ts @@ -226,11 +226,14 @@ export async function hypaMemoryV2( } // Starting chat index of new mainChunk to be generated - // Token management loop(where using of ) + // Token management loop(If current token exceeds allowed amount...) while (currentTokens >= maxContextTokens) { + console.log("The current Token exceeded maxContextTokens. Current tokens: ", currentTokens, "\nMax Context Tokens: ", maxContextTokens) const halfData: OpenAIChat[] = []; let halfDataTokens = 0; + const startIdx = idx; + // Accumulate chats to summarize while ( halfDataTokens < chunkSize && @@ -241,7 +244,8 @@ export async function hypaMemoryV2( halfDataTokens += await tokenizer.tokenizeChat(chat); halfData.push(chat); } - + const endIdx = idx - 1; + console.log(`Summarizing chats from index ${startIdx} to ${endIdx}.`); if (halfData.length === 0) break; const stringlizedChat = halfData From c45eed40afbc3e773e938311fc4a212bf6397ffd Mon Sep 17 00:00:00 2001 From: HyperBlaze <73149145+LightningHyperBlaze45654@users.noreply.github.com> Date: Fri, 6 Dec 2024 11:24:45 -0800 Subject: [PATCH 12/18] fix: currentChat not being updated --- src/ts/process/index.svelte.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/ts/process/index.svelte.ts b/src/ts/process/index.svelte.ts index 37863e8a..b7b5578d 100644 --- a/src/ts/process/index.svelte.ts +++ b/src/ts/process/index.svelte.ts @@ -788,6 +788,8 @@ export async function sendChat(chatProcessIndex = -1,arg:{ currentTokens = sp.currentTokens currentChat.hypaV2Data = sp.memory ?? 
currentChat.hypaV2Data DBState.db.characters[selectedChar].chats[selectedChat].hypaV2Data = currentChat.hypaV2Data + + currentChat = DBState.db.characters[selectedChar].chats[selectedChat]; console.log("Current chat's HypaV2Data: ", currentChat.hypaV2Data) } else{ From 3d3e4dd6a12507787ef316a6d9cc65bf59e3976e Mon Sep 17 00:00:00 2001 From: HyperBlaze <73149145+LightningHyperBlaze45654@users.noreply.github.com> Date: Fri, 6 Dec 2024 20:20:39 -0800 Subject: [PATCH 13/18] refactor: logging changes not so much but just in case: --- src/ts/process/index.svelte.ts | 2 +- src/ts/process/memory/hypav2.ts | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/ts/process/index.svelte.ts b/src/ts/process/index.svelte.ts index b7b5578d..ace7e35f 100644 --- a/src/ts/process/index.svelte.ts +++ b/src/ts/process/index.svelte.ts @@ -790,7 +790,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{ DBState.db.characters[selectedChar].chats[selectedChat].hypaV2Data = currentChat.hypaV2Data currentChat = DBState.db.characters[selectedChar].chats[selectedChat]; - console.log("Current chat's HypaV2Data: ", currentChat.hypaV2Data) + console.log("[Expected to be updated] chat's HypaV2Data: ", currentChat.hypaV2Data) } else{ const sp = await supaMemory(chats, currentTokens, maxContextTokens, currentChat, nowChatroom, tokenizer, { diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts index cfddfbf0..cc94e384 100644 --- a/src/ts/process/memory/hypav2.ts +++ b/src/ts/process/memory/hypav2.ts @@ -30,7 +30,6 @@ async function summary( stringlizedChat: string ): Promise<{ success: boolean; data: string }> { const db = getDatabase(); - console.log("Summarizing"); if (db.supaModelType === "distilbart") { try { From d94581a0670598f09e5fe670c06fcc19211b033e Mon Sep 17 00:00:00 2001 From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com> Date: Sun, 8 Dec 2024 10:37:38 -0800 Subject: [PATCH 14/18] add: unknown bug logging --- src/ts/process/memory/hypav2.ts | 79 ++++++++++++++++++++++++++++----- 1 file changed, 68 insertions(+), 11 deletions(-) diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts index cc94e384..2655a39b 100644 --- a/src/ts/process/memory/hypav2.ts +++ b/src/ts/process/memory/hypav2.ts @@ -20,7 +20,7 @@ export interface HypaV2Data { chatMemos: Set; // UUIDs of summarized chats lastChatMemo: string; }[]; - chunks: { // split mainChunks for retrieval or something. Although quite uncomfortable logic, so maybe I will delete it soon or later. + chunks: { // split mainChunks for retrieval or something. Although quite uncomfortable logic, so maybe I will delete it soon. mainChunkID: number; text:string; }[]; @@ -224,27 +224,56 @@ export async function hypaMemoryV2( } // Starting chat index of new mainChunk to be generated - // Token management loop(If current token exceeds allowed amount...) +// Token management loop(If current token exceeds allowed amount...) while (currentTokens >= maxContextTokens) { - console.log("The current Token exceeded maxContextTokens. 
Current tokens: ", currentTokens, "\nMax Context Tokens: ", maxContextTokens) const halfData: OpenAIChat[] = []; let halfDataTokens = 0; const startIdx = idx; + console.log( + "Entering summarization step:", + "\nCurrent Tokens:", currentTokens, + "\nMax Context Tokens:", maxContextTokens, + "\nIndex Start:", startIdx + ); + // Accumulate chats to summarize while ( halfDataTokens < chunkSize && idx < chats.length - 2 // Ensure latest two chats are not added to summarization. - ) { + ) { const chat = chats[idx]; - idx++; - halfDataTokens += await tokenizer.tokenizeChat(chat); + const chatTokens = await tokenizer.tokenizeChat(chat); + if (halfDataTokens + chatTokens > chunkSize) { + // If adding this chat would exceed chunkSize, break and summarize what we have + break; + } halfData.push(chat); + halfDataTokens += chatTokens; + idx++; } + const endIdx = idx - 1; - console.log(`Summarizing chats from index ${startIdx} to ${endIdx}.`); - if (halfData.length === 0) break; + + console.log( + "Summarization batch ready:", + "\nStartIdx:", startIdx, + "\nEndIdx:", endIdx, + "\nNumber of chats in halfData:", halfData.length, + "\nhalfDataTokens:", halfDataTokens, + "\nChats chosen for summarization:", + halfData.map((c, i) => ({ + index: startIdx + i, + role: c.role, + content: c.content + })) + ); + + if (halfData.length === 0) { + console.log("No chats to summarize this round. Breaking out..."); + break; + } const stringlizedChat = halfData .map((e) => `${e.role}: ${e.content}`) @@ -252,8 +281,10 @@ export async function hypaMemoryV2( const summaryData = await summary(stringlizedChat); if (!summaryData.success) { + console.log("Summarization failed:", summaryData.data); summarizationFailures++; if (summarizationFailures >= maxSummarizationFailures) { + console.error("Summarization failed multiple times. Aborting..."); return { currentTokens: currentTokens, chats: chats, @@ -261,19 +292,35 @@ export async function hypaMemoryV2( "Summarization failed multiple times. 
Aborting to prevent infinite loop.", }; } - continue; + continue; // Retry summarizing next loop iteration } - summarizationFailures = 0; // Reset failure counter on success + summarizationFailures = 0; // Reset on success const summaryDataToken = await tokenizer.tokenizeChat({ role: "system", content: summaryData.data, }); + + console.log( + "Summarization success:", + "\nSummary Data:", summaryData.data, + "\nSummary Token Count:", summaryDataToken, + "\nBefore adjusting tokens:", + "\nCurrent Tokens:", currentTokens, + "\nAllocated Tokens:", allocatedTokens + ); + mainPrompt += `\n\n${summaryData.data}`; currentTokens -= halfDataTokens; allocatedTokens -= summaryDataToken; + console.log( + "After adjusting tokens:", + "\nCurrent Tokens:", currentTokens, + "\nAllocated Tokens:", allocatedTokens + ); + // Update lastMainChunkId and create a new mainChunk data.lastMainChunkId++; const newMainChunkId = data.lastMainChunkId; @@ -294,13 +341,23 @@ export async function hypaMemoryV2( .map((e) => e.trim()) .filter((e) => e.length > 0); - // Update chunks with the new summary + console.log( + "Splitting summary into chunks for memory:", + splitted + ); + data.chunks.push( ...splitted.map((e) => ({ mainChunkID: newMainChunkId, text: e, })) ); + + console.log( + "End of iteration:", + "\nData mainChunks count:", data.mainChunks.length, + "\nData chunks count:", data.chunks.length + ); } // Construct the mainPrompt from mainChunks From 0c51e898f95566e73dcae01cfba04eeb93dd6aed Mon Sep 17 00:00:00 2001 From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com> Date: Sun, 8 Dec 2024 17:24:46 -0800 Subject: [PATCH 15/18] fix: bug resolved --- src/ts/process/memory/hypav2.ts | 88 +++++++++++++++++++-------------- 1 file changed, 50 insertions(+), 38 deletions(-) diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts index 2655a39b..6e8ab11d 100644 --- a/src/ts/process/memory/hypav2.ts +++ b/src/ts/process/memory/hypav2.ts @@ -213,7 +213,7 @@ export async function hypaMemoryV2( const maxSummarizationFailures = 3; // Find the index to start summarizing from - let idx = 0; + let idx = 2; // first two should not be considered if (data.mainChunks.length > 0) { const lastMainChunk = data.mainChunks[data.mainChunks.length - 1]; const lastChatMemo = lastMainChunk.lastChatMemo; @@ -224,7 +224,7 @@ export async function hypaMemoryV2( } // Starting chat index of new mainChunk to be generated -// Token management loop(If current token exceeds allowed amount...) +// Token management loop (If current token usage exceeds allowed amount) while (currentTokens >= maxContextTokens) { const halfData: OpenAIChat[] = []; let halfDataTokens = 0; @@ -232,52 +232,65 @@ export async function hypaMemoryV2( const startIdx = idx; console.log( - "Entering summarization step:", - "\nCurrent Tokens:", currentTokens, + "Starting summarization iteration:", + "\nCurrent Tokens (before):", currentTokens, "\nMax Context Tokens:", maxContextTokens, - "\nIndex Start:", startIdx + "\nStartIdx:", startIdx, + "\nchunkSize:", chunkSize ); // Accumulate chats to summarize while ( halfDataTokens < chunkSize && - idx < chats.length - 2 // Ensure latest two chats are not added to summarization. 
+    idx < chats.length - 2 // keep the last two chats out of summarization (otherwise the role order breaks)
   ) {
     const chat = chats[idx];
     const chatTokens = await tokenizer.tokenizeChat(chat);
+
+    console.log(
+      "Evaluating chat for summarization:",
+      "\nIndex:", idx,
+      "\nRole:", chat.role,
+      "\nContent:", chat.content,
+      "\nchatTokens:", chatTokens,
+      "\nhalfDataTokens so far:", halfDataTokens,
+      "\nWould adding this exceed chunkSize?", (halfDataTokens + chatTokens > chunkSize)
+    );
+
+    // Check if adding this chat would exceed our chunkSize limit
     if (halfDataTokens + chatTokens > chunkSize) {
-      // If adding this chat would exceed chunkSize, break and summarize what we have
+      // Can't add this chat without going over chunkSize.
+      // Break out and summarize what we have so far.
       break;
     }
+
+    // Add this chat to the halfData batch
     halfData.push(chat);
     halfDataTokens += chatTokens;
     idx++;
   }
 
   const endIdx = idx - 1;
 
   console.log(
-    "Summarization batch ready:",
+    "Summarization batch selected:",
     "\nStartIdx:", startIdx,
     "\nEndIdx:", endIdx,
     "\nNumber of chats in halfData:", halfData.length,
-    "\nhalfDataTokens:", halfDataTokens,
-    "\nChats chosen for summarization:",
-    halfData.map((c, i) => ({
-      index: startIdx + i,
-      role: c.role,
-      content: c.content
-    }))
+    "\nTotal tokens in halfData:", halfDataTokens,
+    "\nChats selected:", halfData.map(h => ({role: h.role, content: h.content}))
   );
 
+  // If no chats were added, break to avoid an infinite loop
   if (halfData.length === 0) {
-    console.log("No chats to summarize this round. Breaking out...");
+    console.log("No chats to summarize in this iteration, breaking out.");
     break;
   }
 
   const stringlizedChat = halfData
     .map((e) => `${e.role}: ${e.content}`)
     .join("\n");
+
+  // Summarize the accumulated chunk
   const summaryData = await summary(stringlizedChat);
 
   if (!summaryData.success) {
@@ -288,11 +301,11 @@ export async function hypaMemoryV2(
           return {
             currentTokens: currentTokens,
             chats: chats,
-            error:
-              "Summarization failed multiple times. Aborting to prevent infinite loop.",
+            error: "Summarization failed multiple times. Aborting to prevent infinite loop.",
           };
         }
-        continue; // Retry summarizing next loop iteration
+        // If summarization fails, try again in the next iteration
+        continue;
       }
       summarizationFailures = 0; // Reset on success
 
@@ -305,20 +318,23 @@ export async function hypaMemoryV2(
       console.log(
         "Summarization success:",
         "\nSummary Data:", summaryData.data,
-        "\nSummary Token Count:", summaryDataToken,
-        "\nBefore adjusting tokens:",
-        "\nCurrent Tokens:", currentTokens,
-        "\nAllocated Tokens:", allocatedTokens
+        "\nSummary Token Count:", summaryDataToken
      );
 
-      mainPrompt += `\n\n${summaryData.data}`;
-      currentTokens -= halfDataTokens;
-      allocatedTokens -= summaryDataToken;
+      // **Token accounting fix:**
+      // Earlier commits removed the summarized chats' tokens here,
+      // but never accounted for the tokens the summary itself adds.
+      // Now we:
+      // 1. Remove the old chats' tokens (they are replaced by the summary)
+      // 2. Add the summary's tokens instead
+      currentTokens -= halfDataTokens; // remove original chats' tokens
+      currentTokens += summaryDataToken; // add the summary's tokens
 
       console.log(
         "After token adjustment:",
         "\nRemoved halfDataTokens:", halfDataTokens,
         "\nAdded summaryDataToken:", summaryDataToken,
         "\nCurrent Tokens (after):", currentTokens
       );
 
       // Update lastMainChunkId and create a new mainChunk
@@ -341,11 +357,6 @@ export async function hypaMemoryV2(
           .map((e) => e.trim())
           .filter((e) => e.length > 0);
 
-      console.log(
-        "Splitting summary into chunks for memory:",
-        splitted
-      );
-
       data.chunks.push(
         ...splitted.map((e) => ({
           mainChunkID: newMainChunkId,
           text: e,
         }))
       );
 
       console.log(
         "Chunks added:",
         splitted,
         "\nUpdated mainChunks count:", data.mainChunks.length,
         "\nUpdated chunks count:", data.chunks.length
       );
     }

From 4ddb932237d41784414705e03b937dd2942f6a62 Mon Sep 17 00:00:00 2001
From: LightningHyperBlaze45654 <73149145+LightningHyperBlaze45654@users.noreply.github.com>
Date: Fri, 20 Dec 2024 23:38:40 -0800
Subject: [PATCH 16/18] add: convert previous version's hypaV2Data

---
 src/ts/process/memory/hypav2.ts | 142 ++++++++++++++++++++++++++++++--
 1 file changed, 134 insertions(+), 8 deletions(-)

diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts
index 19763332..889fcc2e 100644
--- a/src/ts/process/memory/hypav2.ts
+++ b/src/ts/process/memory/hypav2.ts
@@ -13,7 +13,7 @@ import { runSummarizer } from "../transformers";
 import { parseChatML } from "src/ts/parser.svelte";
 
 export interface HypaV2Data {
-  lastMainChunkId: number; // can be removed, but exists to more readability of the code.
+  lastMainChunkID: number; // could be removed, but kept for readability of the code.
   mainChunks: {
     // summary itself
     id: number;
     text: string;
@@ -133,6 +133,18 @@ async function summary(
   return { success: true, data: result };
 }
 // No, I am not going to touch any http API calls.
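+// A minimal, hypothetical example of what the conversion helpers below produce.
+// Only the field names come from the OldHypaV2Data and HypaV2Data interfaces in
+// this file; the concrete values and memos ("m0".."m2") are made up for illustration,
+// assuming a chat whose message memos are "m0", "m1", "m2":
+//
+//   old: { mainChunks: [{ text: "summary A", targetId: "m2" }],
+//          chunks:     [{ text: "fact 1", targetId: "m2" }] }
+//   new: { lastMainChunkID: 1,
+//          mainChunks: [{ id: 0, text: "summary A",
+//                         chatMemos: Set { "m0", "m1", "m2" },
+//                         lastChatMemo: "m2" }],
+//          chunks:     [{ mainChunkID: 0, text: "fact 1" }] }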
+// Helper function start
+export interface OldHypaV2Data {
+  chunks: {
+    text: string;
+    targetId: string;
+  }[];
+  mainChunks: {
+    text: string;
+    targetId: string;
+  }[];
+}
+
 function isSubset(subset: Set<string>, superset: Set<string>): boolean {
   for (const item of subset) {
     if (!superset.has(item)) {
       return false;
     }
   }
   return true;
 }
+function isOldHypaV2Data(obj:any): obj is OldHypaV2Data {
+  return (
+    typeof obj === 'object' &&
+    obj !== null &&
+    Array.isArray(obj.chunks) &&
+    Array.isArray(obj.mainChunks) &&
+    obj.chunks.every(chunk =>
+      typeof chunk === 'object' &&
+      chunk !== null &&
+      typeof chunk.text === 'string' &&
+      typeof chunk.targetId === 'string'
+    ) &&
+    obj.mainChunks.every(mainChunk =>
+      typeof mainChunk === 'object' &&
+      mainChunk !== null &&
+      typeof mainChunk.text === 'string' &&
+      typeof mainChunk.targetId === 'string'
+    )
+  );
+}
+// Helper function end
+
+function convertOldToNewHypaV2Data(oldData: OldHypaV2Data, chats: OpenAIChat[]): HypaV2Data {
+  const oldMainChunks = oldData.mainChunks.slice().reverse(); // Reversed order: the old format prepended mainChunks with unshift instead of push
+  const oldChunks = oldData.chunks.slice();
+  const newData: HypaV2Data = {
+    lastMainChunkID: 0,
+    mainChunks: [],
+    chunks: [],
+  };
+
+  const mainChunkTargetIds = new Set<string>();
+  for (const mc of oldMainChunks) {
+    if (mc.targetId) {
+      mainChunkTargetIds.add(mc.targetId);
+    }
+  }
+
+  // Map each chat memo to its index up front to avoid repeated linear scans
+  const chatMemoToIndex = new Map<string, number>();
+  for (const tid of mainChunkTargetIds) {
+    const idx = chats.findIndex(c => c.memo === tid);
+    if (idx !== -1) {
+      chatMemoToIndex.set(tid, idx);
+    } else {
+      chatMemoToIndex.set(tid, -1);
+    }
+  }
+
+  for (let i = 0; i < oldMainChunks.length; i++) {
+    const oldMainChunk = oldMainChunks[i];
+    const targetId = oldMainChunk.targetId;
+    const mainChunkText = oldMainChunk.text;
+
+    const previousMainChunk = i > 0 ? oldMainChunks[i - 1] : null;
+    const previousMainChunkTarget = previousMainChunk ? previousMainChunk.targetId : null;
+
+    let chatMemos = new Set<string>();
+
+    if (previousMainChunkTarget && targetId) {
+      const startIndex = chatMemoToIndex.get(previousMainChunkTarget) ?? -1;
+      const endIndex = chatMemoToIndex.get(targetId) ?? -1;
+
+      if (startIndex !== -1 && endIndex !== -1) {
+        const lowerIndex = Math.min(startIndex, endIndex);
+        const upperIndex = Math.max(startIndex, endIndex);
+
+        for (let j = lowerIndex; j <= upperIndex; j++) {
+          chatMemos.add(chats[j].memo);
+        }
+      } else {
+        // Can't identify the chats reliably, so discard this main chunk entirely
+        continue; // Technically, if this happens, the previous HypaV2Data was already corrupted. Discussion open on changing this to break;
+      }
+    } else {
+      // No previous chunk, so we gather all chats from index 0 up to the targetId's index
+      if (targetId) {
+        const targetIndex = chatMemoToIndex.get(targetId) ?? -1;
+        if (targetIndex !== -1) {
+          // Include all memos from 0 up to targetIndex
+          for (let j = 0; j <= targetIndex; j++) {
+            chatMemos.add(chats[j].memo);
+          }
+        } else {
+          continue; // Invalid MainChunk.
+        }
+      }
+    }
+    const newMainChunk = {
+      id: newData.lastMainChunkID,
+      text: mainChunkText,
+      chatMemos: chatMemos,
+      lastChatMemo: targetId,
+    };
+    newData.mainChunks.push(newMainChunk);
+    newData.lastMainChunkID++;
+    // Add the regular chunks for this mainChunk by matching on the same targetId
+    const matchingOldChunks = oldChunks.filter((oldChunk) => oldChunk.targetId === targetId);
+    for (const oldChunk of matchingOldChunks) {
+      newData.chunks.push({
+        mainChunkID: newMainChunk.id,
+        text: oldChunk.text,
+      });
+    }
+  }
+
+  return newData; // updated HypaV2Data
+}
 
 function cleanInvalidChunks(
   chats: OpenAIChat[],
@@ -158,11 +278,11 @@ function cleanInvalidChunks(
   data.chunks = data.chunks.filter((chunk) =>
     validMainChunkIds.has(chunk.mainChunkID)
   );
-  // Update lastMainChunkId
+  // Update lastMainChunkID
   if (data.mainChunks.length > 0) {
-    data.lastMainChunkId = data.mainChunks[data.mainChunks.length - 1].id;
+    data.lastMainChunkID = data.mainChunks[data.mainChunks.length - 1].id;
   } else {
-    data.lastMainChunkId = 0;
+    data.lastMainChunkID = 0;
   }
 }
 
@@ -188,8 +308,14 @@ export async function hypaMemoryV2(
   memory?: HypaV2Data;
 }> {
   const db = getDatabase();
+
+  if(room.hypaV2Data && isOldHypaV2Data(room.hypaV2Data)){
+    console.log("Old HypaV2 data detected. Converting to new format...");
+    room.hypaV2Data = convertOldToNewHypaV2Data(room.hypaV2Data, chats);
+  }
+
   const data: HypaV2Data = room.hypaV2Data ?? {
-    lastMainChunkId: 0,
+    lastMainChunkID: 0,
     chunks: [],
     mainChunks: []
   };
@@ -331,9 +457,9 @@ export async function hypaMemoryV2(
-      // Update lastMainChunkId and create a new mainChunk
-      data.lastMainChunkId++;
-      const newMainChunkId = data.lastMainChunkId;
+      // Update lastMainChunkID and create a new mainChunk
+      data.lastMainChunkID++;
+      const newMainChunkId = data.lastMainChunkID;
 
       const chatMemos = new Set(halfData.map((chat) => chat.memo));
       const lastChatMemo = halfData[halfData.length - 1].memo;

From 47cf303d1ff76499c16d2368817c59e701e3dbad Mon Sep 17 00:00:00 2001
From: Do-hyun Ko
Date: Wed, 25 Dec 2024 22:17:48 +0900
Subject: [PATCH 17/18] fix: add missing definitions of plugin APIs

---
 src/ts/plugins/plugins.ts | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/ts/plugins/plugins.ts b/src/ts/plugins/plugins.ts
index ada46156..84df7272 100644
--- a/src/ts/plugins/plugins.ts
+++ b/src/ts/plugins/plugins.ts
@@ -245,6 +245,8 @@ export async function loadV2Plugin(plugins:RisuPlugin[]){
             const setChar = globalThis.__pluginApis__.setChar
             const addProvider = globalThis.__pluginApis__.addProvider
             const addRisuEventHandler = globalThis.__pluginApis__.addRisuEventHandler
+            const addRisuReplacer = globalThis.__pluginApis__.addRisuReplacer
+            const removeRisuReplacer = globalThis.__pluginApis__.removeRisuReplacer
             const onUnload = globalThis.__pluginApis__.onUnload
 
             ${data}

From e7f509b51ed77b789e4facb2c9a86b81d1063f8f Mon Sep 17 00:00:00 2001
From: Do-hyun Ko
Date: Wed, 25 Dec 2024 22:27:48 +0900
Subject: [PATCH 18/18] :pencil2: Fix typos and tone of the PR template

---
 .github/pull_request_template.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index 6e340384..c46cf7a0 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,6 +1,6 @@
 # PR Checklist
-- [ ] Did you check if it works normally in all models? 
*ignore this when it dosen't uses models* -- [ ] Did you check if it works normally in all of web, local and node hosted versions? if it dosen't, did you blocked it in those versions? -- [ ] Did you added a type def? +- [ ] Have you checked if it works normally in all models? *Ignore this if it doesn't use models.* +- [ ] Have you checked if it works normally in all web, local, and node hosted versions? If it doesn't, have you blocked it in those versions? +- [ ] Have you added type definitions? # Description