diff --git a/src/lib/Setting/Pages/OtherBotSettings.svelte b/src/lib/Setting/Pages/OtherBotSettings.svelte
index 67c98344..37a5ffbb 100644
--- a/src/lib/Setting/Pages/OtherBotSettings.svelte
+++ b/src/lib/Setting/Pages/OtherBotSettings.svelte
@@ -211,6 +211,7 @@
{language.type}
{
@@ -222,6 +223,9 @@
} else if (value === 'hanuraiMemory'){
$DataBase.supaMemoryType = 'none'
$DataBase.hanuraiEnable = true
+ } else if (value === 'hypav2') {
+ $DataBase.supaMemoryType = 'hypaV2'
+ $DataBase.hanuraiEnable = false
} else {
$DataBase.supaMemoryType = 'none'
$DataBase.hanuraiEnable = false
@@ -229,6 +233,7 @@
}}>
None
{language.SuperMemory}
+ {language.HypaMemory} V2
{language.hanuraiMemory}
@@ -238,6 +243,8 @@
+ {:else if $DataBase.supaMemoryType === 'hypaV2'}
+ {language.HypaMemory} V2 is experimental
{:else if $DataBase.supaMemoryType !== 'none'}
{language.SuperMemory} {language.model}
diff --git a/src/ts/process/index.ts b/src/ts/process/index.ts
index 3ac76566..858516de 100644
--- a/src/ts/process/index.ts
+++ b/src/ts/process/index.ts
@@ -26,6 +26,7 @@ import { runCharacterJS } from "../plugins/embedscript";
import { addRerolls } from "./prereroll";
import { runImageEmbedding } from "./transformers";
import { hanuraiMemory } from "./memory/hanuraiMemory";
+import { hypaMemoryV2 } from "./memory/hypav2";
export interface OpenAIChat{
role: 'system'|'user'|'assistant'|'function'
@@ -668,6 +669,19 @@ export async function sendChat(chatProcessIndex = -1,arg:{chatAdditonalTokens?:n
chats = hn.chats
currentTokens = hn.tokens
}
+ else if(db.supaMemoryType === 'hypaV2'){
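+        // HypaV2 builds a rolling summary plus embedding-retrieved memory
+        // chunks; its state is persisted on the chat for reuse across turns.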
+ const sp = await hypaMemoryV2(chats, currentTokens, maxContextTokens, currentChat, nowChatroom, tokenizer)
+ if(sp.error){
+ alertError(sp.error)
+ return false
+ }
+ chats = sp.chats
+ currentTokens = sp.currentTokens
+        currentChat.hypaV2Data = sp.memory ?? currentChat.hypaV2Data
+        db.characters[selectedChar].chats[selectedChat].hypaV2Data = currentChat.hypaV2Data
+        DataBase.set(db)
+ }
else{
const sp = await supaMemory(chats, currentTokens, maxContextTokens, currentChat, nowChatroom, tokenizer, {
asHyper: db.hypaMemory
diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts
new file mode 100644
index 00000000..710a0b11
--- /dev/null
+++ b/src/ts/process/memory/hypav2.ts
@@ -0,0 +1,201 @@
+import { DataBase, type Chat, type character, type groupChat } from "src/ts/storage/database";
+import type { OpenAIChat } from "..";
+import type { ChatTokenizer } from "src/ts/tokenizer";
+import { get } from "svelte/store";
+import { requestChatData } from "../request";
+import { HypaProcesser } from "./hypamemory";
+
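+// Persistent per-chat memory state for HypaV2. `mainChunks` holds the rolling
+// summary of the conversation so far; `chunks` holds the detail paragraphs kept
+// for embedding search. `targetId` is the memo of the first chat message that
+// remains in context after the span a chunk covers.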
+export interface HypaV2Data{
+ chunks: {
+ text:string
+ targetId:string
+ }[]
+ mainChunks: {
+ text:string
+ targetId:string
+ }[]
+}
+
+
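+// Ask the chat model to summarize a stringified chat span. On failure, `data`
+// carries the error message instead of a summary.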
+async function summary(stringlizedChat:string):Promise<{
+ success:boolean
+ data:string
+}>{
+ const promptbody:OpenAIChat[] = [
+ {
+ role: "user",
+ content: stringlizedChat
+ },
+ {
+ role: "system",
+ content: "Summarize this roleplay scene in a coherent narrative format for future reference. Summarize what happened, focusing on events and interactions between them. If someone or something is new or changed, include a brief characterization of them."
+ }
+ ]
+ const da = await requestChatData({
+ formated: promptbody,
+ bias: {},
+ useStreaming: false,
+ }, 'model')
+ if(da.type === 'fail' || da.type === 'streaming' || da.type === 'multiline'){
+ return {
+ data: "Hypamemory HTTP: " + da.result,
+ success: false
+ }
+ }
+ return {
+ data: da.result,
+ success: true
+ }
+}
+
+export async function hypaMemoryV2(
+ chats:OpenAIChat[],
+ currentTokens:number,
+ maxContextTokens:number,
+ room:Chat,
+ char:character|groupChat,
+ tokenizer:ChatTokenizer,
+ arg:{asHyper?:boolean} = {}
+): Promise<{ currentTokens: number; chats: OpenAIChat[]; error?:string; memory?:HypaV2Data;}>{
+
+ const db = get(DataBase)
+
+ const data:HypaV2Data = room.hypaV2Data ?? {
+ chunks:[],
+ mainChunks:[]
+ }
+
+    // Reserve a fixed token budget for the memory prompt, plus a small margin
+    // for the template prompt, and count both against the context up front.
+    let allocatedTokens = 3000
+    currentTokens += allocatedTokens
+    currentTokens += 50
+    let mainPrompt = ""
+
+    // Reuse the newest stored main chunk: drop the chats it already covers,
+    // subtract their tokens, and charge its text against the budget.
+    while(data.mainChunks.length > 0){
+        const chunk = data.mainChunks[0]
+        const ind = chats.findIndex(e => e.memo === chunk.targetId)
+        if(ind === -1){
+            // The chunk's target message is gone from this chat; discard it.
+            data.mainChunks.shift()
+            continue
+        }
+
+        // splice already removes the covered chats from `chats` in place.
+        const removedChats = chats.splice(0, ind)
+        for(const chat of removedChats){
+            currentTokens -= await tokenizer.tokenizeChat(chat)
+        }
+        mainPrompt = chunk.text
+        const mpToken = await tokenizer.tokenizeChat({role:'system', content:mainPrompt})
+        allocatedTokens -= mpToken
+        break
+    }
+
+    // While over the context limit, summarize the older half of the remaining
+    // chat into the main prompt and drop those messages from the context.
+    while(currentTokens >= maxContextTokens){
+
+        const idx = Math.floor(chats.length / 2)
+        const targetId = chats[idx].memo
+        // Summarize the older half; chats[idx] onward stays in context.
+        const halfData = chats.slice(0, idx)
+
+        let halfDataTokens = 0
+        for(const chat of halfData){
+            halfDataTokens += await tokenizer.tokenizeChat(chat)
+        }
+
+        const stringlizedChat = halfData.map(e => `${e.role}: ${e.content}`).join('\n')
+
+ const summaryData = await summary(stringlizedChat)
+
+ if(!summaryData.success){
+ return {
+ currentTokens: currentTokens,
+ chats: chats,
+ error: summaryData.data
+ }
+ }
+
+        const summaryDataToken = await tokenizer.tokenizeChat({role:'system', content:summaryData.data})
+        mainPrompt += `\n\n${summaryData.data}`
+        // Drop the summarized half so the loop converges and the token
+        // accounting matches what actually remains in `chats`.
+        chats = chats.slice(idx)
+        currentTokens -= halfDataTokens
+        allocatedTokens -= summaryDataToken
+
+        data.mainChunks.unshift({
+            text: mainPrompt,
+            targetId: targetId
+        })
+
+        if(allocatedTokens < 1500){
+            // The main prompt has grown too large: move its detail paragraphs
+            // into the embedding chunks and keep only a summary as the new
+            // main prompt.
+            const summarizedMp = await summary(mainPrompt)
+            if(!summarizedMp.success){
+                return {
+                    currentTokens: currentTokens,
+                    chats: chats,
+                    error: summarizedMp.data
+                }
+            }
+            const mpToken = await tokenizer.tokenizeChat({role:'system', content:mainPrompt})
+            const summaryToken = await tokenizer.tokenizeChat({role:'system', content:summarizedMp.data})
+
+            allocatedTokens += mpToken
+            allocatedTokens -= summaryToken
+
+            const splited = mainPrompt.split('\n\n').map(e => e.trim()).filter(e => e.length > 0)
+
+            data.chunks.push(...splited.map(e => ({
+                text: e,
+                targetId: targetId
+            })))
+
+            mainPrompt = summarizedMp.data
+            data.mainChunks[0].text = mainPrompt
+        }
+ }
+
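+    // Embed every stored detail chunk. The "search_document:" / "search_query:"
+    // prefixes are the task markers expected by nomic-style embedding models.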
+ const processer = new HypaProcesser("nomic")
+
+ await processer.addText(data.chunks.map((v) => {
+ return "search_document: " + v.text
+ }))
+
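+    // Rank chunks by similarity to the last three messages, weighting more
+    // recent messages more heavily (score is divided by i + 1).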
+ let scoredResults:{[key:string]:number} = {}
+ for(let i=0;i<3;i++){
+ const pop = chats[chats.length - i - 1]
+ if(!pop){
+ break
+ }
+ const searched = await processer.similaritySearchScored(`search_query: ${pop.content}`)
+ for(const result of searched){
+ const score = result[1]/(i+1)
+ if(scoredResults[result[0]]){
+ scoredResults[result[0]] += score
+ }else{
+ scoredResults[result[0]] = score
+ }
+ }
+ }
+
+ const scoredArray = Object.entries(scoredResults).sort((a,b) => b[1] - a[1])
+
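+    // Greedily fill the remaining token budget with the best-scoring chunks.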
+ let chunkResultPrompts = ""
+ while(allocatedTokens > 0){
+ const target = scoredArray.shift()
+ if(!target){
+ break
+ }
+        // Strip the "search_document: " prefix that was added before embedding.
+        const chunkText = target[0].substring("search_document: ".length)
+        const tokenized = await tokenizer.tokenizeChat({
+            role: 'system',
+            content: chunkText
+        })
+        if(tokenized > allocatedTokens){
+            break
+        }
+        chunkResultPrompts += chunkText + '\n\n'
+        allocatedTokens -= tokenized
+ }
+
+
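+    // Prepend the assembled memory (rolling summary + retrieved chunks) as a
+    // system message. The "supaMemory" memo marks it as generated memory.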
+ const fullResult = `${mainPrompt}\n${chunkResultPrompts}`
+
+ chats.unshift({
+ role: "system",
+ content: fullResult,
+ memo: "supaMemory"
+ })
+ return {
+ currentTokens: currentTokens,
+ chats: chats,
+ memory: data
+ }
+}
\ No newline at end of file
diff --git a/src/ts/storage/database.ts b/src/ts/storage/database.ts
index f09a04c5..387c8678 100644
--- a/src/ts/storage/database.ts
+++ b/src/ts/storage/database.ts
@@ -914,6 +914,7 @@ export interface Chat{
localLore: loreBook[]
sdData?:string
supaMemoryData?:string
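+    /** Persisted HypaV2 memory state; see src/ts/process/memory/hypav2 */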
+ hypaV2Data?:HypaV2Data
lastMemory?:string
suggestMessages?:string[]
isStreaming?:boolean
@@ -1217,6 +1218,7 @@ import { encode as encodeMsgpack, decode as decodeMsgpack } from "msgpackr";
import * as fflate from "fflate";
import type { OnnxModelFiles } from '../process/transformers';
import type { RisuModule } from '../process/modules';
+import type { HypaV2Data } from '../process/memory/hypav2';
export async function downloadPreset(id:number){
saveCurrentPreset()