diff --git a/src/lib/Setting/Pages/OtherBotSettings.svelte b/src/lib/Setting/Pages/OtherBotSettings.svelte
index f3add6b5..db3aab15 100644
--- a/src/lib/Setting/Pages/OtherBotSettings.svelte
+++ b/src/lib/Setting/Pages/OtherBotSettings.svelte
@@ -276,7 +276,7 @@
{language.type}
{
@@ -284,15 +284,19 @@
const value = v.target.value
if (value === 'supaMemory'){
$DataBase.supaMemoryType = 'distilbart'
+ $DataBase.hypav2 = false
$DataBase.hanuraiEnable = false
} else if (value === 'hanuraiMemory'){
$DataBase.supaMemoryType = 'none'
+ $DataBase.hypav2 = false
$DataBase.hanuraiEnable = true
} else if (value === 'hypaV2') {
$DataBase.supaMemoryType = 'hypaV2'
+ $DataBase.hypav2 = true
$DataBase.hanuraiEnable = false
} else {
$DataBase.supaMemoryType = 'none'
+ $DataBase.hypav2 = false
$DataBase.hanuraiEnable = false
}
}}>
@@ -309,26 +313,27 @@
- {:else if $DataBase.supaMemoryType === 'hypaV2'}
+ {:else if $DataBase.hypav2}
{language.hypaV2Desc}
{language.SuperMemory} {language.model}
- distilbart-cnn-6-6 (Free/Local)
- OpenAI 3.5 Turbo Instruct
- {language.submodel}
+ distilbart-cnn-6-6 (Free/Local)
+ OpenAI 3.5 Turbo Instruct
+ {language.submodel}
{language.SuperMemory} Prompt
{language.HypaMemory} Model
- MiniLM-L6-v2 (Free / Local)
- OpenAI Ada (Davinci / Curie Only)
+ MiniLM-L6-v2 (Free / Local)
+ Nomic (Free / Local)
+ OpenAI Ada (Davinci / Curie Only)
{language.hypaChunkSize}
{language.hypaAllocatedTokens}
- {:else if $DataBase.supaMemoryType !== 'none'}
+ {:else if $DataBase.supaMemoryType !== 'none' && !$DataBase.hypav2}
{language.supaDesc}
{language.SuperMemory} {language.model}
diff --git a/src/ts/process/memory/hypav2.ts b/src/ts/process/memory/hypav2.ts
index 8a944182..ddb94a94 100644
--- a/src/ts/process/memory/hypav2.ts
+++ b/src/ts/process/memory/hypav2.ts
@@ -4,210 +4,266 @@ import type { ChatTokenizer } from "src/ts/tokenizer";
import { get } from "svelte/store";
import { requestChatData } from "../request";
import { HypaProcesser } from "./hypamemory";
+import { globalFetch } from "src/ts/storage/globalApi";
+import { runSummarizer } from "../transformers";
-export interface HypaV2Data{
+export interface HypaV2Data {
chunks: {
- text:string
- targetId:string
- }[]
+ text: string;
+ targetId: string;
+ }[];
mainChunks: {
- text:string
- targetId:string
- }[]
+ text: string;
+ targetId: string;
+ }[];
}
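+// Shared summarizer for HypaV2: routes to the local distilbart model, the
+// OpenAI completions API, or the configured sub-model, based on db.supaMemoryType.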
+async function summarize(stringlizedChat: string): Promise<{ success: boolean; data: string }> {
+ const db = get(DataBase);
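+ // distilbart runs locally through the transformers pipeline; no API key needed.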
+ if (db.supaMemoryType === 'distilbart') {
+ try {
+ const sum = await runSummarizer(stringlizedChat);
+ return { success: true, data: sum };
+ } catch (error) {
+ return {
+ success: false,
+ data: "SupaMemory: Summarizer: " + `${error}`
+ };
+ }
+ }
+
+ const supaPrompt = db.supaMemoryPrompt === '' ?
+ "[Summarize the ongoing role story. It must also remove redundancy and unnecessary text and content from the output to reduce tokens for gpt3 and other sublanguage models]\n"
+ : db.supaMemoryPrompt;
+
+ let result = '';
+
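+ // Anything other than 'subModel' (including 'hypaV2' itself) falls through to
+ // the OpenAI legacy completions endpoint, defaulting to text-davinci-003.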
+ if (db.supaMemoryType !== 'subModel') {
+ const promptbody = stringlizedChat + '\n\n' + supaPrompt + "\n\nOutput:";
+
+ const da = await globalFetch("https://api.openai.com/v1/completions", {
+ headers: {
+ "Content-Type": "application/json",
+ "Authorization": "Bearer " + db.supaMemoryKey
+ },
+ method: "POST",
+ body: {
+ "model": db.supaMemoryType === 'curie' ? "text-curie-001"
+ : db.supaMemoryType === 'instruct35' ? 'gpt-3.5-turbo-instruct'
+ : "text-davinci-003",
+ "prompt": promptbody,
+ "max_tokens": 600,
+ "temperature": 0
+ }
+ });
+
+ try {
+ if (!da.ok) {
+ return {
+ success: false,
+ data: "SupaMemory: HTTP: " + JSON.stringify(da.data)
+ };
+ }
+
+ result = (await da.data)?.choices[0]?.text?.trim();
+
+ if (!result) {
+ return {
+ success: false,
+ data: "SupaMemory: HTTP: " + JSON.stringify(da.data)
+ };
+ }
+
+ return { success: true, data: result };
+ } catch (error) {
+ return {
+ success: false,
+ data: "SupaMemory: HTTP: " + error
+ };
+ }
+ } else {
+ const promptbody: OpenAIChat[] = [
+ {
+ role: "user",
+ content: stringlizedChat
+ },
+ {
+ role: "system",
+ content: supaPrompt
+ }
+ ];
+ const da = await requestChatData({
+ formated: promptbody,
+ bias: {},
+ useStreaming: false,
+ noMultiGen: true
+ }, 'submodel');
+ if (da.type === 'fail' || da.type === 'streaming' || da.type === 'multiline') {
+ return {
+ success: false,
+ data: "SupaMemory: HTTP: " + da.result
+ };
+ }
+ result = da.result;
+ }
+ return { success: true, data: result };
+}
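+// HypaV2 keeps one rolling summary ("mainChunks") plus summary fragments
+// ("chunks") that are embedded and retrieved by similarity to recent messages.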
export async function hypaMemoryV2(
- chats:OpenAIChat[],
- currentTokens:number,
- maxContextTokens:number,
- room:Chat,
- char:character|groupChat,
- tokenizer:ChatTokenizer,
- arg:{asHyper?:boolean} = {}
-): Promise<{ currentTokens: number; chats: OpenAIChat[]; error?:string; memory?:HypaV2Data;}>{
+ chats: OpenAIChat[],
+ currentTokens: number,
+ maxContextTokens: number,
+ room: Chat,
+ char: character | groupChat,
+ tokenizer: ChatTokenizer,
+ arg: { asHyper?: boolean, summaryModel?: string, summaryPrompt?: string, hypaModel?: string } = {}
+): Promise<{ currentTokens: number; chats: OpenAIChat[]; error?: string; memory?: HypaV2Data; }> {
- const db = get(DataBase)
+ const db = get(DataBase);
- const data:HypaV2Data = room.hypaV2Data ?? {
- chunks:[],
- mainChunks:[]
- }
-
- //this is for the prompt
+ const data: HypaV2Data = room.hypaV2Data ?? {
+ chunks: [],
+ mainChunks: []
+ };
- let allocatedTokens = db.hypaAllocatedTokens
- let chunkSize = db.hypaChunkSize
- currentTokens += allocatedTokens
- currentTokens += 50 //this is for the template prompt
- let mainPrompt = ""
+ let allocatedTokens = db.hypaAllocatedTokens;
+ let chunkSize = db.hypaChunkSize;
+ currentTokens += allocatedTokens;
+ currentTokens += 50; // this is for the template prompt
+ let mainPrompt = "";
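+ // Reuse the newest persisted summary and drop the chats it already covers.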
- while(data.mainChunks.length > 0){
- const chunk = data.mainChunks[0]
- const ind = chats.findIndex(e => e.memo === chunk.targetId)
- if(ind === -1){
- data.mainChunks.shift()
- continue
+ while (data.mainChunks.length > 0) {
+ const chunk = data.mainChunks[0];
+ const ind = chats.findIndex(e => e.memo === chunk.targetId);
+ if (ind === -1) {
+ data.mainChunks.shift();
+ continue;
}
- const removedChats = chats.splice(0, ind)
- for(const chat of removedChats){
- currentTokens -= await tokenizer.tokenizeChat(chat)
+ const removedChats = chats.splice(0, ind);
+ for (const chat of removedChats) {
+ currentTokens -= await tokenizer.tokenizeChat(chat);
}
- chats = chats.slice(ind)
- mainPrompt = chunk.text
- const mpToken = await tokenizer.tokenizeChat({role:'system', content:mainPrompt})
- allocatedTokens -= mpToken
- break
+ chats = chats.slice(ind);
+ mainPrompt = chunk.text;
+ const mpToken = await tokenizer.tokenizeChat({ role: 'system', content: mainPrompt });
+ allocatedTokens -= mpToken;
+ break;
}
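+ // While over budget, fold the oldest chats into mainPrompt, roughly chunkSize tokens at a time.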
- while(currentTokens >= maxContextTokens){
-
- let idx = 0
- let targetId = ''
- const halfData:OpenAIChat[] = []
+ while (currentTokens >= maxContextTokens) {
+ let idx = 0;
+ let targetId = '';
+ const halfData: OpenAIChat[] = [];
- let halfDataTokens = 0
- while(halfDataTokens < chunkSize){
- const chat = chats[idx]
- if(!chat){
- break
+ let halfDataTokens = 0;
+ while (halfDataTokens < chunkSize) {
+ const chat = chats[idx];
+ if (!chat) {
+ break;
}
- halfDataTokens += await tokenizer.tokenizeChat(chat)
- halfData.push(chat)
- idx++
- targetId = chat.memo
+ halfDataTokens += await tokenizer.tokenizeChat(chat);
+ halfData.push(chat);
+ idx++;
+ targetId = chat.memo;
}
- async function summary(stringlizedChat:string):Promise<{
- success:boolean
- data:string
- }>{
- const promptbody:OpenAIChat[] = [
- {
- role: "user",
- content: stringlizedChat
- },
- {
- role: "system",
- content: "Summarize this roleplay scene in a coherent narrative format for future reference. Summarize what happened, focusing on events and interactions between them. If someone or something is new or changed, include a brief characterization of them."
- }
- ]
- const da = await requestChatData({
- formated: promptbody,
- bias: {},
- useStreaming: false,
- noMultiGen: true
- }, 'model')
- if(da.type === 'fail' || da.type === 'streaming' || da.type === 'multiline'){
- return {
- data: "Hypamemory HTTP: " + da.result,
- success: false
- }
- }
- return {
- data: da.result,
- success: true
- }
- }
- const stringlizedChat = halfData.map(e => `${e.role}: ${e.content}`).join('\n')
- const summaryData = await summary(stringlizedChat)
-
- if(!summaryData.success){
+ const stringlizedChat = halfData.map(e => `${e.role}: ${e.content}`).join('\n');
+ const summaryData = await summarize(stringlizedChat);
+ if (!summaryData.success) {
return {
currentTokens: currentTokens,
chats: chats,
error: summaryData.data
- }
+ };
}
- const summaryDataToken = await tokenizer.tokenizeChat({role:'system', content:summaryData.data})
- mainPrompt += `\n\n${summaryData.data}`
- currentTokens -= halfDataTokens
- allocatedTokens -= summaryDataToken
+ const summaryDataToken = await tokenizer.tokenizeChat({ role: 'system', content: summaryData.data });
+ mainPrompt += `\n\n${summaryData.data}`;
+ currentTokens -= halfDataTokens;
+ allocatedTokens -= summaryDataToken;
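+ // Persist the grown summary, keyed to the last message it covers.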
data.mainChunks.unshift({
text: mainPrompt,
targetId: targetId
- })
+ });
- if(allocatedTokens < 1500){
- const summarizedMp = await summary(mainPrompt)
- const mpToken = await tokenizer.tokenizeChat({role:'system', content:mainPrompt})
- const summaryToken = await tokenizer.tokenizeChat({role:'system', content:summarizedMp.data})
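+ // When the summary eats too much of the budget, compress it again and archive
+ // its paragraphs as similarity-searchable chunks.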
+ if (allocatedTokens < 1500) {
+ const summarizedMp = await summarize(mainPrompt);
+ const mpToken = await tokenizer.tokenizeChat({ role: 'system', content: mainPrompt });
+ const summaryToken = await tokenizer.tokenizeChat({ role: 'system', content: summarizedMp.data });
- allocatedTokens -= summaryToken
- allocatedTokens += mpToken
+ allocatedTokens -= summaryToken;
+ allocatedTokens += mpToken;
- const splited = mainPrompt.split('\n\n').map(e => e.trim()).filter(e => e.length > 0)
+ const splited = mainPrompt.split('\n\n').map(e => e.trim()).filter(e => e.length > 0);
data.chunks.push(...splited.map(e => ({
text: e,
targetId: targetId
- })))
+ })));
- data.mainChunks[0].text = mainPrompt
+ mainPrompt = summarizedMp.data; // assumed intent: adopt the compressed summary, matching the token swap above
+ data.mainChunks[0].text = mainPrompt;
}
}
-
- const processer = new HypaProcesser("nomic")
+
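+ // Embed every stored chunk with the user-selected embedding model.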
+ const processer = new HypaProcesser(db.hypaModel);
await processer.addText(data.chunks.filter(v => {
- return v.text.trim().length > 0
+ return v.text.trim().length > 0;
}).map((v) => {
- return "search_document: " + v.text.trim()
- }))
+ return "search_document: " + v.text.trim();
+ }));
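+ // Score chunks against the last three messages, weighting newer ones higher (1, 1/2, 1/3).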
- let scoredResults:{[key:string]:number} = {}
- for(let i=0;i<3;i++){
- const pop = chats[chats.length - i - 1]
- if(!pop){
- break
+ let scoredResults: { [key: string]: number } = {};
+ for (let i = 0; i < 3; i++) {
+ const pop = chats[chats.length - i - 1];
+ if (!pop) {
+ break;
}
- const searched = await processer.similaritySearchScored(`search_query: ${pop.content}`)
- for(const result of searched){
- const score = result[1]/(i+1)
- if(scoredResults[result[0]]){
- scoredResults[result[0]] += score
- }else{
- scoredResults[result[0]] = score
+ const searched = await processer.similaritySearchScored(`search_query: ${pop.content}`);
+ for (const result of searched) {
+ const score = result[1] / (i + 1);
+ if (scoredResults[result[0]]) {
+ scoredResults[result[0]] += score;
+ } else {
+ scoredResults[result[0]] = score;
}
}
}
- const scoredArray = Object.entries(scoredResults).sort((a,b) => b[1] - a[1])
+ const scoredArray = Object.entries(scoredResults).sort((a, b) => b[1] - a[1]);
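+ // Greedily take the best-scoring chunks until the allocated token budget runs out.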
- let chunkResultPrompts = ""
- while(allocatedTokens > 0){
- const target = scoredArray.shift()
- if(!target){
- break
+ let chunkResultPrompts = "";
+ while (allocatedTokens > 0) {
+ const target = scoredArray.shift();
+ if (!target) {
+ break;
}
const tokenized = await tokenizer.tokenizeChat({
role: 'system',
content: target[0].substring(14)
- })
- if(tokenized > allocatedTokens){
- break
+ });
+ if (tokenized > allocatedTokens) {
+ break;
}
- chunkResultPrompts += target[0].substring(14) + '\n\n'
- allocatedTokens -= tokenized
+ chunkResultPrompts += target[0].substring(14) + '\n\n';
+ allocatedTokens -= tokenized;
}
-
- const fullResult = `${mainPrompt}\n${chunkResultPrompts}`
+ const fullResult = `${mainPrompt}\n${chunkResultPrompts}`;
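+ // Prepend the combined memory as a single system message.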
chats.unshift({
role: "system",
content: fullResult,
memo: "supaMemory"
- })
+ });
return {
currentTokens: currentTokens,
chats: chats,
memory: data
- }
-}
\ No newline at end of file
+ };
+}
diff --git a/src/ts/storage/database.ts b/src/ts/storage/database.ts
index 6d0cdbd5..d6650bb3 100644
--- a/src/ts/storage/database.ts
+++ b/src/ts/storage/database.ts
@@ -568,6 +568,7 @@ export interface Database{
useAdditionalAssetsPreview:boolean,
usePlainFetch:boolean
hypaMemory:boolean
+ hypav2:boolean
proxyRequestModel:string
ooba:OobaSettings
ainconfig: AINsettings