Stable update hypav2.ts

No joke, it really works now.
LightningHyperBlaze45654
2024-06-17 23:38:48 -07:00
parent 21f5d61521
commit e530783eb6

hypav2.ts

@@ -150,12 +150,10 @@ export async function hypaMemoryV2(
             for (const chat of removedChats) {
                 currentTokens -= await tokenizer.tokenizeChat(chat);
             }
-            chats = chats.slice(ind + 1);
             mainPrompt = chunk.text;
             const mpToken = await tokenizer.tokenizeChat({ role: 'system', content: mainPrompt });
             allocatedTokens -= mpToken;
         }
-        // The mainChunks won't be overlapping eachother.
     }
     // Token management loop
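For context, the bookkeeping in this hunk follows a simple pattern: when chats are summarized away, their token counts are refunded from the running total, and the resulting main prompt is charged against the remaining memory allocation. Below is a minimal, hypothetical sketch of that pattern (not the actual hypav2.ts code); it assumes an async per-message tokenizer and plain { role, content } chat objects, and every name other than those visible in the diff is illustrative.

    // Hypothetical sketch: token bookkeeping when a main-chunk summary
    // replaces a span of removed chats.
    interface ChatMessage { role: string; content: string; }
    type TokenizeChat = (chat: ChatMessage) => Promise<number>;

    async function chargeSummary(
        removedChats: ChatMessage[],
        summaryText: string,
        tokenizeChat: TokenizeChat,
        currentTokens: number,
        allocatedTokens: number
    ): Promise<{ mainPrompt: string; currentTokens: number; allocatedTokens: number }> {
        // Refund the tokens of every chat the summary replaces.
        for (const chat of removedChats) {
            currentTokens -= await tokenizeChat(chat);
        }
        // The summary becomes the system-level main prompt and is charged
        // against the token budget reserved for memory.
        const mainPrompt = summaryText;
        const mpTokens = await tokenizeChat({ role: 'system', content: mainPrompt });
        allocatedTokens -= mpTokens;
        return { mainPrompt, currentTokens, allocatedTokens };
    }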
@@ -272,21 +270,20 @@ export async function hypaMemoryV2(
     const lastTargetId = data.mainChunks.length > 0 ? data.mainChunks[0].targetId : null;
     if (lastTargetId) {
         const lastIndex = getValidChatIndex(lastTargetId);
-        console.log(chats[lastIndex], lastIndex)
         if (lastIndex !== -1) {
             const remainingChats = chats.slice(lastIndex + 1);
-            chats = [chats[0]]
-            chats.push(...remainingChats);
-        } else {
-            chats = chats
-        }
+            chats = [chats[0], ...remainingChats];
+        }
     }
     // Add last two chats if they exist
+    // Yeah, It's fine to remove this, but for the sake of stability, currently commented it out. Will add stabilizer for this
+    /*
     if (lastTwoChats.length === 2) {
         chats.push(lastTwoChats[0]);
         chats.push(lastTwoChats[1]);
     }
+    */
     console.log("model being used: ", db.hypaModel, db.supaModelType, "\nCurrent session tokens: ", currentTokens, "\nAll chats, including memory system prompt: ", chats, "\nMemory data, with all the chunks: ", data);
     return {
         currentTokens: currentTokens,
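The net effect of this second hunk: the visible chat window is rebuilt in a single expression (first entry plus everything after the last summarized chat), the stray console.log and the no-op else-branch (`chats = chats`) are dropped, and the re-append of the last two chats is commented out until a stabilizer exists for it. The following standalone sketch shows the slicing logic the hunk converges on; it is an illustration under assumptions, with getValidChatIndex replaced by a hypothetical findIndexById callback and with the guess (based on the log message) that chats[0] holds the memory system prompt.

    // Hypothetical sketch of the window rebuild: keep the first entry
    // (assumed to be the memory/system prompt) plus everything after the
    // chat targeted by the most recent main chunk.
    interface ChatMessage { role: string; content: string; }

    function rebuildWindow(
        chats: ChatMessage[],
        lastTargetId: string | null,
        findIndexById: (id: string) => number  // stand-in for getValidChatIndex
    ): ChatMessage[] {
        if (!lastTargetId) return chats;
        const lastIndex = findIndexById(lastTargetId);
        // If the target is not found, leave the window untouched
        // (the removed `chats = chats` else-branch did nothing anyway).
        if (lastIndex === -1) return chats;
        // One expression instead of build-then-push: first entry + remainder.
        return [chats[0], ...chats.slice(lastIndex + 1)];
    }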