add logging
@@ -776,9 +776,9 @@ export async function sendChat(chatProcessIndex = -1,arg:{
             chats = hn.chats
             currentTokens = hn.tokens
         }
-        else if(DBState.db.hypav2){ //HypaV2 support needs to be changed like this.
+        else if(DBState.db.hypav2){
+            console.log("Current chat's hypaV2 Data: ", currentChat.hypaV2Data)
             const sp = await hypaMemoryV2(chats, currentTokens, maxContextTokens, currentChat, nowChatroom, tokenizer)
-            console.log("All chats: ", chats)
             if(sp.error){
                 console.log(sp)
                 alertError(sp.error)
@@ -788,7 +788,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{
             currentTokens = sp.currentTokens
             currentChat.hypaV2Data = sp.memory ?? currentChat.hypaV2Data
             DBState.db.characters[selectedChar].chats[selectedChat].hypaV2Data = currentChat.hypaV2Data
-            console.log(currentChat.hypaV2Data)
+            console.log("Current chat's HypaV2Data: ", currentChat.hypaV2Data)
         }
         else{
             const sp = await supaMemory(chats, currentTokens, maxContextTokens, currentChat, nowChatroom, tokenizer, {
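The change in these two hunks is purely diagnostic: the bare console.log(currentChat.hypaV2Data) becomes a labeled log, and another labeled log is added before the hypaMemoryV2 call. A minimal sketch of how such labeled debug output could be factored into a helper follows; this is hypothetical, since the commit inlines its console.log calls, and DEBUG_HYPA/logHypa are not names from this codebase:

// Hypothetical helper, not part of this commit: tags HypaV2 debug output
// so it can be filtered in the browser console.
const DEBUG_HYPA = true;

function logHypa(label: string, ...values: unknown[]): void {
    if (DEBUG_HYPA) {
        console.log(`[HypaV2] ${label}`, ...values);
    }
}

// Usage mirroring the logging added above:
// logHypa("Current chat's hypaV2 Data:", currentChat.hypaV2Data);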
@@ -226,11 +226,14 @@ export async function hypaMemoryV2(
     }
     // Starting chat index of new mainChunk to be generated
 
-    // Token management loop(where using of )
+    // Token management loop(If current token exceeds allowed amount...)
     while (currentTokens >= maxContextTokens) {
+        console.log("The current Token exceeded maxContextTokens. Current tokens: ", currentTokens, "\nMax Context Tokens: ", maxContextTokens)
         const halfData: OpenAIChat[] = [];
         let halfDataTokens = 0;
 
+        const startIdx = idx;
+
         // Accumulate chats to summarize
         while (
             halfDataTokens < chunkSize &&
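This hunk instruments the summarization loop: while the history is over budget, hypaMemoryV2 pulls the oldest messages into halfData until roughly chunkSize tokens are collected, and the new startIdx records where that window begins. A simplified, self-contained sketch of that accumulation step, assuming placeholder types and a crude token estimate rather than the real hypaMemoryV2 internals:

interface OpenAIChat { role: string; content: string; }

// Crude stand-in for the tokenizer.tokenizeChat call used in the diff.
async function tokenizeChat(chat: OpenAIChat): Promise<number> {
    return Math.ceil(chat.content.length / 4);
}

// Collect the oldest remaining chats into one chunk of at most ~chunkSize tokens.
async function accumulateChunk(
    chats: OpenAIChat[],
    startIdx: number,
    chunkSize: number
): Promise<{ halfData: OpenAIChat[]; endIdx: number; tokens: number }> {
    const halfData: OpenAIChat[] = [];
    let halfDataTokens = 0;
    let idx = startIdx;
    while (idx < chats.length && halfDataTokens < chunkSize) {
        const chat = chats[idx];
        halfDataTokens += await tokenizeChat(chat);
        halfData.push(chat);
        idx++;
    }
    // Matches the `const endIdx = idx - 1` bookkeeping logged by the commit.
    return { halfData, endIdx: idx - 1, tokens: halfDataTokens };
}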
@@ -241,7 +244,8 @@ export async function hypaMemoryV2(
             halfDataTokens += await tokenizer.tokenizeChat(chat);
             halfData.push(chat);
         }
+        const endIdx = idx - 1;
+        console.log(`Summarizing chats from index ${startIdx} to ${endIdx}.`);
         if (halfData.length === 0) break;
 
         const stringlizedChat = halfData
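The hunk ends where halfData is flattened into a string for the summarizer (the line is truncated in the source). A plausible sketch of what that stringification could look like, assuming a role-prefixed join; the actual formatting in hypaMemoryV2 may differ:

// Sketch only: flatten the accumulated chats into one prompt-ready string.
type OpenAIChat = { role: string; content: string };

function stringlizeChats(halfData: OpenAIChat[]): string {
    return halfData
        .map((chat) => `${chat.role}: ${chat.content}`)
        .join("\n");
}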