fix: improve HypaV3 logging and error propagation

This commit is contained in:
Bo26fhmC5M
2025-01-16 22:40:08 +09:00
parent 6ffaa1257e
commit fac11bfe67

View File

@@ -87,7 +87,7 @@ function cleanOrphanedSummary(chats: OpenAIChat[], data: HypaV3Data): void {
const removedCount = originalLength - data.summaries.length;
if (removedCount > 0) {
console.log(`[HypaV3] Cleaned ${removedCount} orphaned summaries`);
console.log(`[HypaV3] Cleaned ${removedCount} orphaned summaries.`);
}
}
@@ -103,7 +103,7 @@ export async function summarize(
} catch (error) {
return {
success: false,
data: "[HypaV3] " + error,
data: error,
};
}
}
@@ -116,7 +116,7 @@ export async function summarize(
switch (db.supaModelType) {
case "instruct35": {
console.log(
"[HypaV3] Using openAI gpt-3.5-turbo-instruct for summarization"
"[HypaV3] Using openAI gpt-3.5-turbo-instruct for summarization."
);
const requestPrompt = `${stringifiedChats}\n\n${summarizePrompt}\n\nOutput:`;
@@ -165,7 +165,7 @@ export async function summarize(
}
case "subModel": {
console.log(`[HypaV3] Using ax model ${db.subModel} for summarization`);
console.log(`[HypaV3] Using ax model ${db.subModel} for summarization.`);
const requestMessages: OpenAIChat[] = parseChatML(
summarizePrompt.replaceAll("{{slot}}", stringifiedChats)
@@ -190,14 +190,17 @@ export async function summarize(
"memory"
);
if (
response.type === "fail" ||
response.type === "streaming" ||
response.type === "multiline"
) {
if (response.type === "streaming" || response.type === "multiline") {
return {
success: false,
data: "Unexpected response type",
data: "unexpected response type",
};
}
if (response.type === "fail") {
return {
success: false,
data: response.result,
};
}
@@ -207,7 +210,7 @@ export async function summarize(
default: {
return {
success: false,
data: `Unsupported model ${db.supaModelType} for summarization`,
data: `unsupported model ${db.supaModelType} for summarization`,
};
}
}
@@ -387,7 +390,7 @@ export async function hypaMemoryV3(
currentTokens - toSummarizeTokens < targetTokens
) {
console.log(
`[HypaV3] Stopping summarization: would reduce below target tokens (${currentTokens} - ${toSummarizeTokens} < ${targetTokens})`
`[HypaV3] Stopping summarization: currentTokens(${currentTokens}) - toSummarizeTokens(${toSummarizeTokens}) < targetTokens(${targetTokens})`
);
break;
}
@@ -417,7 +420,7 @@ export async function hypaMemoryV3(
return {
currentTokens,
chats,
error: "[HypaV3] Summarization failed after maximum retries",
error: `[HypaV3] Summarization failed after maximum retries: ${summarizeResult.data}`,
memory: toSerializableHypaV3Data(data),
};
}
@@ -633,7 +636,7 @@ export async function hypaMemoryV3(
return {
currentTokens,
chats,
error: "[HypaV3] Summarization failed after maximum retries",
error: `[HypaV3] Summarization failed after maximum retries: ${summarizeResult.data}`,
memory: toSerializableHypaV3Data(data),
};
}
@@ -654,7 +657,7 @@ export async function hypaMemoryV3(
);
}
console.log("[HypaV3] Similarity corrected");
console.log("[HypaV3] Similarity corrected.");
break;
}
@@ -677,6 +680,8 @@ export async function hypaMemoryV3(
"[HypaV3] Trying to add similar summary:",
"\nSummary Tokens: ",
summaryTokens,
"\nConsumed Similar Memory Tokens: ",
consumedSimilarMemoryTokens,
"\nReserved Tokens: ",
reservedSimilarMemoryTokens,
"\nWould exceed: ",
@@ -689,7 +694,7 @@ export async function hypaMemoryV3(
reservedSimilarMemoryTokens
) {
console.log(
`[HypaV3] Stopping similar memory selection: would exceed reserved tokens (${consumedSimilarMemoryTokens} + ${summaryTokens} > ${reservedSimilarMemoryTokens})`
`[HypaV3] Stopping similar memory selection: consumedSimilarMemoryTokens(${consumedSimilarMemoryTokens}) + summaryTokens(${summaryTokens}) > reservedSimilarMemoryTokens(${reservedSimilarMemoryTokens})`
);
break;
}
@@ -814,7 +819,7 @@ export async function hypaMemoryV3(
if (currentTokens > maxContextTokens) {
throw new Error(
`[HypaV3] Unexpected input token count:\nCurrent Tokens:${currentTokens}\nMax Context Tokens:${maxContextTokens}`
`[HypaV3] Unexpected error: input token count (${currentTokens}) exceeds max context size (${maxContextTokens})`
);
}