Fix hypav2 issue where the 'search_document: ' string is not truncated correctly. (#675)
# PR Checklist - [x] Did you check if it works normally in all models? *ignore this when it doesn't use models* - [x] Did you check if it works normally in all of the web, local, and node-hosted versions? If it doesn't, did you block it in those versions? - [ ] Did you add a type def? # Description This PR fixes a hypav2 issue where the "search_document: " string is not truncated correctly. If the "search_document: " string is not truncated correctly, it causes "t: " to remain within the <Past Events Details></Past Events Details> tag. Edit: This PR also adds the memo "supaPrompt" so that supaMemory model requests can be detected in the plugin
This commit is contained in:
@@ -87,7 +87,7 @@ async function summary(stringlizedChat: string): Promise<{ success: boolean; dat
|
|||||||
|
|
||||||
let parsedPrompt = parseChatML(supaPrompt.replaceAll('{{slot}}', stringlizedChat))
|
let parsedPrompt = parseChatML(supaPrompt.replaceAll('{{slot}}', stringlizedChat))
|
||||||
|
|
||||||
const promptbody: OpenAIChat[] = parsedPrompt ?? [
|
const promptbody: OpenAIChat[] = (parsedPrompt ?? [
|
||||||
{
|
{
|
||||||
role: "user",
|
role: "user",
|
||||||
content: stringlizedChat
|
content: stringlizedChat
|
||||||
@@ -96,7 +96,10 @@ async function summary(stringlizedChat: string): Promise<{ success: boolean; dat
|
|||||||
role: "system",
|
role: "system",
|
||||||
content: supaPrompt
|
content: supaPrompt
|
||||||
}
|
}
|
||||||
];
|
]).map(message => ({
|
||||||
|
...message,
|
||||||
|
memo: "supaPrompt"
|
||||||
|
}));
|
||||||
console.log("Using submodel: ", db.subModel, "for supaMemory model");
|
console.log("Using submodel: ", db.subModel, "for supaMemory model");
|
||||||
const da = await requestChatData({
|
const da = await requestChatData({
|
||||||
formated: promptbody,
|
formated: promptbody,
|
||||||
@@ -230,6 +233,7 @@ export async function hypaMemoryV2(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Fetch additional memory from chunks
|
// Fetch additional memory from chunks
|
||||||
|
const searchDocumentPrefix = "search_document: ";
|
||||||
const processor = new HypaProcesser(db.hypaModel);
|
const processor = new HypaProcesser(db.hypaModel);
|
||||||
processor.oaikey = db.supaMemoryKey;
|
processor.oaikey = db.supaMemoryKey;
|
||||||
|
|
||||||
@@ -249,7 +253,7 @@ export async function hypaMemoryV2(
|
|||||||
console.log("Older Chunks:", olderChunks);
|
console.log("Older Chunks:", olderChunks);
|
||||||
|
|
||||||
// Add older chunks to processor for similarity search
|
// Add older chunks to processor for similarity search
|
||||||
await processor.addText(olderChunks.filter(v => v.text.trim().length > 0).map(v => "search_document: " + v.text.trim()));
|
await processor.addText(olderChunks.filter(v => v.text.trim().length > 0).map(v => searchDocumentPrefix + v.text.trim()));
|
||||||
|
|
||||||
let scoredResults: { [key: string]: number } = {};
|
let scoredResults: { [key: string]: number } = {};
|
||||||
for (let i = 0; i < 3; i++) {
|
for (let i = 0; i < 3; i++) {
|
||||||
@@ -267,9 +271,10 @@ export async function hypaMemoryV2(
|
|||||||
let chunkResultTokens = 0;
|
let chunkResultTokens = 0;
|
||||||
while (allocatedTokens - mainPromptTokens - chunkResultTokens > 0 && scoredArray.length > 0) {
|
while (allocatedTokens - mainPromptTokens - chunkResultTokens > 0 && scoredArray.length > 0) {
|
||||||
const [text] = scoredArray.shift();
|
const [text] = scoredArray.shift();
|
||||||
const tokenized = await tokenizer.tokenizeChat({ role: 'system', content: text.substring(14) });
|
const tokenized = await tokenizer.tokenizeChat({ role: 'system', content: text.substring(searchDocumentPrefix.length) });
|
||||||
if (tokenized > allocatedTokens - mainPromptTokens - chunkResultTokens) break;
|
if (tokenized > allocatedTokens - mainPromptTokens - chunkResultTokens) break;
|
||||||
chunkResultPrompts += text.substring(14) + '\n\n';
|
// Ensure strings are truncated correctly using searchDocumentPrefix.length
|
||||||
|
chunkResultPrompts += text.substring(searchDocumentPrefix.length) + '\n\n';
|
||||||
chunkResultTokens += tokenized;
|
chunkResultTokens += tokenized;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user