Update token length check in embedding transformer and error message in supaMemory
@@ -57,7 +57,7 @@ export const runEmbedding = async (text: string):Promise<Float32Array> => {
     }
     const tokenizer = await AutoTokenizer.from_pretrained('Xenova/all-MiniLM-L6-v2');
     const tokens = tokenizer.encode(text)
-    if (tokens.length > 256) {
+    if (tokens.length > 1024) {
         let chunks:string[] = []
         let chunk:number[] = []
         for (let i = 0; i < tokens.length; i++) {
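For context, the guard above splits over-length input before embedding. Below is a minimal sketch of how such chunking can work, assuming the elided loop body packs token ids into fixed-size windows and decodes each window back to text; the helper name, the decode step, and MAX_TOKENS as a constant are assumptions, with only the 1024 guard and the chunks/chunk variables coming from the diff:

```ts
import { AutoTokenizer } from '@xenova/transformers';

// Mirrors the new 1024-token guard from the diff; everything past the
// `if` is an assumed reconstruction of the elided loop body.
const MAX_TOKENS = 1024;

export const chunkForEmbedding = async (text: string): Promise<string[]> => {
    const tokenizer = await AutoTokenizer.from_pretrained('Xenova/all-MiniLM-L6-v2');
    const tokens = tokenizer.encode(text);
    if (tokens.length <= MAX_TOKENS) {
        return [text]; // short input: embed as-is
    }
    const chunks: string[] = [];
    let chunk: number[] = [];
    for (let i = 0; i < tokens.length; i++) {
        chunk.push(tokens[i]);
        if (chunk.length >= MAX_TOKENS) {
            // Turn the full token window back into text and start a new one.
            chunks.push(tokenizer.decode(chunk, { skip_special_tokens: true }));
            chunk = [];
        }
    }
    if (chunk.length > 0) {
        chunks.push(tokenizer.decode(chunk, { skip_special_tokens: true }));
    }
    return chunks;
};
```

Worth noting: all-MiniLM-L6-v2 is typically used with sequences of at most 256 word pieces, so chunks approaching 1024 tokens may still be truncated by the model downstream.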
@@ -178,7 +178,7 @@ export async function supaMemory(
     if(db.supaMemoryType === 'distilbart'){
         try {
             const sum = await runSummarizer(stringlizedChat)
-            return sum[0].summary_text
+            return sum
         } catch (error) {
             return {
                 currentTokens: currentTokens,
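One plausible reading of the `return sum` change is that runSummarizer now unwraps the transformers.js pipeline output itself. A sketch under that assumption (the pipeline call and model id are inferred from the 'distilbart' branch and do not appear in the diff):

```ts
import { pipeline } from '@xenova/transformers';

// Hedged sketch: if runSummarizer resolves to the summary string itself,
// the caller above can return `sum` directly. The model id below is an
// assumption based on db.supaMemoryType === 'distilbart'.
export const runSummarizer = async (text: string): Promise<string> => {
    const summarizer = await pipeline('summarization', 'Xenova/distilbart-cnn-6-6');
    const output = await summarizer(text);
    // transformers.js summarization pipelines resolve to [{ summary_text }]
    return (output as unknown as { summary_text: string }[])[0].summary_text;
};
```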
@@ -274,7 +274,7 @@ export async function supaMemory(
             return {
                 currentTokens: currentTokens,
                 chats: chats,
-                error: "Not Enough Tokens"
+                error: "Not Enough Tokens to summarize in SupaMemory"
             }
         }
         maxChunkSize = maxChunkSize * 0.7
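The final context line, `maxChunkSize = maxChunkSize * 0.7`, implies a shrink-and-retry loop around summarization: when a pass still leaves the chat over budget, the window shrinks and the loop tries again, and the error above is surfaced only when nothing compressible remains. A sketch of that pattern, where every name except maxChunkSize, currentTokens, chats, and the error string is an assumed placeholder:

```ts
// Hedged sketch of the shrink-and-retry pattern implied by the diff; the
// helper signature and starting window size are assumptions.
async function compressUntilFits(
    chats: string[],
    currentTokens: number,
    maxContextTokens: number,
    summarize: (text: string) => Promise<string>,
    countTokens: (text: string) => number,
): Promise<{ currentTokens: number; chats: string[]; error?: string }> {
    let maxChunkSize = Math.floor(maxContextTokens / 2); // assumed initial window
    while (currentTokens > maxContextTokens) {
        // Pull the oldest chats until the chunk fills the current window.
        const chunk: string[] = [];
        let chunkTokens = 0;
        while (chats.length > 0 && chunkTokens < maxChunkSize) {
            const chat = chats.shift() as string;
            chunk.push(chat);
            chunkTokens += countTokens(chat);
        }
        if (chunk.length === 0) {
            // Nothing left to summarize: surface the error path from the diff.
            return { currentTokens, chats, error: "Not Enough Tokens to summarize in SupaMemory" };
        }
        const summary = await summarize(chunk.join('\n'));
        chats.unshift(summary); // replace the chunk with its summary
        currentTokens = currentTokens - chunkTokens + countTokens(summary);
        maxChunkSize = maxChunkSize * 0.7; // shrink the window each pass, as in the diff
    }
    return { currentTokens, chats };
}
```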