Fix: Correct tokenization flow in tokenizer encode function

This commit is contained in:
sub-hub
2025-04-21 13:34:01 +09:00
committed by GitHub
parent 33d8ed4568
commit 09228f3f86

View File

@@ -89,9 +89,7 @@ export async function encode(data:string):Promise<(number[]|Uint32Array|Int32Arr
default:
result = await tikJS(data, 'o200k_base'); break;
}
}
if(db.aiModel === 'custom' && pluginTokenizer){
} else if (db.aiModel === 'custom' && pluginTokenizer) {
switch(pluginTokenizer){
case 'mistral':
result = await tokenizeWebTokenizers(data, 'mistral'); break;
@@ -120,6 +118,8 @@ export async function encode(data:string):Promise<(number[]|Uint32Array|Int32Arr
}
}
// Fallback
if (result === undefined) {
if(modelInfo.tokenizer === LLMTokenizer.NovelList){
result = await tokenizeWebTokenizers(data, 'novellist');
} else if(modelInfo.tokenizer === LLMTokenizer.Claude){
@@ -145,7 +145,7 @@ export async function encode(data:string):Promise<(number[]|Uint32Array|Int32Arr
} else {
result = await tikJS(data);
}
}
if(db.useTokenizerCaching){
encodeCache.set(cacheKey, result);
}