[fix] ooba to use llama tokenizer

This commit is contained in:
kwaroran
2023-12-03 21:31:14 +09:00
parent c250cc6214
commit 6a6321c5dc

View File

@@ -19,7 +19,11 @@ async function encode(data:string):Promise<(number[]|Uint32Array|Int32Array)>{
if(db.aiModel.startsWith('novelai')){
return await tokenizeWebTokenizers(data, 'novelai')
}
if(db.aiModel.startsWith('local_') || db.aiModel === 'mancer' || db.aiModel === 'textgen_webui' || (db.aiModel === 'reverse_proxy' && db.reverseProxyOobaMode)){
if(db.aiModel.startsWith('local_') ||
db.aiModel === 'mancer' ||
db.aiModel === 'textgen_webui' ||
(db.aiModel === 'reverse_proxy' && db.reverseProxyOobaMode) ||
db.aiModel === 'ooba'){
return await tokenizeWebTokenizers(data, 'llama')
}