Merge branch 'main' into patch-7

Authored by Bo26fhmC5M on 2025-04-21 11:47:38 +09:00; committed by GitHub.
8 changed files with 100 additions and 109 deletions

View File

@@ -29,7 +29,7 @@
   },
   "productName": "RisuAI",
   "mainBinaryName": "RisuAI",
-  "version": "158.2.1",
+  "version": "159.0.0",
   "identifier": "co.aiclient.risu",
   "plugins": {
     "updater": {

View File

@@ -1117,6 +1117,7 @@ export const languageEnglish = {
     doNotChangeFallbackModels: "Do Not Change Fallback Models on Preset Change",
     customModels: "Custom Models",
     igpPrompt: "IGP Prompt",
+    useTokenizerCaching: "Tokenizer Caching",
     hypaMemoryV2Modal: "Hypa V2 Modal",
     hypaMemoryV3Modal: "Hypa V3 Modal",
     showMenuHypaMemoryModal: "Show Menu Hypa Modal",

View File

@@ -207,6 +207,10 @@
         <Check bind:check={DBState.db.antiServerOverloads} name={language.antiServerOverload}>
         </Check>
     </div>
+    <div class="flex items-center mt-4">
+        <Check bind:check={DBState.db.useTokenizerCaching} name={language.useTokenizerCaching}>
+        </Check>
+    </div>
     {#if DBState.db.useExperimental}
         <div class="flex items-center mt-4">
             <Check bind:check={DBState.db.useExperimentalGoogleTranslator} name={"New Google Translate Experimental"}>

View File

@@ -650,6 +650,11 @@ function decodeStyleRule(rule:CssAtRuleAST){
             rule.rules[i] = decodeStyleRule(rule.rules[i])
         }
     }
+    if(rule.type === 'import'){
+        if(rule.import.startsWith('data:')){
+            rule.import = 'data:,'
+        }
+    }
     return rule
 }
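For illustration (not part of this diff): the added branch rewrites any @import whose target begins with "data:" to the empty data URL "data:,", neutralizing inlined stylesheets. A minimal TypeScript sketch of that behavior, assuming the CSS AST keeps the raw @import target in rule.import:

type ImportRule = { type: 'import'; import: string }

function sanitizeImport(rule: ImportRule): ImportRule {
    // Matches the diff: data: targets are replaced with the empty data URL.
    if (rule.import.startsWith('data:')) {
        rule.import = 'data:,'
    }
    return rule
}

sanitizeImport({ type: 'import', import: 'data:text/css,body{display:none}' })
// -> { type: 'import', import: 'data:,' }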

View File

@@ -59,9 +59,15 @@ export interface OpenAIChatFull extends OpenAIChat{
     }
 }

+export interface requestTokenPart{
+    name:string
+    tokens:number
+}
+
 export const doingChat = writable(false)
 export const chatProcessStage = writable(0)
 export const abortChat = writable(false)
+export let requestTokenParts:{[key:string]:requestTokenPart[]} = {}
 export let previewFormated:OpenAIChat[] = []
 export let previewBody:string = ''
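For illustration (not part of this diff): requestTokenParts maps a key, such as a chat or request identifier, to the named token counts that make up a request. A hedged sketch of how it might be filled and read; recordTokenPart, the import path, and the 'chat-1' key are hypothetical:

import { requestTokenParts, type requestTokenPart } from './request' // path assumed

// Hypothetical helper: append one named token count under a request key.
function recordTokenPart(key: string, part: requestTokenPart) {
    const parts = requestTokenParts[key] ?? (requestTokenParts[key] = [])
    parts.push(part)
}

recordTokenPart('chat-1', { name: 'system prompt', tokens: 812 })
recordTokenPart('chat-1', { name: 'chat history', tokens: 2048 })
// requestTokenParts['chat-1'].reduce((n, p) => n + p.tokens, 0) === 2860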

View File

@@ -12,7 +12,7 @@ import { defaultColorScheme, type ColorScheme } from '../gui/colorscheme';
 import type { PromptItem, PromptSettings } from '../process/prompt';
 import type { OobaChatCompletionRequestParams } from '../model/ooba';

-export let appVer = "158.2.1"
+export let appVer = "159.0.0"
 export let webAppSubVer = ''

@@ -1022,6 +1022,7 @@ export interface Database{
         flags: LLMFlags[]
     }[]
     igpPrompt:string
+    useTokenizerCaching:boolean
     showMenuHypaMemoryModal:boolean
 }
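For illustration (not part of this diff): databases saved before this version lack the new field, so DBState.db.useTokenizerCaching would read as undefined on old saves. A hedged sketch of a defaulting step, assuming a plain migration pass over the loaded object; migrateDatabase is hypothetical:

// Hypothetical migration: default the new flag on databases saved before 159.0.0.
function migrateDatabase(db: Database): Database {
    db.useTokenizerCaching ??= false
    return db
}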

View File

@@ -6,27 +6,9 @@ import { supportsInlayImage } from "./process/files/inlays";
 import { risuChatParser } from "./parser.svelte";
 import { tokenizeGGUFModel } from "./process/models/local";
 import { globalFetch } from "./globalApi.svelte";
-import { getModelInfo, LLMTokenizer, type LLMModel } from "./model/modellist";
+import { getModelInfo, LLMTokenizer } from "./model/modellist";
 import { pluginV2 } from "./plugins/plugins";
 import type { GemmaTokenizer } from "@huggingface/transformers";
-import { LRUMap } from 'mnemonist';
-
-const MAX_CACHE_SIZE = 1500;
-const encodeCache = new LRUMap<string, number[] | Uint32Array | Int32Array>(MAX_CACHE_SIZE);
-
-function getHash(
-    data: string,
-    aiModel: string,
-    customTokenizer: string,
-    currentPluginProvider: string,
-    googleClaudeTokenizing: boolean,
-    modelInfo: LLMModel,
-    pluginTokenizer: string
-): string {
-    const combined = `${data}::${aiModel}::${customTokenizer}::${currentPluginProvider}::${googleClaudeTokenizing ? '1' : '0'}::${modelInfo.tokenizer}::${pluginTokenizer}`;
-    return combined;
-}

 export const tokenizerList = [
@@ -43,108 +25,100 @@ export const tokenizerList = [
 ] as const

 export async function encode(data:string):Promise<(number[]|Uint32Array|Int32Array)>{
-    const db = getDatabase();
-    const modelInfo = getModelInfo(db.aiModel);
-    const pluginTokenizer = pluginV2.providerOptions.get(db.currentPluginProvider)?.tokenizer ?? "none";
-    const cacheKey = getHash(
-        data,
-        db.aiModel,
-        db.customTokenizer,
-        db.currentPluginProvider,
-        db.googleClaudeTokenizing,
-        modelInfo,
-        pluginTokenizer
-    );
-    const cachedResult = encodeCache.get(cacheKey);
-    if (cachedResult !== undefined) {
-        return cachedResult;
-    }
-    let result: number[] | Uint32Array | Int32Array;
+    let db = getDatabase()
     if(db.aiModel === 'openrouter' || db.aiModel === 'reverse_proxy'){
         switch(db.customTokenizer){
             case 'mistral':
-                result = await tokenizeWebTokenizers(data, 'mistral'); break;
+                return await tokenizeWebTokenizers(data, 'mistral')
             case 'llama':
-                result = await tokenizeWebTokenizers(data, 'llama'); break;
+                return await tokenizeWebTokenizers(data, 'llama')
             case 'novelai':
-                result = await tokenizeWebTokenizers(data, 'novelai'); break;
+                return await tokenizeWebTokenizers(data, 'novelai')
             case 'claude':
-                result = await tokenizeWebTokenizers(data, 'claude'); break;
+                return await tokenizeWebTokenizers(data, 'claude')
             case 'novellist':
-                result = await tokenizeWebTokenizers(data, 'novellist'); break;
+                return await tokenizeWebTokenizers(data, 'novellist')
             case 'llama3':
-                result = await tokenizeWebTokenizers(data, 'llama'); break;
+                return await tokenizeWebTokenizers(data, 'llama')
             case 'gemma':
-                result = await gemmaTokenize(data); break;
+                return await gemmaTokenize(data)
             case 'cohere':
-                result = await tokenizeWebTokenizers(data, 'cohere'); break;
+                return await tokenizeWebTokenizers(data, 'cohere')
             case 'deepseek':
-                result = await tokenizeWebTokenizers(data, 'DeepSeek'); break;
+                return await tokenizeWebTokenizers(data, 'DeepSeek')
             default:
-                result = await tikJS(data, 'o200k_base'); break;
+                return await tikJS(data, 'o200k_base')
         }
     }

-    if(db.aiModel === 'custom' && pluginTokenizer){
-        switch(pluginTokenizer){
+    const modelInfo = getModelInfo(db.aiModel)
+    if(db.aiModel === 'custom' && pluginV2.providerOptions.get(db.currentPluginProvider)?.tokenizer){
+        const tokenizer = pluginV2.providerOptions.get(db.currentPluginProvider)?.tokenizer
+        switch(tokenizer){
             case 'mistral':
-                result = await tokenizeWebTokenizers(data, 'mistral'); break;
+                return await tokenizeWebTokenizers(data, 'mistral')
             case 'llama':
-                result = await tokenizeWebTokenizers(data, 'llama'); break;
+                return await tokenizeWebTokenizers(data, 'llama')
             case 'novelai':
-                result = await tokenizeWebTokenizers(data, 'novelai'); break;
+                return await tokenizeWebTokenizers(data, 'novelai')
             case 'claude':
-                result = await tokenizeWebTokenizers(data, 'claude'); break;
+                return await tokenizeWebTokenizers(data, 'claude')
             case 'novellist':
-                result = await tokenizeWebTokenizers(data, 'novellist'); break;
+                return await tokenizeWebTokenizers(data, 'novellist')
             case 'llama3':
-                result = await tokenizeWebTokenizers(data, 'llama'); break;
+                return await tokenizeWebTokenizers(data, 'llama')
             case 'gemma':
-                result = await gemmaTokenize(data); break;
+                return await gemmaTokenize(data)
             case 'cohere':
-                result = await tokenizeWebTokenizers(data, 'cohere'); break;
+                return await tokenizeWebTokenizers(data, 'cohere')
             case 'o200k_base':
-                result = await tikJS(data, 'o200k_base'); break;
+                return await tikJS(data, 'o200k_base')
             case 'cl100k_base':
-                result = await tikJS(data, 'cl100k_base'); break;
+                return await tikJS(data, 'cl100k_base')
             case 'custom':
-                result = await pluginV2.providerOptions.get(db.currentPluginProvider)?.tokenizerFunc?.(data) ?? [0]; break;
+                return await pluginV2.providerOptions.get(db.currentPluginProvider)?.tokenizerFunc?.(data) ?? [0]
             default:
-                result = await tikJS(data, 'o200k_base'); break;
+                return await tikJS(data, 'o200k_base')
         }
     }

     if(modelInfo.tokenizer === LLMTokenizer.NovelList){
-        result = await tokenizeWebTokenizers(data, 'novellist');
-    } else if(modelInfo.tokenizer === LLMTokenizer.Claude){
-        result = await tokenizeWebTokenizers(data, 'claude');
-    } else if(modelInfo.tokenizer === LLMTokenizer.NovelAI){
-        result = await tokenizeWebTokenizers(data, 'novelai');
-    } else if(modelInfo.tokenizer === LLMTokenizer.Mistral){
-        result = await tokenizeWebTokenizers(data, 'mistral');
-    } else if(modelInfo.tokenizer === LLMTokenizer.Llama){
-        result = await tokenizeWebTokenizers(data, 'llama');
-    } else if(modelInfo.tokenizer === LLMTokenizer.Local){
-        result = await tokenizeGGUFModel(data);
-    } else if(modelInfo.tokenizer === LLMTokenizer.tiktokenO200Base){
-        result = await tikJS(data, 'o200k_base');
-    } else if(modelInfo.tokenizer === LLMTokenizer.GoogleCloud && db.googleClaudeTokenizing){
-        result = await tokenizeGoogleCloud(data);
-    } else if(modelInfo.tokenizer === LLMTokenizer.Gemma || modelInfo.tokenizer === LLMTokenizer.GoogleCloud){
-        result = await gemmaTokenize(data);
-    } else if(modelInfo.tokenizer === LLMTokenizer.DeepSeek){
-        result = await tokenizeWebTokenizers(data, 'DeepSeek');
-    } else if(modelInfo.tokenizer === LLMTokenizer.Cohere){
-        result = await tokenizeWebTokenizers(data, 'cohere');
-    } else {
-        result = await tikJS(data);
+        const nv= await tokenizeWebTokenizers(data, 'novellist')
+        return nv
     }
+    if(modelInfo.tokenizer === LLMTokenizer.Claude){
+        return await tokenizeWebTokenizers(data, 'claude')
+    }
+    if(modelInfo.tokenizer === LLMTokenizer.NovelAI){
+        return await tokenizeWebTokenizers(data, 'novelai')
+    }
+    if(modelInfo.tokenizer === LLMTokenizer.Mistral){
+        return await tokenizeWebTokenizers(data, 'mistral')
+    }
+    if(modelInfo.tokenizer === LLMTokenizer.Llama){
+        return await tokenizeWebTokenizers(data, 'llama')
+    }
+    if(modelInfo.tokenizer === LLMTokenizer.Local){
+        return await tokenizeGGUFModel(data)
+    }
+    if(modelInfo.tokenizer === LLMTokenizer.tiktokenO200Base){
+        return await tikJS(data, 'o200k_base')
+    }
+    if(modelInfo.tokenizer === LLMTokenizer.GoogleCloud && db.googleClaudeTokenizing){
+        return await tokenizeGoogleCloud(data)
+    }
+    if(modelInfo.tokenizer === LLMTokenizer.Gemma || modelInfo.tokenizer === LLMTokenizer.GoogleCloud){
+        return await gemmaTokenize(data)
+    }
+    if(modelInfo.tokenizer === LLMTokenizer.DeepSeek){
+        return await tokenizeWebTokenizers(data, 'DeepSeek')
+    }
+    if(modelInfo.tokenizer === LLMTokenizer.Cohere){
+        return await tokenizeWebTokenizers(data, 'cohere')
+    }
-    encodeCache.set(cacheKey, result);
-    return result;
+    return await tikJS(data)
 }

 type tokenizerType = 'novellist'|'claude'|'novelai'|'llama'|'mistral'|'llama3'|'gemma'|'cohere'|'googleCloud'|'DeepSeek'
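For illustration (not part of this diff): this hunk removes the LRU cache from encode itself, while the useTokenizerCaching setting is added elsewhere in the same commit. A hedged sketch of how caching could be layered back on top of encode behind that flag, reusing mnemonist's LRUMap as the deleted code did; encodeCached and the simplified key are assumptions, and the deleted getHash mixed in more fields (plugin provider, Google Cloud tokenizing, model tokenizer):

import { LRUMap } from 'mnemonist' // same dependency the removed code used

const encodeCache = new LRUMap<string, number[] | Uint32Array | Int32Array>(1500)

// Hypothetical wrapper: cache only when the new setting is on.
export async function encodeCached(data: string) {
    const db = getDatabase()
    if (!db.useTokenizerCaching) {
        return await encode(data) // flag off: identical to plain encode
    }
    // The key must cover everything that changes tokenization; simplified here.
    const key = `${db.aiModel}::${db.customTokenizer}::${data}`
    const hit = encodeCache.get(key)
    if (hit !== undefined) {
        return hit
    }
    const result = await encode(data)
    encodeCache.set(key, result)
    return result
}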

View File

@@ -1 +1 @@
{"version":"158.2.1"}
{"version":"159.0.0"}