Add models
@@ -18,7 +18,10 @@ export enum LLMFlags{
     OAICompletionTokens,
     DeveloperRole,
     geminiThinking,
-    geminiBlockOff
+    geminiBlockOff,
+    deepSeekPrefix,
+    deepSeekThinkingInput,
+    deepSeekThinkingOutput,
 }
 
 export enum LLMProvider{
@@ -34,7 +37,9 @@ export enum LLMProvider{
     WebLLM,
     Horde,
     AWS,
-    AI21
+    AI21,
+    DeepSeek,
+    DeepInfra
 }
 
 export enum LLMFormat{
@@ -71,7 +76,8 @@ export enum LLMTokenizer{
     Gemma,
     GoogleCloud,
     Cohere,
-    Local
+    Local,
+    DeepSeek
 }
 
 export interface LLMModel{
@@ -86,6 +92,8 @@ export interface LLMModel{
     parameters: Parameter[],
     tokenizer: LLMTokenizer
     recommended?: boolean
+    keyIdentifier?: string
+    endpoint?: string
 }
 
 const ProviderNames = new Map<LLMProvider, string>([
@@ -101,11 +109,31 @@ const ProviderNames = new Map<LLMProvider, string>([
     [LLMProvider.WebLLM, 'WebLLM'],
     [LLMProvider.Horde, 'Horde'],
     [LLMProvider.AWS, 'AWS'],
+    [LLMProvider.DeepSeek, 'DeepSeek'],
+    [LLMProvider.DeepInfra, 'DeepInfra']
 ])
 
 const OpenAIParameters:Parameter[] = ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty']
 const ClaudeParameters:Parameter[] = ['temperature', 'top_k', 'top_p']
 
+function makeDeepInfraModels(id:string[]):LLMModel[]{
+    return id.map((id) => {
+        return {
+            id: 'deepinfra_' + id,
+            name: id,
+            internalID: id,
+            provider: LLMProvider.DeepInfra,
+            format: LLMFormat.OpenAICompatible,
+            parameters: ['frequency_penalty', 'presence_penalty','temperature', 'top_p'],
+            flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput, LLMFlags.hasPrefill, LLMFlags.deepSeekThinkingOutput, LLMFlags.hasStreaming],
+            tokenizer: LLMTokenizer.DeepSeek,
+            endpoint: 'https://api.deepinfra.com/v1/openai/chat/completions',
+            keyIdentifier: 'deepinfra',
+            recommended: true
+        } as LLMModel
+    })
+}
+
 export const LLMModels: LLMModel[] = [
     {
         id: 'gpt35',
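For reference, makeDeepInfraModels only namespaces each DeepInfra model ID and points it at DeepInfra's OpenAI-compatible endpoint; every generated entry shares the same parameters, flags and DeepSeek tokenizer. A minimal sketch of what one element evaluates to, derived from the helper above rather than a separate definition:

    // makeDeepInfraModels(['deepseek-ai/DeepSeek-R1'])[0] is roughly:
    {
        id: 'deepinfra_deepseek-ai/DeepSeek-R1',
        name: 'deepseek-ai/DeepSeek-R1',
        internalID: 'deepseek-ai/DeepSeek-R1',
        provider: LLMProvider.DeepInfra,
        format: LLMFormat.OpenAICompatible,
        endpoint: 'https://api.deepinfra.com/v1/openai/chat/completions',
        keyIdentifier: 'deepinfra',
        tokenizer: LLMTokenizer.DeepSeek,
        recommended: true
        // parameters and flags as listed in the helper above
    }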
@@ -809,7 +837,6 @@ export const LLMModels: LLMModel[] = [
         flags: [LLMFlags.geminiBlockOff,LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasAudioInput, LLMFlags.hasVideoInput, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'],
         tokenizer: LLMTokenizer.GoogleCloud,
-        recommended: true
     },
     {
         name: "Gemini Flash 2.0 Thinking 1219",
@@ -819,6 +846,35 @@ export const LLMModels: LLMModel[] = [
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasAudioInput, LLMFlags.hasVideoInput, LLMFlags.hasStreaming, LLMFlags.geminiThinking, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'],
         tokenizer: LLMTokenizer.GoogleCloud,
+    },
+    {
+        name: "Gemini Flash 2.0",
+        id: 'gemini-2.0-flash',
+        provider: LLMProvider.GoogleCloud,
+        format: LLMFormat.GoogleCloud,
+        flags: [LLMFlags.geminiBlockOff,LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasAudioInput, LLMFlags.hasVideoInput, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
+        parameters: ['temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'],
+        tokenizer: LLMTokenizer.GoogleCloud,
+        recommended: true
+    },
+    {
+        name: "Gemini Pro 2.0 Exp 0128",
+        id: 'gemini-2.0-pro-exp-01-28',
+        provider: LLMProvider.GoogleCloud,
+        format: LLMFormat.GoogleCloud,
+        flags: [LLMFlags.geminiBlockOff,LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasAudioInput, LLMFlags.hasVideoInput, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
+        parameters: ['temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'],
+        tokenizer: LLMTokenizer.GoogleCloud,
+        recommended: true
+    },
+    {
+        name: "Gemini Flash 2.0 Thinking 0121",
+        id: 'gemini-2.0-flash-thinking-exp-01-21',
+        provider: LLMProvider.GoogleCloud,
+        format: LLMFormat.GoogleCloud,
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasAudioInput, LLMFlags.hasVideoInput, LLMFlags.hasStreaming, LLMFlags.geminiThinking, LLMFlags.requiresAlternateRole],
+        parameters: ['temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'],
+        tokenizer: LLMTokenizer.GoogleCloud,
         recommended: true
     },
     {
@@ -1102,6 +1158,59 @@ export const LLMModels: LLMModel[] = [
         parameters: OpenAIParameters,
         tokenizer: LLMTokenizer.Local
     },
+    {
+        id: 'deepseek-chat',
+        name: 'Deepseek Chat',
+        provider: LLMProvider.DeepSeek,
+        format: LLMFormat.OpenAICompatible,
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput, LLMFlags.hasPrefill, LLMFlags.deepSeekPrefix, LLMFlags.hasStreaming],
+        parameters: ['frequency_penalty', 'presence_penalty','temperature', 'top_p'],
+        tokenizer: LLMTokenizer.DeepSeek,
+        endpoint: 'https://api.deepseek.com/beta',
+        keyIdentifier: 'deepseek',
+        recommended: true
+    },
+    {
+        id: 'deepseek-reasoner',
+        name: 'Deepseek Reasoner',
+        provider: LLMProvider.DeepSeek,
+        format: LLMFormat.OpenAICompatible,
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput, LLMFlags.hasPrefill, LLMFlags.deepSeekPrefix, LLMFlags.deepSeekThinkingInput, LLMFlags.hasStreaming],
+        parameters: [],
+        tokenizer: LLMTokenizer.DeepSeek,
+        endpoint: 'https://api.deepseek.com/beta/chat/completions',
+        keyIdentifier: 'deepseek',
+        recommended: true
+    },
+    ...makeDeepInfraModels([
+        'deepseek-ai/DeepSeek-R1',
+        'deepseek-ai/DeepSeek-R1-Distill-Llama-70B',
+        'deepseek-ai/DeepSeek-V3',
+        'meta-llama/Llama-3.3-70B-Instruct-Turbo',
+        'meta-llama/Llama-3.3-70B-Instruct',
+        'microsoft/phi-4',
+        'meta-llama/Meta-Llama-3.1-70B-Instruct',
+        'meta-llama/Meta-Llama-3.1-8B-Instruct',
+        'meta-llama/Meta-Llama-3.1-405B-Instruct',
+        'Qwen/QwQ-32B-Preview',
+        'meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo',
+        'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo',
+        'Qwen/Qwen2.5-Coder-32B-Instruct',
+        'nvidia/Llama-3.1-Nemotron-70B-Instruct',
+        'Qwen/Qwen2.5-72B-Instruct',
+        'meta-llama/Llama-3.2-90B-Vision-Instruct',
+        'meta-llama/Llama-3.2-11B-Vision-Instruct',
+        'microsoft/WizardLM-2-8x22B',
+        '01-ai/Yi-34B-Chat',
+        'Austism/chronos-hermes-13b-v2',
+        'Gryphe/MythoMax-L2-13b',
+        'Gryphe/MythoMax-L2-13b-turbo',
+        'Sao10K/L3.3-70B-Euryale-v2.3',
+        'Sao10K/L3.1-70B-Euryale-v2.2',
+        'Sao10K/L3-70B-Euryale-v2.1',
+        'google/gemma-2-27b-it',
+        'google/gemma-2-9b-it'
+    ]),
     {
         id: 'custom',
         name: "Plugin",
@@ -1121,7 +1230,7 @@ export const LLMModels: LLMModel[] = [
         recommended: true,
         parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k'],
         tokenizer: LLMTokenizer.Unknown
-    }
+    },
 ]
 
 for(let model of LLMModels){

@@ -287,6 +287,8 @@ export interface OpenAIChatExtra {
     attr?:string[]
     multimodals?:MultiModal[]
     thoughts?:string[]
+    prefix?:boolean
+    reasoning_content?:string
 }
 
 function reformater(formated:OpenAIChat[],modelInfo:LLMModel){
@@ -476,6 +478,12 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
         if(db.newOAIHandle && formatedChat[i].memo && formatedChat[i].memo.startsWith('NewChat')){
             formatedChat[i].content = ''
         }
+        if(arg.modelInfo.flags.includes(LLMFlags.deepSeekPrefix) && i === formatedChat.length-1 && formatedChat[i].role === 'assistant'){
+            formatedChat[i].prefix = true
+        }
+        if(arg.modelInfo.flags.includes(LLMFlags.deepSeekThinkingInput) && i === formatedChat.length-1 && formatedChat[i].thoughts && formatedChat[i].thoughts.length > 0 && formatedChat[i].role === 'assistant'){
+            formatedChat[i].reasoning_content = formatedChat[i].thoughts.join('\n')
+        }
         delete formatedChat[i].memo
         delete formatedChat[i].removable
         delete formatedChat[i].attr
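A note on the two new flags handled above: deepSeekPrefix marks the model as supporting DeepSeek-style assistant-prefix continuation, so a trailing assistant message is sent with prefix: true, and deepSeekThinkingInput feeds previously stored thoughts back in as reasoning_content on that same message. A minimal sketch of how the final message would be serialized, with illustrative values:

    {
        role: 'assistant',
        content: 'Sure, the next step is',
        prefix: true,                       // ask the API to continue this assistant turn
        reasoning_content: 'The user wants step-by-step help...'   // joined from formatedChat[i].thoughts
    }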
@@ -796,6 +804,10 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
     let replacerURL = aiModel === 'openrouter' ? "https://openrouter.ai/api/v1/chat/completions" :
         (aiModel === 'reverse_proxy') ? (arg.customURL) : ('https://api.openai.com/v1/chat/completions')
 
+    if(arg.modelInfo?.endpoint){
+        replacerURL = arg.modelInfo.endpoint
+    }
+
     let risuIdentify = false
     if(replacerURL.startsWith("risu::")){
         risuIdentify = true
@@ -824,6 +836,9 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
         "Content-Type": "application/json"
     }
 
+    if(arg.modelInfo?.keyIdentifier){
+        headers["Authorization"] = "Bearer " + db.OaiCompAPIKeys[arg.modelInfo.keyIdentifier]
+    }
     if(aiModel === 'openrouter'){
         headers["X-Title"] = 'RisuAI'
         headers["HTTP-Referer"] = 'https://risuai.xyz'
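Together with the endpoint override a few hunks above, the keyIdentifier lookup lets OpenAI-compatible models carry their own credentials from the new db.OaiCompAPIKeys map instead of the regular OpenAI key. A minimal sketch, assuming hypothetical stored keys:

    // db.OaiCompAPIKeys = { deepinfra: 'di-xxxx', deepseek: 'sk-yyyy' }   // illustrative values
    // a makeDeepInfraModels() entry would then resolve to:
    //   POST https://api.deepinfra.com/v1/openai/chat/completions
    //   Authorization: Bearer di-xxxx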
@@ -881,6 +896,7 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
     })
 
     let dataUint:Uint8Array|Buffer = new Uint8Array([])
+    let reasoningContent = ""
 
     const transtream = new TransformStream<Uint8Array, StreamResponseChunk>( {
         async transform(chunk, control) {
@@ -894,6 +910,16 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
                 try {
                     const rawChunk = data.replace("data: ", "")
                     if(rawChunk === "[DONE]"){
+                        if(arg.modelInfo.flags.includes(LLMFlags.deepSeekThinkingOutput)){
+                            readed["0"] = readed["0"].replace(/(.*)\<\/think\>/gms, (m, p1) => {
+                                reasoningContent = p1
+                                return ""
+                            })
+
+                            if(reasoningContent){
+                                reasoningContent = reasoningContent.replace(/\<think\>/gm, '')
+                            }
+                        }
                         if(arg.extractJson && (db.jsonSchemaEnabled || arg.schema)){
                             for(const key in readed){
                                 const extracted = extractJSON(readed[key], arg.extractJson)
@@ -902,6 +928,11 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
                                 console.log(JSONreaded)
                                 control.enqueue(JSONreaded)
                             }
+                            else if(reasoningContent){
+                                control.enqueue({
+                                    "0": `<Thoughts>\n${reasoningContent}\n</Thoughts>\n${readed["0"]}`
+                                })
+                            }
                             else{
                                 control.enqueue(readed)
                             }
@@ -925,10 +956,27 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
                                 readed["0"] += chunk
                             }
                         }
+
+                        if(choice?.delta?.reasoning_content){
+                            reasoningContent += choice.delta.reasoning_content
+                        }
                     }
                 } catch (error) {}
             }
         }
+
+
+        if(arg.modelInfo.flags.includes(LLMFlags.deepSeekThinkingOutput)){
+            readed["0"] = readed["0"].replace(/(.*)\<\/think\>/gms, (m, p1) => {
+                reasoningContent = p1
+                return ""
+            })
+
+            if(reasoningContent){
+                reasoningContent = reasoningContent.replace(/\<think\>/gm, '')
+            }
+        }
+
         if(arg.extractJson && (db.jsonSchemaEnabled || arg.schema)){
             for(const key in readed){
                 const extracted = extractJSON(readed[key], arg.extractJson)
@@ -937,6 +985,11 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
             console.log(JSONreaded)
             control.enqueue(JSONreaded)
         }
+        else if(reasoningContent){
+            control.enqueue({
+                "0": `<Thoughts>\n${reasoningContent}\n</Thoughts>\n${readed["0"]}`
+            })
+        }
         else{
             control.enqueue(readed)
         }
@@ -1031,9 +1084,10 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
         }
 
         if(dat?.choices[0]?.text){
+            let text = dat.choices[0].text as string
             if(arg.extractJson && (db.jsonSchemaEnabled || arg.schema)){
                 try {
-                    const parsed = JSON.parse(dat.choices[0].text)
+                    const parsed = JSON.parse(text)
                     const extracted = extractJSON(parsed, arg.extractJson)
                     return {
                         type: 'success',
@@ -1043,13 +1097,13 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
                     console.log(error)
                     return {
                         type: 'success',
-                        result: dat.choices[0].text
+                        result: text
                     }
                 }
             }
             return {
                 type: 'success',
-                result: dat.choices[0].text
+                result: text
             }
         }
         if(arg.extractJson && (db.jsonSchemaEnabled || arg.schema)){
@@ -1059,9 +1113,30 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
             }
         }
         const msg:OpenAIChatFull = (dat.choices[0].message)
+        let result = msg.content
+
+        if(arg.modelInfo.flags.includes(LLMFlags.deepSeekThinkingOutput)){
+            console.log("Checking for reasoning content")
+            let reasoningContent = ""
+            result = result.replace(/(.*)\<\/think\>/gms, (m, p1) => {
+                reasoningContent = p1
+                return ""
+            })
+            console.log(`Reasoning Content: ${reasoningContent}`)
+
+            if(reasoningContent){
+                reasoningContent = reasoningContent.replace(/\<think\>/gms, '')
+                result = `<Thoughts>\n${reasoningContent}\n</Thoughts>\n${result}`
+            }
+        }
+
+        if(dat?.choices[0]?.reasoning_content){
+            result = `<Thoughts>\n${dat.choices[0].reasoning_content}\n</Thoughts>\n${result}`
+        }
+
         return {
             type: 'success',
-            result: msg.content
+            result: result
         }
     } catch (error) {
         return {

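Both the streaming and non-streaming paths above recover reasoning with the same replace call: everything up to the closing </think> tag is captured as reasoningContent, the opening <think> tag is stripped, and the result is re-emitted as a <Thoughts> block in front of the visible reply. A worked sketch on an illustrative completion string:

    let result = "<think>The user greets me, reply warmly.</think>Hello! How can I help?"
    let reasoningContent = ""
    result = result.replace(/(.*)\<\/think\>/gms, (m, p1) => {
        reasoningContent = p1            // "<think>The user greets me, reply warmly."
        return ""                        // drop the thinking span from the visible text
    })
    reasoningContent = reasoningContent.replace(/\<think\>/gms, '')
    result = `<Thoughts>\n${reasoningContent}\n</Thoughts>\n${result}`
    // result === "<Thoughts>\nThe user greets me, reply warmly.\n</Thoughts>\nHello! How can I help?"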
@@ -470,6 +470,7 @@ export function setDatabase(data:Database){
     data.banCharacterset ??= []
     data.showPromptComparison ??= false
     data.checkCorruption ??= true
+    data.OaiCompAPIKeys ??= {}
     data.hypaV3Settings = {
         memoryTokensRatio: data.hypaV3Settings?.memoryTokensRatio ?? 0.2,
         extraSummarizationRatio: data.hypaV3Settings?.extraSummarizationRatio ?? 0.2,
@@ -892,7 +893,8 @@ export interface Database{
         enableSimilarityCorrection: boolean
         preserveOrphanedMemory: boolean
         processRegexScript: boolean
-    }
+    },
+    OaiCompAPIKeys: {[key:string]:string}
 }
 
 interface SeparateParameters{

@@ -21,6 +21,7 @@ export const tokenizerList = [
     ['novellist', 'Novellist'],
     ['gemma', 'Gemma'],
     ['cohere', 'Cohere'],
+    ['deepseek', 'DeepSeek'],
 ] as const
 
 export async function encode(data:string):Promise<(number[]|Uint32Array|Int32Array)>{
@@ -43,6 +44,8 @@ export async function encode(data:string):Promise<(number[]|Uint32Array|Int32Arr
             return await gemmaTokenize(data)
         case 'cohere':
             return await tokenizeWebTokenizers(data, 'cohere')
+        case 'deepseek':
+            return await tokenizeWebTokenizers(data, 'DeepSeek')
         default:
             return await tikJS(data, 'o200k_base')
     }
@@ -108,6 +111,9 @@ export async function encode(data:string):Promise<(number[]|Uint32Array|Int32Arr
     if(modelInfo.tokenizer === LLMTokenizer.Gemma || modelInfo.tokenizer === LLMTokenizer.GoogleCloud){
         return await gemmaTokenize(data)
     }
+    if(modelInfo.tokenizer === LLMTokenizer.DeepSeek){
+        return await tokenizeWebTokenizers(data, 'DeepSeek')
+    }
     if(modelInfo.tokenizer === LLMTokenizer.Cohere){
         return await tokenizeWebTokenizers(data, 'cohere')
     }
@@ -115,7 +121,7 @@ export async function encode(data:string):Promise<(number[]|Uint32Array|Int32Arr
     return await tikJS(data)
 }
 
-type tokenizerType = 'novellist'|'claude'|'novelai'|'llama'|'mistral'|'llama3'|'gemma'|'cohere'|'googleCloud'
+type tokenizerType = 'novellist'|'claude'|'novelai'|'llama'|'mistral'|'llama3'|'gemma'|'cohere'|'googleCloud'|'DeepSeek'
 
 let tikParser:Tiktoken = null
 let tokenizersTokenizer:Tokenizer = null
@@ -266,6 +272,11 @@ async function tokenizeWebTokenizers(text:string, type:tokenizerType) {
                 await (await fetch("/token/gemma/tokenizer.model")
             ).arrayBuffer())
             break
+        case 'DeepSeek':
+            tokenizersTokenizer = await webTokenizer.Tokenizer.fromJSON(
+                await (await fetch("/token/deepseek/tokenizer.json")
+            ).arrayBuffer())
+            break
 
     }
     tokenizersType = type
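The tokenizer side mirrors the model additions: the 'deepseek' entry in tokenizerList makes the option selectable, LLMTokenizer.DeepSeek routes counting through tokenizeWebTokenizers, and the new case lazily fetches /token/deepseek/tokenizer.json once and caches it in tokenizersTokenizer. A minimal usage sketch, assuming the DeepSeek tokenizer is the one selected for the current model:

    // token counting for a DeepSeek-backed model; the tokenizer JSON is fetched on first use only
    const ids = await encode("Hello, DeepSeek!")
    console.log(ids.length)   // token count used for context budgeting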