Add systemContentReplacement and Flags

Kwaroran
2024-11-27 04:33:12 +09:00
parent cc8d753dc8
commit 597c8879fc
5 changed files with 364 additions and 100 deletions

View File

@@ -164,7 +164,9 @@ export const languageEnglish = {
translatorNote: "Here, you can add a unique translation prompt for each character. This option only applies when using the Ax. model for translation. To apply it, include `{{slot::tnote}}` in the language settings. It doesn't work in group chats.",
groupInnerFormat: "This defines a format used in group chats for characters that aren't the speaker. If it is not blank, this format is used instead of the default one. If `Group Other Bot Role` is `assistant`, it is also applied to the speaker.",
groupOtherBotRole: "This defines the role used in group chats for characters that aren't the speaker.",
chatHTML: "An HTML template inserted for each chat message.\n\nYou can use CBS and special tags.\n- `<risutextbox>`: the textbox used to render the message text\n- `<risuicon>`: an icon for the user or assistant\n- `<risubuttons>`: icon buttons for chat editing, translation, etc.\n- `<risugeninfo>`: generation information button."
chatHTML: "An HTML template inserted for each chat message.\n\nYou can use CBS and special tags.\n- `<risutextbox>`: the textbox used to render the message text\n- `<risuicon>`: an icon for the user or assistant\n- `<risubuttons>`: icon buttons for chat editing, translation, etc.\n- `<risugeninfo>`: generation information button.",
systemContentReplacement: "The prompt format that replaces the system prompt if the model doesn't support system prompts.",
systemRoleReplacement: "The role that replaces the system role if the model doesn't support it.",
},
setup: {
chooseProvider: "Choose AI Provider",
@@ -797,4 +799,6 @@ export const languageEnglish = {
recommended: "Recommended",
newChat: "New Chat",
predictedOutput: "Predicted Output",
systemContentReplacement: "System Content Replacement",
systemRoleReplacement: "System Role Replacement",
}
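For reference, the `{{slot}}` placeholder in the replacement template is substituted with the original system message content. A minimal sketch of the intended substitution (the default template comes from the database changes at the end of this commit):

```ts
// Sketch: how the replacement template is expected to behave.
// "system: {{slot}}" is the default set in setDatabase() below.
const template = "system: {{slot}}";
const systemMessage = "You are a helpful assistant.";
const replaced = template.replace("{{slot}}", systemMessage);
console.log(replaced); // "system: You are a helpful assistant."
```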

View File

@@ -149,6 +149,13 @@
<TextAreaInput bind:value={DBState.db.OAIPrediction}/>
<span class="text-textcolor mt-4">{language.groupInnerFormat} <Help key='groupInnerFormat' /></span>
<TextAreaInput placeholder={`<{{char}}\'s Message>\n{{slot}}\n</{{char}}\'s Message>`} bind:value={DBState.db.groupTemplate}/>
<span class="text-textcolor mt-4">{language.systemContentReplacement} <Help key="systemContentReplacement"/></span>
<TextAreaInput bind:value={DBState.db.systemContentReplacement}/>
<span class="text-textcolor mt-4">{language.systemRoleReplacement} <Help key="systemRoleReplacement"/></span>
<SelectInput bind:value={DBState.db.systemRoleReplacement}>
<OptionInput value="user">User</OptionInput>
<OptionInput value="assistant">assistant</OptionInput>
</SelectInput>
{#if DBState.db.jsonSchemaEnabled}
<span class="text-textcolor mt-4">{language.jsonSchema} <Help key='jsonSchema' /></span>
<TextAreaInput bind:value={DBState.db.jsonSchema}/>
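The two inputs above persist a free-form template and a constrained role. A hedged sketch of the shape they write into (field names and types come from the `Database` interface extended at the end of this commit):

```ts
// Sketch of the persisted settings; names match the Database interface.
interface SystemReplacementSettings {
  systemContentReplacement: string;            // free-form template, may contain {{slot}}
  systemRoleReplacement: 'user' | 'assistant'; // constrained by the select's two options
}
const example: SystemReplacementSettings = {
  systemContentReplacement: 'system: {{slot}}',
  systemRoleReplacement: 'user',
};
```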

View File

@@ -1,3 +1,5 @@
import type { Parameter } from "../process/request"
export enum LLMFlags{
hasImageInput,
hasImageOutput,
@@ -7,6 +9,8 @@ export enum LLMFlags{
hasCache,
hasFullSystemPrompt,
hasFirstSystemPrompt,
requiresAlternateRole,
mustStartWithUserInput,
}
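The two new flags mark models that reject consecutive same-role messages or require a user message first; a hedged sketch of the kind of check they enable (the real logic is the `reformater` function added in the request code later in this commit):

```ts
// Hypothetical consumer of the new flags; 'mistral-large-latest' is an
// illustrative id, and both checks mirror what reformater() does later.
import { getModelInfo, LLMFlags } from "../model/modellist"; // path as used in the request code

const info = getModelInfo("mistral-large-latest");
if (info.flags.includes(LLMFlags.requiresAlternateRole)) {
  // consecutive messages sharing a role must be merged before sending
}
if (info.flags.includes(LLMFlags.mustStartWithUserInput)) {
  // a placeholder user message must be prepended if the chat starts otherwise
}
```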
export enum LLMProvider{
@@ -54,6 +58,7 @@ export interface LLMModel{
provider: LLMProvider
flags: LLMFlags[]
format: LLMFormat
parameters: Parameter[]
recommended?: boolean
}
@@ -72,6 +77,9 @@ const ProviderNames = new Map<LLMProvider, string>([
[LLMProvider.AWS, 'AWS'],
])
const OpenAIParameters:Parameter[] = ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty']
const ClaudeParameters:Parameter[] = ['temperature', 'top_k', 'top_p']
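Each model now declares which sampling parameters its API accepts; the request code (see the `applyParameters` call later in this commit) uses the list as a whitelist. A minimal sketch of that whitelisting, under the assumption that `applyParameters` behaves roughly like this:

```ts
import type { Parameter } from "../process/request"; // same import as above

// Assumed behavior: copy only whitelisted sampler values onto the request body.
function applyWhitelistSketch(
  body: Record<string, unknown>,
  values: Partial<Record<Parameter, number>>,
  allowed: Parameter[]
): Record<string, unknown> {
  for (const key of allowed) {
    if (values[key] !== undefined) body[key] = values[key];
  }
  return body;
}
```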
export const LLMModels: LLMModel[] = [
{
id: 'gpt35',
@@ -79,7 +87,8 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-3.5',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [LLMFlags.hasFullSystemPrompt],
parameters: OpenAIParameters,
},
{
id: 'instructgpt35',
@@ -87,7 +96,8 @@ export const LLMModels: LLMModel[] = [
name: 'InstructGPT-3.5',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAILegacyInstruct,
flags: [],
flags: [LLMFlags.hasFullSystemPrompt],
parameters: OpenAIParameters,
},
{
id: 'gpt4_turbo',
@@ -95,7 +105,8 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-4 Turbo',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [LLMFlags.hasFullSystemPrompt],
parameters: OpenAIParameters,
},
{
id: 'gpt4o',
@@ -104,9 +115,11 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [
LLMFlags.hasImageInput
LLMFlags.hasImageInput,
LLMFlags.hasFullSystemPrompt
],
recommended: true
recommended: true,
parameters: OpenAIParameters,
},
{
id: 'gpt4om',
@@ -115,9 +128,11 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [
LLMFlags.hasImageInput
LLMFlags.hasImageInput,
LLMFlags.hasFullSystemPrompt
],
recommended: true
recommended: true,
parameters: OpenAIParameters,
},
{
id: 'gpt4',
@@ -125,7 +140,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-4',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt4_32k',
@@ -133,7 +151,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-4 32k',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt35_16k',
@@ -141,7 +162,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-3.5 Turbo 16k',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt4_0314',
@@ -149,7 +173,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-4 0314',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt4_0613',
@@ -157,7 +184,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-4 0613',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt4_32k_0613',
@@ -165,7 +195,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-4 32k 0613',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt4_1106',
@@ -173,7 +206,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-4 1106',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt35_0125',
@@ -181,7 +217,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-3.5 Turbo 0125',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt35_1106',
@@ -189,7 +228,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-3.5 Turbo 1106',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt35_0613',
@@ -197,7 +239,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-3.5 Turbo 0613',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt35_16k_0613',
@@ -205,7 +250,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-3.5 Turbo 16k',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt35_0301',
@@ -213,7 +261,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-3.5 Turbo 0301',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt4_0125',
@@ -221,7 +272,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-4 0125',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gptvi4_1106',
@@ -230,6 +284,7 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [LLMFlags.hasImageInput],
parameters: OpenAIParameters,
},
{
id: 'gpt4_turbo_20240409',
@@ -237,7 +292,10 @@ export const LLMModels: LLMModel[] = [
name: 'GPT-4 Turbo 2024-04-09',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt4o-2024-05-13',
@@ -246,8 +304,10 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [
LLMFlags.hasImageInput
LLMFlags.hasImageInput,
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt4o-2024-08-06',
@@ -256,8 +316,10 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [
LLMFlags.hasImageInput
LLMFlags.hasImageInput,
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt4o-2024-11-20',
@@ -266,8 +328,10 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [
LLMFlags.hasImageInput
LLMFlags.hasImageInput,
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt4o-chatgpt',
@@ -276,8 +340,10 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [
LLMFlags.hasImageInput
LLMFlags.hasImageInput,
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt4o1-preview',
@@ -285,7 +351,10 @@ export const LLMModels: LLMModel[] = [
name: 'o1 Preview',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
id: 'gpt4o1-mini',
@@ -293,7 +362,10 @@ export const LLMModels: LLMModel[] = [
name: 'o1 Mini',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [
LLMFlags.hasFullSystemPrompt
],
parameters: OpenAIParameters,
},
{
name: "Claude 3.5 Sonnet",
@@ -301,8 +373,13 @@ export const LLMModels: LLMModel[] = [
shortName: "3.5 Sonnet",
provider: LLMProvider.Anthropic,
format: LLMFormat.Anthropic,
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
recommended: true
flags: [
LLMFlags.hasPrefill,
LLMFlags.hasImageInput,
LLMFlags.hasFirstSystemPrompt
],
recommended: true,
parameters: ClaudeParameters,
},
{
name: "Claude 3.5 Haiku",
@@ -310,8 +387,13 @@ export const LLMModels: LLMModel[] = [
shortName: "3.5 Haiku",
provider: LLMProvider.Anthropic,
format: LLMFormat.Anthropic,
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
recommended: true
flags: [
LLMFlags.hasPrefill,
LLMFlags.hasImageInput,
LLMFlags.hasFirstSystemPrompt
],
recommended: true,
parameters: ClaudeParameters,
},
{
name: 'Claude 3.5 Sonnet (20241022)',
@@ -319,7 +401,12 @@ export const LLMModels: LLMModel[] = [
shortName: "3.5 Sonnet 1022",
provider: LLMProvider.Anthropic,
format: LLMFormat.Anthropic,
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
flags: [
LLMFlags.hasPrefill,
LLMFlags.hasImageInput,
LLMFlags.hasFirstSystemPrompt
],
parameters: ClaudeParameters,
},
{
name: "Claude 3.5 Haiku (20241022)",
@@ -327,7 +414,12 @@ export const LLMModels: LLMModel[] = [
shortName: "3.5 Haiku 1022",
provider: LLMProvider.Anthropic,
format: LLMFormat.Anthropic,
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
flags: [
LLMFlags.hasPrefill,
LLMFlags.hasImageInput,
LLMFlags.hasFirstSystemPrompt
],
parameters: ClaudeParameters,
},
{
name: 'Claude 3 Haiku (20240307)',
@@ -335,7 +427,12 @@ export const LLMModels: LLMModel[] = [
shortName: "3 Haiku 0307",
provider: LLMProvider.Anthropic,
format: LLMFormat.Anthropic,
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
flags: [
LLMFlags.hasPrefill,
LLMFlags.hasImageInput,
LLMFlags.hasFirstSystemPrompt
],
parameters: ClaudeParameters,
},
{
name: 'Claude 3.5 Sonnet (20240620)',
@@ -343,7 +440,12 @@ export const LLMModels: LLMModel[] = [
shortName: "3.5 Sonnet 0620",
provider: LLMProvider.Anthropic,
format: LLMFormat.Anthropic,
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
flags: [
LLMFlags.hasPrefill,
LLMFlags.hasImageInput,
LLMFlags.hasFirstSystemPrompt
],
parameters: ClaudeParameters,
},
{
name: 'Claude 3 Opus (20240229)',
@@ -351,7 +453,12 @@ export const LLMModels: LLMModel[] = [
shortName: "3 Opus 0229",
provider: LLMProvider.Anthropic,
format: LLMFormat.Anthropic,
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
flags: [
LLMFlags.hasPrefill,
LLMFlags.hasImageInput,
LLMFlags.hasFirstSystemPrompt
],
parameters: ClaudeParameters,
},
{
name: 'Claude 3 Sonnet (20240229)',
@@ -359,14 +466,22 @@ export const LLMModels: LLMModel[] = [
shortName: "3 Sonnet 0229",
provider: LLMProvider.Anthropic,
format: LLMFormat.Anthropic,
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
flags: [
LLMFlags.hasPrefill,
LLMFlags.hasImageInput,
LLMFlags.hasFirstSystemPrompt
],
parameters: ClaudeParameters,
},
{
name: 'Claude 2.1',
id: 'claude-2.1',
provider: LLMProvider.Anthropic,
format: LLMFormat.AnthropicLegacy,
flags: [LLMFlags.hasPrefill],
flags: [
LLMFlags.hasPrefill,
],
parameters: ClaudeParameters,
},
{
name: 'Claude 2',
@@ -374,6 +489,7 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.Anthropic,
format: LLMFormat.AnthropicLegacy,
flags: [LLMFlags.hasPrefill],
parameters: ClaudeParameters,
},
{
name: 'Claude 2 100k',
@@ -381,6 +497,7 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.Anthropic,
format: LLMFormat.AnthropicLegacy,
flags: [LLMFlags.hasPrefill],
parameters: ClaudeParameters,
},
{
name: 'Claude v1',
@@ -388,6 +505,7 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.Anthropic,
format: LLMFormat.AnthropicLegacy,
flags: [LLMFlags.hasPrefill],
parameters: ClaudeParameters,
},
{
name: 'Claude v1 100k',
@@ -395,6 +513,7 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.Anthropic,
format: LLMFormat.AnthropicLegacy,
flags: [LLMFlags.hasPrefill],
parameters: ClaudeParameters,
},
{
name: 'Claude Instant v1',
@@ -402,6 +521,7 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.Anthropic,
format: LLMFormat.AnthropicLegacy,
flags: [LLMFlags.hasPrefill],
parameters: ClaudeParameters,
},
{
name: 'Claude Instant v1 100k',
@@ -409,6 +529,7 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.Anthropic,
format: LLMFormat.AnthropicLegacy,
flags: [LLMFlags.hasPrefill],
parameters: ClaudeParameters,
},
{
name: 'Claude v1.2',
@@ -416,6 +537,7 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.Anthropic,
format: LLMFormat.AnthropicLegacy,
flags: [LLMFlags.hasPrefill],
parameters: ClaudeParameters,
},
{
name: 'Claude v1.0',
@@ -423,49 +545,56 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.Anthropic,
format: LLMFormat.AnthropicLegacy,
flags: [LLMFlags.hasPrefill],
parameters: ClaudeParameters,
},
{
name: 'Claude 3.5 Sonnet (20241022) v2',
id: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
provider: LLMProvider.AWS,
format: LLMFormat.AWSBedrockClaude,
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ClaudeParameters,
},
{
name: 'Claude 3.5 Sonnet (20240620) v1',
id: 'anthropic.claude-3-5-sonnet-20240620-v1:0',
provider: LLMProvider.AWS,
format: LLMFormat.AWSBedrockClaude,
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ClaudeParameters,
},
{
name: 'Claude 3 Opus (20240229) v1',
id: 'anthropic.claude-3-opus-20240229-v1:0',
provider: LLMProvider.AWS,
format: LLMFormat.AWSBedrockClaude,
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ClaudeParameters,
},
{
name: 'Ooba',
id: 'ooba',
provider: LLMProvider.AsIs,
format: LLMFormat.Ooba,
flags: [],
recommended: true
flags: [LLMFlags.hasFirstSystemPrompt],
recommended: true,
parameters: []
},
{
name: 'Mancer',
id: 'mancer',
provider: LLMProvider.AsIs,
format: LLMFormat.OobaLegacy,
flags: [],
flags: [LLMFlags.hasFirstSystemPrompt],
parameters: []
},
{
name: 'OpenRouter',
id: 'openrouter',
provider: LLMProvider.AsIs,
format: LLMFormat.OpenAICompatible,
flags: [],
flags: [LLMFlags.hasFullSystemPrompt, LLMFlags.hasImageInput],
parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k'],
recommended: true
},
{
@@ -474,8 +603,9 @@ export const LLMModels: LLMModel[] = [
shortName: 'Mistral S',
provider: LLMProvider.Mistral,
format: LLMFormat.Mistral,
flags: [],
recommended: true
flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
recommended: true,
parameters: ['temperature', 'presence_penalty', 'frequency_penalty']
},
{
name: 'Mistral Medium Latest',
@@ -483,8 +613,9 @@ export const LLMModels: LLMModel[] = [
shortName: 'Mistral M',
provider: LLMProvider.Mistral,
format: LLMFormat.Mistral,
flags: [],
recommended: true
flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
recommended: true,
parameters: ['temperature', 'presence_penalty', 'frequency_penalty']
},
{
name: 'Mistral Large 2411',
@@ -492,7 +623,8 @@ export const LLMModels: LLMModel[] = [
shortName: 'Mistral L 2411',
provider: LLMProvider.Mistral,
format: LLMFormat.Mistral,
flags: [],
flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
parameters: ['temperature', 'presence_penalty', 'frequency_penalty']
},
{
name: 'Mistral Nemo',
@@ -500,7 +632,8 @@ export const LLMModels: LLMModel[] = [
shortName: 'Mistral Nemo',
provider: LLMProvider.Mistral,
format: LLMFormat.Mistral,
flags: [],
flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
parameters: ['temperature', 'presence_penalty', 'frequency_penalty']
},
{
name: 'Mistral Large Latest',
@@ -508,7 +641,8 @@ export const LLMModels: LLMModel[] = [
shortName: 'Mistral L',
provider: LLMProvider.Mistral,
format: LLMFormat.Mistral,
flags: [],
flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
parameters: ['temperature', 'presence_penalty', 'frequency_penalty'],
recommended: true
},
{
@@ -516,31 +650,35 @@ export const LLMModels: LLMModel[] = [
id: 'gemini-1.5-pro-exp-0827',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.GoogleCloud,
flags: [LLMFlags.hasImageInput],
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Exp 1121",
id: 'gemini-exp-1121',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.GoogleCloud,
flags: [LLMFlags.hasImageInput],
recommended: true
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
recommended: true,
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Pro 1.5",
id: 'gemini-1.5-pro-latest',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.GoogleCloud,
flags: [LLMFlags.hasImageInput],
recommended: true
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
recommended: true,
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Flash 1.5",
id: 'gemini-1.5-flash',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.GoogleCloud,
flags: [LLMFlags.hasImageInput],
recommended: true
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
recommended: true,
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Exp 1121",
@@ -548,7 +686,8 @@ export const LLMModels: LLMModel[] = [
internalID: 'gemini-exp-1121',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.VertexAIGemini,
flags: [LLMFlags.hasImageInput],
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Pro 1.5",
@@ -556,7 +695,8 @@ export const LLMModels: LLMModel[] = [
internalID: 'gemini-1.5-pro-latest',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.VertexAIGemini,
flags: [LLMFlags.hasImageInput],
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Flash 1.5",
@@ -564,64 +704,79 @@ export const LLMModels: LLMModel[] = [
internalID: 'gemini-1.5-flash',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.VertexAIGemini,
flags: [LLMFlags.hasImageInput],
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Exp 1114",
id: 'gemini-exp-1114',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.GoogleCloud,
flags: [LLMFlags.hasImageInput],
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Pro 1.5 002",
id: 'gemini-1.5-pro-002',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.GoogleCloud,
flags: [LLMFlags.hasImageInput],
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Flash 1.5 002",
id: 'gemini-1.5-flash-002',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.GoogleCloud,
flags: [LLMFlags.hasImageInput],
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Pro",
id: 'gemini-pro',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.GoogleCloud,
flags: [LLMFlags.hasImageInput],
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Pro Vision",
id: 'gemini-pro-vision',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.GoogleCloud,
flags: [LLMFlags.hasImageInput],
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Ultra",
id: 'gemini-ultra',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.GoogleCloud,
flags: [LLMFlags.hasImageInput],
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: "Gemini Ultra Vision",
id: 'gemini-ultra-vision',
provider: LLMProvider.GoogleCloud,
format: LLMFormat.GoogleCloud,
flags: [LLMFlags.hasImageInput],
flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
parameters: ['temperature', 'top_k', 'top_p']
},
{
name: 'Kobold',
id: 'kobold',
provider: LLMProvider.AsIs,
format: LLMFormat.Kobold,
flags: [],
recommended: true
flags: [LLMFlags.hasFirstSystemPrompt],
recommended: true,
parameters: [
'temperature',
'top_p',
'repetition_penalty',
'top_k',
'top_a'
]
},
{
name: "SuperTrin",
@@ -629,6 +784,7 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.NovelList,
format: LLMFormat.NovelList,
flags: [],
parameters: []
},
{
name: "Damsel",
@@ -636,6 +792,7 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.NovelList,
format: LLMFormat.NovelList,
flags: [],
parameters: []
},
{
name: "Command R",
@@ -643,8 +800,11 @@ export const LLMModels: LLMModel[] = [
internalID: 'command-r',
provider: LLMProvider.Cohere,
format: LLMFormat.Cohere,
flags: [],
recommended: true
flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
recommended: true,
parameters: [
'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
]
},
{
name: "Command R Plus",
@@ -652,8 +812,11 @@ export const LLMModels: LLMModel[] = [
internalID: 'command-r-plus',
provider: LLMProvider.Cohere,
format: LLMFormat.Cohere,
flags: [],
recommended: true
flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
recommended: true,
parameters: [
'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
]
},
{
name: "Command R 08-2024",
@@ -661,7 +824,10 @@ export const LLMModels: LLMModel[] = [
internalID: 'command-r-08-2024',
provider: LLMProvider.Cohere,
format: LLMFormat.Cohere,
flags: [],
flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
parameters: [
'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
]
},
{
name: "Command R 03-2024",
@@ -669,7 +835,10 @@ export const LLMModels: LLMModel[] = [
internalID: 'command-r-03-2024',
provider: LLMProvider.Cohere,
format: LLMFormat.Cohere,
flags: [],
flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
parameters: [
'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
]
},
{
name: "Command R Plus 08-2024",
@@ -677,7 +846,10 @@ export const LLMModels: LLMModel[] = [
internalID: 'command-r-plus-08-2024',
provider: LLMProvider.Cohere,
format: LLMFormat.Cohere,
flags: [],
flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
parameters: [
'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
]
},
{
name: "Command R Plus 04-2024",
@@ -685,67 +857,82 @@ export const LLMModels: LLMModel[] = [
internalID: 'command-r-plus-04-2024',
provider: LLMProvider.Cohere,
format: LLMFormat.Cohere,
flags: [],
flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
parameters: [
'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
]
},
{
name: "Clio",
id: 'novelai',
provider: LLMProvider.NovelAI,
format: LLMFormat.NovelAI,
flags: [],
recommended: true
flags: [LLMFlags.hasFullSystemPrompt],
recommended: true,
parameters: [
'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
]
},
{
name: "Kayra",
id: 'novelai_kayra',
provider: LLMProvider.NovelAI,
format: LLMFormat.NovelAI,
flags: [],
recommended: true
flags: [LLMFlags.hasFullSystemPrompt],
recommended: true,
parameters: [
'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
]
},
{
id: 'ollama-hosted',
name: 'Ollama',
provider: LLMProvider.AsIs,
format: LLMFormat.Ollama,
flags: [],
flags: [LLMFlags.hasFullSystemPrompt],
parameters: OpenAIParameters
},
{
id: 'hf:::Xenova/opt-350m',
name: 'opt-350m',
provider: LLMProvider.WebLLM,
format: LLMFormat.WebLLM,
flags: [],
flags: [LLMFlags.hasFullSystemPrompt],
parameters: OpenAIParameters
},
{
id: 'hf:::Xenova/tiny-random-mistral',
name: 'tiny-random-mistral',
provider: LLMProvider.WebLLM,
format: LLMFormat.WebLLM,
flags: [],
flags: [LLMFlags.hasFullSystemPrompt],
parameters: OpenAIParameters
},
{
id: 'hf:::Xenova/gpt2-large-conversational',
name: 'gpt2-large-conversational',
provider: LLMProvider.WebLLM,
format: LLMFormat.WebLLM,
flags: [],
flags: [LLMFlags.hasFullSystemPrompt],
parameters: OpenAIParameters
},
{
id: 'custom',
name: "Plugin",
provider: LLMProvider.AsIs,
format: LLMFormat.Plugin,
flags: [],
recommended: true
flags: [LLMFlags.hasFullSystemPrompt],
recommended: true,
parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k']
},
{
id: 'reverse_proxy',
name: "Custom API",
provider: LLMProvider.AsIs,
format: LLMFormat.OpenAICompatible,
flags: [],
recommended: true
flags: [LLMFlags.hasFullSystemPrompt],
recommended: true,
parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k']
}
]
@@ -757,12 +944,13 @@ for(let model of LLMModels){
export function getModelInfo(id: string): LLMModel{
const found = LLMModels.find(model => model.id === id) ?? {
const found:LLMModel = LLMModels.find(model => model.id === id) ?? {
id,
name: id,
provider: LLMProvider.AsIs,
format: LLMFormat.OpenAICompatible,
flags: [],
parameters: OpenAIParameters
}
if(found) return found
@@ -778,6 +966,7 @@ export function getModelInfo(id: string): LLMModel{
provider: LLMProvider.WebLLM,
format: LLMFormat.WebLLM,
flags: [],
parameters: OpenAIParameters
}
}
if(id.startsWith('horde:::')){
@@ -791,6 +980,7 @@ export function getModelInfo(id: string): LLMModel{
provider: LLMProvider.Horde,
format: LLMFormat.Horde,
flags: [],
parameters: OpenAIParameters
}
}
@@ -803,6 +993,7 @@ export function getModelInfo(id: string): LLMModel{
provider: LLMProvider.AsIs,
format: LLMFormat.OpenAICompatible,
flags: [],
parameters: OpenAIParameters
}
}
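With the fallbacks above, every path through `getModelInfo` now returns a `parameters` list, so callers never need to null-check it. A short usage sketch (the id is illustrative):

```ts
// Unknown ids fall back to an OpenAI-compatible profile with default parameters.
const info = getModelInfo("some-unlisted-model"); // hypothetical id
console.log(info.format === LLMFormat.OpenAICompatible); // true
console.log(info.parameters); // ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty']
```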

View File

@@ -20,7 +20,7 @@ import {Ollama} from 'ollama/dist/browser.mjs'
import { applyChatTemplate } from "./templates/chatTemplate";
import { OobaParams } from "./prompt";
import { extractJSON, getOpenAIJSONSchema } from "./templates/jsonSchema";
import { getModelInfo, LLMFormat, type LLMModel } from "../model/modellist";
import { getModelInfo, LLMFlags, LLMFormat, type LLMModel } from "../model/modellist";
@@ -88,7 +88,7 @@ interface OaiFunctions {
}
type Parameter = 'temperature'|'top_k'|'repetition_penalty'|'min_p'|'top_a'|'top_p'|'frequency_penalty'|'presence_penalty'
export type Parameter = 'temperature'|'top_k'|'repetition_penalty'|'min_p'|'top_a'|'top_p'|'frequency_penalty'|'presence_penalty'
type ParameterMap = {
[key in Parameter]?: string;
};
@@ -182,6 +182,63 @@ export interface OpenAIChatExtra {
multimodals?:MultiModal[]
}
function reformater(formated:OpenAIChat[],modelInfo:LLMModel){
const db = getDatabase()
let systemPrompt:OpenAIChat|null = null
if(!modelInfo.flags.includes(LLMFlags.hasFullSystemPrompt)){
if(modelInfo.flags.includes(LLMFlags.hasFirstSystemPrompt)){
if(formated[0].role === 'system'){
systemPrompt = formated[0]
formated = formated.slice(1)
}
}
for(let i=0;i<formated.length;i++){
if(formated[i].role === 'system'){
formated[i].content = db.systemContentReplacement.replace('{{slot}}', formated[i].content)
formated[i].role = db.systemRoleReplacement
}
}
}
if(modelInfo.flags.includes(LLMFlags.requiresAlternateRole)){
let newFormated:OpenAIChat[] = []
for(let i=0;i<formated.length;i++){
const m = formated[i]
if(newFormated.length === 0){
newFormated.push(m)
continue
}
if(newFormated[newFormated.length-1].role === m.role){
newFormated[newFormated.length-1].content += '\n' + m.content
continue
}
else{
newFormated.push(m)
}
}
formated = newFormated
}
if(modelInfo.flags.includes(LLMFlags.mustStartWithUserInput)){
if(formated.length === 0 || formated[0].role !== 'user'){
formated.unshift({
role: 'user',
content: ' '
})
}
}
if(systemPrompt){
formated.unshift(systemPrompt)
}
return formated
}
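To make the transformation concrete, here is a worked example of what `reformater` produces for a model flagged with `hasFirstSystemPrompt`, `requiresAlternateRole`, and `mustStartWithUserInput`, assuming the default settings `systemContentReplacement = 'system: {{slot}}'` and `systemRoleReplacement = 'user'` (message shapes simplified to role/content):

```ts
// Input chat:
const input = [
  { role: 'system', content: 'Stay in character.' }, // kept: first system prompt
  { role: 'system', content: 'Scene: a tavern.' },   // mid-chat system message
  { role: 'user', content: 'Hello!' },
];
// Output: the mid-chat system message is rewritten with the replacement
// template and role, then merged into the adjacent user message because
// consecutive same-role messages are not allowed:
const output = [
  { role: 'system', content: 'Stay in character.' },
  { role: 'user', content: 'system: Scene: a tavern.\nHello!' },
];
```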
export async function requestChatDataMain(arg:requestDataArgument, model:'model'|'submodel', abortSignal:AbortSignal=null):Promise<requestDataResponse> {
const db = getDatabase()
@@ -206,6 +263,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
const format = targ.modelInfo.format
targ.formated = reformater(targ.formated, targ.modelInfo)
switch(format){
case LLMFormat.OpenAICompatible:
case LLMFormat.Mistral:
@@ -437,14 +496,13 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
}
const res = await globalFetch(arg.customURL ?? "https://api.mistral.ai/v1/chat/completions", {
body: {
body: applyParameters({
model: requestModel,
messages: reformatedChat,
temperature: arg.temperature,
max_tokens: arg.maxTokens,
top_p: db.top_p,
safe_prompt: false
},
safe_prompt: false,
max_tokens: arg.maxTokens,
}, ['temperature', 'presence_penalty', 'frequency_penalty'] ),
headers: {
"Authorization": "Bearer " + db.mistralKey,
},
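The effect of switching this request body to `applyParameters`: only the three whitelisted samplers are copied from settings, so `top_p`, for example, no longer reaches Mistral. A sketch of that filtering with placeholder values:

```ts
// Placeholder values; top_p is dropped because it is not whitelisted for Mistral.
const allowed = ['temperature', 'presence_penalty', 'frequency_penalty'] as const;
const settings = { temperature: 0.8, top_p: 0.95, presence_penalty: 0.1, frequency_penalty: 0.2 };
const filtered: Record<string, number> = {};
for (const k of allowed) filtered[k] = settings[k];
// filtered => { temperature: 0.8, presence_penalty: 0.1, frequency_penalty: 0.2 }
```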

View File

@@ -446,6 +446,8 @@ export function setDatabase(data:Database){
data.groupOtherBotRole ??= 'user'
data.customGUI ??= ''
data.customAPIFormat ??= LLMFormat.OpenAICompatible
data.systemContentReplacement ??= `system: {{slot}}`
data.systemRoleReplacement ??= 'user'
changeLanguage(data.language)
setDatabaseLite(data)
}
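The `??=` assignments fill defaults only when the field is null or undefined, so values already chosen by the user survive this migration:

```ts
// Nullish assignment only fires on missing fields; existing choices are kept.
const data: { systemRoleReplacement?: 'user' | 'assistant' } = { systemRoleReplacement: 'assistant' };
data.systemRoleReplacement ??= 'user';
console.log(data.systemRoleReplacement); // 'assistant' (unchanged)
```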
@@ -821,6 +823,8 @@ export interface Database{
logShare:boolean
OAIPrediction:string
customAPIFormat:LLMFormat
systemContentReplacement:string
systemRoleReplacement:'user'|'assistant'
}
export interface customscript{