Add systemContentReplacement and Flags

Kwaroran
2024-11-27 04:33:12 +09:00
parent cc8d753dc8
commit 597c8879fc
5 changed files with 364 additions and 100 deletions

View File

@@ -164,7 +164,9 @@ export const languageEnglish = {
     translatorNote: "Here, you can add a unique translation prompt for each character. This option only applies when using the Ax. model for translation. To apply it, include `{{slot::tnote}}` in the language settings. It doesn't work in group chats.",
     groupInnerFormat: "This defines a format that is used in group chat for characters that isn't speaker. if it is not blank, it will use this format instead of the default format. if `Group Other Bot Role` is `assistant`, it will also be applied to the speaker.",
     groupOtherBotRole: "This defines a role that is used in group chat for characters that isn't speaker.",
-    chatHTML: "A HTML that would be inserted as each chat.\n\nYou can use CBS and special tags.\n- `<risutextbox>`: a textbox that would be used to render text\n- `<risuicon>`: an icon for user or assistant\n- `<risubuttons>`: icon buttons for chat edit, translations and etc.\n- `<risugeninfo>`: generation information button."
+    chatHTML: "A HTML that would be inserted as each chat.\n\nYou can use CBS and special tags.\n- `<risutextbox>`: a textbox that would be used to render text\n- `<risuicon>`: an icon for user or assistant\n- `<risubuttons>`: icon buttons for chat edit, translations and etc.\n- `<risugeninfo>`: generation information button.",
+    systemContentReplacement: "The prompt format that replaces system prompt if the model doesn't support system prompt.",
+    systemRoleReplacement: "The role that replaces system role if the model doesn't support system role.",
 },
 setup: {
     chooseProvider: "Choose AI Provider",
@@ -797,4 +799,6 @@ export const languageEnglish = {
     recommended: "Recommended",
     newChat: "New Chat",
     predictedOutput: "Predicted Output",
+    systemContentReplacement: "System Content Replacement",
+    systemRoleReplacement: "System Role Replacement",
 }
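
The two help strings added above describe a `{{slot}}` template: when a model cannot take a system prompt, the original system text is substituted into `systemContentReplacement` wherever `{{slot}}` appears, and the message is re-sent under `systemRoleReplacement`. A minimal TypeScript sketch of that substitution, with hypothetical example values:

// Illustrative values only; the template and message content are hypothetical.
const systemContentReplacement = "Instruction: {{slot}}"
const systemRoleReplacement: "user" | "assistant" = "user"

const original = { role: "system", content: "Stay in character." }
const converted = {
    role: systemRoleReplacement,
    content: systemContentReplacement.replace("{{slot}}", original.content),
}
// converted => { role: "user", content: "Instruction: Stay in character." }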

View File

@@ -149,6 +149,13 @@
     <TextAreaInput bind:value={DBState.db.OAIPrediction}/>
     <span class="text-textcolor mt-4">{language.groupInnerFormat} <Help key='groupInnerFormat' /></span>
     <TextAreaInput placeholder={`<{{char}}\'s Message>\n{{slot}}\n</{{char}}\'s Message>`} bind:value={DBState.db.groupTemplate}/>
+    <span class="text-textcolor mt-4">{language.systemContentReplacement} <Help key="systemContentReplacement"/></span>
+    <TextAreaInput bind:value={DBState.db.systemContentReplacement}/>
+    <span class="text-textcolor mt-4">{language.systemRoleReplacement} <Help key="systemRoleReplacement"/></span>
+    <SelectInput bind:value={DBState.db.systemRoleReplacement}>
+        <OptionInput value="user">User</OptionInput>
+        <OptionInput value="assistant">assistant</OptionInput>
+    </SelectInput>
 {#if DBState.db.jsonSchemaEnabled}
     <span class="text-textcolor mt-4">{language.jsonSchema} <Help key='jsonSchema' /></span>
     <TextAreaInput bind:value={DBState.db.jsonSchema}/>

View File

@@ -1,3 +1,5 @@
+import type { Parameter } from "../process/request"
+
 export enum LLMFlags{
     hasImageInput,
     hasImageOutput,
@@ -7,6 +9,8 @@ export enum LLMFlags{
     hasCache,
     hasFullSystemPrompt,
     hasFirstSystemPrompt,
+    requiresAlternateRole,
+    mustStartWithUserInput,
 }
 
 export enum LLMProvider{
@@ -54,6 +58,7 @@ export interface LLMModel{
     provider: LLMProvider
     flags: LLMFlags[]
     format: LLMFormat
+    parameters: Parameter[]
     recommended?: boolean
 }
 
@@ -72,6 +77,9 @@ const ProviderNames = new Map<LLMProvider, string>([
     [LLMProvider.AWS, 'AWS'],
 ])
 
+const OpenAIParameters:Parameter[] = ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty']
+const ClaudeParameters:Parameter[] = ['temperature', 'top_k', 'top_p']
+
 export const LLMModels: LLMModel[] = [
     {
         id: 'gpt35',
@@ -79,7 +87,8 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-3.5',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [LLMFlags.hasFullSystemPrompt],
+        parameters: OpenAIParameters,
     },
     {
         id: 'instructgpt35',
@@ -87,7 +96,8 @@ export const LLMModels: LLMModel[] = [
         name: 'InstructGPT-3.5',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAILegacyInstruct,
-        flags: [],
+        flags: [LLMFlags.hasFullSystemPrompt],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4_turbo',
@@ -95,7 +105,8 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-4 Turbo',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [LLMFlags.hasFullSystemPrompt],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4o',
@@ -104,9 +115,11 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
         flags: [
-            LLMFlags.hasImageInput
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFullSystemPrompt
         ],
-        recommended: true
+        recommended: true,
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4om',
@@ -115,9 +128,11 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
         flags: [
-            LLMFlags.hasImageInput
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFullSystemPrompt
        ],
-        recommended: true
+        recommended: true,
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4',
@@ -125,7 +140,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-4',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4_32k',
@@ -133,7 +151,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-4 32k',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt35_16k',
@@ -141,7 +162,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-3.5 Turbo 16k',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4_0314',
@@ -149,7 +173,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-4 0314',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4_0613',
@@ -157,7 +184,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-4 0613',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4_32k_0613',
@@ -165,7 +195,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-4 32k 0613',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4_1106',
@@ -173,7 +206,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-4 1106',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt35_0125',
@@ -181,7 +217,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-3.5 Turbo 0125',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt35_1106',
@@ -189,7 +228,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-3.5 Turbo 1106',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt35_0613',
@@ -197,7 +239,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-3.5 Turbo 0613',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt35_16k_0613',
@@ -205,7 +250,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-3.5 Turbo 16k',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt35_0301',
@@ -213,7 +261,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-3.5 Turbo 0301',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4_0125',
@@ -221,7 +272,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-4 0125',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gptvi4_1106',
@@ -230,6 +284,7 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
         flags: [LLMFlags.hasImageInput],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4_turbo_20240409',
@@ -237,7 +292,10 @@ export const LLMModels: LLMModel[] = [
         name: 'GPT-4 Turbo 2024-04-09',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4o-2024-05-13',
@@ -246,8 +304,10 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
         flags: [
-            LLMFlags.hasImageInput
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFullSystemPrompt
         ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4o-2024-08-06',
@@ -256,8 +316,10 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
         flags: [
-            LLMFlags.hasImageInput
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFullSystemPrompt
         ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4o-2024-11-20',
@@ -266,8 +328,10 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
         flags: [
-            LLMFlags.hasImageInput
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFullSystemPrompt
         ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4o-chatgpt',
@@ -276,8 +340,10 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
         flags: [
-            LLMFlags.hasImageInput
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFullSystemPrompt
         ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4o1-preview',
@@ -285,7 +351,10 @@ export const LLMModels: LLMModel[] = [
         name: 'o1 Preview',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         id: 'gpt4o1-mini',
@@ -293,7 +362,10 @@ export const LLMModels: LLMModel[] = [
         name: 'o1 Mini',
         provider: LLMProvider.OpenAI,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [
+            LLMFlags.hasFullSystemPrompt
+        ],
+        parameters: OpenAIParameters,
     },
     {
         name: "Claude 3.5 Sonnet",
@@ -301,8 +373,13 @@ export const LLMModels: LLMModel[] = [
         shortName: "3.5 Sonnet",
         provider: LLMProvider.Anthropic,
         format: LLMFormat.Anthropic,
-        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
-        recommended: true
+        flags: [
+            LLMFlags.hasPrefill,
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFirstSystemPrompt
+        ],
+        recommended: true,
+        parameters: ClaudeParameters,
     },
     {
         name: "Claude 3.5 Haiku",
@@ -310,8 +387,13 @@ export const LLMModels: LLMModel[] = [
         shortName: "3.5 Haiku",
         provider: LLMProvider.Anthropic,
         format: LLMFormat.Anthropic,
-        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
-        recommended: true
+        flags: [
+            LLMFlags.hasPrefill,
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFirstSystemPrompt
+        ],
+        recommended: true,
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude 3.5 Sonnet (20241022)',
@@ -319,7 +401,12 @@ export const LLMModels: LLMModel[] = [
         shortName: "3.5 Sonnet 1022",
         provider: LLMProvider.Anthropic,
         format: LLMFormat.Anthropic,
-        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
+        flags: [
+            LLMFlags.hasPrefill,
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFirstSystemPrompt
+        ],
+        parameters: ClaudeParameters,
     },
     {
         name: "Claude 3.5 Haiku (20241022)",
@@ -327,7 +414,12 @@ export const LLMModels: LLMModel[] = [
         shortName: "3.5 Haiku 1022",
         provider: LLMProvider.Anthropic,
         format: LLMFormat.Anthropic,
-        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
+        flags: [
+            LLMFlags.hasPrefill,
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFirstSystemPrompt
+        ],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude 3 Haiku (20240307)',
@@ -335,7 +427,12 @@ export const LLMModels: LLMModel[] = [
         shortName: "3 Haiku 0307",
         provider: LLMProvider.Anthropic,
         format: LLMFormat.Anthropic,
-        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
+        flags: [
+            LLMFlags.hasPrefill,
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFirstSystemPrompt
+        ],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude 3.5 Sonnet (20240620)',
@@ -343,7 +440,12 @@ export const LLMModels: LLMModel[] = [
         shortName: "3.5 Sonnet 0620",
         provider: LLMProvider.Anthropic,
         format: LLMFormat.Anthropic,
-        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
+        flags: [
+            LLMFlags.hasPrefill,
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFirstSystemPrompt
+        ],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude 3 Opus (20240229)',
@@ -351,7 +453,12 @@ export const LLMModels: LLMModel[] = [
         shortName: "3 Opus 0229",
         provider: LLMProvider.Anthropic,
         format: LLMFormat.Anthropic,
-        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
+        flags: [
+            LLMFlags.hasPrefill,
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFirstSystemPrompt
+        ],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude 3 Sonnet (20240229)',
@@ -359,14 +466,22 @@ export const LLMModels: LLMModel[] = [
         shortName: "3 Sonnet 0229",
         provider: LLMProvider.Anthropic,
         format: LLMFormat.Anthropic,
-        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
+        flags: [
+            LLMFlags.hasPrefill,
+            LLMFlags.hasImageInput,
+            LLMFlags.hasFirstSystemPrompt
+        ],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude 2.1',
         id: 'claude-2.1',
         provider: LLMProvider.Anthropic,
         format: LLMFormat.AnthropicLegacy,
-        flags: [LLMFlags.hasPrefill],
+        flags: [
+            LLMFlags.hasPrefill,
+        ],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude 2',
@@ -374,6 +489,7 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.Anthropic,
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude 2 100k',
@@ -381,6 +497,7 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.Anthropic,
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude v1',
@@ -388,6 +505,7 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.Anthropic,
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude v1 100k',
@@ -395,6 +513,7 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.Anthropic,
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude Instant v1',
@@ -402,6 +521,7 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.Anthropic,
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude Instant v1 100k',
@@ -409,6 +529,7 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.Anthropic,
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude v1.2',
@@ -416,6 +537,7 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.Anthropic,
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude v1.0',
@@ -423,49 +545,56 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.Anthropic,
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude 3.5 Sonnet (20241022) v2',
         id: 'anthropic.claude-3-5-sonnet-20241022-v2:0',
         provider: LLMProvider.AWS,
         format: LLMFormat.AWSBedrockClaude,
-        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude 3.5 Sonnet (20240620) v1',
         id: 'anthropic.claude-3-5-sonnet-20240620-v1:0',
         provider: LLMProvider.AWS,
         format: LLMFormat.AWSBedrockClaude,
-        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Claude 3 Opus (20240229) v1',
         id: 'anthropic.claude-3-opus-20240229-v1:0',
         provider: LLMProvider.AWS,
         format: LLMFormat.AWSBedrockClaude,
-        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ClaudeParameters,
     },
     {
         name: 'Ooba',
         id: 'ooba',
         provider: LLMProvider.AsIs,
         format: LLMFormat.Ooba,
-        flags: [],
-        recommended: true
+        flags: [LLMFlags.hasFirstSystemPrompt],
+        recommended: true,
+        parameters: []
     },
     {
         name: 'Mancer',
         id: 'mancer',
         provider: LLMProvider.AsIs,
         format: LLMFormat.OobaLegacy,
-        flags: [],
+        flags: [LLMFlags.hasFirstSystemPrompt],
+        parameters: []
     },
     {
         name: 'OpenRouter',
         id: 'openrouter',
         provider: LLMProvider.AsIs,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
+        flags: [LLMFlags.hasFullSystemPrompt, LLMFlags.hasImageInput],
+        parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k'],
         recommended: true
     },
     {
@@ -474,8 +603,9 @@ export const LLMModels: LLMModel[] = [
         shortName: 'Mistral S',
         provider: LLMProvider.Mistral,
         format: LLMFormat.Mistral,
-        flags: [],
-        recommended: true
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
+        recommended: true,
+        parameters: ['temperature', 'presence_penalty', 'frequency_penalty']
     },
     {
         name: 'Mistral Medium Latest',
@@ -483,8 +613,9 @@ export const LLMModels: LLMModel[] = [
         shortName: 'Mistral M',
         provider: LLMProvider.Mistral,
         format: LLMFormat.Mistral,
-        flags: [],
-        recommended: true
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
+        recommended: true,
+        parameters: ['temperature', 'presence_penalty', 'frequency_penalty']
     },
     {
         name: 'Mistral Large 2411',
@@ -492,7 +623,8 @@ export const LLMModels: LLMModel[] = [
         shortName: 'Mistral L 2411',
         provider: LLMProvider.Mistral,
         format: LLMFormat.Mistral,
-        flags: [],
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
+        parameters: ['temperature', 'presence_penalty', 'frequency_penalty']
     },
     {
         name: 'Mistral Nemo',
@@ -500,7 +632,8 @@ export const LLMModels: LLMModel[] = [
         shortName: 'Mistral Nemo',
         provider: LLMProvider.Mistral,
         format: LLMFormat.Mistral,
-        flags: [],
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
+        parameters: ['temperature', 'presence_penalty', 'frequency_penalty']
     },
     {
         name: 'Mistral Large Latest',
@@ -508,7 +641,8 @@ export const LLMModels: LLMModel[] = [
         shortName: 'Mistral L',
         provider: LLMProvider.Mistral,
         format: LLMFormat.Mistral,
-        flags: [],
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
+        parameters: ['temperature', 'presence_penalty', 'frequency_penalty'],
         recommended: true
     },
     {
@@ -516,31 +650,35 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-1.5-pro-exp-0827',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Exp 1121",
         id: 'gemini-exp-1121',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput],
-        recommended: true
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        recommended: true,
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Pro 1.5",
         id: 'gemini-1.5-pro-latest',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput],
-        recommended: true
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        recommended: true,
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Flash 1.5",
         id: 'gemini-1.5-flash',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput],
-        recommended: true
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        recommended: true,
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Exp 1121",
@@ -548,7 +686,8 @@ export const LLMModels: LLMModel[] = [
         internalID: 'gemini-exp-1121',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.VertexAIGemini,
-        flags: [LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Pro 1.5",
@@ -556,7 +695,8 @@ export const LLMModels: LLMModel[] = [
         internalID: 'gemini-1.5-pro-latest',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.VertexAIGemini,
-        flags: [LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Flash 1.5",
@@ -564,64 +704,79 @@ export const LLMModels: LLMModel[] = [
         internalID: 'gemini-1.5-flash',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.VertexAIGemini,
-        flags: [LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Exp 1114",
         id: 'gemini-exp-1114',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Pro 1.5 002",
         id: 'gemini-1.5-pro-002',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Flash 1.5 002",
         id: 'gemini-1.5-flash-002',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Pro",
         id: 'gemini-pro',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Pro Vision",
         id: 'gemini-pro-vision',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Ultra",
         id: 'gemini-ultra',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: "Gemini Ultra Vision",
         id: 'gemini-ultra-vision',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        parameters: ['temperature', 'top_k', 'top_p']
     },
     {
         name: 'Kobold',
         id: 'kobold',
         provider: LLMProvider.AsIs,
         format: LLMFormat.Kobold,
-        flags: [],
-        recommended: true
+        flags: [LLMFlags.hasFirstSystemPrompt],
+        recommended: true,
+        parameters: [
+            'temperature',
+            'top_p',
+            'repetition_penalty',
+            'top_k',
+            'top_a'
+        ]
     },
     {
         name: "SuperTrin",
@@ -629,6 +784,7 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.NovelList,
         format: LLMFormat.NovelList,
         flags: [],
+        parameters: []
     },
     {
         name: "Damsel",
@@ -636,6 +792,7 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.NovelList,
         format: LLMFormat.NovelList,
         flags: [],
+        parameters: []
     },
     {
         name: "Command R",
@@ -643,8 +800,11 @@ export const LLMModels: LLMModel[] = [
         internalID: 'command-r',
         provider: LLMProvider.Cohere,
         format: LLMFormat.Cohere,
-        flags: [],
-        recommended: true
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
+        recommended: true,
+        parameters: [
+            'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
+        ]
     },
     {
         name: "Command R Plus",
@@ -652,8 +812,11 @@ export const LLMModels: LLMModel[] = [
         internalID: 'command-r-plus',
         provider: LLMProvider.Cohere,
         format: LLMFormat.Cohere,
-        flags: [],
-        recommended: true
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
+        recommended: true,
+        parameters: [
+            'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
+        ]
     },
     {
         name: "Command R 08-2024",
@@ -661,7 +824,10 @@ export const LLMModels: LLMModel[] = [
         internalID: 'command-r-08-2024',
         provider: LLMProvider.Cohere,
         format: LLMFormat.Cohere,
-        flags: [],
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
+        parameters: [
+            'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
+        ]
     },
     {
         name: "Command R 03-2024",
@@ -669,7 +835,10 @@ export const LLMModels: LLMModel[] = [
         internalID: 'command-r-03-2024',
         provider: LLMProvider.Cohere,
         format: LLMFormat.Cohere,
-        flags: [],
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
+        parameters: [
+            'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
+        ]
     },
     {
         name: "Command R Plus 08-2024",
@@ -677,7 +846,10 @@ export const LLMModels: LLMModel[] = [
         internalID: 'command-r-plus-08-2024',
         provider: LLMProvider.Cohere,
         format: LLMFormat.Cohere,
-        flags: [],
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
+        parameters: [
+            'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
+        ]
     },
     {
         name: "Command R Plus 04-2024",
@@ -685,67 +857,82 @@ export const LLMModels: LLMModel[] = [
         internalID: 'command-r-plus-04-2024',
         provider: LLMProvider.Cohere,
         format: LLMFormat.Cohere,
-        flags: [],
+        flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
+        parameters: [
+            'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
+        ]
     },
     {
         name: "Clio",
         id: 'novelai',
         provider: LLMProvider.NovelAI,
         format: LLMFormat.NovelAI,
-        flags: [],
-        recommended: true
+        flags: [LLMFlags.hasFullSystemPrompt],
+        recommended: true,
+        parameters: [
+            'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
+        ]
     },
     {
         name: "Kayra",
         id: 'novelai_kayra',
         provider: LLMProvider.NovelAI,
         format: LLMFormat.NovelAI,
-        flags: [],
-        recommended: true
+        flags: [LLMFlags.hasFullSystemPrompt],
+        recommended: true,
+        parameters: [
+            'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
+        ]
     },
     {
         id: 'ollama-hosted',
         name: 'Ollama',
         provider: LLMProvider.AsIs,
         format: LLMFormat.Ollama,
-        flags: [],
+        flags: [LLMFlags.hasFullSystemPrompt],
+        parameters: OpenAIParameters
     },
     {
         id: 'hf:::Xenova/opt-350m',
         name: 'opt-350m',
         provider: LLMProvider.WebLLM,
         format: LLMFormat.WebLLM,
-        flags: [],
+        flags: [LLMFlags.hasFullSystemPrompt],
+        parameters: OpenAIParameters
     },
     {
         id: 'hf:::Xenova/tiny-random-mistral',
         name: 'tiny-random-mistral',
         provider: LLMProvider.WebLLM,
         format: LLMFormat.WebLLM,
-        flags: [],
+        flags: [LLMFlags.hasFullSystemPrompt],
+        parameters: OpenAIParameters
     },
     {
         id: 'hf:::Xenova/gpt2-large-conversational',
         name: 'gpt2-large-conversational',
         provider: LLMProvider.WebLLM,
         format: LLMFormat.WebLLM,
-        flags: [],
+        flags: [LLMFlags.hasFullSystemPrompt],
+        parameters: OpenAIParameters
     },
     {
         id: 'custom',
         name: "Plugin",
         provider: LLMProvider.AsIs,
         format: LLMFormat.Plugin,
-        flags: [],
-        recommended: true
+        flags: [LLMFlags.hasFullSystemPrompt],
+        recommended: true,
+        parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k']
    },
     {
         id: 'reverse_proxy',
         name: "Custom API",
         provider: LLMProvider.AsIs,
         format: LLMFormat.OpenAICompatible,
-        flags: [],
-        recommended: true
+        flags: [LLMFlags.hasFullSystemPrompt],
+        recommended: true,
+        parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k']
     }
 ]
@@ -757,12 +944,13 @@ for(let model of LLMModels){
 
 export function getModelInfo(id: string): LLMModel{
-    const found = LLMModels.find(model => model.id === id) ?? {
+    const found:LLMModel = LLMModels.find(model => model.id === id) ?? {
         id,
         name: id,
         provider: LLMProvider.AsIs,
         format: LLMFormat.OpenAICompatible,
         flags: [],
+        parameters: OpenAIParameters
     }
 
     if(found) return found
@@ -778,6 +966,7 @@ export function getModelInfo(id: string): LLMModel{
             provider: LLMProvider.WebLLM,
             format: LLMFormat.WebLLM,
             flags: [],
+            parameters: OpenAIParameters
         }
     }
     if(id.startsWith('horde:::')){
@@ -791,6 +980,7 @@ export function getModelInfo(id: string): LLMModel{
             provider: LLMProvider.Horde,
             format: LLMFormat.Horde,
             flags: [],
+            parameters: OpenAIParameters
         }
     }
@@ -803,6 +993,7 @@ export function getModelInfo(id: string): LLMModel{
             provider: LLMProvider.AsIs,
             format: LLMFormat.OpenAICompatible,
             flags: [],
+            parameters: OpenAIParameters
         }
     }
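
With `flags` and `parameters` now carried on every model entry, callers can query a model's capabilities through `getModelInfo` before shaping a request. A minimal sketch of that lookup; the import path and id string here are assumptions for illustration:

import { getModelInfo, LLMFlags } from "../model/modellist"

const info = getModelInfo("mistral-large-latest") // hypothetical model id
if (!info.flags.includes(LLMFlags.hasFullSystemPrompt)) {
    // System messages must be rewritten before sending (see reformater below).
}
if (info.parameters.includes("temperature")) {
    // This model accepts a temperature sampler parameter.
}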

View File

@@ -20,7 +20,7 @@ import {Ollama} from 'ollama/dist/browser.mjs'
 import { applyChatTemplate } from "./templates/chatTemplate";
 import { OobaParams } from "./prompt";
 import { extractJSON, getOpenAIJSONSchema } from "./templates/jsonSchema";
-import { getModelInfo, LLMFormat, type LLMModel } from "../model/modellist";
+import { getModelInfo, LLMFlags, LLMFormat, type LLMModel } from "../model/modellist";
@@ -88,7 +88,7 @@ interface OaiFunctions {
 }
 
-type Parameter = 'temperature'|'top_k'|'repetition_penalty'|'min_p'|'top_a'|'top_p'|'frequency_penalty'|'presence_penalty'
+export type Parameter = 'temperature'|'top_k'|'repetition_penalty'|'min_p'|'top_a'|'top_p'|'frequency_penalty'|'presence_penalty'
 type ParameterMap = {
     [key in Parameter]?: string;
 };
@@ -182,6 +182,63 @@ export interface OpenAIChatExtra {
     multimodals?:MultiModal[]
 }
 
+function reformater(formated:OpenAIChat[],modelInfo:LLMModel){
+    const db = getDatabase()
+    let systemPrompt:OpenAIChat|null = null
+
+    if(!modelInfo.flags.includes(LLMFlags.hasFullSystemPrompt)){
+        if(modelInfo.flags.includes(LLMFlags.hasFirstSystemPrompt)){
+            if(formated[0].role === 'system'){
+                systemPrompt = formated[0]
+                formated = formated.slice(1)
+            }
+        }
+
+        for(let i=0;i<formated.length;i++){
+            if(formated[i].role === 'system'){
+                formated[i].content = db.systemContentReplacement.replace('{{slot}}', formated[i].content)
+                formated[i].role = db.systemRoleReplacement
+            }
+        }
+    }
+
+    if(modelInfo.flags.includes(LLMFlags.requiresAlternateRole)){
+        let newFormated:OpenAIChat[] = []
+        for(let i=0;i<formated.length;i++){
+            const m = formated[i]
+
+            if(newFormated.length === 0){
+                newFormated.push(m)
+                continue
+            }
+
+            if(newFormated[newFormated.length-1].role === m.role){
+                newFormated[newFormated.length-1].content += '\n' + m.content
+                continue
+            }
+            else{
+                newFormated.push(m)
+            }
+        }
+        formated = newFormated
+    }
+
+    if(modelInfo.flags.includes(LLMFlags.mustStartWithUserInput)){
+        if(formated.length === 0 || formated[0].role !== 'user'){
+            formated.unshift({
+                role: 'user',
+                content: ' '
+            })
+        }
+    }
+
+    if(systemPrompt){
+        formated.unshift(systemPrompt)
+    }
+
+    return formated
+}
+
 export async function requestChatDataMain(arg:requestDataArgument, model:'model'|'submodel', abortSignal:AbortSignal=null):Promise<requestDataResponse> {
     const db = getDatabase()
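
A worked example of what the new `reformater` pass does for a model lacking `hasFullSystemPrompt` but flagged `requiresAlternateRole` and `mustStartWithUserInput`, using the default `systemContentReplacement` of `system: {{slot}}` and `systemRoleReplacement` of `user` (the input messages are hypothetical):

const input = [
    { role: "system", content: "Stay in character." },
    { role: "assistant", content: "Hello!" },
    { role: "assistant", content: "How can I help?" },
]

// After reformater(input, modelInfo):
// 1. The system message is rewritten to { role: 'user', content: 'system: Stay in character.' }.
// 2. The two adjacent assistant messages are merged with a newline.
// 3. The list already starts with a user message, so no blank user turn is prepended.
const output = [
    { role: "user", content: "system: Stay in character." },
    { role: "assistant", content: "Hello!\nHow can I help?" },
]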
@@ -206,6 +263,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
     const format = targ.modelInfo.format
 
+    targ.formated = reformater(targ.formated, targ.modelInfo)
+
     switch(format){
         case LLMFormat.OpenAICompatible:
         case LLMFormat.Mistral:
@@ -437,14 +496,13 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
} }
const res = await globalFetch(arg.customURL ?? "https://api.mistral.ai/v1/chat/completions", { const res = await globalFetch(arg.customURL ?? "https://api.mistral.ai/v1/chat/completions", {
body: { body: applyParameters({
model: requestModel, model: requestModel,
messages: reformatedChat, messages: reformatedChat,
temperature: arg.temperature,
max_tokens: arg.maxTokens,
top_p: db.top_p, top_p: db.top_p,
safe_prompt: false safe_prompt: false,
}, max_tokens: arg.maxTokens,
}, ['temperature', 'presence_penalty', 'frequency_penalty'] ),
headers: { headers: {
"Authorization": "Bearer " + db.mistralKey, "Authorization": "Bearer " + db.mistralKey,
}, },
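
`applyParameters` is called here but not defined in this diff. A plausible sketch of its contract, assuming it copies each allowed sampler setting from the database onto the request body; the lookup and names are assumptions, not the repository's actual implementation:

function applyParameters(body: Record<string, any>, params: Parameter[]): Record<string, any> {
    const db: any = getDatabase() // assumed source of the user's sampler settings
    for (const p of params) {
        if (db[p] !== undefined) {
            body[p] = db[p] // e.g. copies db.temperature to body.temperature
        }
    }
    return body
}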

View File

@@ -446,6 +446,8 @@ export function setDatabase(data:Database){
     data.groupOtherBotRole ??= 'user'
     data.customGUI ??= ''
     data.customAPIFormat ??= LLMFormat.OpenAICompatible
+    data.systemContentReplacement ??= `system: {{slot}}`
+    data.systemRoleReplacement ??= 'user'
     changeLanguage(data.language)
     setDatabaseLite(data)
 }
@@ -821,6 +823,8 @@ export interface Database{
     logShare:boolean
     OAIPrediction:string
     customAPIFormat:LLMFormat
+    systemContentReplacement:string
+    systemRoleReplacement:'user'|'assistant'
 }
 
 export interface customscript{