@@ -52,6 +52,22 @@ export enum LLMFormat{
+    AWSBedrockClaude
 }
 
+export enum LLMTokenizer{
+    Unknown,
+    tiktokenCl100kBase,
+    tiktokenO200Base,
+    Mistral,
+    Llama,
+    NovelAI,
+    Claude,
+    NovelList,
+    Llama3,
+    Gemma,
+    GoogleCloud,
+    Cohere,
+    Local
+}
 
 export interface LLMModel{
     id: string
     name: string
@@ -61,7 +77,8 @@ export interface LLMModel{
     provider: LLMProvider
     flags: LLMFlags[]
     format: LLMFormat
-    parameters: Parameter[]
+    parameters: Parameter[],
+    tokenizer: LLMTokenizer
     recommended?: boolean
 }
@@ -92,6 +109,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.OpenAICompatible,
         flags: [LLMFlags.hasFullSystemPrompt, LLMFlags.hasStreaming],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'instructgpt35',
@@ -101,6 +119,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.OpenAILegacyInstruct,
         flags: [LLMFlags.hasFullSystemPrompt, LLMFlags.hasStreaming],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt4_turbo',
@@ -110,6 +129,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.OpenAICompatible,
         flags: [LLMFlags.hasFullSystemPrompt, LLMFlags.hasStreaming],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt4o',
@@ -124,6 +144,7 @@ export const LLMModels: LLMModel[] = [
         ],
         recommended: true,
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenO200Base
     },
     {
         id: 'gpt4om',
@@ -138,6 +159,7 @@ export const LLMModels: LLMModel[] = [
         ],
         recommended: true,
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenO200Base
     },
     {
         id: 'gpt4',
@@ -150,6 +172,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt4_32k',
@@ -162,6 +185,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt35_16k',
@@ -174,6 +198,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt4_0314',
@@ -186,6 +211,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt4_0613',
@@ -198,6 +224,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt4_32k_0613',
@@ -210,6 +237,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt4_1106',
@@ -222,6 +250,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt35_0125',
@@ -234,6 +263,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt35_1106',
@@ -246,6 +276,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt35_0613',
@@ -258,6 +289,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt35_16k_0613',
@@ -270,6 +302,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt35_0301',
@@ -282,6 +315,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt4_0125',
@@ -294,6 +328,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gptvi4_1106',
@@ -306,6 +341,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt4_turbo_20240409',
@@ -318,6 +354,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenCl100kBase
     },
     {
         id: 'gpt4o-2024-05-13',
@@ -331,6 +368,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenO200Base
     },
     {
         id: 'gpt4o-2024-08-06',
@@ -344,6 +382,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenO200Base
     },
     {
         id: 'gpt4o-2024-11-20',
@@ -357,6 +396,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenO200Base
     },
     {
         id: 'gpt4o-chatgpt',
@@ -370,6 +410,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenO200Base
     },
     {
         id: 'gpt4o1-preview',
@@ -382,6 +423,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenO200Base
     },
     {
         id: 'gpt4o1-mini',
@@ -394,6 +436,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.tiktokenO200Base
     },
     {
         name: "Claude 3.5 Sonnet",
@@ -409,6 +452,7 @@ export const LLMModels: LLMModel[] = [
         ],
         recommended: true,
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: "Claude 3.5 Haiku",
@@ -424,6 +468,7 @@ export const LLMModels: LLMModel[] = [
         ],
         recommended: true,
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude 3.5 Sonnet (20241022)',
@@ -438,6 +483,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: "Claude 3.5 Haiku (20241022)",
@@ -452,6 +498,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude 3 Haiku (20240307)',
@@ -466,6 +513,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude 3.5 Sonnet (20240620)',
@@ -480,6 +528,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude 3 Opus (20240229)',
@@ -494,6 +543,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude 3 Sonnet (20240229)',
@@ -508,6 +558,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming
         ],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude 2.1',
@@ -518,6 +569,7 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasPrefill,
         ],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude 2',
@@ -526,6 +578,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude 2 100k',
@@ -534,6 +587,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude v1',
@@ -542,6 +596,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude v1 100k',
@@ -550,6 +605,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude Instant v1',
@@ -558,6 +614,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude Instant v1 100k',
@@ -566,6 +623,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude v1.2',
@@ -574,6 +632,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude v1.0',
@@ -582,6 +641,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.AnthropicLegacy,
         flags: [LLMFlags.hasPrefill],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude 3.5 Sonnet (20241022) v2',
@@ -590,6 +650,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.AWSBedrockClaude,
         flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude 3.5 Sonnet (20240620) v1',
@@ -598,6 +659,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.AWSBedrockClaude,
         flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Claude 3 Opus (20240229) v1',
@@ -606,6 +668,7 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.AWSBedrockClaude,
         flags: [LLMFlags.hasPrefill, LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
         parameters: ClaudeParameters,
+        tokenizer: LLMTokenizer.Claude
     },
     {
         name: 'Ooba',
@@ -614,7 +677,8 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.Ooba,
         flags: [LLMFlags.hasFirstSystemPrompt],
         recommended: true,
-        parameters: []
+        parameters: [],
+        tokenizer: LLMTokenizer.Llama
     },
     {
         name: 'Mancer',
@@ -622,7 +686,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.AsIs,
         format: LLMFormat.OobaLegacy,
         flags: [LLMFlags.hasFirstSystemPrompt],
-        parameters: []
+        parameters: [],
+        tokenizer: LLMTokenizer.Llama
     },
     {
         name: 'OpenRouter',
@@ -631,7 +696,8 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.OpenAICompatible,
         flags: [LLMFlags.hasFullSystemPrompt, LLMFlags.hasImageInput, LLMFlags.hasStreaming],
         parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k'],
-        recommended: true
+        recommended: true,
+        tokenizer: LLMTokenizer.Unknown
     },
     {
         name: 'Mistral Small Latest',
@@ -641,7 +707,8 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.Mistral,
         flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
         recommended: true,
-        parameters: ['temperature', 'presence_penalty', 'frequency_penalty']
+        parameters: ['temperature', 'presence_penalty', 'frequency_penalty'],
+        tokenizer: LLMTokenizer.Mistral
     },
     {
         name: 'Mistral Medium Latest',
@@ -651,7 +718,8 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.Mistral,
         flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
         recommended: true,
-        parameters: ['temperature', 'presence_penalty', 'frequency_penalty']
+        parameters: ['temperature', 'presence_penalty', 'frequency_penalty'],
+        tokenizer: LLMTokenizer.Mistral
     },
     {
         name: 'Mistral Large 2411',
@@ -660,7 +728,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.Mistral,
         format: LLMFormat.Mistral,
         flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
-        parameters: ['temperature', 'presence_penalty', 'frequency_penalty']
+        parameters: ['temperature', 'presence_penalty', 'frequency_penalty'],
+        tokenizer: LLMTokenizer.Mistral
     },
     {
         name: 'Mistral Nemo',
@@ -669,7 +738,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.Mistral,
         format: LLMFormat.Mistral,
         flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
-        parameters: ['temperature', 'presence_penalty', 'frequency_penalty']
+        parameters: ['temperature', 'presence_penalty', 'frequency_penalty'],
+        tokenizer: LLMTokenizer.Mistral
     },
     {
         name: 'Mistral Large Latest',
@@ -679,7 +749,8 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.Mistral,
         flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.mustStartWithUserInput, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'presence_penalty', 'frequency_penalty'],
-        recommended: true
+        recommended: true,
+        tokenizer: LLMTokenizer.Mistral
     },
     {
         name: "Gemini Pro 1.5 0827",
@@ -687,7 +758,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.GoogleCloud
     },
     {
         name: "Gemini Exp 1121",
@@ -695,7 +767,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.GoogleCloud
     },
     {
         name: "Gemini Exp 1206",
@@ -704,7 +777,8 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.GoogleCloud,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
         recommended: true,
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.GoogleCloud
     },
     {
         name: "Gemini Pro 1.5",
@@ -713,7 +787,8 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.GoogleCloud,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
         recommended: true,
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.GoogleCloud
     },
     {
         name: "Gemini Flash 1.5",
@@ -722,7 +797,8 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.GoogleCloud,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
         recommended: true,
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.GoogleCloud
     },
     {
         name: "Gemini Exp 1121",
@@ -731,7 +807,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.VertexAI,
         format: LLMFormat.VertexAIGemini,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.Gemma
     },
     {
         name: "Gemini Pro 1.5",
@@ -740,7 +817,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.VertexAI,
         format: LLMFormat.VertexAIGemini,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.Gemma
     },
     {
         name: "Gemini Flash 1.5",
@@ -749,7 +827,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.VertexAI,
         format: LLMFormat.VertexAIGemini,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.Gemma
     },
     {
         name: "Gemini Exp 1114",
@@ -757,7 +836,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.GoogleCloud
     },
     {
         name: "Gemini Pro 1.5 002",
@@ -765,7 +845,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.GoogleCloud
     },
     {
         name: "Gemini Flash 1.5 002",
@@ -773,7 +854,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.GoogleCloud
     },
     {
         name: "Gemini Pro",
@@ -781,7 +863,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.GoogleCloud
     },
     {
         name: "Gemini Pro Vision",
@@ -789,7 +872,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.GoogleCloud
     },
     {
         name: "Gemini Ultra",
@@ -797,7 +881,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.GoogleCloud
     },
     {
         name: "Gemini Ultra Vision",
@@ -805,7 +890,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
         flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
-        parameters: ['temperature', 'top_k', 'top_p']
+        parameters: ['temperature', 'top_k', 'top_p'],
+        tokenizer: LLMTokenizer.GoogleCloud
     },
     {
         name: 'Kobold',
@@ -820,7 +906,8 @@ export const LLMModels: LLMModel[] = [
             'repetition_penalty',
             'top_k',
             'top_a'
-        ]
+        ],
+        tokenizer: LLMTokenizer.Unknown
     },
     {
         name: "SuperTrin",
@@ -828,7 +915,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.NovelList,
         format: LLMFormat.NovelList,
         flags: [],
-        parameters: []
+        parameters: [],
+        tokenizer: LLMTokenizer.NovelList
     },
     {
         name: "Damsel",
@@ -836,7 +924,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.NovelList,
         format: LLMFormat.NovelList,
         flags: [],
-        parameters: []
+        parameters: [],
+        tokenizer: LLMTokenizer.NovelList
     },
     {
         name: "Command R",
@@ -848,7 +937,8 @@ export const LLMModels: LLMModel[] = [
         recommended: true,
         parameters: [
             'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
-        ]
+        ],
+        tokenizer: LLMTokenizer.Cohere
     },
     {
         name: "Command R Plus",
@@ -860,7 +950,8 @@ export const LLMModels: LLMModel[] = [
         recommended: true,
         parameters: [
             'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
-        ]
+        ],
+        tokenizer: LLMTokenizer.Cohere
     },
     {
         name: "Command R 08-2024",
@@ -871,7 +962,8 @@ export const LLMModels: LLMModel[] = [
         flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
         parameters: [
             'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
-        ]
+        ],
+        tokenizer: LLMTokenizer.Cohere
     },
     {
         name: "Command R 03-2024",
@@ -882,7 +974,8 @@ export const LLMModels: LLMModel[] = [
         flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
         parameters: [
             'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
-        ]
+        ],
+        tokenizer: LLMTokenizer.Cohere
     },
     {
         name: "Command R Plus 08-2024",
@@ -893,7 +986,8 @@ export const LLMModels: LLMModel[] = [
         flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
         parameters: [
             'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
-        ]
+        ],
+        tokenizer: LLMTokenizer.Cohere
     },
     {
         name: "Command R Plus 04-2024",
@@ -904,7 +998,8 @@ export const LLMModels: LLMModel[] = [
         flags: [LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole, LLMFlags.mustStartWithUserInput],
         parameters: [
             'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
-        ]
+        ],
+        tokenizer: LLMTokenizer.Cohere
     },
     {
         name: "Clio",
@@ -915,7 +1010,8 @@ export const LLMModels: LLMModel[] = [
         recommended: true,
         parameters: [
             'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
-        ]
+        ],
+        tokenizer: LLMTokenizer.NovelAI
     },
     {
         name: "Kayra",
@@ -926,7 +1022,8 @@ export const LLMModels: LLMModel[] = [
         recommended: true,
         parameters: [
             'temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'
-        ]
+        ],
+        tokenizer: LLMTokenizer.NovelAI
     },
     {
         id: 'ollama-hosted',
@@ -934,7 +1031,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.AsIs,
         format: LLMFormat.Ollama,
         flags: [LLMFlags.hasFullSystemPrompt],
-        parameters: OpenAIParameters
+        parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.Unknown
     },
     {
         id: 'hf:::Xenova/opt-350m',
@@ -942,7 +1040,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.WebLLM,
         format: LLMFormat.WebLLM,
         flags: [LLMFlags.hasFullSystemPrompt],
-        parameters: OpenAIParameters
+        parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.Local
     },
     {
         id: 'hf:::Xenova/tiny-random-mistral',
@@ -950,7 +1049,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.WebLLM,
         format: LLMFormat.WebLLM,
         flags: [LLMFlags.hasFullSystemPrompt],
-        parameters: OpenAIParameters
+        parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.Local
     },
     {
         id: 'hf:::Xenova/gpt2-large-conversational',
@@ -958,7 +1058,8 @@ export const LLMModels: LLMModel[] = [
         provider: LLMProvider.WebLLM,
         format: LLMFormat.WebLLM,
         flags: [LLMFlags.hasFullSystemPrompt],
-        parameters: OpenAIParameters
+        parameters: OpenAIParameters,
+        tokenizer: LLMTokenizer.Local
     },
     {
         id: 'custom',
@@ -967,7 +1068,8 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.Plugin,
         flags: [LLMFlags.hasFullSystemPrompt],
         recommended: true,
-        parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k']
+        parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k'],
+        tokenizer: LLMTokenizer.Unknown
     },
     {
         id: 'reverse_proxy',
@@ -976,7 +1078,8 @@ export const LLMModels: LLMModel[] = [
         format: LLMFormat.OpenAICompatible,
         flags: [LLMFlags.hasFullSystemPrompt, LLMFlags.hasStreaming],
         recommended: true,
-        parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k']
+        parameters: ['temperature', 'top_p', 'frequency_penalty', 'presence_penalty', 'repetition_penalty', 'min_p', 'top_a', 'top_k'],
+        tokenizer: LLMTokenizer.Unknown
     }
 ]
@@ -1010,7 +1113,8 @@ export function getModelInfo(id: string): LLMModel{
             provider: LLMProvider.WebLLM,
             format: LLMFormat.WebLLM,
             flags: [],
-            parameters: OpenAIParameters
+            parameters: OpenAIParameters,
+            tokenizer: LLMTokenizer.Local
         }
     }
     if(id.startsWith('horde:::')){
@@ -1024,7 +1128,8 @@ export function getModelInfo(id: string): LLMModel{
             provider: LLMProvider.Horde,
             format: LLMFormat.Horde,
             flags: [],
-            parameters: OpenAIParameters
+            parameters: OpenAIParameters,
+            tokenizer: LLMTokenizer.Unknown
         }
     }
@@ -1037,7 +1142,8 @@ export function getModelInfo(id: string): LLMModel{
             provider: LLMProvider.AsIs,
             format: LLMFormat.OpenAICompatible,
             flags: [],
-            parameters: OpenAIParameters
+            parameters: OpenAIParameters,
+            tokenizer: LLMTokenizer.Unknown
         }
     }
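
Beyond the patch itself: the new `tokenizer` field lets callers pick a token-counting strategy per model instead of hardcoding one encoder. A minimal consumption sketch follows, assuming the file is importable as `./modellist`; `encodingNameFor` and the returned string names are illustrative assumptions, not part of this diff.

// Sketch only: map each LLMTokenizer variant to a tokenizer backend name.
// The switch mirrors the enum added above; the string names are assumptions.
import { getModelInfo, LLMTokenizer } from './modellist'

function encodingNameFor(tokenizer: LLMTokenizer): string {
    switch (tokenizer) {
        case LLMTokenizer.tiktokenCl100kBase: return 'cl100k_base' // GPT-3.5 / GPT-4 era
        case LLMTokenizer.tiktokenO200Base: return 'o200k_base'    // GPT-4o family
        case LLMTokenizer.Claude: return 'claude'
        case LLMTokenizer.Mistral: return 'mistral'
        default: return 'cl100k_base' // conservative fallback for Unknown/Local
    }
}

// Usage: resolve the encoder for a model id before estimating context usage.
console.log(encodingNameFor(getModelInfo('gpt4o').tokenizer)) // 'o200k_base'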