Enhance LLM model flags to include requiresAlternateRole and update request processing to handle multimodal and thoughts data
@@ -778,7 +778,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-1.5-pro-exp-0827',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud
     },
@@ -787,7 +787,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-exp-1121',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasStreaming],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud,
     },
@@ -796,7 +796,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-exp-1206',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasStreaming],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud
     },
@@ -805,7 +805,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-2.0-flash-exp',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasAudioInput, LLMFlags.hasVideoInput, LLMFlags.hasStreaming],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasAudioInput, LLMFlags.hasVideoInput, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'],
         tokenizer: LLMTokenizer.GoogleCloud,
         recommended: true
@@ -815,7 +815,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-2.0-flash-thinking-exp-1219',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasAudioInput, LLMFlags.hasVideoInput, LLMFlags.hasStreaming, LLMFlags.geminiThinking],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.poolSupported, LLMFlags.hasAudioInput, LLMFlags.hasVideoInput, LLMFlags.hasStreaming, LLMFlags.geminiThinking, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p', 'presence_penalty', 'frequency_penalty'],
         tokenizer: LLMTokenizer.GoogleCloud,
         recommended: true
@@ -825,7 +825,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-1.5-pro-latest',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming, LLMFlags.hasStreaming],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         recommended: true,
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud
@@ -835,7 +835,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-1.5-flash',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         recommended: true,
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud
@@ -846,7 +846,7 @@ export const LLMModels: LLMModel[] = [
         internalID: 'gemini-exp-1121',
         provider: LLMProvider.VertexAI,
         format: LLMFormat.VertexAIGemini,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.Gemma
     },
@@ -856,7 +856,7 @@ export const LLMModels: LLMModel[] = [
         internalID: 'gemini-1.5-pro-latest',
         provider: LLMProvider.VertexAI,
         format: LLMFormat.VertexAIGemini,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.Gemma
     },
@@ -866,7 +866,7 @@ export const LLMModels: LLMModel[] = [
         internalID: 'gemini-1.5-flash',
         provider: LLMProvider.VertexAI,
         format: LLMFormat.VertexAIGemini,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.Gemma
     },
@@ -875,7 +875,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-exp-1114',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud
     },
@@ -884,7 +884,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-1.5-pro-002',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud
     },
@@ -893,7 +893,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-1.5-flash-002',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud
     },
@@ -902,7 +902,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-pro',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud
     },
@@ -911,7 +911,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-pro-vision',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud
     },
@@ -920,7 +920,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-ultra',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud
     },
@@ -929,7 +929,7 @@ export const LLMModels: LLMModel[] = [
         id: 'gemini-ultra-vision',
         provider: LLMProvider.GoogleCloud,
         format: LLMFormat.GoogleCloud,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming],
+        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.hasStreaming, LLMFlags.requiresAlternateRole],
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud
     },
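Note: the hunks above only add LLMFlags.requiresAlternateRole to each Gemini entry; this commit does not show where the flag is consumed. As a minimal sketch of what such a flag typically implies for Gemini-style endpoints (which reject two consecutive messages with the same role) — the helper name and message shape below are illustrative assumptions, not code from this commit:

// Illustrative only, not part of this commit: a model flagged with
// requiresAlternateRole cannot receive two consecutive messages with the
// same role, so a request builder would merge same-role neighbors first.
interface ChatMessage {
    role: 'user' | 'assistant' | 'system'
    content: string
}

function enforceAlternateRoles(messages: ChatMessage[]): ChatMessage[] {
    const out: ChatMessage[] = []
    for (const m of messages) {
        const last = out[out.length - 1]
        if (last && last.role === m.role) {
            // Same role twice in a row: merge rather than violate alternation
            last.content += '\n' + m.content
        } else {
            out.push({ ...m })
        }
    }
    return out
}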
@@ -282,7 +282,23 @@ function reformater(formated:OpenAIChat[],modelInfo:LLMModel){
         }

         if(newFormated[newFormated.length-1].role === m.role){
+
             newFormated[newFormated.length-1].content += '\n' + m.content
+
+            if(m.multimodals){
+                if(!newFormated[newFormated.length-1].multimodals){
+                    newFormated[newFormated.length-1].multimodals = []
+                }
+                newFormated[newFormated.length-1].multimodals.push(...m.multimodals)
+            }
+
+            if(m.thoughts){
+                if(!newFormated[newFormated.length-1].thoughts){
+                    newFormated[newFormated.length-1].thoughts = []
+                }
+                newFormated[newFormated.length-1].thoughts.push(...m.thoughts)
+            }
+
             continue
         }
         else{
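A small worked example of the merge behavior added above. The field shapes on OpenAIChat here are assumptions for illustration; the real type in the codebase may carry more fields:

// Assumed minimal shape of OpenAIChat — illustrative only.
type Chat = {
    role: string
    content: string
    multimodals?: { type: string, data: string }[]
    thoughts?: string[]
}

// Two consecutive 'user' messages: the first carries an attachment,
// the second carries a thought.
const a: Chat = { role: 'user', content: 'Hello', multimodals: [{ type: 'image', data: '...' }] }
const b: Chat = { role: 'user', content: 'Describe the image.', thoughts: ['user sent a picture'] }

// After the merge step above, b folds into a instead of producing a
// second same-role message:
// {
//   role: 'user',
//   content: 'Hello\nDescribe the image.',
//   multimodals: [{ type: 'image', data: '...' }],
//   thoughts: ['user sent a picture']
// }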