Add new Vertex providers

Kwaroran
2025-03-23 16:47:52 +09:00
parent 1640157801
commit 6bd0794a4d
2 changed files with 12 additions and 30 deletions

View File

@@ -1050,36 +1050,6 @@ export const LLMModels: LLMModel[] = [
         parameters: ['temperature', 'top_k', 'top_p'],
         tokenizer: LLMTokenizer.GoogleCloud
     },
-    {
-        name: "Gemini Exp 1121",
-        id: 'gemini-exp-1121-vertex',
-        internalID: 'gemini-exp-1121',
-        provider: LLMProvider.VertexAI,
-        format: LLMFormat.VertexAIGemini,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole],
-        parameters: ['temperature', 'top_k', 'top_p'],
-        tokenizer: LLMTokenizer.Gemma
-    },
-    {
-        name: "Gemini Pro 1.5",
-        id: 'gemini-1.5-pro-latest-vertex',
-        internalID: 'gemini-1.5-pro-latest',
-        provider: LLMProvider.VertexAI,
-        format: LLMFormat.VertexAIGemini,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole],
-        parameters: ['temperature', 'top_k', 'top_p'],
-        tokenizer: LLMTokenizer.Gemma
-    },
-    {
-        name: "Gemini Flash 1.5",
-        id: 'gemini-1.5-flash-vertex',
-        internalID: 'gemini-1.5-flash',
-        provider: LLMProvider.VertexAI,
-        format: LLMFormat.VertexAIGemini,
-        flags: [LLMFlags.hasImageInput, LLMFlags.hasFirstSystemPrompt, LLMFlags.requiresAlternateRole],
-        parameters: ['temperature', 'top_k', 'top_p'],
-        tokenizer: LLMTokenizer.Gemma
-    },
     {
         name: "Gemini Exp 1114",
         id: 'gemini-exp-1114',
@@ -1405,6 +1375,17 @@ for(let i=0; i<LLMModels.length; i++){
         })
     }
+    if(LLMModels[i].provider === LLMProvider.GoogleCloud){
+        LLMModels.push({
+            ...LLMModels[i],
+            id: `${LLMModels[i].id}-vertex`,
+            name: `${LLMModels[i].name} Vertex`,
+            fullName: `${LLMModels[i].fullName ?? LLMModels[i].name} Vertex`,
+            flags: [...LLMModels[i].flags],
+            recommended: false,
+            provider: LLMProvider.VertexAI
+        })
+    }
 }
 export function getModelInfo(id: string): LLMModel{
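
The added loop body replaces the hand-written -vertex entries removed above: every model registered with LLMProvider.GoogleCloud now automatically gains a Vertex AI counterpart with a "-vertex" id suffix and a " Vertex" name suffix. A minimal, self-contained TypeScript sketch of that derivation follows; the field names mirror the diff, while the enum values, the simplified LLMModel shape, and the helper name are assumptions for illustration only.

    // Hypothetical sketch of the Vertex variant generation, not the project's actual module.
    enum LLMProvider { GoogleCloud, VertexAI }

    interface LLMModel {
        id: string
        name: string
        fullName?: string
        flags: string[]
        recommended?: boolean
        provider: LLMProvider
    }

    function withVertexVariants(models: LLMModel[]): LLMModel[] {
        const out = [...models]
        for (const m of models) {
            if (m.provider === LLMProvider.GoogleCloud) {
                out.push({
                    ...m,                                 // copy the remaining fields unchanged
                    id: `${m.id}-vertex`,                 // distinct id so both entries can coexist
                    name: `${m.name} Vertex`,
                    fullName: `${m.fullName ?? m.name} Vertex`,
                    flags: [...m.flags],                  // fresh array, not a shared reference
                    recommended: false,                   // generated variants are never recommended
                    provider: LLMProvider.VertexAI,
                })
            }
        }
        return out
    }

Deriving the variants in one place keeps the two model lists in sync: any Google Cloud model added later picks up a Vertex twin without another hand-written entry.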

View File

@@ -547,6 +547,7 @@ export async function requestChatDataMain(arg:requestDataArgument, model:ModelMo
             return requestPlugin(targ)
         case LLMFormat.Ooba:
             return requestOoba(targ)
+        case LLMFormat.VertexAIGemini:
         case LLMFormat.GoogleCloud:
             return requestGoogleCloudVertex(targ)
         case LLMFormat.Kobold:
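
The new case label falls through, so VertexAIGemini requests are dispatched to the same requestGoogleCloudVertex handler as GoogleCloud ones. A small self-contained sketch of that fall-through pattern follows; the enum members and the stub handler stand in for the project's real definitions and are assumptions for illustration.

    // Hypothetical sketch of the case fall-through dispatch added above.
    enum LLMFormat { GoogleCloud, VertexAIGemini, Ooba, Kobold }

    function requestGoogleCloudVertex(targ: unknown): string {
        return `google/vertex request for ${JSON.stringify(targ)}`
    }

    function route(format: LLMFormat, targ: unknown): string {
        switch (format) {
            case LLMFormat.VertexAIGemini: // intentionally no return: falls through
            case LLMFormat.GoogleCloud:
                return requestGoogleCloudVertex(targ)
            default:
                throw new Error(`unhandled format: ${LLMFormat[format]}`)
        }
    }

    // Both formats reach the same backend handler.
    console.log(route(LLMFormat.VertexAIGemini, { prompt: 'hi' }))
    console.log(route(LLMFormat.GoogleCloud, { prompt: 'hi' }))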