Add o1 and fix some mistakes

This commit is contained in:
Kwaroran
2024-12-18 03:26:02 +09:00
parent 25cd5af074
commit 5df9e68bae
9 changed files with 233 additions and 77 deletions

View File

@@ -14,7 +14,8 @@ export enum LLMFlags{
requiresAlternateRole,
mustStartWithUserInput,
poolSupported,
hasVideoInput
hasVideoInput,
OAICompletionTokens
}
export enum LLMProvider{
@@ -409,7 +410,7 @@ export const LLMModels: LLMModel[] = [
flags: [
LLMFlags.hasImageInput,
LLMFlags.hasFullSystemPrompt,
LLMFlags.hasStreaming
LLMFlags.hasStreaming,
],
parameters: OpenAIParameters,
tokenizer: LLMTokenizer.tiktokenO200Base
@@ -421,8 +422,8 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [
LLMFlags.hasFullSystemPrompt,
LLMFlags.hasStreaming
LLMFlags.hasStreaming,
LLMFlags.OAICompletionTokens
],
parameters: OpenAIParameters,
tokenizer: LLMTokenizer.tiktokenO200Base
@@ -434,8 +435,23 @@ export const LLMModels: LLMModel[] = [
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [
LLMFlags.hasStreaming,
LLMFlags.OAICompletionTokens
],
parameters: OpenAIParameters,
tokenizer: LLMTokenizer.tiktokenO200Base
},
{
id: 'o1',
internalID: 'o1',
name: 'o1',
provider: LLMProvider.OpenAI,
format: LLMFormat.OpenAICompatible,
flags: [
LLMFlags.hasStreaming,
LLMFlags.OAICompletionTokens,
LLMFlags.hasFullSystemPrompt,
LLMFlags.hasImageInput
],
parameters: OpenAIParameters,
tokenizer: LLMTokenizer.tiktokenO200Base

View File

@@ -409,15 +409,6 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
})
}
if(aiModel.startsWith('gpt4o1')){
for(let i=0;i<formatedChat.length;i++){
if(formatedChat[i].role === 'system'){
formatedChat[i].content = `<system>${formatedChat[i].content}</system>`
formatedChat[i].role = 'user'
}
}
}
for(let i=0;i<arg.biasString.length;i++){
const bia = arg.biasString[i]
if(bia[0].startsWith('[[') && bia[0].endsWith(']]')){
@@ -617,7 +608,7 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
})
if(aiModel.startsWith('gpt4o1')){
if(aiModel.startsWith('gpt4o1') || arg.modelInfo.flags.includes(LLMFlags.OAICompletionTokens)){
body.max_completion_tokens = body.max_tokens
delete body.max_tokens
}

View File

@@ -134,7 +134,6 @@ export const runVITS = async (text: string, modelData:string|OnnxModelFiles = 'X
});
}
export const registerOnnxModel = async ():Promise<OnnxModelFiles> => {
const id = v4().replace(/-/g, '')