diff --git a/src/ts/model/modellist.ts b/src/ts/model/modellist.ts
index 1367399f..bb335228 100644
--- a/src/ts/model/modellist.ts
+++ b/src/ts/model/modellist.ts
@@ -15,7 +15,8 @@ export enum LLMFlags{
     mustStartWithUserInput,
     poolSupported,
     hasVideoInput,
-    OAICompletionTokens
+    OAICompletionTokens,
+    DeveloperRole
 }
 
 export enum LLMProvider{
@@ -451,7 +452,8 @@ export const LLMModels: LLMModel[] = [
             LLMFlags.hasStreaming,
             LLMFlags.OAICompletionTokens,
             LLMFlags.hasFullSystemPrompt,
-            LLMFlags.hasImageInput
+            LLMFlags.hasImageInput,
+            LLMFlags.DeveloperRole
         ],
         parameters: OpenAIParameters,
         tokenizer: LLMTokenizer.tiktokenO200Base
diff --git a/src/ts/process/request.ts b/src/ts/process/request.ts
index e77297d8..412f7918 100644
--- a/src/ts/process/request.ts
+++ b/src/ts/process/request.ts
@@ -237,7 +237,7 @@ interface OpenAIImageContents {
 type OpenAIContents = OpenAITextContents|OpenAIImageContents
 
 export interface OpenAIChatExtra {
-    role: 'system'|'user'|'assistant'|'function'
+    role: 'system'|'user'|'assistant'|'function'|'developer'
     content: string|OpenAIContents[]
     memo?:string
     name?:string
@@ -559,2 +559,14 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDataResponse>{
+    if(arg.modelInfo.flags.includes(LLMFlags.DeveloperRole)){
+        formatedChat = formatedChat.map((v) => {
+            if(v.role === 'system'){
+                return {
+                    ...v,
+                    role: 'developer'
+                }
+            }
+            // non-system messages must pass through unchanged; without this
+            // fallback, map() yields `undefined` for every other entry
+            return v
+        })
+    }
     const res = await globalFetch(arg.customURL ?? "https://api.mistral.ai/v1/chat/completions", {
         body: applyParameters({