Add OAI response API

kwaroran
2025-03-20 12:08:34 +09:00
parent fff4ec74af
commit ad4f52239b
6 changed files with 226 additions and 1 deletion

View File

@@ -62,7 +62,8 @@ export enum LLMFormat{
     Kobold,
     Ollama,
     Horde,
-    AWSBedrockClaude
+    AWSBedrockClaude,
+    OpenAIResponseAPI
 }
 export enum LLMTokenizer{
@@ -1391,6 +1392,21 @@ for(let model of LLMModels){
     model.fullName ??= model.provider !== LLMProvider.AsIs ? `${ProviderNames.get(model.provider) ?? ''} ${model.name}`.trim() : model.name
 }
+for(let i=0; i<LLMModels.length; i++){
+    if(LLMModels[i].provider === LLMProvider.OpenAI && LLMModels[i].format === LLMFormat.OpenAICompatible){
+        LLMModels.push({
+            ...LLMModels[i],
+            format: LLMFormat.OpenAIResponseAPI,
+            flags: [...LLMModels[i].flags, LLMFlags.hasPrefill],
+            id: `${LLMModels[i].id}-response-api`,
+            name: `${LLMModels[i].name} (Response API)`,
+            fullName: `${LLMModels[i].fullName ?? LLMModels[i].name} (Response API)`,
+            recommended: false
+        })
+    }
+}
 export function getModelInfo(id: string): LLMModel{
     const db = getDatabase()

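The loop above clones every model whose provider is OpenAI and whose format is OpenAICompatible into a Response API variant. A minimal sketch of the effect, assuming a hypothetical gpt-4o base entry (field values here are illustrative, not taken from the actual model list):

const base = {
    id: 'gpt-4o',                                 // hypothetical entry, for illustration only
    name: 'GPT-4o',
    fullName: 'OpenAI GPT-4o',
    provider: LLMProvider.OpenAI,
    format: LLMFormat.OpenAICompatible,
    flags: [LLMFlags.hasImageInput],
    recommended: true
}
const derived = {
    ...base,
    format: LLMFormat.OpenAIResponseAPI,
    flags: [...base.flags, LLMFlags.hasPrefill],  // the variant gains prefill support
    id: `${base.id}-response-api`,                // 'gpt-4o-response-api'
    name: `${base.name} (Response API)`,          // 'GPT-4o (Response API)'
    fullName: `${base.fullName} (Response API)`,  // 'OpenAI GPT-4o (Response API)'
    recommended: false                            // the variant never becomes the recommended default
}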
View File

@@ -22,6 +22,7 @@ import { extractJSON, getGeneralJSONSchema, getOpenAIJSONSchema } from "./templa
 import { getModelInfo, LLMFlags, LLMFormat, type LLMModel } from "../model/modellist";
 import { runTrigger } from "./triggers";
 import { registerClaudeObserver } from "../observer.svelte";
+import { v4 } from "uuid";
@@ -529,6 +530,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:ModelMo
             return requestHorde(targ)
         case LLMFormat.WebLLM:
             return requestWebLLM(targ)
+        case LLMFormat.OpenAIResponseAPI:
+            return requestOpenAIResponseAPI(targ)
     }
     return {
@@ -1362,6 +1365,165 @@ async function requestOpenAILegacyInstruct(arg:RequestDataArgumentExtended):Prom
 }
+
+interface OAIResponseInputItem {
+    content:({
+        type: 'input_text',
+        text: string
+    }|{
+        detail: 'high'|'low'|'auto'
+        type: 'input_image',
+        image_url: string
+    }|{
+        type: 'input_file',
+        file_data: string
+        filename?: string
+    })[]
+    role:'user'|'system'|'developer'
+}
+
+interface OAIResponseOutputItem {
+    content:({
+        type: 'output_text',
+        text: string,
+        annotations: []
+    })[]
+    type: 'message',
+    status: 'in_progress'|'completed'|'incomplete'
+    role:'assistant'
+}
+
+type OAIResponseItem = OAIResponseInputItem|OAIResponseOutputItem
+async function requestOpenAIResponseAPI(arg:RequestDataArgumentExtended):Promise<requestDataResponse>{
+    const formated = arg.formated
+    const db = getDatabase()
+    const aiModel = arg.aiModel
+    const maxTokens = arg.maxTokens
+    const items:OAIResponseItem[] = []
+
+    // convert the OpenAI-style chat log into Responses API input/output items
+    for(let i=0;i<formated.length;i++){
+        const content = formated[i]
+        switch(content.role){
+            case 'function':
+                break
+            case 'assistant':{
+                const item:OAIResponseOutputItem = {
+                    content: [],
+                    role: content.role,
+                    status: 'completed',
+                    type: 'message',
+                }
+                item.content.push({
+                    type: 'output_text',
+                    text: content.content,
+                    annotations: []
+                })
+                items.push(item)
+                break
+            }
+            case 'user':
+            case 'system':{
+                const item:OAIResponseInputItem = {
+                    content: [],
+                    role: content.role
+                }
+                item.content.push({
+                    type: 'input_text',
+                    text: content.content
+                })
+                content.multimodals ??= []
+                for(const multimodal of content.multimodals){
+                    if(multimodal.type === 'image'){
+                        item.content.push({
+                            type: 'input_image',
+                            detail: 'auto',
+                            image_url: multimodal.base64
+                        })
+                    }
+                    else{
+                        item.content.push({
+                            type: 'input_file',
+                            file_data: multimodal.base64,
+                        })
+                    }
+                }
+                items.push(item)
+                break
+            }
+        }
+    }
+
+    // a trailing assistant message is marked incomplete so the model continues it (prefill)
+    if(items.length > 0 && items[items.length-1].role === 'assistant'){
+        (items[items.length-1] as OAIResponseOutputItem).status = 'incomplete'
+    }
+
+    const body = applyParameters({
+        model: arg.modelInfo.internalID ?? aiModel,
+        input: items,
+        max_output_tokens: maxTokens,
+        tools: [],
+        store: false
+    }, ['temperature', 'top_p'], {}, arg.mode)
+
+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: "https://api.openai.com/v1/responses",
+                body: body,
+                headers: {
+                    "Authorization": "Bearer " + db.openAIKey,
+                    "Content-Type": "application/json"
+                }
+            })
+        }
+    }
+
+    if(db.modelTools.includes('search')){
+        // the Responses API expects tool objects, not bare strings
+        body.tools.push({
+            type: 'web_search_preview'
+        })
+    }
+
+    const response = await globalFetch("https://api.openai.com/v1/responses", {
+        body: body,
+        headers: {
+            "Content-Type": "application/json",
+            "Authorization": "Bearer " + db.openAIKey,
+        },
+        chatId: arg.chatId
+    });
+
+    if(!response.ok){
+        return {
+            type: 'fail',
+            result: (language.errors.httpError + `${JSON.stringify(response.data)}`)
+        }
+    }
+
+    const text:string = (response.data.output?.find((m:OAIResponseOutputItem) => m.type === 'message') as OAIResponseOutputItem)?.content?.find(m => m.type === 'output_text')?.text
+
+    if(!text){
+        return {
+            type: 'fail',
+            result: JSON.stringify(response.data)
+        }
+    }
+
+    return {
+        type: 'success',
+        result: text
+    }
+}
 async function requestNovelAI(arg:RequestDataArgumentExtended):Promise<requestDataResponse>{
     const formated = arg.formated
     const db = getDatabase()

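For reference, a chat made of a system message, a user message with an image, and a trailing assistant prefill would be converted by requestOpenAIResponseAPI into a request body roughly like the following. This is a sketch that follows the conversion code above; the model name, texts, and data URL are placeholders:

const exampleBody = {
    model: 'gpt-4o',
    input: [
        {
            role: 'system',
            content: [{ type: 'input_text', text: 'You are a helpful assistant.' }]
        },
        {
            role: 'user',
            content: [
                { type: 'input_text', text: 'Describe this image.' },
                { type: 'input_image', detail: 'auto', image_url: 'data:image/png;base64,...' }
            ]
        },
        {
            // a trailing assistant message is sent with status 'incomplete',
            // which asks the model to continue it rather than start fresh
            type: 'message',
            role: 'assistant',
            status: 'incomplete',
            content: [{ type: 'output_text', text: 'The image shows', annotations: [] }]
        }
    ],
    max_output_tokens: 1024,
    tools: [],
    store: false
}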
View File

@@ -496,6 +496,7 @@ export function setDatabase(data:Database){
         model: data.hypaCustomSettings?.model ?? "",
     }
     data.doNotChangeSeperateModels ??= false
+    data.modelTools ??= []
     changeLanguage(data.language)
     setDatabaseLite(data)
 }
@@ -941,6 +942,7 @@ export interface Database{
         otherAx: string
     }
     doNotChangeSeperateModels:boolean
+    modelTools: string[]
 }
 interface SeparateParameters{
@@ -1283,6 +1285,7 @@ export interface botPreset{
         translate: string
         otherAx: string
     }
+    modelTools?:string[]
 }
@@ -1602,6 +1605,7 @@ export function saveCurrentPreset(){
         outputImageModal: db.outputImageModal ?? false,
         seperateModelsForAxModels: db.doNotChangeSeperateModels ? false : db.seperateModelsForAxModels ?? false,
         seperateModels: db.doNotChangeSeperateModels ? null : safeStructuredClone(db.seperateModels),
+        modelTools: safeStructuredClone(db.modelTools),
     }
     db.botPresets = pres
     setDatabase(db)
@@ -1723,6 +1727,7 @@ export function setPreset(db:Database, newPres: botPreset){
             otherAx: ''
         }
     }
+    db.modelTools = safeStructuredClone(newPres.modelTools ?? [])
     return db
 }
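
Together, these changes let each preset persist its own tool selection. A rough sketch of the round trip, assuming 'search' is the only tool name the request code currently checks for (as in requestOpenAIResponseAPI above):

// enable web search for the current preset
const db = getDatabase()
db.modelTools = ['search']     // saved with the preset by saveCurrentPreset()
setDatabase(db)                // restored by setPreset(), defaulted to [] by setDatabase()

// later, while building a Response API request:
// if(db.modelTools.includes('search')){
//     body.tools.push({ type: 'web_search_preview' })
// }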