Merge branch 'main' of https://github.com/kwaroran/RisuAI
@@ -1,5 +1,5 @@
 import { get, writable } from "svelte/store";
-import { type character, type MessageGenerationInfo, type Chat, changeToPreset } from "../storage/database.svelte";
+import { type character, type MessageGenerationInfo, type Chat, changeToPreset, setCurrentChat } from "../storage/database.svelte";
 import { DBState } from '../stores.svelte';
 import { CharEmotion, selectedCharID } from "../stores.svelte";
 import { ChatTokenizer, tokenize, tokenizeNum } from "../tokenizer";
@@ -63,6 +63,7 @@ export const doingChat = writable(false)
 export const chatProcessStage = writable(0)
 export const abortChat = writable(false)
 export let previewFormated:OpenAIChat[] = []
+export let previewBody:string = ''

 export async function sendChat(chatProcessIndex = -1,arg:{
     chatAdditonalTokens?:number,
@@ -70,6 +71,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{
     continue?:boolean,
     usedContinueTokens?:number,
     preview?:boolean
+    previewPrompt?:boolean
 } = {}):Promise<boolean> {

     chatProcessStage.set(0)
@@ -694,6 +696,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{
         const triggerResult = await runTrigger(currentChar, 'start', {chat: currentChat})
         if(triggerResult){
             currentChat = triggerResult.chat
+            setCurrentChat(currentChat)
             ms = currentChat.message
             currentTokens += triggerResult.tokens
             if(triggerResult.stopSending){
@@ -1303,9 +1306,15 @@ export async function sendChat(chatProcessIndex = -1,arg:{
         bias: {},
         continue: arg.continue,
         chatId: generationId,
-        imageResponse: DBState.db.outputImageModal
+        imageResponse: DBState.db.outputImageModal,
+        previewBody: arg.previewPrompt
     }, 'model', abortSignal)

+    if(arg.previewPrompt && req.type === 'success'){
+        previewBody = req.result
+        return true
+    }
+
     let result = ''
     let emoChanged = false
     let resendChat = false
@@ -17,6 +17,7 @@ function toRPN(expression:string) {
         '≤': {precedence: 1, associativity: 'Left'},
         '≥': {precedence: 1, associativity: 'Left'},
         '=': {precedence: 1, associativity: 'Left'},
+        '≠': {precedence: 1, associativity: 'Left'},
         '!': {precedence: 5, associativity: 'Right'},
     };
     const operatorsKeys = Object.keys(operators);
@@ -27,7 +28,11 @@ function toRPN(expression:string) {
     let lastToken = ''

     for(let i = 0; i < expression.length; i++) {
-        if(operatorsKeys.includes(expression[i])) {
+        const char = expression[i]
+        if (char === '-' && (i === 0 || operatorsKeys.includes(expression[i - 1]) || expression[i - 1] === '(')) {
+            lastToken += char
+        }
+        else if (operatorsKeys.includes(char)) {
             if(lastToken !== '') {
                 expression2.push(lastToken)
             }
@@ -35,10 +40,10 @@ function toRPN(expression:string) {
                 expression2.push('0')
             }
             lastToken = ''
-            expression2.push(expression[i])
+            expression2.push(char)
         }
         else{
-            lastToken += expression[i]
+            lastToken += char
         }
     }

@@ -94,6 +99,7 @@ function calculateRPN(expression:string) {
             case '≤': stack.push(a <= b ? 1 : 0); break;
             case '≥': stack.push(a >= b ? 1 : 0); break;
             case '=': stack.push(a === b ? 1 : 0); break;
+            case '≠': stack.push(a !== b ? 1 : 0); break;
             case '!': stack.push(b ? 0 : 1); break;
         }
     }
@@ -121,7 +127,14 @@ function executeRPNCalculation(text:string) {
             return "0"
         }
         return parsed.toString()
-    }).replace(/&&/g, '&').replace(/\|\|/g, '|').replace(/<=/g, '≤').replace(/>=/g, '≥').replace(/==/g, '=').replace(/null/gi, '0')
+    })
+    .replace(/&&/g, '&')
+    .replace(/\|\|/g, '|')
+    .replace(/<=/g, '≤')
+    .replace(/>=/g, '≥')
+    .replace(/==/g, '=')
+    .replace(/!=/g, '≠')
+    .replace(/null/gi, '0')
     const expression = toRPN(text);
     const evaluated = calculateRPN(expression);
     return evaluated
@@ -23,6 +23,8 @@ interface LuaEngineState {
     engine: LuaEngine;
     mutex: Mutex;
     chat: Chat;
+    setVar: (key:string, value:string) => void,
+    getVar: (key:string) => string
 }

 let LuaEngines = new Map<string, LuaEngineState>()
@@ -55,12 +57,16 @@ export async function runLua(code:string, arg:{
            code,
            engine: await luaFactory.createEngine({injectObjects: true}),
            mutex: new Mutex(),
-           chat
+           chat,
+           setVar,
+           getVar
        }
        LuaEngines.set(mode, luaEngineState)
        wasEmpty = true
    } else {
        luaEngineState.chat = chat
+       luaEngineState.setVar = setVar
+       luaEngineState.getVar = getVar
    }
    return await luaEngineState.mutex.runExclusive(async () => {
        if (wasEmpty || code !== luaEngineState.code) {
@@ -72,13 +78,13 @@ export async function runLua(code:string, arg:{
                if(!LuaSafeIds.has(id) && !LuaEditDisplayIds.has(id)){
                    return
                }
-               setVar(key, value)
+               luaEngineState.setVar(key, value)
            })
            luaEngine.global.set('getChatVar', (id:string,key:string) => {
                if(!LuaSafeIds.has(id) && !LuaEditDisplayIds.has(id)){
                    return
                }
-               return getVar(key)
+               return luaEngineState.getVar(key)
            })
            luaEngine.global.set('stopChat', (id:string) => {
                if(!LuaSafeIds.has(id)){
@@ -133,9 +133,11 @@ export class HypaProcesser{
     }

     async addText(texts:string[]) {
+        const db = getDatabase()
+        const suffix = (this.model === 'custom' && db.hypaCustomSettings.model) ? `-${db.hypaCustomSettings.model}` : ""

         for(let i=0;i<texts.length;i++){
-            const itm:memoryVector = await this.forage.getItem(texts[i] + '|' + this.model)
+            const itm:memoryVector = await this.forage.getItem(texts[i] + '|' + this.model + suffix)
             if(itm){
                 itm.alreadySaved = true
                 this.vectors.push(itm)
@@ -164,7 +166,7 @@ export class HypaProcesser{
         for(let i=0;i<memoryVectors.length;i++){
             const vec = memoryVectors[i]
             if(!vec.alreadySaved){
-                await this.forage.setItem(texts[i] + '|' + this.model, vec)
+                await this.forage.setItem(texts[i] + '|' + this.model + suffix, vec)
             }
         }

@@ -22,6 +22,7 @@ import { extractJSON, getGeneralJSONSchema, getOpenAIJSONSchema } from "./templa
|
||||
import { getModelInfo, LLMFlags, LLMFormat, type LLMModel } from "../model/modellist";
|
||||
import { runTrigger } from "./triggers";
|
||||
import { registerClaudeObserver } from "../observer.svelte";
|
||||
import { v4 } from "uuid";
|
||||
|
||||
|
||||
|
||||
@@ -43,6 +44,7 @@ interface requestDataArgument{
     schema?:string
     extractJson?:string
     imageResponse?:boolean
+    previewBody?:boolean
 }

 interface RequestDataArgumentExtended extends requestDataArgument{
@@ -485,6 +487,13 @@ export async function requestChatDataMain(arg:requestDataArgument, model:ModelMo
         targ.customURL = db.forceReplaceUrl
     }

+    if(db.seperateModelsForAxModels){
+        if(db.seperateModels[model]){
+            targ.aiModel = db.seperateModels[model]
+            targ.modelInfo = getModelInfo(targ.aiModel)
+        }
+    }
+
     const format = targ.modelInfo.format

     targ.formated = reformater(targ.formated, targ.modelInfo)
@@ -521,6 +530,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:ModelMo
             return requestHorde(targ)
         case LLMFormat.WebLLM:
             return requestWebLLM(targ)
+        case LLMFormat.OpenAIResponseAPI:
+            return requestOpenAIResponseAPI(targ)
     }

     return {
@@ -726,8 +737,8 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
             }
         }
     }

-    const res = await globalFetch(arg.customURL ?? "https://api.mistral.ai/v1/chat/completions", {
+    const targs = {
         body: applyParameters({
             model: requestModel,
             messages: reformatedChat,
@@ -739,7 +750,20 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
         },
         abortSignal: arg.abortSignal,
         chatId: arg.chatId
-    })
+    } as const
+
+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: "https://api.mistral.ai/v1/chat/completions",
+                body: targs.body,
+                headers: targs.headers
+            })
+        }
+    }
+
+    const res = await globalFetch(arg.customURL ?? "https://api.mistral.ai/v1/chat/completions", targs)

     const dat = res.data as any
     if(res.ok){
@@ -959,6 +983,17 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
             }
         }
     }
+
+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: replacerURL,
+                body: body,
+                headers: headers
+            })
+        }
+    }
     const da = await fetchNative(replacerURL, {
         body: JSON.stringify(body),
         method: "POST",
@@ -1140,6 +1175,17 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
         }
     }

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: replacerURL,
+                body: body,
+                headers: headers
+            })
+        }
+    }
+
     const res = await globalFetch(replacerURL, {
         body: body,
         headers: headers,
@@ -1278,6 +1324,15 @@ async function requestOpenAILegacyInstruct(arg:RequestDataArgumentExtended):Prom
         //return `\n\n${author}: ${m.content.trim()}`;
     }).join("") + `\n## Response\n`;

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                error: "This model is not supported in preview mode"
+            })
+        }
+    }
+
     const response = await globalFetch(arg.customURL ?? "https://api.openai.com/v1/completions", {
         body: {
             model: "gpt-3.5-turbo-instruct",
@@ -1310,6 +1365,165 @@ async function requestOpenAILegacyInstruct(arg:RequestDataArgumentExtended):Prom

 }

+
+
+interface OAIResponseInputItem {
+    content:({
+        type: 'input_text',
+        text: string
+    }|{
+        detail: 'high'|'low'|'auto'
+        type: 'input_image',
+        image_url: string
+    }|{
+        type: 'input_file',
+        file_data: string
+        filename?: string
+    })[]
+    role:'user'|'system'|'developer'
+}
+
+interface OAIResponseOutputItem {
+    content:({
+        type: 'output_text',
+        text: string,
+        annotations: []
+    })[]
+    type: 'message',
+    status: 'in_progress'|'complete'|'incomplete'
+    role:'assistant'
+}
+
+type OAIResponseItem = OAIResponseInputItem|OAIResponseOutputItem
+
+
+async function requestOpenAIResponseAPI(arg:RequestDataArgumentExtended):Promise<requestDataResponse>{
+
+    const formated = arg.formated
+    const db = getDatabase()
+    const aiModel = arg.aiModel
+    const maxTokens = arg.maxTokens
+
+    const items:OAIResponseItem[] = []
+
+    for(let i=0;i<formated.length;i++){
+        const content = formated[i]
+        switch(content.role){
+            case 'function':
+                break
+            case 'assistant':{
+                const item:OAIResponseOutputItem = {
+                    content: [],
+                    role: content.role,
+                    status: 'complete',
+                    type: 'message',
+                }
+
+                item.content.push({
+                    type: 'output_text',
+                    text: content.content,
+                    annotations: []
+                })
+
+                items.push(item)
+                break
+            }
+            case 'user':
+            case 'system':{
+                const item:OAIResponseInputItem = {
+                    content: [],
+                    role: content.role
+                }
+
+                item.content.push({
+                    type: 'input_text',
+                    text: content.content
+                })
+
+                content.multimodals ??= []
+                for(const multimodal of content.multimodals){
+                    if(multimodal.type === 'image'){
+                        item.content.push({
+                            type: 'input_image',
+                            detail: 'auto',
+                            image_url: multimodal.base64
+                        })
+                    }
+                    else{
+                        item.content.push({
+                            type: 'input_file',
+                            file_data: multimodal.base64,
+                        })
+                    }
+                }
+
+                items.push(item)
+                break
+            }
+        }
+    }
+
+    if(items[items.length-1].role === 'assistant'){
+        (items[items.length-1] as OAIResponseOutputItem).status = 'incomplete'
+    }
+
+    const body = applyParameters({
+        model: arg.modelInfo.internalID ?? aiModel,
+        input: items,
+        max_output_tokens: maxTokens,
+        tools: [],
+        store: false
+    }, ['temperature', 'top_p'], {}, arg.mode)
+
+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: "https://api.openai.com/v1/responses",
+                body: body,
+                headers: {
+                    "Authorization": "Bearer " + db.openAIKey,
+                    "Content-Type": "application/json"
+                }
+            })
+        }
+    }
+
+    if(db.modelTools.includes('search')){
+        body.tools.push('web_search_preview')
+    }
+
+    const response = await globalFetch("https://api.openai.com/v1/responses", {
+        body: body,
+        headers: {
+            "Content-Type": "application/json",
+            "Authorization": "Bearer " + db.openAIKey,
+        },
+        chatId: arg.chatId
+    });
+
+    if(!response.ok){
+        return {
+            type: 'fail',
+            result: (language.errors.httpError + `${JSON.stringify(response.data)}`)
+        }
+    }
+
+    const text:string = (response.data.output?.find((m:OAIResponseOutputItem) => m.type === 'message') as OAIResponseOutputItem)?.content?.find(m => m.type === 'output_text')?.text
+
+    if(!text){
+        return {
+            type: 'fail',
+            result: JSON.stringify(response.data)
+        }
+    }
+    return {
+        type: 'success',
+        result: text
+    }
+
+
+}
 async function requestNovelAI(arg:RequestDataArgumentExtended):Promise<requestDataResponse>{
     const formated = arg.formated
     const db = getDatabase()
@@ -1324,6 +1538,15 @@ async function requestNovelAI(arg:RequestDataArgumentExtended):Promise<requestDa
         sequence: number[], bias: number, ensure_sequence_finish: false, generate_once: true
     }[] = []

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                error: "This model is not supported in preview mode"
+            })
+        }
+    }
+
     for(let i=0;i<biasString.length;i++){
         const bia = biasString[i]
         const tokens = await tokenizeNum(bia[0])
@@ -1426,6 +1649,7 @@ async function requestOobaLegacy(arg:RequestDataArgumentExtended):Promise<reques
             return risuChatParser(v.replace(/\\n/g, "\n"))
         })
     }
+
     bodyTemplate = {
         'max_new_tokens': db.maxResponse,
         'do_sample': db.ooba.do_sample,
@@ -1454,6 +1678,17 @@ async function requestOobaLegacy(arg:RequestDataArgumentExtended):Promise<reques
         'X-API-KEY': db.mancerHeader
     }

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: blockingUrl,
+                body: bodyTemplate,
+                headers: headers
+            })
+        }
+    }
+
     if(useStreaming){
         const oobaboogaSocket = new WebSocket(streamUrl);
         const statusCode = await new Promise((resolve) => {
@@ -1572,6 +1807,17 @@ async function requestOoba(arg:RequestDataArgumentExtended):Promise<requestDataR
         }
     }

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: urlStr,
+                body: bodyTemplate,
+                headers: {}
+            })
+        }
+    }
+
     const response = await globalFetch(urlStr, {
         body: bodyTemplate,
         chatId: arg.chatId
@@ -1598,6 +1844,15 @@ async function requestPlugin(arg:RequestDataArgumentExtended):Promise<requestDat
     const maxTokens = arg.maxTokens
     const bias = arg.biasString
     const v2Function = pluginV2.providers.get(db.currentPluginProvider)

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                error: "Plugin is not supported in preview mode"
+            })
+        }
+    }
+
     const d = v2Function ? (await v2Function(applyParameters({
         prompt_chat: formated,
@@ -1606,7 +1861,7 @@ async function requestPlugin(arg:RequestDataArgumentExtended):Promise<requestDat
         max_tokens: maxTokens,
     }, [
         'frequency_penalty','min_p','presence_penalty','repetition_penalty','top_k','top_p','temperature'
-    ], {}, arg.mode) as any)) : await pluginProcess({
+    ], {}, arg.mode) as any, arg.abortSignal)) : await pluginProcess({
         bias: bias,
         prompt_chat: formated,
         temperature: (db.temperature / 100),
@@ -2037,6 +2292,17 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise

     if(arg.modelInfo.format === LLMFormat.GoogleCloud && arg.useStreaming){
         headers['Content-Type'] = 'application/json'
+
+        if(arg.previewBody){
+            return {
+                type: 'success',
+                result: JSON.stringify({
+                    url: url,
+                    body: body,
+                    headers: headers
+                })
+            }
+        }
         const f = await fetchNative(url, {
             headers: headers,
             body: JSON.stringify(body),
@@ -2127,6 +2393,17 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise
        }
     }

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: url,
+                body: body,
+                headers: headers
+            })
+        }
+    }
+
     const res = await globalFetch(url, {
         headers: headers,
         body: body,
@@ -2240,6 +2517,17 @@ async function requestKobold(arg:RequestDataArgumentExtended):Promise<requestDat
     ], {
         'repetition_penalty': 'rep_pen'
     }, arg.mode) as KoboldGenerationInputSchema

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: url.toString(),
+                body: body,
+                headers: {}
+            })
+        }
+    }
+
     const da = await globalFetch(url.toString(), {
         method: "POST",
@@ -2306,6 +2594,18 @@ async function requestNovelList(arg:RequestDataArgumentExtended):Promise<request
         logit_bias: (logit_bias.length > 0) ? logit_bias.join("<<|>>") : undefined,
         logit_bias_values: (logit_bias_values.length > 0) ? logit_bias_values.join("|") : undefined,
     };
+
+
+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: api_server_url + '/api',
+                body: send_body,
+                headers: headers
+            })
+        }
+    }
     const response = await globalFetch(arg.customURL ?? api_server_url + '/api', {
         method: 'POST',
         headers: headers,
@@ -2339,6 +2639,15 @@ async function requestOllama(arg:RequestDataArgumentExtended):Promise<requestDat
     const formated = arg.formated
     const db = getDatabase()

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                error: "Preview body is not supported for Ollama"
+            })
+        }
+    }
+
     const ollama = new Ollama({host: db.ollamaURL})

     const response = await ollama.chat({
@@ -2453,6 +2762,20 @@ async function requestCohere(arg:RequestDataArgumentExtended):Promise<requestDat

     console.log(body)

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: arg.customURL ?? 'https://api.cohere.com/v1/chat',
+                body: body,
+                headers: {
+                    "Authorization": "Bearer " + db.cohereAPIKey,
+                    "Content-Type": "application/json"
+                }
+            })
+        }
+    }
+
     const res = await globalFetch(arg.customURL ?? 'https://api.cohere.com/v1/chat', {
         method: "POST",
         headers: {
@@ -2548,7 +2871,7 @@ async function requestClaude(arg:RequestDataArgumentExtended):Promise<requestDat
         if(claudeChat.length > 0 && claudeChat[claudeChat.length-1].role === chat.role){
             let content = claudeChat[claudeChat.length-1].content
             if(multimodals && multimodals.length > 0 && !Array.isArray(content)){
-            content = [{
+                content = [{
                     type: 'text',
                     text: content
                 }]
@@ -2725,6 +3048,9 @@ async function requestClaude(arg:RequestDataArgumentExtended):Promise<requestDat
     else if(body?.thinking?.budget_tokens && body?.thinking?.budget_tokens > 0){
         body.thinking.type = 'enabled'
     }
+    else if(body?.thinking?.budget_tokens === null){
+        delete body.thinking
+    }

     if(systemPrompt === ''){
         delete body.system
@@ -2783,6 +3109,18 @@ async function requestClaude(arg:RequestDataArgumentExtended):Promise<requestDat

         const signed = await signer.sign(rq);

+        if(arg.previewBody){
+            return {
+                type: 'success',
+                result: JSON.stringify({
+                    url: url,
+                    body: params,
+                    headers: signed.headers
+                })
+            }
+
+        }
+
         const res = await globalFetch(url, {
             method: "POST",
             body: params,
@@ -2873,6 +3211,18 @@ async function requestClaude(arg:RequestDataArgumentExtended):Promise<requestDat
         headers['anthropic-dangerous-direct-browser-access'] = 'true'
     }

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: replacerURL,
+                body: body,
+                headers: headers
+            })
+        }
+    }
+
+

     if(db.claudeRetrivalCaching){
         registerClaudeObserver({
@@ -3100,6 +3450,15 @@ async function requestHorde(arg:RequestDataArgumentExtended):Promise<requestData
     const currentChar = getCurrentCharacter()
     const abortSignal = arg.abortSignal

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                error: "Preview body is not supported for Horde"
+            })
+        }
+    }
+
     const prompt = applyChatTemplate(formated)

     const realModel = aiModel.split(":::")[1]
@@ -3198,6 +3557,15 @@ async function requestWebLLM(arg:RequestDataArgumentExtended):Promise<requestDat
     const temperature = arg.temperature
     const realModel = aiModel.split(":::")[1]
    const prompt = applyChatTemplate(formated)
+
+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                error: "Preview body is not supported for WebLLM"
+            })
+        }
+    }
    const v = await runTransformers(prompt, realModel, {
        temperature: temperature,
        max_new_tokens: maxTokens,