Add preview request data

kwaroran
2025-03-16 19:24:59 +09:00
parent fef10c1e56
commit 5c53e16b85
3 changed files with 212 additions and 7 deletions

View File

@@ -13,7 +13,7 @@
import TextAreaInput from "../UI/GUI/TextAreaInput.svelte";
import { FolderUpIcon, PlusIcon, TrashIcon } from "lucide-svelte";
import { selectSingleFile } from "src/ts/util";
import { doingChat, previewFormated, sendChat } from "src/ts/process/index.svelte";
import { doingChat, previewFormated, previewBody, sendChat } from "src/ts/process/index.svelte";
import SelectInput from "../UI/GUI/SelectInput.svelte";
import { applyChatTemplate, chatTemplates } from "src/ts/process/templates/chatTemplate";
import OptionInput from "../UI/GUI/OptionInput.svelte";
@@ -31,7 +31,8 @@
}
alertWait("Loading...")
await sendChat(-1, {
preview: true
preview: previewJoin !== 'prompt',
previewPrompt: previewJoin === 'prompt'
})
let md = ''
@@ -41,6 +42,15 @@
"system": "⚙️ System",
"assistant": "✨ Assistant",
}
if(previewJoin === 'prompt'){
md += '### Prompt\n'
md += '```json\n' + JSON.stringify(JSON.parse(previewBody), null, 2).replaceAll('```', '\\`\\`\\`') + '\n```\n'
$doingChat = false
alertMd(md)
return
}
let formated = safeStructuredClone(previewFormated)
if(previewJoin === 'yes'){
@@ -252,6 +262,7 @@
<SelectInput bind:value={previewJoin}>
<OptionInput value="yes">With Join</OptionInput>
<OptionInput value="no">Without Join</OptionInput>
<OptionInput value="prompt">As Request</OptionInput>
</SelectInput>
<Button className="mt-2" onclick={() => {preview()}}>Run</Button>
</Arcodion>
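
The "As Request" branch added above feeds the captured request (the previewBody JSON string exported from the process module) through JSON.parse/stringify for pretty-printing and escapes backticks so the payload cannot close the surrounding markdown fence early. A minimal standalone sketch of that rendering step (the function name is invented for illustration):

function renderRequestPreview(previewBody: string): string {
    // Pretty-print the captured request, then escape backtick runs so the
    // payload cannot terminate the ```json fence it is embedded in.
    const pretty = JSON.stringify(JSON.parse(previewBody), null, 2)
        .replaceAll('```', '\\`\\`\\`')
    return '### Prompt\n```json\n' + pretty + '\n```\n'
}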

View File

@@ -63,6 +63,7 @@ export const doingChat = writable(false)
export const chatProcessStage = writable(0)
export const abortChat = writable(false)
export let previewFormated:OpenAIChat[] = []
export let previewBody:string = ''
export async function sendChat(chatProcessIndex = -1,arg:{
chatAdditonalTokens?:number,
@@ -70,6 +71,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{
continue?:boolean,
usedContinueTokens?:number,
preview?:boolean
previewPrompt?:boolean
} = {}):Promise<boolean> {
chatProcessStage.set(0)
@@ -1303,9 +1305,15 @@ export async function sendChat(chatProcessIndex = -1,arg:{
bias: {},
continue: arg.continue,
chatId: generationId,
imageResponse: DBState.db.outputImageModal
imageResponse: DBState.db.outputImageModal,
previewBody: arg.previewPrompt
}, 'model', abortSignal)
if(arg.previewPrompt && req.type === 'success'){
previewBody = req.result
return true
}
let result = ''
let emoChanged = false
let resendChat = false
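
On the sending side, previewPrompt travels to the request layer as the previewBody flag, and a successful result is then the serialized HTTP request rather than model output, so sendChat stores it and returns before any streaming or post-processing. The short-circuit in isolation (the two-variant result type is a simplification of the real requestDataResponse):

type RequestResult =
    | { type: 'success'; result: string }
    | { type: 'fail'; result: string }

let previewBody = ''

// Hypothetical wrapper: in preview mode the 'success' result carries the
// serialized request, so it is stashed and all response handling is skipped.
async function handleRequest(
    doRequest: () => Promise<RequestResult>,
    previewPrompt: boolean
): Promise<boolean> {
    const req = await doRequest()
    if (previewPrompt && req.type === 'success') {
        previewBody = req.result
        return true
    }
    // ...normal response handling would continue here...
    return req.type === 'success'
}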

View File

@@ -43,6 +43,7 @@ interface requestDataArgument{
schema?:string
extractJson?:string
imageResponse?:boolean
previewBody?:boolean
}
interface RequestDataArgumentExtended extends requestDataArgument{
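
From here on, each provider handler checks the new previewBody flag and, when it is set, returns a success-typed result whose payload is the JSON-serialized {url, body, headers} triple. That contract, sketched with illustrative names (PreviewPayload and previewResult are not in the commit):

interface PreviewPayload {
    url: string
    body: unknown
    headers: Record<string, string>
}

// 'success' is reused as the result type so callers need no new variant;
// only the meaning of `result` changes in preview mode.
function previewResult(payload: PreviewPayload): { type: 'success'; result: string } {
    return { type: 'success', result: JSON.stringify(payload) }
}

A helper like this would also keep the payload shape in one place; in the commit, the same ten-line block is instead repeated in every handler below.
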
@@ -727,7 +728,7 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
}
}
const res = await globalFetch(arg.customURL ?? "https://api.mistral.ai/v1/chat/completions", {
const targs = {
body: applyParameters({
model: requestModel,
messages: reformatedChat,
@@ -739,7 +740,20 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
},
abortSignal: arg.abortSignal,
chatId: arg.chatId
})
} as const
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
url: "https://api.mistral.ai/v1/chat/completions",
body: targs.body,
headers: targs.headers
})
}
}
const res = await globalFetch(arg.customURL ?? "https://api.mistral.ai/v1/chat/completions", targs)
const dat = res.data as any
if(res.ok){
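
The Mistral path is restructured so the fetch arguments are built once as targs (narrowed with as const) and then either serialized for preview or passed unchanged to globalFetch, which keeps the previewed request identical to the one that would actually be sent. The build-once shape, sketched with the sender injected so the snippet stands alone (globalFetch's real option type is assumed):

// `send` stands in for the project's globalFetch wrapper.
async function sendOrPreview<T>(
    url: string,
    targs: { body: unknown; headers: Record<string, string> },
    preview: boolean,
    send: (url: string, targs: { body: unknown; headers: Record<string, string> }) => Promise<T>
): Promise<{ type: 'success'; result: string } | T> {
    if (preview) {
        // Serialize exactly what would have gone over the wire.
        return { type: 'success', result: JSON.stringify({ url, body: targs.body, headers: targs.headers }) }
    }
    return send(url, targs)
}
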
@@ -959,6 +973,17 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
}
}
}
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
url: replacerURL,
body: body,
headers: headers
})
}
}
const da = await fetchNative(replacerURL, {
body: JSON.stringify(body),
method: "POST",
@@ -1140,6 +1165,17 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
}
}
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
url: replacerURL,
body: body,
headers: headers
})
}
}
const res = await globalFetch(replacerURL, {
body: body,
headers: headers,
@@ -1278,6 +1314,15 @@ async function requestOpenAILegacyInstruct(arg:RequestDataArgumentExtended):Prom
//return `\n\n${author}: ${m.content.trim()}`;
}).join("") + `\n## Response\n`;
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
error: "This model is not supported in preview mode"
})
}
}
const response = await globalFetch(arg.customURL ?? "https://api.openai.com/v1/completions", {
body: {
model: "gpt-3.5-turbo-instruct",
@@ -1324,6 +1369,15 @@ async function requestNovelAI(arg:RequestDataArgumentExtended):Promise<requestDa
sequence: number[], bias: number, ensure_sequence_finish: false, generate_once: true
}[] = []
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
error: "This model is not supported in preview mode"
})
}
}
for(let i=0;i<biasString.length;i++){
const bia = biasString[i]
const tokens = await tokenizeNum(bia[0])
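
Handlers whose requests cannot be captured this way (the legacy instruct endpoint and NovelAI above; Ollama, Horde, and WebLLM further down) still return type 'success', but with an {error} payload, so the explanation surfaces inside the same JSON preview alert instead of failing the chat. A caller can tell the two payloads apart by shape; a sketch:

// Distinguish a captured request from an unsupported-provider notice
// (both arrive as 'success' results whose `result` is JSON).
function describePreview(previewBody: string): string {
    const parsed = JSON.parse(previewBody) as { error?: string; url?: string }
    return parsed.error ?? 'request to ' + (parsed.url ?? 'unknown endpoint')
}
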
@@ -1426,6 +1480,7 @@ async function requestOobaLegacy(arg:RequestDataArgumentExtended):Promise<reques
return risuChatParser(v.replace(/\\n/g, "\n"))
})
}
bodyTemplate = {
'max_new_tokens': db.maxResponse,
'do_sample': db.ooba.do_sample,
@@ -1454,6 +1509,17 @@ async function requestOobaLegacy(arg:RequestDataArgumentExtended):Promise<reques
'X-API-KEY': db.mancerHeader
}
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
url: blockingUrl,
body: bodyTemplate,
headers: headers
})
}
}
if(useStreaming){
const oobaboogaSocket = new WebSocket(streamUrl);
const statusCode = await new Promise((resolve) => {
@@ -1572,6 +1638,17 @@ async function requestOoba(arg:RequestDataArgumentExtended):Promise<requestDataR
}
}
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
url: urlStr,
body: bodyTemplate,
headers: {}
})
}
}
const response = await globalFetch(urlStr, {
body: bodyTemplate,
chatId: arg.chatId
@@ -2037,6 +2114,17 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise
if(arg.modelInfo.format === LLMFormat.GoogleCloud && arg.useStreaming){
headers['Content-Type'] = 'application/json'
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
url: url,
body: body,
headers: headers
})
}
}
const f = await fetchNative(url, {
headers: headers,
body: JSON.stringify(body),
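
requestGoogleCloudVertex carries the guard twice, once before this streaming fetchNative call and once before the blocking globalFetch below (requestClaude repeats the same pattern), because the capture has to happen after url, body, and headers are final but before either transport runs. The placement, sketched with stub transports (all names illustrative):

// Stub transports; only the position of the guard matters here.
const startStream = async (p: unknown) => ({ type: 'streaming' as const, result: String(p) })
const blockingCall = async (p: unknown) => ({ type: 'success' as const, result: String(p) })

async function dispatch(
    preview: boolean,
    streaming: boolean,
    payload: { url: string; body: unknown; headers: Record<string, string> }
) {
    // Capture point: the payload is complete, nothing has hit the network yet.
    if (preview) return { type: 'success' as const, result: JSON.stringify(payload) }
    return streaming ? startStream(payload) : blockingCall(payload)
}
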
@@ -2127,6 +2215,17 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise
}
}
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
url: url,
body: body,
headers: headers
})
}
}
const res = await globalFetch(url, {
headers: headers,
body: body,
@@ -2246,6 +2345,17 @@ async function requestKobold(arg:RequestDataArgumentExtended):Promise<requestDat
'repetition_penalty': 'rep_pen'
}, arg.mode) as KoboldGenerationInputSchema
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
url: url.toString(),
body: body,
headers: {}
})
}
}
const da = await globalFetch(url.toString(), {
method: "POST",
body: body,
@@ -2311,6 +2421,18 @@ async function requestNovelList(arg:RequestDataArgumentExtended):Promise<request
logit_bias: (logit_bias.length > 0) ? logit_bias.join("<<|>>") : undefined,
logit_bias_values: (logit_bias_values.length > 0) ? logit_bias_values.join("|") : undefined,
};
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
url: api_server_url + '/api',
body: send_body,
headers: headers
})
}
}
const response = await globalFetch(arg.customURL ?? api_server_url + '/api', {
method: 'POST',
headers: headers,
@@ -2344,6 +2466,15 @@ async function requestOllama(arg:RequestDataArgumentExtended):Promise<requestDat
const formated = arg.formated
const db = getDatabase()
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
error: "Preview body is not supported for Ollama"
})
}
}
const ollama = new Ollama({host: db.ollamaURL})
const response = await ollama.chat({
@@ -2458,6 +2589,20 @@ async function requestCohere(arg:RequestDataArgumentExtended):Promise<requestDat
console.log(body)
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
url: arg.customURL ?? 'https://api.cohere.com/v1/chat',
body: body,
headers: {
"Authorization": "Bearer " + db.cohereAPIKey,
"Content-Type": "application/json"
}
})
}
}
const res = await globalFetch(arg.customURL ?? 'https://api.cohere.com/v1/chat', {
method: "POST",
headers: {
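
One consequence of duplicating the header literal here: the serialized Cohere preview contains the live Authorization bearer token. If preview output may be pasted into bug reports, a redaction pass over the captured headers, like this sketch (not part of the commit), keeps keys out of it:

// Hypothetical post-processing step; the header names matched are the
// secret-bearing ones that appear elsewhere in this diff.
function redactHeaders(headers: Record<string, string>): Record<string, string> {
    const out: Record<string, string> = { ...headers }
    for (const key of Object.keys(out)) {
        if (/^(authorization|x-api-key)$/i.test(key)) out[key] = '<redacted>'
    }
    return out
}
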
@@ -2788,6 +2933,18 @@ async function requestClaude(arg:RequestDataArgumentExtended):Promise<requestDat
const signed = await signer.sign(rq);
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
url: url,
body: params,
headers: signed.headers
})
}
}
const res = await globalFetch(url, {
method: "POST",
body: params,
@@ -2887,6 +3044,17 @@ async function requestClaude(arg:RequestDataArgumentExtended):Promise<requestDat
})
}
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
url: replacerURL,
body: body,
headers: headers
})
}
}
if(useStreaming){
const res = await fetchNative(replacerURL, {
@@ -3105,6 +3273,15 @@ async function requestHorde(arg:RequestDataArgumentExtended):Promise<requestData
const currentChar = getCurrentCharacter()
const abortSignal = arg.abortSignal
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
error: "Preview body is not supported for Horde"
})
}
}
const prompt = applyChatTemplate(formated)
const realModel = aiModel.split(":::")[1]
@@ -3203,6 +3380,15 @@ async function requestWebLLM(arg:RequestDataArgumentExtended):Promise<requestDat
const temperature = arg.temperature
const realModel = aiModel.split(":::")[1]
const prompt = applyChatTemplate(formated)
if(arg.previewBody){
return {
type: 'success',
result: JSON.stringify({
error: "Preview body is not supported for WebLLM"
})
}
}
const v = await runTransformers(prompt, realModel, {
temperature: temperature,
max_new_tokens: maxTokens,
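
Taken together: the component's "As Request" option sets previewPrompt, sendChat forwards it as previewBody, the matching handler returns its {url, body, headers} (or an {error} notice) instead of touching the network, and the component pretty-prints the JSON. An end-to-end sketch with a stub handler standing in for any of the providers above (all names illustrative):

async function demo(): Promise<void> {
    // Stub provider: returns the serialized request in preview mode,
    // a fake completion otherwise.
    const handler = async (arg: { previewBody?: boolean }) =>
        arg.previewBody
            ? { type: 'success' as const,
                result: JSON.stringify({ url: 'https://example.invalid/v1/chat',
                                         body: { messages: [] }, headers: {} }) }
            : { type: 'success' as const, result: 'model output' }

    const req = await handler({ previewBody: true })
    // What the preview alert would render for this stub:
    console.log(JSON.stringify(JSON.parse(req.result), null, 2))
}

void demo()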