Add preview request data
@@ -63,6 +63,7 @@ export const doingChat = writable(false)
 export const chatProcessStage = writable(0)
 export const abortChat = writable(false)
 export let previewFormated:OpenAIChat[] = []
+export let previewBody:string = ''

 export async function sendChat(chatProcessIndex = -1,arg:{
     chatAdditonalTokens?:number,
@@ -70,6 +71,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{
     continue?:boolean,
     usedContinueTokens?:number,
     preview?:boolean
+    previewPrompt?:boolean
 } = {}):Promise<boolean> {

     chatProcessStage.set(0)
@@ -1303,9 +1305,15 @@ export async function sendChat(chatProcessIndex = -1,arg:{
         bias: {},
         continue: arg.continue,
         chatId: generationId,
-        imageResponse: DBState.db.outputImageModal
+        imageResponse: DBState.db.outputImageModal,
+        previewBody: arg.previewPrompt
     }, 'model', abortSignal)

+    if(arg.previewPrompt && req.type === 'success'){
+        previewBody = req.result
+        return true
+    }
+
     let result = ''
     let emoChanged = false
     let resendChat = false
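A minimal caller-side sketch of the new preview path, assuming sendChat and previewBody are exported from this module as above; the import path and function name here are hypothetical:

import { sendChat, previewBody } from './chat' // hypothetical path

// Ask sendChat to build the request, then stop before any network call.
async function inspectNextRequest(): Promise<void> {
    const ok = await sendChat(-1, { previewPrompt: true })
    if (ok) {
        // previewBody now holds JSON: { url, body, headers } or { error }
        console.log(JSON.parse(previewBody))
    }
}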
@@ -43,6 +43,7 @@ interface requestDataArgument{
     schema?:string
     extractJson?:string
     imageResponse?:boolean
+    previewBody?:boolean
 }

 interface RequestDataArgumentExtended extends requestDataArgument{
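The early returns added below all produce the success arm of requestDataResponse; sketched as a union for reference, with the failure arm assumed rather than taken from this diff:

type requestDataResponse =
    | { type: 'success', result: string }
    | { type: 'fail', result: string } // assumed; the real union in this file may differ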
@@ -726,8 +727,8 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
             }
         }
     }

-    const res = await globalFetch(arg.customURL ?? "https://api.mistral.ai/v1/chat/completions", {
+    const targs = {
         body: applyParameters({
             model: requestModel,
             messages: reformatedChat,
@@ -739,7 +740,20 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
         },
         abortSignal: arg.abortSignal,
         chatId: arg.chatId
-    })
+    } as const
+
+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: "https://api.mistral.ai/v1/chat/completions",
+                body: targs.body,
+                headers: targs.headers
+            })
+        }
+    }
+
+    const res = await globalFetch(arg.customURL ?? "https://api.mistral.ai/v1/chat/completions", targs)

     const dat = res.data as any
     if(res.ok){
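Each provider below repeats this same early return. Its shape, factored into a hypothetical helper that the commit itself does not introduce:

function previewResult(url: string, body: unknown, headers: Record<string, string>) {
    // Package the would-be request instead of sending it.
    return {
        type: 'success' as const,
        result: JSON.stringify({ url, body, headers })
    }
}

Each branch would then read: if(arg.previewBody){ return previewResult(url, body, headers) }. The commit instead inlines the object at every call site, which keeps each provider self-contained.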
@@ -959,6 +973,17 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
             }
         }
     }
+
+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: replacerURL,
+                body: body,
+                headers: headers
+            })
+        }
+    }
     const da = await fetchNative(replacerURL, {
         body: JSON.stringify(body),
         method: "POST",
@@ -1140,6 +1165,17 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
         }
     }

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: replacerURL,
+                body: body,
+                headers: headers
+            })
+        }
+    }
+
     const res = await globalFetch(replacerURL, {
         body: body,
         headers: headers,
@@ -1278,6 +1314,15 @@ async function requestOpenAILegacyInstruct(arg:RequestDataArgumentExtended):Prom
         //return `\n\n${author}: ${m.content.trim()}`;
     }).join("") + `\n## Response\n`;

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                error: "This model is not supported in preview mode"
+            })
+        }
+    }
+
     const response = await globalFetch(arg.customURL ?? "https://api.openai.com/v1/completions", {
         body: {
             model: "gpt-3.5-turbo-instruct",
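Backends that never assemble a plain HTTP payload (legacy instruct here, and NovelAI, Ollama, Horde, and WebLLM below) still return type: 'success' and signal the limitation inside the JSON. A consumer therefore branches on the parsed payload; the type and helper names below are illustrative:

type PreviewPayload =
    | { url: string, body: unknown, headers: Record<string, string> }
    | { error: string }

function describePreview(result: string): string {
    const p = JSON.parse(result) as PreviewPayload
    return 'error' in p ? p.error : p.url
}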
@@ -1324,6 +1369,15 @@ async function requestNovelAI(arg:RequestDataArgumentExtended):Promise<requestDa
         sequence: number[], bias: number, ensure_sequence_finish: false, generate_once: true
     }[] = []

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                error: "This model is not supported in preview mode"
+            })
+        }
+    }
+
     for(let i=0;i<biasString.length;i++){
         const bia = biasString[i]
         const tokens = await tokenizeNum(bia[0])
@@ -1426,6 +1480,7 @@ async function requestOobaLegacy(arg:RequestDataArgumentExtended):Promise<reques
             return risuChatParser(v.replace(/\\n/g, "\n"))
         })
     }
+
     bodyTemplate = {
         'max_new_tokens': db.maxResponse,
         'do_sample': db.ooba.do_sample,
@@ -1454,6 +1509,17 @@ async function requestOobaLegacy(arg:RequestDataArgumentExtended):Promise<reques
             'X-API-KEY': db.mancerHeader
         }

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: blockingUrl,
+                body: bodyTemplate,
+                headers: headers
+            })
+        }
+    }
+
     if(useStreaming){
         const oobaboogaSocket = new WebSocket(streamUrl);
         const statusCode = await new Promise((resolve) => {
@@ -1572,6 +1638,17 @@ async function requestOoba(arg:RequestDataArgumentExtended):Promise<requestDataR
         }
     }

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: urlStr,
+                body: bodyTemplate,
+                headers: {}
+            })
+        }
+    }
+
     const response = await globalFetch(urlStr, {
         body: bodyTemplate,
         chatId: arg.chatId
@@ -2037,6 +2114,17 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise

     if(arg.modelInfo.format === LLMFormat.GoogleCloud && arg.useStreaming){
         headers['Content-Type'] = 'application/json'
+
+        if(arg.previewBody){
+            return {
+                type: 'success',
+                result: JSON.stringify({
+                    url: url,
+                    body: body,
+                    headers: headers
+                })
+            }
+        }
         const f = await fetchNative(url, {
             headers: headers,
             body: JSON.stringify(body),
@@ -2127,6 +2215,17 @@ async function requestGoogleCloudVertex(arg:RequestDataArgumentExtended):Promise
         }
     }

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: url,
+                body: body,
+                headers: headers
+            })
+        }
+    }
+
     const res = await globalFetch(url, {
         headers: headers,
         body: body,
@@ -2245,6 +2344,17 @@ async function requestKobold(arg:RequestDataArgumentExtended):Promise<requestDat
     ], {
         'repetition_penalty': 'rep_pen'
     }, arg.mode) as KoboldGenerationInputSchema

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: url.toString(),
+                body: body,
+                headers: {}
+            })
+        }
+    }
+
     const da = await globalFetch(url.toString(), {
         method: "POST",
@@ -2311,6 +2421,18 @@ async function requestNovelList(arg:RequestDataArgumentExtended):Promise<request
         logit_bias: (logit_bias.length > 0) ? logit_bias.join("<<|>>") : undefined,
         logit_bias_values: (logit_bias_values.length > 0) ? logit_bias_values.join("|") : undefined,
     };
+
+
+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: api_server_url + '/api',
+                body: send_body,
+                headers: headers
+            })
+        }
+    }
     const response = await globalFetch(arg.customURL ?? api_server_url + '/api', {
         method: 'POST',
         headers: headers,
@@ -2344,6 +2466,15 @@ async function requestOllama(arg:RequestDataArgumentExtended):Promise<requestDat
     const formated = arg.formated
     const db = getDatabase()

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                error: "Preview body is not supported for Ollama"
+            })
+        }
+    }
+
     const ollama = new Ollama({host: db.ollamaURL})

     const response = await ollama.chat({
@@ -2458,6 +2589,20 @@ async function requestCohere(arg:RequestDataArgumentExtended):Promise<requestDat

     console.log(body)

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: arg.customURL ?? 'https://api.cohere.com/v1/chat',
+                body: body,
+                headers: {
+                    "Authorization": "Bearer " + db.cohereAPIKey,
+                    "Content-Type": "application/json"
+                }
+            })
+        }
+    }
+
     const res = await globalFetch(arg.customURL ?? 'https://api.cohere.com/v1/chat', {
         method: "POST",
         headers: {
@@ -2553,7 +2698,7 @@ async function requestClaude(arg:RequestDataArgumentExtended):Promise<requestDat
         if(claudeChat.length > 0 && claudeChat[claudeChat.length-1].role === chat.role){
             let content = claudeChat[claudeChat.length-1].content
             if(multimodals && multimodals.length > 0 && !Array.isArray(content)){
-               content = [{
+                content = [{
                     type: 'text',
                     text: content
                 }]
@@ -2788,6 +2933,18 @@ async function requestClaude(arg:RequestDataArgumentExtended):Promise<requestDat

         const signed = await signer.sign(rq);

+        if(arg.previewBody){
+            return {
+                type: 'success',
+                result: JSON.stringify({
+                    url: url,
+                    body: params,
+                    headers: signed.headers
+                })
+            }
+
+        }
+
         const res = await globalFetch(url, {
             method: "POST",
             body: params,
@@ -2887,6 +3044,17 @@ async function requestClaude(arg:RequestDataArgumentExtended):Promise<requestDat
         })
     }

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                url: replacerURL,
+                body: body,
+                headers: headers
+            })
+        }
+    }
+
     if(useStreaming){

         const res = await fetchNative(replacerURL, {
@@ -3105,6 +3273,15 @@ async function requestHorde(arg:RequestDataArgumentExtended):Promise<requestData
     const currentChar = getCurrentCharacter()
     const abortSignal = arg.abortSignal

+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                error: "Preview body is not supported for Horde"
+            })
+        }
+    }
+
     const prompt = applyChatTemplate(formated)

     const realModel = aiModel.split(":::")[1]
@@ -3203,6 +3380,15 @@ async function requestWebLLM(arg:RequestDataArgumentExtended):Promise<requestDat
     const temperature = arg.temperature
     const realModel = aiModel.split(":::")[1]
     const prompt = applyChatTemplate(formated)
+
+    if(arg.previewBody){
+        return {
+            type: 'success',
+            result: JSON.stringify({
+                error: "Preview body is not supported for WebLLM"
+            })
+        }
+    }
     const v = await runTransformers(prompt, realModel, {
         temperature: temperature,
         max_new_tokens: maxTokens,
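One caveat: the captured headers can carry live credentials (the Cohere Authorization header and the signed AWS headers above), so anything that displays previewBody should redact them first. An illustrative pass, with the key patterns chosen as assumptions:

function redactPreview(previewBody: string): string {
    const parsed = JSON.parse(previewBody)
    if (parsed.headers) {
        for (const key of Object.keys(parsed.headers)) {
            // Redact anything that looks like an auth credential.
            if (/authorization|api-key|token/i.test(key)) {
                parsed.headers[key] = '<redacted>'
            }
        }
    }
    return JSON.stringify(parsed, null, 2)
}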