Add generation info viewer

kwaroran
2024-03-19 14:50:48 +09:00
parent eddd7afa3c
commit 31856a9c48
8 changed files with 185 additions and 64 deletions
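
Note: the diff below imports a new MessageGenerationInfo type from ../storage/database. That file is among the six not shown in this excerpt; judging only from the object literal built in sendChat, the shape is roughly the following sketch (field meanings inferred, not authoritative):

// Sketch only: the real definition lives in ../storage/database, which is
// not part of this excerpt. Fields are inferred from the object literal
// constructed in sendChat below.
export interface MessageGenerationInfo {
    model: string          // result of getGenerationModelString()
    generationId: string   // UUID v4, also sent along with the request as chatId
    inputTokens: number    // prompt size measured by the token-rechecking pass
    outputTokens: number   // response budget: db.maxResponse, clamped to fit the context
    maxContext: number     // context window the clamp was computed against
}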

View File

@@ -1,5 +1,5 @@
import { get, writable } from "svelte/store";
-import { DataBase, setDatabase, type character } from "../storage/database";
+import { DataBase, setDatabase, type character, type MessageGenerationInfo } from "../storage/database";
import { CharEmotion, selectedCharID } from "../stores";
import { ChatTokenizer, tokenize, tokenizeNum } from "../tokenizer";
import { language } from "../../lang";
@@ -907,32 +907,46 @@ export async function sendChat(chatProcessIndex = -1,arg:{chatAdditonalTokens?:n
            data: formated
        })

-    {
-        //token rechecking
-        let tokens = 0
-        for(const chat of formated){
-            tokens += await tokenizer.tokenizeChat(chat)
-        }
-        if(tokens > maxContextTokens){
-            let pointer = 0
-            while(tokens > maxContextTokens){
-                if(pointer >= formated.length){
-                    alertError(language.errors.toomuchtoken + "\n\nAt token rechecking. Required Tokens: " + tokens)
-                    return false
-                }
-                if(formated[pointer].removable){
-                    tokens -= await tokenizer.tokenizeChat(formated[pointer])
-                    formated[pointer].content = ''
-                }
-                pointer++
-            }
-            formated = formated.filter((v) => {
-                return v.content !== ''
-            })
-        }
-    }
+    //token rechecking
+    let inputTokens = 0
+    for(const chat of formated){
+        inputTokens += await tokenizer.tokenizeChat(chat)
+    }
+    if(inputTokens > maxContextTokens){
+        let pointer = 0
+        while(inputTokens > maxContextTokens){
+            if(pointer >= formated.length){
+                alertError(language.errors.toomuchtoken + "\n\nAt token rechecking. Required Tokens: " + inputTokens)
+                return false
+            }
+            if(formated[pointer].removable){
+                inputTokens -= await tokenizer.tokenizeChat(formated[pointer])
+                formated[pointer].content = ''
+            }
+            pointer++
+        }
+        formated = formated.filter((v) => {
+            return v.content !== ''
+        })
+    }
+
+    //estimate tokens
+    let outputTokens = db.maxResponse
+    if(inputTokens + outputTokens > maxContextTokens){
+        outputTokens = maxContextTokens - inputTokens
+    }
+
+    const generationId = v4()
+    const generationModel = getGenerationModelString()
+
+    const generationInfo:MessageGenerationInfo = {
+        model: generationModel,
+        generationId: generationId,
+        inputTokens: inputTokens,
+        outputTokens: outputTokens,
+        maxContext: maxContextTokens,
+    }
const req = await requestChatData({
formated: formated,
biasString: biases,
@@ -941,6 +955,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{chatAdditonalTokens?:n
isGroupChat: nowChatroom.type === 'group',
bias: {},
continue: arg.continue,
+        chatId: generationId
}, 'model', abortSignal)
let result = ''
@@ -949,8 +964,6 @@ export async function sendChat(chatProcessIndex = -1,arg:{chatAdditonalTokens?:n
if(abortSignal.aborted === true){
return false
}
-    const generationId = v4()
-    const generationModel = getGenerationModelString()
if(req.type === 'fail'){
alertError(req.result)
return false
@@ -969,10 +982,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{chatAdditonalTokens?:n
data: "",
saying: currentChar.chaId,
time: Date.now(),
-            generationInfo: {
-                model: generationModel,
-                generationId: generationId,
-            }
+            generationInfo,
})
}
db.characters[selectedChar].chats[selectedChat].isStreaming = true
@@ -1051,10 +1061,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{chatAdditonalTokens?:n
data: result,
saying: currentChar.chaId,
time: Date.now(),
-            generationInfo: {
-                model: generationModel,
-                generationId: generationId,
-            }
+            generationInfo
}
if(inlayResult.promise){
const p = await inlayResult.promise
@@ -1067,10 +1074,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{chatAdditonalTokens?:n
data: result,
saying: currentChar.chaId,
time: Date.now(),
-            generationInfo: {
-                model: generationModel,
-                generationId: generationId,
-            }
+            generationInfo
})
const ind = db.characters[selectedChar].chats[selectedChat].message.length - 1
if(inlayResult.promise){
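
The same identifier does double duty: the generationId stored on the message is also passed to requestChatData as chatId (see the hunk at line 955 above), so a network request can later be tied back to the message it produced. A hypothetical lookup, assuming the message shape used in this file:

// Hypothetical helper, not part of this commit: match a request's chatId
// back to the message whose generation produced it.
function findMessageByGenerationId(
    messages: { generationInfo?: MessageGenerationInfo }[],
    chatId: string
) {
    return messages.find(m => m.generationInfo?.generationId === chatId)
}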

View File

@@ -39,6 +39,7 @@ interface requestDataArgument{
isGroupChat?:boolean
useEmotion?:boolean
continue?:boolean
+    chatId?:string
}
type requestDataResponse = {
@@ -360,6 +361,7 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
"Authorization": "Bearer " + db.mistralKey,
},
abortSignal,
+        chatId: arg.chatId
})
const dat = res.data as any
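
Every provider branch below makes the same change: the options object handed to globalFetch (or fetchNative) gains a chatId: arg.chatId entry. The fetch wrappers themselves are extended in a file outside this excerpt; the following is an assumed sketch of the options type, built only from the option names visible in this diff:

// Assumed shape, for illustration: the real globalFetch options live in
// another file changed by this commit. chatId tags the outgoing request so
// the generation info viewer can associate it with a message.
interface GlobalFetchArgs {
    body?: any
    headers?: { [key: string]: string }
    method?: "POST" | "GET"
    abortSignal?: AbortSignal
    useRisuToken?: boolean
    plainFetchForce?: boolean
    chatId?: string   // new in this commit
}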
@@ -526,7 +528,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
body: JSON.stringify(body),
method: "POST",
headers: headers,
-        signal: abortSignal
+        signal: abortSignal,
+        chatId: arg.chatId
})
if(da.status !== 200){
@@ -607,7 +610,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
body: body,
headers: headers,
abortSignal,
-        useRisuToken:throughProxi
+        useRisuToken:throughProxi,
+        chatId: arg.chatId
})
const dat = res.data as any
@@ -727,7 +731,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
headers: {
"Authorization": "Bearer " + db.novelai.token
},
-        abortSignal
+        abortSignal,
+        chatId: arg.chatId
})
if((!da.ok )|| (!da.data.output)){
@@ -775,8 +780,9 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
},
headers: {
"Content-Type": "application/json",
"Authorization": "Bearer " + db.openAIKey
"Authorization": "Bearer " + db.openAIKey,
},
+        chatId: arg.chatId
});
if(!response.ok){
@@ -883,7 +889,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
const res = await globalFetch(blockingUrl, {
body: bodyTemplate,
headers: headers,
-        abortSignal
+        abortSignal,
+        chatId: arg.chatId
})
const dat = res.data as any
@@ -947,6 +954,7 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
const response = await globalFetch(urlStr, {
body: bodyTemplate,
+        chatId: arg.chatId
})
if(!response.ok){
@@ -1024,7 +1032,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
"Content-Type": "application/json",
"Authorization": "Bearer " + db.google.accessToken
},
-        abortSignal
+        abortSignal,
+        chatId: arg.chatId
})
if(res.ok){
console.log(res.data)
@@ -1215,6 +1224,7 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
const res = await globalFetch(url, {
headers: headers,
body: body,
+        chatId: arg.chatId
})
if(!res.ok){
@@ -1276,7 +1286,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
headers: {
"content-type": "application/json",
},
-        abortSignal
+        abortSignal,
+        chatId: arg.chatId
})
if(!da.ok){
@@ -1329,7 +1340,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
const response = await globalFetch(api_server_url + '/api', {
method: 'POST',
headers: headers,
-        body: send_body
+        body: send_body,
+        chatId: arg.chatId
});
if(!response.ok){
@@ -1645,7 +1657,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
method: "POST",
body: params,
headers: signed.headers,
-        plainFetchForce: true
+        plainFetchForce: true,
+        chatId: arg.chatId
})
if(!res.ok){
@@ -1677,7 +1690,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
"anthropic-version": "2023-06-01",
"accept": "application/json",
},
method: "POST"
method: "POST",
chatId: arg.chatId
})
if(res.status !== 200){
@@ -1744,7 +1758,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
"anthropic-version": "2023-06-01",
"accept": "application/json"
},
method: "POST"
method: "POST",
chatId: arg.chatId
})
if(!res.ok){
@@ -1906,7 +1921,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
method: "POST",
body: params,
headers: signed.headers,
-        plainFetchForce: true
+        plainFetchForce: true,
+        chatId: arg.chatId
})
@@ -1940,7 +1956,8 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
"anthropic-version": "2023-06-01",
"accept": "application/json"
},
-        useRisuToken: aiModel === 'reverse_proxy'
+        useRisuToken: aiModel === 'reverse_proxy',
+        chatId: arg.chatId
})
if((!da.ok) || (da.data.error)){
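
The viewer UI that reads this data back is in one of the files not shown here. Purely as an illustration of what it can display from a stored MessageGenerationInfo:

// Illustration only; the actual viewer added by this commit is a Svelte
// component outside this excerpt.
function describeGeneration(info: MessageGenerationInfo): string {
    return `${info.model}: ${info.inputTokens} in / ${info.outputTokens} out, max context ${info.maxContext}`
}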