Merge branch 'kwaroran:main' into main

HyperBlaze authored on 2024-12-26 23:53:59 -08:00, committed by GitHub
36 changed files with 837252 additions and 1545 deletions

View File

@@ -22,7 +22,6 @@ import { getInlayAsset, supportsInlayImage } from "./files/inlays";
 import { getGenerationModelString } from "./models/modelString";
 import { connectionOpen, peerRevertChat, peerSafeCheck, peerSync } from "../sync/multiuser";
 import { runInlayScreen } from "./inlayScreen";
-import { runCharacterJS } from "../plugins/embedscript";
 import { addRerolls } from "./prereroll";
 import { runImageEmbedding } from "./transformers";
 import { hanuraiMemory } from "./memory/hanuraiMemory";
@@ -30,7 +29,6 @@ import { hypaMemoryV2 } from "./memory/hypav2";
 import { runLuaEditTrigger } from "./lua";
 import { parseChatML } from "../parser.svelte";
 import { getModelInfo, LLMFlags } from "../model/modellist";
-import { pluginV2 } from "../plugins/plugins";
 
 export interface OpenAIChat{
     role: 'system'|'user'|'assistant'|'function'
@@ -755,7 +753,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{
         }
     }
     let thoughts:string[] = []
-    formatedChat = formatedChat.replace(/<Thoughts>(.+?)<\/Thoughts>/gm, (match, p1) => {
+    formatedChat = formatedChat.replace(/<Thoughts>(.+)<\/Thoughts>/gms, (match, p1) => {
        thoughts.push(p1)
        return ''
    })
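Note on the regex change above: without the `s` (dotall) flag, `.` never matches a newline, so a `<Thoughts>` block spanning multiple lines was left in the chat text instead of being extracted. A minimal sketch of the behavioral difference (the sample string is invented for illustration):

```typescript
// With /gms, `.` matches newlines, so multi-line thought blocks are captured
// and stripped; the old /gm pattern leaves this input untouched.
// Caveat: the greedy (.+) spans from the first <Thoughts> to the last
// </Thoughts> if several blocks appear in one string.
const sample = "Hello<Thoughts>line one\nline two</Thoughts>World"
const thoughts: string[] = []

const stripped = sample.replace(/<Thoughts>(.+)<\/Thoughts>/gms, (_match, p1) => {
    thoughts.push(p1)
    return ''
})

console.log(stripped)  // "HelloWorld"
console.log(thoughts)  // ["line one\nline two"]
console.log(sample.replace(/<Thoughts>(.+?)<\/Thoughts>/gm, ''))  // unchanged: no match across lines
```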
@@ -1116,12 +1114,6 @@ export async function sendChat(chatProcessIndex = -1,arg:{
         })
     }
-    formated = await runCharacterJS({
-        code: null,
-        mode: 'modifyRequestChat',
-        data: formated
-    })
     formated = await runLuaEditTrigger(currentChar, 'editRequest', formated)
 
     //token rechecking

View File

@@ -279,10 +279,6 @@ export function getModules(){
     if (currentChat){
         ids = ids.concat(currentChat.modules ?? [])
     }
-    if(db.moduleIntergration){
-        const intList = db.moduleIntergration.split(',').map((s) => s.trim())
-        ids = ids.concat(intList)
-    }
     const idsJoined = ids.join('-')
     if(lastModules === idsJoined){
         return lastModuleData
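For context, `getModules` memoizes on the joined id list, so the removed `moduleIntergration` branch simply stops feeding extra ids into that cache key. A simplified sketch of the caching pattern (function and loader names are invented):

```typescript
// Cache keyed on the flattened id list: the module lookup only reruns when
// the set of enabled module ids changes between calls.
let lastModules = ''
let lastModuleData: object[] = []

function getModulesMemoized(ids: string[], load: (ids: string[]) => object[]): object[] {
    const idsJoined = ids.join('-')
    if (lastModules === idsJoined) {
        return lastModuleData // same module set as last call: reuse
    }
    lastModules = idsJoined
    lastModuleData = load(ids)
    return lastModuleData
}
```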

View File

@@ -259,6 +259,7 @@ export interface OpenAIChatExtra {
     removable?:boolean
     attr?:string[]
     multimodals?:MultiModal[]
+    thoughts?:string[]
 }
 
 function reformater(formated:OpenAIChat[],modelInfo:LLMModel){
@@ -451,6 +452,7 @@ async function requestOpenAI(arg:RequestDataArgumentExtended):Promise<requestDat
         delete formatedChat[i].removable
         delete formatedChat[i].attr
         delete formatedChat[i].multimodals
+        delete formatedChat[i].thoughts
     }
     if(aiModel === 'reverse_proxy' && db.reverseProxyOobaMode && formatedChat[i].role === 'system'){
         const cont = formatedChat[i].content
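The new `thoughts` field follows the same pattern as the other bookkeeping fields on `OpenAIChatExtra`: it exists for internal use and is deleted before the request is serialized, so only the standard message keys reach an OpenAI-compatible endpoint. A sketch (types abridged from the interface above):

```typescript
// Internal fields (memo, removable, attr, multimodals, thoughts) are stripped
// so the serialized message carries only API-visible keys.
interface OpenAIChatExtra {
    role: 'system' | 'user' | 'assistant' | 'function'
    content: string
    memo?: string
    removable?: boolean
    attr?: string[]
    thoughts?: string[]
}

function toApiMessage(m: OpenAIChatExtra): { role: string; content: string } {
    const { role, content } = m
    return { role, content }
}
```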
@@ -1390,43 +1392,69 @@ async function requestOoba(arg:RequestDataArgumentExtended):Promise<requestDataR
 }
 
 async function requestPlugin(arg:RequestDataArgumentExtended):Promise<requestDataResponse> {
-    const formated = arg.formated
     const db = getDatabase()
-    const maxTokens = arg.maxTokens
-    const bias = arg.biasString
-    const v2Function = pluginV2.providers.get(db.currentPluginProvider)
-    const d = v2Function ? (await v2Function(applyParameters({
-        prompt_chat: formated,
-        mode: arg.mode,
-        bias: []
-    }, [
-        'frequency_penalty','min_p','presence_penalty','repetition_penalty','top_k','top_p','temperature'
-    ], {}, arg.mode) as any)) : await pluginProcess({
-        bias: bias,
-        prompt_chat: formated,
-        temperature: (db.temperature / 100),
-        max_tokens: maxTokens,
-        presence_penalty: (db.PresensePenalty / 100),
-        frequency_penalty: (db.frequencyPenalty / 100)
-    })
-    if(!d){
-        return {
-            type: 'fail',
-            result: (language.errors.unknownModel)
-        }
-    }
-    else if(!d.success){
-        return {
-            type: 'fail',
-            result: d.content
-        }
-    }
-    else{
-        return {
-            type: 'success',
-            result: d.content
-        }
-    }
+    try {
+        const formated = arg.formated
+        const maxTokens = arg.maxTokens
+        const bias = arg.biasString
+        const v2Function = pluginV2.providers.get(db.currentPluginProvider)
+        const d = v2Function ? (await v2Function(applyParameters({
+            prompt_chat: formated,
+            mode: arg.mode,
+            bias: [],
+            max_tokens: maxTokens,
+        }, [
+            'frequency_penalty','min_p','presence_penalty','repetition_penalty','top_k','top_p','temperature'
+        ], {}, arg.mode) as any)) : await pluginProcess({
+            bias: bias,
+            prompt_chat: formated,
+            temperature: (db.temperature / 100),
+            max_tokens: maxTokens,
+            presence_penalty: (db.PresensePenalty / 100),
+            frequency_penalty: (db.frequencyPenalty / 100)
+        })
+        if(!d){
+            return {
+                type: 'fail',
+                result: (language.errors.unknownModel)
+            }
+        }
+        else if(!d.success){
+            return {
+                type: 'fail',
+                result: d.content instanceof ReadableStream ? await (new Response(d.content)).text() : d.content
+            }
+        }
+        else if(d.content instanceof ReadableStream){
+            let fullText = ''
+            const piper = new TransformStream<string, StreamResponseChunk>( {
+                transform(chunk, control) {
+                    fullText += chunk
+                    control.enqueue({
+                        "0": fullText
+                    })
+                }
+            })
+            return {
+                type: 'streaming',
+                result: d.content.pipeThrough(piper)
+            }
+        }
+        else{
+            return {
+                type: 'success',
+                result: d.content
+            }
+        }
+    } catch (error) {
+        console.error(error)
+        return {
+            type: 'fail',
+            result: `Plugin Error from ${db.currentPluginProvider}: ` + JSON.stringify(error)
+        }
+    }
 }
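Two things are worth noting in this rewrite: provider errors are now caught and surfaced as a labeled failure instead of escaping the function, and a streaming `ReadableStream<string>` of text deltas is adapted to the cumulative chunks the caller expects. A standalone sketch of the stream adapter (`StreamResponseChunk` is assumed here to be a string-indexed map of response slots):

```typescript
type StreamResponseChunk = { [index: string]: string }

// Plugin providers emit text deltas; the chat UI consumes the full text so
// far on every chunk, so each delta is appended and the running total is
// re-emitted under slot "0" (a single, non-batched response).
function accumulateStream(source: ReadableStream<string>): ReadableStream<StreamResponseChunk> {
    let fullText = ''
    const piper = new TransformStream<string, StreamResponseChunk>({
        transform(chunk, control) {
            fullText += chunk
            control.enqueue({ "0": fullText })
        }
    })
    return source.pipeThrough(piper)
}
```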

View File

@@ -6,7 +6,6 @@ import { alertError, alertNormal } from "../alert";
 import { language } from "src/lang";
 import { selectSingleFile } from "../util";
 import { assetRegex, type CbsConditions, risuChatParser as risuChatParserOrg, type simpleCharacterArgument } from "../parser.svelte";
-import { runCharacterJS } from "../plugins/embedscript";
 import { getModuleAssets, getModuleRegexScripts } from "./modules";
 import { HypaProcesser } from "./memory/hypamemory";
 import { runLuaEditTrigger } from "./lua";
@@ -98,17 +97,12 @@ export function resetScriptCache(){
 export async function processScriptFull(char:character|groupChat|simpleCharacterArgument, data:string, mode:ScriptMode, chatID = -1, cbsConditions:CbsConditions = {}){
     let db = getDatabase()
     const originalData = data
-    const cached = getScriptCache((db.globalscript ?? []).concat(char.customscript), originalData, mode)
+    const cached = getScriptCache((db.presetRegex ?? []).concat(char.customscript), originalData, mode)
     if(cached){
         return {data: cached, emoChanged: false}
     }
     let emoChanged = false
-    const scripts = (db.globalscript ?? []).concat(char.customscript).concat(getModuleRegexScripts())
-    data = await runCharacterJS({
-        code: char.virtualscript ?? null,
-        mode,
-        data,
-    })
+    const scripts = (db.presetRegex ?? []).concat(char.customscript).concat(getModuleRegexScripts())
     data = await runLuaEditTrigger(char, mode, data)
     if(pluginV2[mode].size > 0){
         for(const plugin of pluginV2[mode]){
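For reference, `getScriptCache` short-circuits the whole regex pipeline when the same script list and input were seen before; the rename from `db.globalscript` to `db.presetRegex` changes where the key comes from, not the mechanism. A simplified sketch of such a cache (signatures and key scheme are invented, not the repo's actual implementation):

```typescript
// Key = script list + mode + input text; a hit skips regex processing entirely.
type RegexScript = { in: string; out: string; type?: string }

const scriptCache = new Map<string, string>()

function getScriptCacheSketch(scripts: RegexScript[], data: string, mode: string): string | null {
    return scriptCache.get(JSON.stringify(scripts) + '\u0000' + mode + '\u0000' + data) ?? null
}

function setScriptCacheSketch(scripts: RegexScript[], data: string, mode: string, result: string) {
    scriptCache.set(JSON.stringify(scripts) + '\u0000' + mode + '\u0000' + data, result)
}
```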

View File

@@ -2,7 +2,7 @@ import { get } from "svelte/store"
import { getDatabase, type character } from "../storage/database.svelte"
import { requestChatData } from "./request"
import { alertError } from "../alert"
-import { globalFetch, readImage } from "../globalApi.svelte"
+import { fetchNative, globalFetch, readImage } from "../globalApi.svelte"
import { CharEmotion } from "../stores.svelte"
import type { OpenAIChat } from "./index.svelte"
import { processZip } from "./processzip"
@@ -415,12 +415,14 @@ export async function generateAIImage(genPrompt:string, currentChar:character, n
     }
 
-    if(db.sdProvider === 'comfy'){
+    if(db.sdProvider === 'comfy' || db.sdProvider === 'comfyui'){
+        const legacy = db.sdProvider === 'comfy' // Legacy Comfy mode
         const {workflow, posNodeID, posInputName, negNodeID, negInputName} = db.comfyConfig
         const baseUrl = new URL(db.comfyUiUrl)
 
         const createUrl = (pathname: string, params: Record<string, string> = {}) => {
-            const url = new URL(pathname, baseUrl)
+            const url = db.comfyUiUrl.endsWith('/api') ? new URL(`${db.comfyUiUrl}${pathname}`) : new URL(pathname, baseUrl)
             url.search = new URLSearchParams(params).toString()
             return url.toString()
         }
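The `endsWith('/api')` branch matters because the WHATWG URL constructor drops the base URL's path when given an absolute pathname, which would strip an `/api` prefix from a ComfyUI instance mounted under one. A quick sketch (the URLs are examples):

```typescript
// new URL(pathname, base) resolves an absolute pathname against the origin,
// discarding the base URL's own path:
const baseUrl = new URL('http://localhost:8188/api')
console.log(new URL('/prompt', baseUrl).toString())
// -> "http://localhost:8188/prompt"   (the /api prefix is lost)

// String concatenation keeps the prefix, which is what the new branch does
// when the configured URL ends with '/api':
console.log(new URL('http://localhost:8188/api' + '/prompt').toString())
// -> "http://localhost:8188/api/prompt"
```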
@@ -437,8 +439,31 @@ export async function generateAIImage(genPrompt:string, currentChar:character, n
         try {
             const prompt = JSON.parse(workflow)
-            prompt[posNodeID].inputs[posInputName] = genPrompt
-            prompt[negNodeID].inputs[negInputName] = neg
+            if(legacy){
+                prompt[posNodeID].inputs[posInputName] = genPrompt
+                prompt[negNodeID].inputs[negInputName] = neg
+            }
+            else{
+                //search all nodes for the prompt and negative prompt
+                const keys = Object.keys(prompt)
+                for(let i = 0; i < keys.length; i++){
+                    const node = prompt[keys[i]]
+                    const inputKeys = Object.keys(node.inputs)
+                    for(let j = 0; j < inputKeys.length; j++){
+                        let input = node.inputs[inputKeys[j]]
+                        if(typeof input === 'string'){
+                            input = input.replaceAll('{{risu_prompt}}', genPrompt)
+                            input = input.replaceAll('{{risu_neg}}', neg)
+                        }
+                        if(inputKeys[j] === 'seed' && typeof input === 'number'){
+                            input = Math.floor(Math.random() * 1000000000)
+                        }
+                        node.inputs[inputKeys[j]] = input
+                    }
+                }
+            }
 
             const { prompt_id: id } = await fetchWrapper(createUrl('/prompt'), {
                 method: 'POST',
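In the non-legacy path the workflow JSON is user-supplied, so instead of writing to fixed node ids the code scans every node input for the `{{risu_prompt}}` / `{{risu_neg}}` placeholders and re-rolls numeric `seed` inputs so repeated generations differ. A sketch of a workflow fragment it would rewrite (node ids and input names are invented):

```typescript
const genPrompt = '1girl, best quality'
const neg = 'lowres, watermark'

// Minimal stand-in for a parsed ComfyUI workflow: nodes "6" and "7" carry the
// placeholders, node "3" carries a seed that gets randomized per generation.
const prompt: Record<string, { inputs: Record<string, string | number> }> = {
    "6": { inputs: { text: "{{risu_prompt}}" } },
    "7": { inputs: { text: "{{risu_neg}}" } },
    "3": { inputs: { seed: 0, steps: 20 } },
}

for (const node of Object.values(prompt)) {
    for (const key of Object.keys(node.inputs)) {
        let input = node.inputs[key]
        if (typeof input === 'string') {
            input = input.replaceAll('{{risu_prompt}}', genPrompt)
            input = input.replaceAll('{{risu_neg}}', neg)
        }
        if (key === 'seed' && typeof input === 'number') {
            input = Math.floor(Math.random() * 1000000000)
        }
        node.inputs[key] = input
    }
}
```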
@@ -451,9 +476,10 @@ export async function generateAIImage(genPrompt:string, currentChar:character, n
             const startTime = Date.now()
             const timeout = db.comfyConfig.timeout * 1000
-            while (!(item = (await (await fetch(createUrl('/history'), {
+            while (!(item = (await (await fetchNative(createUrl('/history'), {
                 headers: { 'Content-Type': 'application/json' },
-                method: 'GET'})).json())[id])) {
+                method: 'GET'
+            })).json())[id])) {
                 console.log("Checking /history...")
                 if (Date.now() - startTime >= timeout) {
                     alertError("Error: Image generation took longer than expected.");
@@ -463,13 +489,14 @@ export async function generateAIImage(genPrompt:string, currentChar:character, n
             } // Check history until the generation is complete.
 
             const genImgInfo = Object.values(item.outputs).flatMap((output: any) => output.images)[0];
-            const imgResponse = await fetch(createUrl('/view', {
+            const imgResponse = await fetchNative(createUrl('/view', {
                 filename: genImgInfo.filename,
                 subfolder: genImgInfo.subfolder,
                 type: genImgInfo.type
             }), {
                 headers: { 'Content-Type': 'application/json' },
-                method: 'GET'})
+                method: 'GET'
+            })
 
             const img64 = Buffer.from(await imgResponse.arrayBuffer()).toString('base64')
             if(returnSdData === 'inlay'){
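`fetchNative` replaces the browser `fetch` here so the `/history` poll and the `/view` download go through the app's own HTTP layer, presumably to avoid CORS restrictions when talking to a local ComfyUI server. The poll itself is a deadline loop; a condensed sketch (the sleep interval is an assumption, since the loop body is truncated in the diff):

```typescript
// Poll /history until the queued prompt id shows up in the response, failing
// once the configured timeout elapses.
async function waitForComfyResult(historyUrl: string, id: string, timeoutMs: number): Promise<any> {
    const startTime = Date.now()
    while (true) {
        const history = await (await fetch(historyUrl, {
            headers: { 'Content-Type': 'application/json' },
            method: 'GET'
        })).json()
        if (history[id]) return history[id] // generation complete
        if (Date.now() - startTime >= timeoutMs) {
            throw new Error('Image generation took longer than expected.')
        }
        await new Promise((resolve) => setTimeout(resolve, 1000)) // don't hammer the server
    }
}
```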
@@ -552,7 +579,6 @@ export async function generateAIImage(genPrompt:string, currentChar:character, n
         if(db.falModel === 'fal-ai/flux-pro'){
             delete body.enable_safety_checker
         }
-        console.log(body)
 
         const res = await globalFetch('https://fal.run/' + model, {
             headers: {
@@ -563,8 +589,6 @@ export async function generateAIImage(genPrompt:string, currentChar:character, n
             body: body
         })
-        console.log(res)
 
         if(!res.ok){
             alertError(JSON.stringify(res.data))
             return false