Refactor inlay image parsing and add image embedding functionality

Author: kwaroran
Date:   2024-03-18 00:05:03 +09:00
parent 1954a79bd0
commit 6e5465356a
4 changed files with 48 additions and 46 deletions

View File

@@ -154,8 +154,6 @@ async function parseAdditionalAssets(data:string, char:simpleCharacterArgument|c
     }
 
 async function parseInlayImages(data:string){
-    const db = get(DataBase)
-    if(db.inlayImage){
     const inlayMatch = data.match(/{{inlay::(.+?)}}/g)
     if(inlayMatch){
         for(const inlay of inlayMatch){
@@ -166,7 +164,6 @@ async function parseInlayImages(data:string){
             }
         }
     }
-    }
     return data
 }
 
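
Net effect: parseInlayImages no longer consults db.inlayImage and always scans for {{inlay::...}} markers. A minimal sketch of the function's shape after this change; resolveInlayAsset is a hypothetical stand-in for the loop body the diff elides:

// resolveInlayAsset is a hypothetical stand-in for the elided loop body;
// the real code resolves the stored inlay asset by its id.
declare function resolveInlayAsset(id: string): Promise<string>

async function parseInlayImages(data: string): Promise<string> {
    const inlayMatch = data.match(/{{inlay::(.+?)}}/g)
    if (inlayMatch) {
        for (const inlay of inlayMatch) {
            const id = inlay.substring(9, inlay.length - 2) // strip "{{inlay::" and "}}"
            data = data.replace(inlay, await resolveInlayAsset(id))
        }
    }
    return data
}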

View File

@@ -25,6 +25,7 @@ import { sendPeerChar } from "../sync/multiuser";
 import { runInlayScreen } from "./inlayScreen";
 import { runCharacterJS } from "../plugins/embedscript";
 import { addRerolls } from "./prereroll";
+import { runImageEmbedding } from "./transformers";
 
 export interface OpenAIChat{
     role: 'system'|'user'|'assistant'|'function'
@@ -562,7 +563,6 @@ export async function sendChat(chatProcessIndex = -1,arg:{chatAdditonalTokens?:n
         msg.chatId = v4()
     }
     let inlays:string[] = []
-    if(db.inlayImage){
     if(msg.role === 'char'){
         formatedChat = formatedChat.replace(/{{inlay::(.+?)}}/g, '')
     }
@@ -574,7 +574,6 @@ export async function sendChat(chatProcessIndex = -1,arg:{chatAdditonalTokens?:n
             }
         }
     }
-    }
 
     let multimodal:MultiModal[] = []
     if(inlays.length > 0){
@@ -590,6 +589,10 @@ export async function sendChat(chatProcessIndex = -1,arg:{chatAdditonalTokens?:n
                     height: inlayData.height
                 })
            }
+            else{
+                const captionResult = await runImageEmbedding(inlayData.data)
+                formatedChat += `[${captionResult[0].generated_text}]`
+            }
         }
         formatedChat = formatedChat.replace(inlay, '')
     }
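
The new else branch is the substantive addition: when the inlay image cannot be attached as multimodal data, it is captioned locally and the caption text is spliced into the prompt in brackets, so text-only models still get a description of the image. A hedged sketch of that fallback in isolation; field names of inlayData beyond those visible in the diff (data, width, height) are assumptions:

// Sketch of the caption fallback for text-only models.
import { runImageEmbedding } from './transformers'

type InlayData = { data: string, width: number, height: number } // assumed shape

async function describeForTextOnlyModel(chat: string, inlayData: InlayData): Promise<string> {
    const captionResult = await runImageEmbedding(inlayData.data)
    // A bracketed caption stands in for the image in the prompt text
    return chat + `[${captionResult[0].generated_text}]`
}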

View File

@@ -172,7 +172,6 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
         case 'mistral-large-latest':
         case 'reverse_proxy':{
             let formatedChat:OpenAIChatExtra[] = []
-            if(db.inlayImage){
             for(let i=0;i<formated.length;i++){
                 const m = formated[i]
                 if(m.multimodals && m.multimodals.length > 0 && m.role === 'user'){
@@ -198,10 +197,6 @@ export async function requestChatDataMain(arg:requestDataArgument, model:'model'
                     formatedChat.push(m)
                 }
             }
-            }
-            else{
-                formatedChat = formated
-            }
 
             let oobaSystemPrompts:string[] = []
             for(let i=0;i<formatedChat.length;i++){
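
With the db.inlayImage gate and the else shortcut removed, every message now passes through the multimodal reformatting loop. For context, OpenAI-compatible multimodal messages replace the plain string content with an array of typed parts; a sketch of what this loop presumably builds for a user message with attached images (the shape follows the public OpenAI chat API, and whether OpenAIChatExtra matches it exactly is an assumption):

// Assumed OpenAI-style content parts for a multimodal user message.
type ContentPart =
    | { type: 'text', text: string }
    | { type: 'image_url', image_url: { url: string } }

function toMultimodalContent(text: string, imageUrls: string[]): ContentPart[] {
    return [
        { type: 'text', text },
        ...imageUrls.map((url): ContentPart => ({ type: 'image_url', image_url: { url } })),
    ]
}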

View File

@@ -1,4 +1,4 @@
-import {env, AutoTokenizer, pipeline, type SummarizationOutput, type TextGenerationConfig, type TextGenerationOutput, FeatureExtractionPipeline, TextToAudioPipeline } from '@xenova/transformers';
+import {env, AutoTokenizer, pipeline, type SummarizationOutput, type TextGenerationConfig, type TextGenerationOutput, FeatureExtractionPipeline, TextToAudioPipeline, type ImageToTextOutput } from '@xenova/transformers';
 import { unzip } from 'fflate';
 import { globalFetch, loadAsset, saveAsset } from 'src/ts/storage/globalApi';
 import { selectSingleFile } from 'src/ts/util';
@@ -93,6 +93,13 @@ export const runEmbedding = async (text: string):Promise<Float32Array> => {
     return (result?.data as Float32Array) ?? null;
 }
 
+export const runImageEmbedding = async (dataurl:string) => {
+    await initTransformers()
+    const captioner = await pipeline('image-to-text', 'Xenova/vit-gpt2-image-captioning');
+    const output = await captioner(dataurl)
+    return output as ImageToTextOutput
+}
+
 let synthesizer:TextToAudioPipeline = null
 let lastSynth:string = null
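
A usage sketch for the new helper; the import path is assumed from the "./transformers" import added above, and the data URL is a placeholder. Note that unlike the synthesizer pipeline cached just below, the captioner is constructed on every call; the downloaded model weights are cached by @xenova/transformers, but callers still pay pipeline construction each time, so memoizing the captioner could be a follow-up optimization.

// Usage sketch for runImageEmbedding.
import { runImageEmbedding } from 'src/ts/process/transformers' // path assumed

const dataurl = 'data:image/png;base64,...' // any image source the pipeline accepts
const output = await runImageEmbedding(dataurl)
console.log(output[0].generated_text) // e.g. "a cat sitting on top of a couch"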