Refactor HypaProcesser instantiation to remove the hardcoded model name, add new embedding models, and make related changes
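
HypaProcesser previously had to be constructed with an explicit model name, so nearly every call site pinned 'MiniLM'. The constructor now defaults to 'auto', which resolves the model from the database's hypaModel setting (falling back to 'MiniLM'), and the supported model list grows to include bge-small-en, bge-m3, their WebGPU variants, and the newer OpenAI embedding models. A minimal call-site sketch of the change (model names taken from the HypaModel union added below):

    // before this commit: the embedding model was hardcoded at every call site
    const oldProcesser = new HypaProcesser('MiniLM')

    // after: 'auto' (the default) picks up db.hypaModel, falling back to 'MiniLM'
    const processer = new HypaProcesser()

    // an explicit model can still be passed, e.g. a WebGPU variant
    const gpuProcesser = new HypaProcesser('bgem3GPU')
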
@@ -3,7 +3,7 @@ import { HypaProcesser } from '../memory/hypamemory'
 import { getUserName } from "src/ts/util";
 
 export async function additionalInformations(char: character,chats:Chat,){
-    const processer = new HypaProcesser('MiniLM')
+    const processer = new HypaProcesser()
     const db = getDatabase()
 
     const info = char.additionalText
@@ -124,7 +124,7 @@ async function sendPDFFile(arg:sendFileArg) {
         }
     }
     console.log(texts)
-    const hypa = new HypaProcesser('MiniLM')
+    const hypa = new HypaProcesser()
     hypa.addText(texts)
     const result = await hypa.similaritySearch(arg.query)
     let message = ''
@@ -142,7 +142,7 @@ async function sendTxtFile(arg:sendFileArg) {
     const lines = arg.file.split('\n').filter((a) => {
         return a !== ''
     })
-    const hypa = new HypaProcesser('MiniLM')
+    const hypa = new HypaProcesser()
     hypa.addText(lines)
     const result = await hypa.similaritySearch(arg.query)
     let message = ''
@@ -157,7 +157,7 @@ async function sendTxtFile(arg:sendFileArg) {
 }
 
 async function sendXMLFile(arg:sendFileArg) {
-    const hypa = new HypaProcesser('MiniLM')
+    const hypa = new HypaProcesser()
     let nodeTexts:string[] = []
     const parser = new DOMParser();
     const xmlDoc = parser.parseFromString(arg.file, "text/xml");
@@ -1394,7 +1394,7 @@ export async function sendChat(chatProcessIndex = -1,arg:{
         }
 
         if(DBState.db.emotionProcesser === 'embedding'){
-            const hypaProcesser = new HypaProcesser('MiniLM')
+            const hypaProcesser = new HypaProcesser()
             await hypaProcesser.addText(emotionList.map((v) => 'emotion:' + v))
             let searched = (await hypaProcesser.similaritySearchScored(result)).map((v) => {
                 v[0] = v[0].replace("emotion:",'')
@@ -213,7 +213,7 @@ export async function runLua(code:string, arg:{
         if(!LuaLowLevelIds.has(id)){
             return
         }
-        const processer = new HypaProcesser('MiniLM')
+        const processer = new HypaProcesser()
         await processer.addText(value)
         return await processer.similaritySearch(source)
     })
@@ -13,7 +13,7 @@ export async function hanuraiMemory(chats:OpenAIChat[],arg:{
 }){
     const db = getDatabase()
     const tokenizer = arg.tokenizer
-    const processer = new HypaProcesser('MiniLM')
+    const processer = new HypaProcesser()
     let addTexts:string[] = []
     const queryStartIndex=chats.length-maxRecentChatQuery
     console.log(chats.length,maxRecentChatQuery,queryStartIndex)
@@ -3,21 +3,47 @@ import { globalFetch } from "src/ts/globalApi.svelte";
 import { runEmbedding } from "../transformers";
 import { alertError } from "src/ts/alert";
 import { appendLastPath } from "src/ts/util";
+import { getDatabase } from "src/ts/storage/database.svelte";
 
+export type HypaModel = 'ada'|'MiniLM'|'nomic'|'custom'|'nomicGPU'|'bgeSmallEn'|'bgeSmallEnGPU'|'bgem3'|'bgem3GPU'|'openai3small'|'openai3large'
+
+const localModels = {
+    models: {
+        'MiniLM':'Xenova/all-MiniLM-L6-v2',
+        'nomic':'nomic-ai/nomic-embed-text-v1.5',
+        'nomicGPU':'nomic-ai/nomic-embed-text-v1.5',
+        'bgeSmallEn': 'BAAI/bge-small-en-v1.5',
+        'bgeSmallEnGPU': 'BAAI/bge-small-en-v1.5',
+        'bgem3': 'BAAI/bge-m3',
+        'bgem3GPU': 'BAAI/bge-m3',
+    },
+    gpuModels:[
+        'nomicGPU',
+        'bgeSmallEnGPU',
+        'bgem3GPU'
+    ]
+}
+
 export class HypaProcesser{
     oaikey:string
     vectors:memoryVector[]
     forage:LocalForage
-    model:'ada'|'MiniLM'|'nomic'|'custom'
+    model:HypaModel
     customEmbeddingUrl:string
 
-    constructor(model:'ada'|'MiniLM'|'nomic'|'custom',customEmbeddingUrl?:string){
+    constructor(model:HypaModel|'auto' = 'auto',customEmbeddingUrl?:string){
         this.forage = localforage.createInstance({
             name: "hypaVector"
         })
         this.vectors = []
-        this.model = model
+        if(model === 'auto'){
+            const db = getDatabase()
+            this.model = db.hypaModel || 'MiniLM'
+        }
+        else{
+            this.model = model
+        }
         this.customEmbeddingUrl = customEmbeddingUrl
     }
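
With the 'auto' default, model selection happens once inside the constructor instead of at every call site: the stored db.hypaModel wins, and 'MiniLM' is the fallback when nothing is configured. The same resolution logic as a standalone sketch (resolveHypaModel is a hypothetical helper for illustration, not part of this commit):

    // hypothetical helper mirroring the constructor's 'auto' branch above
    function resolveHypaModel(model: HypaModel | 'auto'): HypaModel {
        if (model === 'auto') {
            const db = getDatabase()
            return db.hypaModel || 'MiniLM' // unset setting falls back to MiniLM
        }
        return model
    }
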
@@ -39,9 +65,9 @@ export class HypaProcesser{
 
 
     async getEmbeds(input:string[]|string):Promise<VectorArray[]> {
-        if(this.model === 'MiniLM' || this.model === 'nomic'){
+        if(Object.keys(localModels.models).includes(this.model)){
             const inputs:string[] = Array.isArray(input) ? input : [input]
-            let results:Float32Array[] = await runEmbedding(inputs, this.model === 'nomic' ? 'nomic-ai/nomic-embed-text-v1.5' : 'Xenova/all-MiniLM-L6-v2')
+            let results:Float32Array[] = await runEmbedding(inputs, localModels.models[this.model], localModels.gpuModels.includes(this.model) ? 'webgpu' : 'wasm')
             return results
         }
         let gf = null;
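
getEmbeds now treats any key present in localModels.models as a locally runnable transformers.js model; the GPU-suffixed keys map to the same Hugging Face checkpoints but are listed in gpuModels, so they are routed to WebGPU instead of WASM. The lookup in isolation (values follow from the localModels table added above):

    // illustrative only: how a GPU-suffixed model key resolves
    const model = 'bgem3GPU'
    const isLocal = Object.keys(localModels.models).includes(model)            // true
    const checkpoint = localModels.models[model]                               // 'BAAI/bge-m3'
    const device = localModels.gpuModels.includes(model) ? 'webgpu' : 'wasm'   // 'webgpu'
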
@@ -58,14 +84,21 @@ export class HypaProcesser{
             },
         })
     }
-    if(this.model === 'ada'){
+    if(this.model === 'ada' || this.model === 'openai3small' || this.model === 'openai3large'){
+        const db = getDatabase()
+        const models = {
+            'ada':'text-embedding-ada-002',
+            'openai3small':'text-embedding-3-small',
+            'openai3large':'text-embedding-3-large'
+        }
+
         gf = await globalFetch("https://api.openai.com/v1/embeddings", {
             headers: {
-                "Authorization": "Bearer " + this.oaikey
+                "Authorization": "Bearer " + db.supaMemoryKey || this.oaikey
             },
             body: {
-                "input": input,
-                "model": "text-embedding-ada-002"
+                "input": input,
+                "model": models[this.model]
             }
         })
     }
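
The OpenAI branch is widened from ada-only to cover text-embedding-3-small and text-embedding-3-large, with the request's model string looked up from a small map, and the Authorization header now prefers the stored supaMemoryKey over the instance's oaikey. A rough sketch of how the request payload is assembled under the new branch (the input value is a placeholder):

    // illustrative request payload for the widened OpenAI branch
    const models = {
        'ada': 'text-embedding-ada-002',
        'openai3small': 'text-embedding-3-small',
        'openai3large': 'text-embedding-3-large',
    }
    const body = {
        input: ['text to embed'],          // placeholder input
        model: models['openai3large'],     // 'text-embedding-3-large'
    }

Note that string concatenation binds before ||, so the header expression groups as ("Bearer " + db.supaMemoryKey) || this.oaikey; if the intent is to fall back to oaikey when no supaMemoryKey is set, the grouping would need to be "Bearer " + (db.supaMemoryKey || this.oaikey).
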
@@ -321,7 +321,7 @@ export async function processScriptFull(char:character|groupChat|simpleCharacter
         }
     }
 
-    const processer = new HypaProcesser('MiniLM')
+    const processer = new HypaProcesser()
    await processer.addText(assetNames)
    const matches = data.matchAll(assetRegex)
 
@@ -1,4 +1,4 @@
-import {env, AutoTokenizer, pipeline, type SummarizationOutput, type TextGenerationConfig, type TextGenerationOutput, FeatureExtractionPipeline, TextToAudioPipeline, type ImageToTextOutput } from '@xenova/transformers';
+import {env, AutoTokenizer, pipeline, type SummarizationOutput, type TextGenerationConfig, type TextGenerationOutput, FeatureExtractionPipeline, TextToAudioPipeline, type ImageToTextOutput } from '@huggingface/transformers';
 import { unzip } from 'fflate';
 import { globalFetch, loadAsset, saveAsset } from 'src/ts/globalApi.svelte';
 import { selectSingleFile } from 'src/ts/util';
@@ -15,6 +15,7 @@ async function initTransformers(){
     env.useBrowserCache = false
     env.useFSCache = false
     env.useCustomCache = true
+    env.allowLocalModels = true
     env.customCache = {
         put: async (url:URL|string, response:Response) => {
             await tfCache.put(url, response)
@@ -33,10 +34,12 @@ async function initTransformers(){
     console.log('transformers loaded')
 }
 
-export const runTransformers = async (baseText:string, model:string,config:TextGenerationConfig = {}) => {
+export const runTransformers = async (baseText:string, model:string,config:TextGenerationConfig, device:'webgpu'|'wasm' = 'wasm') => {
     await initTransformers()
     let text = baseText
-    let generator = await pipeline('text-generation', model);
+    let generator = await pipeline('text-generation', model, {
+        device
+    });
     let output = await generator(text, config) as TextGenerationOutput
     const outputOne = output[0]
     return outputOne
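
runTransformers gains a device parameter defaulting to 'wasm', and the text-generation pipeline is created with that device. The config parameter also loses its previous "= {}" default in the new signature, so callers that omitted it would need to pass one explicitly. A hedged call-site sketch (the model id and prompt are illustrative):

    // illustrative call with the new signature: config is now required, device defaults to 'wasm'
    const out = await runTransformers('Once upon a time', 'Xenova/gpt2', {}, 'webgpu')
    console.log(out.generated_text)
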
@@ -50,16 +53,25 @@ export const runSummarizer = async (text: string) => {
 }
 
 let extractor:FeatureExtractionPipeline = null
+let lastEmbeddingModelQuery:string = ''
 type EmbeddingModel = 'Xenova/all-MiniLM-L6-v2'|'nomic-ai/nomic-embed-text-v1.5'
-export const runEmbedding = async (texts: string[], model:EmbeddingModel = 'Xenova/all-MiniLM-L6-v2'):Promise<Float32Array[]> => {
+export const runEmbedding = async (texts: string[], model:EmbeddingModel = 'Xenova/all-MiniLM-L6-v2', device:'webgpu'|'wasm'):Promise<Float32Array[]> => {
     await initTransformers()
-    if(!extractor){
-        extractor = await pipeline('feature-extraction', model);
+    console.log('running embedding')
+    let embeddingModelQuery = model + device
+    if(!extractor || embeddingModelQuery !== lastEmbeddingModelQuery){
+        extractor = await pipeline('feature-extraction', model, {
+            device: device,
+            progress_callback: (progress) => {
+                console.log(progress)
+            }
+        });
+        console.log('extractor loaded')
     }
     let result = await extractor(texts, { pooling: 'mean', normalize: true });
+    console.log(texts, result)
     const data = result.data as Float32Array
+
+    console.log(data)
     const lenPerText = data.length / texts.length
     let res:Float32Array[] = []
     for(let i = 0; i < texts.length; i++){
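
runEmbedding now caches the feature-extraction pipeline keyed on model + device, so switching model or switching between WASM and WebGPU rebuilds the extractor instead of silently reusing the previous one. A standalone sketch of that staleness check (the assignment recording the new key is an assumption here; the hunk above does not show where lastEmbeddingModelQuery gets updated):

    // illustrative version of the extractor-reuse check, with module-level state inlined
    let cachedExtractor: FeatureExtractionPipeline | null = null
    let lastEmbeddingModelQuery = ''

    async function getExtractor(model: string, device: 'webgpu' | 'wasm') {
        const embeddingModelQuery = model + device
        if (!cachedExtractor || embeddingModelQuery !== lastEmbeddingModelQuery) {
            cachedExtractor = await pipeline('feature-extraction', model, { device })
            lastEmbeddingModelQuery = embeddingModelQuery // assumed update, not visible in the hunk
        }
        return cachedExtractor
    }
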
@@ -459,7 +459,7 @@ export async function runTrigger(char:character,mode:triggerMode, arg:{
                 break
             }
 
-            const processer = new HypaProcesser('MiniLM')
+            const processer = new HypaProcesser()
             const effectValue = risuChatParser(effect.value,{chara:char})
             const source = risuChatParser(effect.source,{chara:char})
             await processer.addText(effectValue.split('§'))