Add nomic embedding

kwaroran
2024-04-23 20:41:08 +09:00
parent bb0ad4c57e
commit 376fa1641b
3 changed files with 8 additions and 8 deletions

File 1 of 3

@@ -8,9 +8,9 @@ export class HypaProcesser{
     oaikey:string
     vectors:memoryVector[]
     forage:LocalForage
-    model:'ada'|'MiniLM'
+    model:'ada'|'MiniLM'|'nomic'
-    constructor(model:'ada'|'MiniLM'){
+    constructor(model:'ada'|'MiniLM'|'nomic'){
         this.forage = localforage.createInstance({
             name: "hypaVector"
         })
@@ -36,11 +36,11 @@ export class HypaProcesser{
     async getEmbeds(input:string[]|string) {
-        if(this.model === 'MiniLM'){
+        if(this.model === 'MiniLM' || this.model === 'nomic'){
             const inputs:string[] = Array.isArray(input) ? input : [input]
             let results:Float32Array[] = []
             for(let i=0;i<inputs.length;i++){
-                const res = await runEmbedding(inputs[i])
+                const res = await runEmbedding(inputs[i], this.model === 'nomic' ? 'nomic-ai/nomic-embed-text-v1.5' : 'Xenova/all-MiniLM-L6-v2')
                 results.push(res)
             }
             //convert to number[][]
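
For context, a minimal usage sketch of the widened model union (hedged: only HypaProcesser, getEmbeds, and runEmbedding appear in this commit; everything else here is illustrative):

// Hypothetical sketch: 'nomic' takes the same local transformers.js path as
// 'MiniLM'; the only difference is the Hugging Face model id that getEmbeds
// hands to runEmbedding.
async function demo() {
    const processer = new HypaProcesser('nomic')
    // internally: runEmbedding(text, 'nomic-ai/nomic-embed-text-v1.5');
    // a 'MiniLM' instance would pass 'Xenova/all-MiniLM-L6-v2' instead
    const embeds = await processer.getEmbeds(['hello world'])
    return embeds
}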

File 2 of 3

@@ -2,7 +2,7 @@ import type { OpenAIChat } from "..";
 import { HypaProcesser } from "./hypamemory";
 export async function termMemory(chats:OpenAIChat[]){
-    const processer = new HypaProcesser('MiniLM')
+    const processer = new HypaProcesser('nomic')
     processer.addText(chats.map(chat=>chat.content))
     let scoredResults:{[key:string]:number}
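
In effect, termMemory now embeds every chat message with nomic instead of MiniLM. A hedged call sketch (OpenAIChat is reduced here to the one field this diff touches; the real type likely carries more):

// Sketch only: each chat.content is added to the vector store and embedded
// by nomic-ai/nomic-embed-text-v1.5 rather than Xenova/all-MiniLM-L6-v2.
const chats = [
    { role: 'user', content: 'What did we decide about the release?' },
    { role: 'assistant', content: 'We moved it to Friday.' },
] as OpenAIChat[]
await termMemory(chats)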

File 3 of 3

@@ -50,12 +50,12 @@ export const runSummarizer = async (text: string) => {
 }
 let extractor:FeatureExtractionPipeline = null
-export const runEmbedding = async (text: string):Promise<Float32Array> => {
+export const runEmbedding = async (text: string, model:'Xenova/all-MiniLM-L6-v2'|'nomic-ai/nomic-embed-text-v1.5' = 'Xenova/all-MiniLM-L6-v2'):Promise<Float32Array> => {
     await initTransformers()
     if(!extractor){
-        extractor = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
+        extractor = await pipeline('feature-extraction', model);
     }
-    const tokenizer = await AutoTokenizer.from_pretrained('Xenova/all-MiniLM-L6-v2');
+    const tokenizer = await AutoTokenizer.from_pretrained(model);
     const tokens = tokenizer.encode(text)
     if (tokens.length > 1024) {
         let chunks:string[] = []
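
Worth noting: extractor is cached at module scope, so the new model parameter only takes effect on the very first call; a later call with the other model id silently reuses the first pipeline. A per-model cache would avoid that (a sketch under that assumption, not part of this commit):

// Sketch (not in the commit): keep one pipeline per model id so alternating
// between the MiniLM and nomic models never reuses a stale extractor.
const extractors: Record<string, FeatureExtractionPipeline> = {}
const getExtractor = async (model: string): Promise<FeatureExtractionPipeline> => {
    if (!extractors[model]) {
        extractors[model] = await pipeline('feature-extraction', model)
    }
    return extractors[model]
}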