feat: add BGE-m3-ko embedding

This commit is contained in:
Bo26fhmC5M
2025-05-18 14:31:35 +09:00
parent a41ac074db
commit ba150a0913
4 changed files with 40 additions and 15 deletions

View File

@@ -21,6 +21,8 @@ export const localModels = {
'bgem3GPU': 'Xenova/bge-m3',
'multiMiniLM': 'Xenova/paraphrase-multilingual-MiniLM-L12-v2',
'multiMiniLMGPU': 'Xenova/paraphrase-multilingual-MiniLM-L12-v2',
'bgeM3Ko': 'HyperBlaze/BGE-m3-ko',
'bgeM3KoGPU': 'HyperBlaze/BGE-m3-ko',
},
gpuModels:[
'MiniLMGPU',
@@ -28,6 +30,7 @@ export const localModels = {
'bgeSmallEnGPU',
'bgem3GPU',
'multiMiniLMGPU',
'bgeM3KoGPU',
]
}

View File

@@ -66,7 +66,7 @@ export const runEmbedding = async (texts: string[], model:EmbeddingModel = 'Xeno
}
extractor = await pipeline('feature-extraction', model, {
// The default dtype for webgpu is fp32, so set q8 explicitly; wasm already defaults to q8.
...(device === 'webgpu' ? { dtype: "q8" } : {}),
dtype: "q8",
device: device,
progress_callback: (progress) => {
console.log(progress)