feat: add BGE-m3-ko embedding
This commit is contained in:
@@ -21,6 +21,8 @@ export const localModels = {
     'bgem3GPU': 'Xenova/bge-m3',
     'multiMiniLM': 'Xenova/paraphrase-multilingual-MiniLM-L12-v2',
     'multiMiniLMGPU': 'Xenova/paraphrase-multilingual-MiniLM-L12-v2',
+    'bgeM3Ko': 'HyperBlaze/BGE-m3-ko',
+    'bgeM3KoGPU': 'HyperBlaze/BGE-m3-ko',
   },
   gpuModels:[
     'MiniLMGPU',
@@ -28,6 +30,7 @@ export const localModels = {
     'bgeSmallEnGPU',
     'bgem3GPU',
     'multiMiniLMGPU',
+    'bgeM3KoGPU',
   ]
 }
||||
@@ -66,7 +66,7 @@ export const runEmbedding = async (texts: string[], model:EmbeddingModel = 'Xeno
   }
   extractor = await pipeline('feature-extraction', model, {
     // Default dtype for webgpu is fp32, so we can use q8, which is the default dtype in wasm.
-    ...(device === 'webgpu' ? { dtype: "q8" } : {}),
+    dtype: "q8",
     device: device,
     progress_callback: (progress) => {
       console.log(progress)
Reference in New Issue
Block a user