Enable quantized embedder for speed boost

commit c8d5660b1a
parent f2c66b0cb8
Date: 2026-02-11 20:28:14 -05:00

2 changed files with 2 additions and 2 deletions

package.json

@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.5.2",
+  "version": "0.5.3",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",


@@ -4,7 +4,7 @@ import { parentPort } from 'worker_threads';
 let embedder: any;
 parentPort?.on('message', async ({ id, text, model }) => {
-  if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model);
+  if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model, {quantized: true});
   const output = await embedder(text, { pooling: 'mean', normalize: true });
   const embedding = Array.from(output.data);
   parentPort?.postMessage({ id, embedding });
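
For context, the worker above answers { id, text, model } messages with { id, embedding } replies, loading the quantized Xenova model lazily on the first request. Below is a minimal sketch of how a parent thread might call it; the worker file path ('./embedding.worker.js') and the default model name are assumptions for illustration, only the message shape comes from the diff.

// embed.ts - hypothetical caller for the embedding worker (sketch, not part of this commit)
import { Worker } from 'worker_threads';
import { randomUUID } from 'crypto';

const worker = new Worker('./embedding.worker.js'); // assumed path to the compiled worker

function embed(text: string, model = 'all-MiniLM-L6-v2'): Promise<number[]> {
  return new Promise(resolve => {
    const id = randomUUID();
    const onMessage = (msg: { id: string; embedding: number[] }) => {
      if(msg.id !== id) return;          // ignore replies meant for other requests
      worker.off('message', onMessage);  // stop listening once our reply arrives
      resolve(msg.embedding);
    };
    worker.on('message', onMessage);
    // The 'Xenova/' prefix is prepended inside the worker, so only the bare model name is sent
    worker.postMessage({ id, text, model });
  });
}

embed('Hello world').then(vec => console.log(vec.length));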