Moved embedding generation to a worker thread to prevent blocking the main thread
src/embedder.ts (new file, 11 lines)
@@ -0,0 +1,11 @@
+import { pipeline } from '@xenova/transformers';
+import { parentPort } from 'worker_threads';
+
+let model: any;
+
+parentPort?.on('message', async ({ id, text }) => {
+	if(!model) model = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
+	const output = await model(text, { pooling: 'mean', normalize: true });
+	const embedding = Array.from(output.data);
+	parentPort?.postMessage({ id, embedding });
+});
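The worker lazy-loads the MiniLM pipeline on the first message, then answers each { id, text } request with a matching { id, embedding }. A minimal sketch of driving it directly from the main thread, assuming the compiled worker sits at ./embedder.js next to the caller (sketch only, not part of the commit):

import { Worker } from 'worker_threads';

// Spawn the embedder and request a single embedding.
const worker = new Worker(new URL('./embedder.js', import.meta.url));

worker.on('message', ({ id, embedding }) => {
	console.log(`request ${id}: ${embedding.length}-dimensional vector`);
	worker.terminate(); // one-off check, shut the worker down
});

worker.postMessage({ id: 0, text: 'Hello, world' });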
src/llm.ts (32 lines changed)
@@ -1,4 +1,3 @@
-import {pipeline} from '@xenova/transformers';
 import {JSONAttemptParse} from '@ztimson/utils';
 import {Ai} from './ai.ts';
 import {Anthropic} from './antrhopic.ts';
@@ -6,7 +5,9 @@ import {Ollama} from './ollama.ts';
 import {OpenAi} from './open-ai.ts';
 import {AbortablePromise, LLMProvider} from './provider.ts';
 import {AiTool} from './tools.ts';
-import * as tf from '@tensorflow/tfjs';
+import {Worker} from 'worker_threads';
+import {fileURLToPath} from 'url';
+import {dirname, join} from 'path';
 
 export type LLMMessage = {
 	/** Message originator */
@@ -83,11 +84,22 @@ export type LLMRequest = {
 }
 
 export class LLM {
-	private embedModel: any;
+	private embedWorker: Worker | null = null;
+	private embedQueue = new Map<number, { resolve: (value: number[]) => void; reject: (error: any) => void }>();
+	private embedId = 0;
 	private providers: {[key: string]: LLMProvider} = {};
 
 	constructor(public readonly ai: Ai) {
-		this.embedModel = pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
+		this.embedWorker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
+		this.embedWorker.on('message', ({ id, embedding }) => {
+			const pending = this.embedQueue.get(id);
+			if (pending) {
+				pending.resolve(embedding);
+				this.embedQueue.delete(id);
+			}
+		});
 
 		if(ai.options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, ai.options.anthropic.token, ai.options.anthropic.model);
 		if(ai.options.ollama?.host) this.providers.ollama = new Ollama(this.ai, ai.options.ollama.host, ai.options.ollama.model);
 		if(ai.options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, ai.options.openAi.token, ai.options.openAi.model);
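The correlation scheme here is the usual promisified worker RPC: a monotonically increasing id tags each request, the resolver is parked in embedQueue, and the worker's reply is matched back by that id. The same pattern in isolation, as a generic sketch (WorkerRpc and its names are illustrative, not from the codebase):

import { Worker } from 'worker_threads';

// Generic request/response wrapper: pair each postMessage with a Promise.
class WorkerRpc<Req extends object, Res> {
	private nextId = 0;
	private pending = new Map<number, (res: Res) => void>();

	constructor(private worker: Worker) {
		worker.on('message', ({ id, ...res }) => {
			this.pending.get(id)?.(res as Res); // resolve the matching caller
			this.pending.delete(id);
		});
	}

	call(req: Req): Promise<Res> {
		return new Promise(resolve => {
			const id = this.nextId++;
			this.pending.set(id, resolve);
			this.worker.postMessage({ id, ...req });
		});
	}
}

With that helper, new WorkerRpc<{ text: string }, { embedding: number[] }>(worker) would reproduce the embed() plumbing below in a single call.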
@@ -159,10 +171,12 @@ export class LLM {
 			});
 		};
 
-		const embed = async (text: string): Promise<number[]> => {
-			const model = await this.embedModel;
-			const output = await model(text, {pooling: 'mean', normalize: true});
-			return Array.from(output.data);
+		const embed = (text: string): Promise<number[]> => {
+			return new Promise((resolve, reject) => {
+				const id = this.embedId++;
+				this.embedQueue.set(id, { resolve, reject });
+				this.embedWorker?.postMessage({ id, text });
+			});
 		};
 
 		// Tokenize
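One caveat worth noting: reject is stored in the queue but never invoked, so a worker crash would leave every in-flight embed promise pending forever. A possible guard, not part of this commit, would drain the queue on the worker's 'error' event:

// Sketch: reject all in-flight requests if the worker dies (not in this commit).
this.embedWorker.on('error', err => {
	for(const { reject } of this.embedQueue.values()) reject(err);
	this.embedQueue.clear();
});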
@@ -188,7 +202,7 @@ export class LLM {
 			const cleanText = text.replace(/\s*\n\s*/g, '\n').trim();
 			if(cleanText) chunks.push(cleanText);
 			start = end - overlapTokens;
-			if (start <= end - tokens.length + end) start = end; // Safety: prevent infinite loop
+			if(start <= end - tokens.length + end) start = end;
 		}
 
 		return Promise.all(chunks.map(async (text, index) => ({
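For context on this last hunk: the loop windows the token stream with start = end - overlapTokens, so with, e.g., a 512-token window and 64-token overlap, chunks begin at 0, 448, 896, and so on; the safety check snaps start to end whenever the stride fails to advance. A stripped-down version of that windowing (illustrative names and default parameters, not the repo's values):

// Illustrative sliding-window chunker over a token array.
function windows<T>(tokens: T[], size = 512, overlap = 64): T[][] {
	const chunks: T[][] = [];
	let start = 0;
	while(start < tokens.length) {
		const end = Math.min(start + size, tokens.length);
		chunks.push(tokens.slice(start, end));
		if(end === tokens.length) break; // final window, stop
		start = end - overlap;           // step back to create overlap
	}
	return chunks;
}

Keeping overlap strictly smaller than size (or snapping to end, as the commit does) is what guarantees start always advances and the loop terminates.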