Compare commits


14 Commits
0.6.6 ... 0.7.7

SHA1        Date                        Message
790608f020  2026-02-20 19:05:19 -05:00  Queue OCR & ASR work
473424ae23  2026-02-20 17:31:49 -05:00  segfault fix
9b831f7d95  2026-02-20 16:55:25 -05:00  Better ASR IDing
498b326e45  2026-02-20 14:19:17 -05:00  Bump 0.7.4
56e4efec94  2026-02-20 14:14:30 -05:00  Use either python or python3 or diarization
a07f069ad0  2026-02-19 22:58:53 -05:00  One embedding at a time
da15d299e6  2026-02-19 21:37:58 -05:00  parallel embedding cap
7ef7c3f676  2026-02-14 09:48:12 -05:00  Cap speaker ID transcript length to 2000 tokens
4143d00de7  2026-02-14 09:39:17 -05:00  Working speaker detection with advanced LLM identifying. Improved LLM json function
0360f2493d  2026-02-12 22:15:57 -05:00  Added hugging face token
0172887877  2026-02-12 20:24:12 -05:00  audio worker fix
8f89f5e3cf  2026-02-12 20:18:56 -05:00  embedding worker fix
5bd41f8c6a  2026-02-12 20:17:31 -05:00  worker fix?
e4399e1b7b  2026-02-12 20:14:00 -05:00  Updataes?

All CI checks passed on every pushed commit (Publish Library / Build NPM Project: 26-39s; Tag Version: 5-7s).
9 changed files with 335 additions and 1002 deletions

package-lock.json (generated): 1038 changed lines; diff suppressed because it is too large.

package.json

@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.6.6",
+  "version": "0.7.7",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",
@@ -25,14 +25,14 @@
     "watch": "npx vite build --watch"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.67.0",
+    "@anthropic-ai/sdk": "^0.78.0",
     "@tensorflow/tfjs": "^4.22.0",
     "@xenova/transformers": "^2.17.2",
-    "@ztimson/node-utils": "^1.0.4",
-    "@ztimson/utils": "^0.27.9",
+    "@ztimson/node-utils": "^1.0.7",
+    "@ztimson/utils": "^0.28.13",
     "cheerio": "^1.2.0",
-    "openai": "^6.6.0",
-    "tesseract.js": "^6.0.1",
+    "openai": "^6.22.0",
+    "tesseract.js": "^7.0.0",
     "wavefile": "^11.0.0"
   },
   "devDependencies": {

ai.ts

@@ -8,6 +8,8 @@ export type AbortablePromise<T> = Promise<T> & {
 };

 export type AiOptions = {
+	/** Token to pull models from hugging face */
+	hfToken?: string;
 	/** Path to models */
 	path?: string;
 	/** ASR model: whisper-tiny, whisper-base */
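
The new hfToken option feeds the gated pyannote diarization model used further down. A minimal sketch of how it would be configured, assuming the Ai class is constructed directly from AiOptions and exported from the package root (neither is shown in this diff):

import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({
	hfToken: process.env.HF_TOKEN, // needed to download pyannote/speaker-diarization-3.1 from Hugging Face
	path: './models',              // local cache for Whisper/embedding/diarization models
	asr: 'whisper-base',
});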

asr.ts (ASR worker)

@@ -1,44 +1,48 @@
 import { pipeline } from '@xenova/transformers';
 import { parentPort } from 'worker_threads';
-import * as fs from 'node:fs';
-import wavefile from 'wavefile';
 import { spawn } from 'node:child_process';
+import { execSync } from 'node:child_process';
+import { mkdtempSync, rmSync, readFileSync } from 'node:fs';
+import { join } from 'node:path';
+import { tmpdir } from 'node:os';
+import wavefile from 'wavefile';

 let whisperPipeline: any;

-export async function canDiarization(): Promise<boolean> {
-	return new Promise((resolve) => {
-		const proc = spawn('python3', ['-c', 'import pyannote.audio']);
-		proc.on('close', (code: number) => resolve(code === 0));
-		proc.on('error', () => resolve(false));
-	});
+export async function canDiarization(): Promise<string | null> {
+	const checkPython = (cmd: string) => {
+		return new Promise<boolean>((resolve) => {
+			const proc = spawn(cmd, ['-c', 'import pyannote.audio']);
+			proc.on('close', (code: number) => resolve(code === 0));
+			proc.on('error', () => resolve(false));
+		});
+	};
+	if(await checkPython('python3')) return 'python3';
+	if(await checkPython('python')) return 'python';
+	return null;
 }

-async function runDiarization(audioPath: string, torchHome: string): Promise<any[]> {
+async function runDiarization(binary: string, audioPath: string, dir: string, token: string): Promise<any[]> {
 	const script = `
 import sys
 import json
 import os
 from pyannote.audio import Pipeline
-os.environ['TORCH_HOME'] = "${torchHome}"
-pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
-diarization = pipeline(sys.argv[1])
+os.environ['TORCH_HOME'] = r"${dir}"
+pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1", token="${token}")
+output = pipeline(sys.argv[1])
 segments = []
-for turn, _, speaker in diarization.itertracks(yield_label=True):
-    segments.append({
-        "start": turn.start,
-        "end": turn.end,
-        "speaker": speaker
-    })
+for turn, speaker in output.speaker_diarization:
+    segments.append({"start": turn.start, "end": turn.end, "speaker": speaker})
 print(json.dumps(segments))
 `;
 	return new Promise((resolve, reject) => {
 		let output = '';
-		const proc = spawn('python3', ['-c', script, audioPath]);
+		const proc = spawn(binary, ['-c', script, audioPath]);
 		proc.stdout.on('data', (data: Buffer) => output += data.toString());
 		proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
 		proc.on('close', (code: number) => {
@@ -71,54 +75,63 @@ function combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
 		const speaker = speakers.find((s: any) => time >= s.start && time <= s.end);
 		const speakerNum = speaker ? speakerMap.get(speaker.speaker) : 1;
 		if (speakerNum !== currentSpeaker) {
-			if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
+			if(currentText) lines.push(`[Speaker ${currentSpeaker}]: ${currentText.trim()}`);
 			currentSpeaker = speakerNum;
 			currentText = chunk.text;
 		} else {
 			currentText += chunk.text;
 		}
 	});
-	if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
+	if(currentText) lines.push(`[Speaker ${currentSpeaker}]: ${currentText.trim()}`);
 	return lines.join('\n');
 }

-parentPort?.on('message', async ({ path, model, speaker, torchHome }) => {
-	try {
-		if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: torchHome, quantized: true});
-
-		// Prepare audio file (convert to mono channel wave)
-		const wav = new wavefile.WaveFile(fs.readFileSync(path));
-		wav.toBitDepth('32f');
-		wav.toSampleRate(16000);
-		const samples = wav.getSamples();
-		let buffer;
-		if(Array.isArray(samples)) {
-			const left = samples[0];
-			const right = samples[1];
-			buffer = new Float32Array(left.length);
-			for (let i = 0; i < left.length; i++) buffer[i] = (left[i] + right[i]) / 2;
-		} else {
-			buffer = samples;
-		}
-
-		// Transcribe
-		const transcriptResult = await whisperPipeline(buffer, {return_timestamps: speaker ? 'word' : false});
-		if(!speaker) {
-			parentPort?.postMessage({ text: transcriptResult.text?.trim() || null });
-			return;
-		}
-
-		// Speaker Diarization
-		const hasDiarization = await canDiarization();
-		if(!hasDiarization) {
-			parentPort?.postMessage({ text: transcriptResult.text?.trim() || null, warning: 'Speaker diarization unavailable' });
-			return;
-		}
-		const speakers = await runDiarization(path, torchHome);
-		const combined = combineSpeakerTranscript(transcriptResult.chunks || [], speakers);
-		parentPort?.postMessage({ text: combined });
-	} catch (err) {
-		parentPort?.postMessage({ error: (err as Error).message });
-	}
-});
+function prepareAudioBuffer(file: string): [string, Float32Array] {
+	let wav: any, tmp;
+	try {
+		wav = new wavefile.WaveFile(readFileSync(file));
+	} catch(err) {
+		tmp = join(mkdtempSync(join(tmpdir(), 'audio-')), 'converted.wav');
+		execSync(`ffmpeg -i "${file}" -ar 16000 -ac 1 -f wav "${tmp}"`, { stdio: 'ignore' });
+		wav = new wavefile.WaveFile(readFileSync(tmp));
+	} finally {
+		wav.toBitDepth('32f');
+		wav.toSampleRate(16000);
+		const samples = wav.getSamples();
+		if(Array.isArray(samples)) {
+			// stereo to mono - average the channels
+			const left = samples[0];
+			const right = samples[1];
+			const buffer = new Float32Array(left.length);
+			for (let i = 0; i < left.length; i++) buffer[i] = (left[i] + right[i]) / 2;
+			return [tmp || file, buffer];
+		}
+		return [tmp || file, samples];
+	}
+}
+
+parentPort?.on('message', async ({ file, speaker, model, modelDir, token }) => {
+	let tempFile = null;
+	try {
+		if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
+		const [f, buffer] = prepareAudioBuffer(file);
+		tempFile = f !== file ? f : null;
+		const hasDiarization = await canDiarization();
+		const [transcript, speakers] = await Promise.all([
+			whisperPipeline(buffer, {return_timestamps: speaker ? 'word' : false}),
+			(!speaker || !token || !hasDiarization) ? Promise.resolve() : runDiarization(hasDiarization, f, modelDir, token),
+		]);
+		const text = transcript.text?.trim() || null;
+		if(!speaker) return parentPort?.postMessage({ text });
+		if(!token) return parentPort?.postMessage({ text, error: 'HuggingFace token required' });
+		if(!hasDiarization) return parentPort?.postMessage({ text, error: 'Speaker diarization unavailable' });
+		const combined = combineSpeakerTranscript(transcript.chunks || [], speakers || []);
+		parentPort?.postMessage({ text: combined });
+	} catch (err: any) {
+		parentPort?.postMessage({ error: err.stack || err.message });
+	} finally {
+		if(tempFile) rmSync(tempFile, { recursive: true, force: true });
+	}
+});
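
For reference, a hedged sketch of the worker's message contract after this change. The field names come straight from the diff; the './asr.js' path and the caller shape are assumptions. Note the design choice: transcription and diarization now run concurrently via Promise.all, and ffmpeg must be on PATH for non-WAV input to be converted.

import {Worker} from 'worker_threads';

const worker = new Worker('./asr.js');
worker.postMessage({
	file: './meeting.wav',        // non-WAV input falls back to an ffmpeg conversion in a temp dir
	speaker: true,                // request word timestamps plus pyannote diarization
	model: 'whisper-base',
	modelDir: './models',
	token: process.env.HF_TOKEN,  // without this the reply carries error: 'HuggingFace token required'
});
worker.on('message', ({text, error}) => {
	if(error) console.error(error);
	else console.log(text);       // '[Speaker 1]: ...' lines when diarization succeeded
});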

audio.ts (Audio class)

@@ -1,40 +1,82 @@
+import {fileURLToPath} from 'url';
 import {Worker} from 'worker_threads';
-import path from 'node:path';
 import {AbortablePromise, Ai} from './ai.ts';
 import {canDiarization} from './asr.ts';
+import {dirname, join} from 'path';

 export class Audio {
+	private busy = false;
+	private currentJob: any;
+	private queue: Array<{file: string, model: string, speaker: boolean | 'id', modelDir: string, token: string, resolve: any, reject: any}> = [];
+	private worker: Worker | null = null;
+
 	constructor(private ai: Ai) {}

-	asr(filepath: string, options: { model?: string; speaker?: boolean } = {}): AbortablePromise<string | null> {
+	private processQueue() {
+		if(this.busy || !this.queue.length) return;
+		this.busy = true;
+		const job = this.queue.shift()!;
+		if(!this.worker) {
+			this.worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'asr.js'));
+			this.worker.on('message', this.handleMessage.bind(this));
+			this.worker.on('error', this.handleError.bind(this));
+		}
+		this.currentJob = job;
+		this.worker.postMessage({file: job.file, model: job.model, speaker: job.speaker, modelDir: job.modelDir, token: job.token});
+	}
+
+	private handleMessage({text, warning, error}: any) {
+		const job = this.currentJob!;
+		this.busy = false;
+		if(error) job.reject(new Error(error));
+		else {
+			if(warning) console.warn(warning);
+			job.resolve(text);
+		}
+		this.processQueue();
+	}
+
+	private handleError(err: Error) {
+		if(this.currentJob) {
+			this.currentJob.reject(err);
+			this.busy = false;
+			this.processQueue();
+		}
+	}
+
+	asr(file: string, options: { model?: string; speaker?: boolean | 'id' } = {}): AbortablePromise<string | null> {
 		const { model = this.ai.options.asr || 'whisper-base', speaker = false } = options;
 		let aborted = false;
 		const abort = () => { aborted = true; };
-
-		const p = new Promise<string | null>((resolve, reject) => {
-			const worker = new Worker(path.join(import.meta.dirname, 'asr.js'));
-			const handleMessage = ({ text, warning, error }: any) => {
-				worker.terminate();
-				if(aborted) return;
-				if(error) reject(new Error(error));
-				else {
-					if(warning) console.warn(warning);
-					resolve(text);
-				}
-			};
-			const handleError = (err: Error) => {
-				worker.terminate();
-				if(!aborted) reject(err);
-			};
-			worker.on('message', handleMessage);
-			worker.on('error', handleError);
-			worker.on('exit', (code) => {
-				if(code !== 0 && !aborted) reject(new Error(`Worker exited with code ${code}`));
-			});
-			worker.postMessage({path: filepath, model, speaker, torchHome: this.ai.options.path,});
+		let p = new Promise<string | null>((resolve, reject) => {
+			this.queue.push({file, model, speaker, modelDir: <string>this.ai.options.path, token: <string>this.ai.options.hfToken,
+				resolve: (text: string | null) => !aborted && resolve(text),
+				reject: (err: Error) => !aborted && reject(err)
+			});
+			this.processQueue();
 		});
+		if(options.speaker == 'id') {
+			if(!this.ai.language.defaultModel) throw new Error('Configure an LLM for advanced ASR speaker detection');
+			p = p.then(async transcript => {
+				if(!transcript) return transcript;
+				let chunks = this.ai.language.chunk(transcript, 500, 0);
+				if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
+				const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name", 2: "Second Name"}', {
+					system: 'Use the following transcript to identify speakers. Only identify speakers you are positive about, dont mention speakers you are unsure about in your response',
+					temperature: 0.1,
+				});
+				Object.entries(names).forEach(([speaker, name]) => {
+					transcript = (<string>transcript).replaceAll(`[Speaker ${speaker}]`, `[${name}]`);
+				});
+				return transcript;
+			})
+		}
 		return Object.assign(p, { abort });
 	}

-	canDiarization = canDiarization;
+	canDiarization = () => canDiarization().then(resp => !!resp);
 }
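
The Audio class now serializes jobs through one persistent worker instead of spawning a worker per call, and speaker: 'id' layers LLM-based name detection on top. A usage sketch, assuming an ai instance with an LLM configured; the ai.audio accessor name is an assumption, not shown in this diff:

// Plain diarization: transcript labelled '[Speaker 1]', '[Speaker 2]', ...
const labelled = await ai.audio.asr('./meeting.wav', {speaker: true});

// Advanced mode: an LLM scans the transcript and swaps the labels for detected names
const named = await ai.audio.asr('./meeting.wav', {speaker: 'id'});

// abort() only discards the result; the queued worker job still runs to completion
const job = ai.audio.asr('./long.wav');
job.abort();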

embedder.ts (embedding worker)

@@ -3,12 +3,9 @@ import { parentPort } from 'worker_threads';

 let embedder: any;

-parentPort?.on('message', async ({ id, text, model, path }) => {
-	if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model, {
-		quantized: true,
-		cache_dir: path,
-	});
+parentPort?.on('message', async ({text, model, modelDir }) => {
+	if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model, {quantized: true, cache_dir: modelDir});
 	const output = await embedder(text, { pooling: 'mean', normalize: true });
 	const embedding = Array.from(output.data);
-	parentPort?.postMessage({ id, embedding });
+	parentPort?.postMessage({embedding});
 });

llm.ts (LLM class)

@@ -75,8 +75,8 @@ export type LLMRequest = {
 }

 class LLM {
-	private models: {[model: string]: LLMProvider} = {};
-	private defaultModel!: string;
+	defaultModel!: string;
+	models: {[model: string]: LLMProvider} = {};

 	constructor(public readonly ai: Ai) {
 		if(!ai.options.llm?.models) return;
@@ -184,7 +184,12 @@
 		const system = history[0].role == 'system' ? history[0] : null,
 			recent = keep == 0 ? [] : history.slice(-keep),
 			process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
-		const summary: any = await this.json(`Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn't already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI. Match this format: {summary: string, facts: [[subject, fact]]}\n\n${process.map(m => `${m.role}: ${m.content}`).join('\n\n')}`, {model: options?.model, temperature: options?.temperature || 0.3});
+		const summary: any = await this.json(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), '{summary: string, facts: [[subject, fact]]}', {
+			system: 'Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn\'t already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI.',
+			model: options?.model,
+			temperature: options?.temperature || 0.3
+		});
 		const timestamp = new Date();
 		const memory = await Promise.all((summary?.facts || [])?.map(async ([owner, fact]: [string, string]) => {
 			const e = await Promise.all([this.embedding(owner), this.embedding(`${owner}: ${fact}`)]);
@@ -250,11 +255,11 @@
 	/**
 	 * Create a vector representation of a string
 	 * @param {object | string} target Item that will be embedded (objects get converted)
-	 * @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
-	 * @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
+	 * @param {maxTokens?: number, overlapTokens?: number} opts Options for embedding such as chunk sizes
 	 * @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
 	 */
-	embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
+	async embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number} = {}) {
+		let {maxTokens = 500, overlapTokens = 50} = opts;
 		const embed = (text: string): Promise<number[]> => {
 			return new Promise((resolve, reject) => {
 				const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
@@ -271,16 +276,16 @@
 				worker.on('exit', (code) => {
 					if(code !== 0) reject(new Error(`Worker exited with code ${code}`));
 				});
-				worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', path: this.ai.options.path});
+				worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', modelDir: this.ai.options.path});
 			});
 		};
-		const chunks = this.chunk(target, maxTokens, overlapTokens);
-		return Promise.all(chunks.map(async (text, index) => ({
-			index,
-			embedding: await embed(text),
-			text,
-			tokens: this.estimateTokens(text),
-		})));
+		const chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
+		for(let i = 0; i < chunks.length; i++) {
+			const text = chunks[i];
+			const embedding = await embed(text);
+			results.push({index: i, embedding, text, tokens: this.estimateTokens(text)});
+		}
+		return results;
 	}

 	/**
@@ -312,12 +317,16 @@
 	/**
 	 * Ask a question with JSON response
-	 * @param {string} message Question
+	 * @param {string} text Text to process
+	 * @param {string} schema JSON schema the AI should match
 	 * @param {LLMRequest} options Configuration options and chat history
 	 * @returns {Promise<{} | {} | RegExpExecArray | null>}
 	 */
-	async json(message: string, options?: LLMRequest): Promise<any> {
-		let resp = await this.ask(message, {system: 'Respond using a JSON blob matching any provided examples', ...options});
+	async json(text: string, schema: string, options?: LLMRequest): Promise<any> {
+		let resp = await this.ask(text, {...options, system: (options?.system ? `${options.system}\n` : '') + `Only respond using a JSON code block matching this schema:
+\`\`\`json
+${schema}
+\`\`\``});
 		if(!resp) return {};
 		const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
 		const jsonStr = codeBlock ? codeBlock[1].trim() : resp;
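
Two caller-facing changes here: json() now takes the schema as a separate argument and merges it into any caller-supplied system prompt, and embedding() runs one worker job at a time instead of Promise.all (matching the "One embedding at a time" commit). A sketch of both new signatures; the ai.language accessor is taken from the Audio diff above, and the input variables are hypothetical:

// Schema is injected into the system prompt as a JSON code block
const names = await ai.language.json(transcript, '{1: "Detected Name", 2: "Second Name"}', {
	system: 'Identify the speakers in this transcript',
	temperature: 0.1,
});

// Options object replaces the old positional maxTokens/overlapTokens arguments
const vectors = await ai.language.embedding(longDocument, {maxTokens: 500, overlapTokens: 50});
// -> [{index, embedding, text, tokens}, ...] computed sequentially, one chunk per worker job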

vision.ts (Vision class)

@@ -2,8 +2,26 @@ import {createWorker} from 'tesseract.js';
 import {AbortablePromise, Ai} from './ai.ts';

 export class Vision {
+	private worker: any = null;
+	private queue: Array<{ path: string, resolve: any, reject: any }> = [];
+	private busy = false;
+
-	constructor(private ai: Ai) { }
+	constructor(private ai: Ai) {}
+
+	private async processQueue() {
+		if(this.busy || !this.queue.length) return;
+		this.busy = true;
+		const job = this.queue.shift()!;
+		if(!this.worker) this.worker = await createWorker(this.ai.options.ocr || 'eng', 2, {cachePath: this.ai.options.path});
+		try {
+			const {data} = await this.worker.recognize(job.path);
+			job.resolve(data.text.trim() || null);
+		} catch(err) {
+			job.reject(err);
+		}
+		this.busy = false;
+		this.processQueue();
+	}

 	/**
 	 * Convert image to text using Optical Character Recognition
@@ -11,13 +29,16 @@
 	 * @returns {AbortablePromise<string | null>} Promise of extracted text with abort method
 	 */
 	ocr(path: string): AbortablePromise<string | null> {
-		let worker: any;
-		const p = new Promise<string | null>(async res => {
-			worker = await createWorker(this.ai.options.ocr || 'eng', 2, {cachePath: this.ai.options.path});
-			const {data} = await worker.recognize(path);
-			await worker.terminate();
-			res(data.text.trim() || null);
+		let aborted = false;
+		const abort = () => { aborted = true; };
+		const p = new Promise<string | null>((resolve, reject) => {
+			this.queue.push({
+				path,
+				resolve: (text: string | null) => !aborted && resolve(text),
+				reject: (err: Error) => !aborted && reject(err)
+			});
+			this.processQueue();
 		});
-		return Object.assign(p, {abort: () => worker?.terminate()});
+		return Object.assign(p, {abort});
 	}
 }
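
Vision mirrors the same queue pattern: one persistent tesseract.js worker shared by all OCR calls, so concurrent requests no longer spin up and tear down a worker each time. A usage sketch; the ai.vision accessor name is assumed, not shown in this diff:

const text = await ai.vision.ocr('./scan.png'); // queued behind any in-flight job

const pending = ai.vision.ocr('./big-scan.png');
pending.abort(); // the job still runs on the shared worker, but its result is discarded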

vite.config.ts

@@ -1,6 +1,5 @@
 import {defineConfig} from 'vite';
 import dts from 'vite-plugin-dts';
-import {resolve} from 'path';

 export default defineConfig({
 	build: {