Compare commits


5 Commits
0.7.2 ... 0.7.6

SHA1        Message                                        Date                        Checks
473424ae23  segfault fix                                   2026-02-20 17:31:49 -05:00  passed (build 33s, tag 6s)
9b831f7d95  Better ASR IDing                               2026-02-20 16:55:25 -05:00  passed (build 34s, tag 5s)
498b326e45  Bump 0.7.4                                     2026-02-20 14:19:17 -05:00  passed (build 34s, tag 5s)
56e4efec94  Use either python or python3 for diarization   2026-02-20 14:14:30 -05:00  n/a
a07f069ad0  One embedding at a time                        2026-02-19 22:58:53 -05:00  passed (build 27s, tag 7s)

Checks are the Publish Library workflow on push (Build NPM Project and Tag Version); all checks were successful wherever they ran.
6 changed files with 185 additions and 943 deletions

package-lock.json (generated, 1038 lines changed): diff suppressed because it is too large.

package.json

@@ -1,6 +1,6 @@
 {
 	"name": "@ztimson/ai-utils",
-	"version": "0.7.2",
+	"version": "0.7.6",
 	"description": "AI Utility library",
 	"author": "Zak Timson",
 	"license": "MIT",
@@ -25,14 +25,14 @@
 		"watch": "npx vite build --watch"
 	},
 	"dependencies": {
-		"@anthropic-ai/sdk": "^0.67.0",
+		"@anthropic-ai/sdk": "^0.78.0",
 		"@tensorflow/tfjs": "^4.22.0",
 		"@xenova/transformers": "^2.17.2",
-		"@ztimson/node-utils": "^1.0.4",
-		"@ztimson/utils": "^0.27.9",
+		"@ztimson/node-utils": "^1.0.7",
+		"@ztimson/utils": "^0.28.13",
 		"cheerio": "^1.2.0",
-		"openai": "^6.6.0",
-		"tesseract.js": "^6.0.1",
+		"openai": "^6.22.0",
+		"tesseract.js": "^7.0.0",
 		"wavefile": "^11.0.0"
 	},
 	"devDependencies": {


@@ -7,17 +7,20 @@ import { join } from 'node:path';
 import { tmpdir } from 'node:os';
 import wavefile from 'wavefile';

-let whisperPipeline: any;
-
-export async function canDiarization(): Promise<boolean> {
-	return new Promise((resolve) => {
-		const proc = spawn('python', ['-c', 'import pyannote.audio']);
-		proc.on('close', (code: number) => resolve(code === 0));
-		proc.on('error', () => resolve(false));
-	});
+export async function canDiarization(): Promise<string | null> {
+	const checkPython = (cmd: string) => {
+		return new Promise<boolean>((resolve) => {
+			const proc = spawn(cmd, ['-c', 'import pyannote.audio']);
+			proc.on('close', (code: number) => resolve(code === 0));
+			proc.on('error', () => resolve(false));
+		});
+	};
+	if(await checkPython('python3')) return 'python3';
+	if(await checkPython('python')) return 'python';
+	return null;
 }

-async function runDiarization(audioPath: string, dir: string, token: string): Promise<any[]> {
+async function runDiarization(binary: string, audioPath: string, dir: string, token: string): Promise<any[]> {
 	const script = `
 import sys
 import json
@@ -37,7 +40,7 @@ print(json.dumps(segments))
 	return new Promise((resolve, reject) => {
 		let output = '';
-		const proc = spawn('python', ['-c', script, audioPath]);
+		const proc = spawn(binary, ['-c', script, audioPath]);
 		proc.stdout.on('data', (data: Buffer) => output += data.toString());
 		proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
 		proc.on('close', (code: number) => {
@@ -105,30 +108,27 @@ function prepareAudioBuffer(file: string): [string, Float32Array] {
 }

 parentPort?.on('message', async ({ file, speaker, model, modelDir, token }) => {
+	let tempFile = null;
 	try {
-		if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
-		// Prepare audio file
+		const asr: any = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
 		const [f, buffer] = prepareAudioBuffer(file);
-		// Fetch transcript and speakers
-		const hasDiarization = speaker && await canDiarization();
+		tempFile = f !== file ? f : null;
+		const hasDiarization = await canDiarization();
 		const [transcript, speakers] = await Promise.all([
-			whisperPipeline(buffer, {return_timestamps: speaker ? 'word' : false}),
-			(!speaker || !token || !hasDiarization) ? Promise.resolve(): runDiarization(f, modelDir, token),
+			asr(buffer, {return_timestamps: speaker ? 'word' : false}),
+			(!speaker || !token || !hasDiarization) ? Promise.resolve(): runDiarization(hasDiarization, f, modelDir, token),
 		]);
-		if(file != f) rmSync(f, { recursive: true, force: true });
-		// Return any results / errors if no more processing required
 		const text = transcript.text?.trim() || null;
 		if(!speaker) return parentPort?.postMessage({ text });
 		if(!token) return parentPort?.postMessage({ text, error: 'HuggingFace token required' });
 		if(!hasDiarization) return parentPort?.postMessage({ text, error: 'Speaker diarization unavailable' });
-		// Combine transcript and speakers
 		const combined = combineSpeakerTranscript(transcript.chunks || [], speakers || []);
 		parentPort?.postMessage({ text: combined });
 	} catch (err: any) {
 		parentPort?.postMessage({ error: err.stack || err.message });
+	} finally {
+		if(tempFile) rmSync(tempFile, { recursive: true, force: true });
 	}
 });
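
The rewritten canDiarization() above is the heart of commit 56e4efec94: rather than assuming a `python` binary exists, the worker probes candidate interpreters and reports which one can actually import pyannote.audio (or null), and runDiarization() then spawns that same binary. A standalone sketch of the probe pattern, using only Node built-ins (findPython is an illustrative name, not part of the library):

import { spawn } from 'node:child_process';

// Probe one candidate interpreter: ask it to import pyannote.audio and
// treat a clean exit (code 0) as success. 'error' fires when the binary is missing.
function checkPython(cmd: string): Promise<boolean> {
	return new Promise((resolve) => {
		const proc = spawn(cmd, ['-c', 'import pyannote.audio']);
		proc.on('close', (code) => resolve(code === 0));
		proc.on('error', () => resolve(false));
	});
}

// Prefer python3 (typical on Linux/macOS), fall back to python (typical on
// Windows), and return null when neither can load pyannote.audio.
async function findPython(): Promise<string | null> {
	if(await checkPython('python3')) return 'python3';
	if(await checkPython('python')) return 'python';
	return null;
}

Returning the interpreter name instead of a boolean lets the probe result double as the spawn target, so the availability check and the later spawn can never disagree about which binary is used.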


@@ -15,7 +15,7 @@ export class Audio {
 		let p = new Promise<string | null>((resolve, reject) => {
 			const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'asr.js'));
 			const handleMessage = ({ text, warning, error }: any) => {
-				worker.terminate();
+				setTimeout(() => worker.terminate(), 1000);
 				if(aborted) return;
 				if(error) reject(new Error(error));
 				else {
@@ -24,7 +24,7 @@
 				}
 			};
 			const handleError = (err: Error) => {
-				worker.terminate();
+				setTimeout(() => worker.terminate(), 1000);
 				if(!aborted) reject(err);
 			};
 			worker.on('message', handleMessage);
@@ -42,8 +42,8 @@
 			if(!transcript) return transcript;
 			let chunks = this.ai.language.chunk(transcript, 500, 0);
 			if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
-			const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name"}', {
-				system: 'Use this following transcript to identify speakers. Only identify speakers you are sure about',
+			const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name", 2: "Second Name"}', {
+				system: 'Use the following transcript to identify speakers. Only identify speakers you are positive about, dont mention speakers you are unsure about in your response',
 				temperature: 0.1,
 			});
 			Object.entries(names).forEach(([speaker, name]) => {
@@ -56,5 +56,5 @@
 		return Object.assign(p, { abort });
 	}

-	canDiarization = canDiarization;
+	canDiarization = () => canDiarization().then(resp => !!resp);
 }
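
Three small behaviour changes in this file. Worker termination is deferred by a second, presumably so the thread is not torn down while it is still winding down; the surrounding promise still settles immediately since resolve/reject run outside the timeout. The speaker-ID prompt now shows a two-entry example shape and explicitly tells the model to omit speakers it is unsure about. And the public canDiarization keeps its boolean contract by coercing the worker module's new string-or-null result to true/false. The truncated Object.entries(names).forEach presumably writes the detected names back into the transcript; a hypothetical sketch of that substitution (the "Speaker N:" label format is assumed here, it is not visible in this diff):

// Hypothetical: swap diarization labels for detected names.
// Assumes names like { "1": "Alice", "2": "Bob" } and transcript labels of
// the form "Speaker 1:"; the real label format lives outside this diff.
function applyNames(transcript: string, names: Record<string, string>): string {
	return Object.entries(names).reduce(
		(text, [speaker, name]) => text.replaceAll(`Speaker ${speaker}:`, `${name}:`),
		transcript,
	);
}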


@@ -255,12 +255,11 @@ class LLM {
 	/**
 	 * Create a vector representation of a string
 	 * @param {object | string} target Item that will be embedded (objects get converted)
-	 * @param {maxTokens?: number, overlapTokens?: number, parellel?: number} opts Options for embedding such as chunk sizes and parallel processing
+	 * @param {maxTokens?: number, overlapTokens?: number} opts Options for embedding such as chunk sizes
 	 * @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
 	 */
-	async embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number, parallel?: number} = {}) {
-		let {maxTokens = 500, overlapTokens = 50, parallel = 1} = opts;
+	async embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number} = {}) {
+		let {maxTokens = 500, overlapTokens = 50} = opts;
 		const embed = (text: string): Promise<number[]> => {
 			return new Promise((resolve, reject) => {
 				const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
@@ -280,19 +279,13 @@
 				worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', modelDir: this.ai.options.path});
 			});
 		};
-		let i = 0, chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
-		const next: Function = () => {
-			const index = i++;
-			if(index >= chunks.length) return;
-			const text = chunks[index];
-			return embed(text).then(embedding => {
-				results.push({index, embedding, text, tokens: this.estimateTokens(text)});
-				return next();
-			})
-		}
-		await Promise.all(Array(parallel).fill(null).map(() => next()));
-		return results.toSorted((a, b) => a.index - b.index);
+		const chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
+		for(let i = 0; i < chunks.length; i++) {
+			const text = chunks[i];
+			const embedding = await embed(text);
+			results.push({index: i, embedding, text, tokens: this.estimateTokens(text)});
+		}
+		return results;
 	}

 	/**
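
This is commit a07f069ad0, "One embedding at a time": the parallel option and its recursive worker pool are removed in favour of a plain sequential loop, so at most one embedder worker is alive at any moment and results arrive already ordered, making the old toSorted pass unnecessary. The pattern in isolation (embedChunks and embed are illustrative stand-ins for the method above):

// Sequential embedding: each chunk is awaited before the next starts, so only
// one embedder worker exists at a time and results are naturally in order.
async function embedChunks(
	chunks: string[],
	embed: (text: string) => Promise<number[]>,
): Promise<{index: number, embedding: number[], text: string}[]> {
	const results: {index: number, embedding: number[], text: string}[] = [];
	for(let i = 0; i < chunks.length; i++) {
		const embedding = await embed(chunks[i]);
		results.push({index: i, embedding, text: chunks[i]});
	}
	return results;
}

The trade-off is wall-clock throughput for bounded resource use, which is the safer default when every embed call spins up a worker thread backed by native code.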


@@ -1,6 +1,5 @@
 import {defineConfig} from 'vite';
 import dts from 'vite-plugin-dts';
-import {resolve} from 'path';

 export default defineConfig({
 	build: {