Compare commits

8 Commits
0.6.7 ... 0.7.3

SHA1        Date                        Message
a07f069ad0  2026-02-19 22:58:53 -05:00  One embedding at a time
da15d299e6  2026-02-19 21:37:58 -05:00  parallel embedding cap
7ef7c3f676  2026-02-14 09:48:12 -05:00  Cap speaker ID transcript length to 2000 tokens
4143d00de7  2026-02-14 09:39:17 -05:00  Working speaker detection with advanced LLM identifying. Improved LLM json function
0360f2493d  2026-02-12 22:15:57 -05:00  Added hugging face token
0172887877  2026-02-12 20:24:12 -05:00  audio worker fix
8f89f5e3cf  2026-02-12 20:18:56 -05:00  embedding worker fix
5bd41f8c6a  2026-02-12 20:17:31 -05:00  worker fix?

All checks were successful on every commit (Publish Library / Build NPM Project and Publish Library / Tag Version).
6 changed files with 108 additions and 67 deletions

View File: package.json

@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.6.7",
+  "version": "0.7.3",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",

View File: ai.ts

@@ -8,6 +8,8 @@ export type AbortablePromise<T> = Promise<T> & {
 };

 export type AiOptions = {
+  /** Token to pull models from hugging face */
+  hfToken?: string;
   /** Path to models */
   path?: string;
   /** ASR model: whisper-tiny, whisper-base */
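
The new hfToken option is forwarded to the pyannote diarization step (see asr.ts below). A minimal configuration sketch; the Ai constructor shape is an assumption inferred from how ai.options is read elsewhere in this diff:

import { Ai } from '@ztimson/ai-utils';

const ai = new Ai({
  hfToken: process.env.HF_TOKEN, // required for the gated pyannote/speaker-diarization-3.1 model
  path: './models',              // local model cache
  asr: 'whisper-base',
});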

View File: asr.ts

@@ -1,44 +1,43 @@
 import { pipeline } from '@xenova/transformers';
 import { parentPort } from 'worker_threads';
-import * as fs from 'node:fs';
-import wavefile from 'wavefile';
 import { spawn } from 'node:child_process';
+import { execSync } from 'node:child_process';
+import { mkdtempSync, rmSync, readFileSync } from 'node:fs';
+import { join } from 'node:path';
+import { tmpdir } from 'node:os';
+import wavefile from 'wavefile';

 let whisperPipeline: any;

 export async function canDiarization(): Promise<boolean> {
   return new Promise((resolve) => {
-    const proc = spawn('python3', ['-c', 'import pyannote.audio']);
+    const proc = spawn('python', ['-c', 'import pyannote.audio']);
     proc.on('close', (code: number) => resolve(code === 0));
     proc.on('error', () => resolve(false));
   });
 }

-async function runDiarization(audioPath: string, torchHome: string): Promise<any[]> {
+async function runDiarization(audioPath: string, dir: string, token: string): Promise<any[]> {
   const script = `
 import sys
 import json
 import os
 from pyannote.audio import Pipeline
-os.environ['TORCH_HOME'] = "${torchHome}"
-pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
-diarization = pipeline(sys.argv[1])
+os.environ['TORCH_HOME'] = r"${dir}"
+pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1", token="${token}")
+output = pipeline(sys.argv[1])
 segments = []
-for turn, _, speaker in diarization.itertracks(yield_label=True):
-    segments.append({
-        "start": turn.start,
-        "end": turn.end,
-        "speaker": speaker
-    })
+for turn, speaker in output.speaker_diarization:
+    segments.append({"start": turn.start, "end": turn.end, "speaker": speaker})
 print(json.dumps(segments))
 `;
   return new Promise((resolve, reject) => {
     let output = '';
-    const proc = spawn('python3', ['-c', script, audioPath]);
+    const proc = spawn('python', ['-c', script, audioPath]);
     proc.stdout.on('data', (data: Buffer) => output += data.toString());
     proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
     proc.on('close', (code: number) => {
@@ -71,54 +70,65 @@ function combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
     const speaker = speakers.find((s: any) => time >= s.start && time <= s.end);
     const speakerNum = speaker ? speakerMap.get(speaker.speaker) : 1;
     if (speakerNum !== currentSpeaker) {
-      if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
+      if(currentText) lines.push(`[Speaker ${currentSpeaker}]: ${currentText.trim()}`);
       currentSpeaker = speakerNum;
       currentText = chunk.text;
     } else {
       currentText += chunk.text;
     }
   });
-  if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
+  if(currentText) lines.push(`[Speaker ${currentSpeaker}]: ${currentText.trim()}`);
   return lines.join('\n');
 }

-parentPort?.on('message', async ({ file, speaker, model, modelDir }) => {
-  try {
-    if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
-    // Prepare audio file (convert to mono channel wave)
-    const wav = new wavefile.WaveFile(fs.readFileSync(file));
+function prepareAudioBuffer(file: string): [string, Float32Array] {
+  let wav: any, tmp;
+  try {
+    wav = new wavefile.WaveFile(readFileSync(file));
+  } catch(err) {
+    tmp = join(mkdtempSync(join(tmpdir(), 'audio-')), 'converted.wav');
+    execSync(`ffmpeg -i "${file}" -ar 16000 -ac 1 -f wav "${tmp}"`, { stdio: 'ignore' });
+    wav = new wavefile.WaveFile(readFileSync(tmp));
+  } finally {
     wav.toBitDepth('32f');
     wav.toSampleRate(16000);
     const samples = wav.getSamples();
-    let buffer;
-    if(Array.isArray(samples)) { // stereo to mono - average the channels
+    if(Array.isArray(samples)) {
       const left = samples[0];
       const right = samples[1];
-      buffer = new Float32Array(left.length);
+      const buffer = new Float32Array(left.length);
       for (let i = 0; i < left.length; i++) buffer[i] = (left[i] + right[i]) / 2;
-    } else {
-      buffer = samples;
+      return [tmp || file, buffer];
     }
-    // Transcribe
-    const transcriptResult = await whisperPipeline(buffer, {return_timestamps: speaker ? 'word' : false});
-    if(!speaker) {
-      parentPort?.postMessage({ text: transcriptResult.text?.trim() || null });
-      return;
-    }
-    // Speaker Diarization
-    const hasDiarization = await canDiarization();
-    if(!hasDiarization) {
-      parentPort?.postMessage({ text: transcriptResult.text?.trim() || null, error: 'Speaker diarization unavailable' });
-      return;
-    }
-    const speakers = await runDiarization(file, modelDir);
-    const combined = combineSpeakerTranscript(transcriptResult.chunks || [], speakers);
+    return [tmp || file, samples];
+  }
+}
+
+parentPort?.on('message', async ({ file, speaker, model, modelDir, token }) => {
+  try {
+    if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
+    // Prepare audio file
+    const [f, buffer] = prepareAudioBuffer(file);
+    // Fetch transcript and speakers
+    const hasDiarization = speaker && await canDiarization();
+    const [transcript, speakers] = await Promise.all([
+      whisperPipeline(buffer, {return_timestamps: speaker ? 'word' : false}),
+      (!speaker || !token || !hasDiarization) ? Promise.resolve() : runDiarization(f, modelDir, token),
+    ]);
+    if(file != f) rmSync(f, { recursive: true, force: true });
+    // Return any results / errors if no more processing required
+    const text = transcript.text?.trim() || null;
+    if(!speaker) return parentPort?.postMessage({ text });
+    if(!token) return parentPort?.postMessage({ text, error: 'HuggingFace token required' });
+    if(!hasDiarization) return parentPort?.postMessage({ text, error: 'Speaker diarization unavailable' });
+    // Combine transcript and speakers
+    const combined = combineSpeakerTranscript(transcript.chunks || [], speakers || []);
     parentPort?.postMessage({ text: combined });
-  } catch (err) {
-    parentPort?.postMessage({ error: (err as Error).message });
+  } catch (err: any) {
+    parentPort?.postMessage({ error: err.stack || err.message });
   }
 });
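
For reference, a sketch of driving this worker directly; the message fields match the diff above, while the worker path, audio file, and env var are illustrative:

import { Worker } from 'worker_threads';

const worker = new Worker('./asr.js'); // compiled worker script (illustrative path)
worker.on('message', ({ text, error }) => {
  if(error) console.error(error); // e.g. 'HuggingFace token required'
  else console.log(text);
  worker.terminate();
});
worker.postMessage({
  file: '/tmp/meeting.mp3',    // non-WAV input is now converted through ffmpeg
  speaker: true,               // word timestamps + pyannote diarization
  model: 'whisper-base',
  modelDir: './models',
  token: process.env.HF_TOKEN, // new in 0.7.x: passed through to pyannote
});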

View File: audio.ts

@@ -1,18 +1,19 @@
+import {fileURLToPath} from 'url';
 import {Worker} from 'worker_threads';
-import Path from 'node:path';
 import {AbortablePromise, Ai} from './ai.ts';
 import {canDiarization} from './asr.ts';
+import {dirname, join} from 'path';

 export class Audio {
   constructor(private ai: Ai) {}

-  asr(file: string, options: { model?: string; speaker?: boolean } = {}): AbortablePromise<string | null> {
+  asr(file: string, options: { model?: string; speaker?: boolean | 'id' } = {}): AbortablePromise<string | null> {
     const { model = this.ai.options.asr || 'whisper-base', speaker = false } = options;
     let aborted = false;
     const abort = () => { aborted = true; };
-    const p = new Promise<string | null>((resolve, reject) => {
-      const worker = new Worker(Path.join(import.meta.dirname, 'asr.js'));
+    let p = new Promise<string | null>((resolve, reject) => {
+      const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'asr.js'));
       const handleMessage = ({ text, warning, error }: any) => {
         worker.terminate();
         if(aborted) return;
@@ -31,8 +32,27 @@ export class Audio {
       worker.on('exit', (code) => {
         if(code !== 0 && !aborted) reject(new Error(`Worker exited with code ${code}`));
       });
-      worker.postMessage({file, model, speaker, modelDir: this.ai.options.path});
+      worker.postMessage({file, model, speaker, modelDir: this.ai.options.path, token: this.ai.options.hfToken});
     });
+    // Name speakers using AI
+    if(options.speaker == 'id') {
+      if(!this.ai.language.defaultModel) throw new Error('Configure an LLM for advanced ASR speaker detection');
+      p = p.then(async transcript => {
+        if(!transcript) return transcript;
+        let chunks = this.ai.language.chunk(transcript, 500, 0);
+        if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
+        const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name"}', {
+          system: 'Use this following transcript to identify speakers. Only identify speakers you are sure about',
+          temperature: 0.1,
+        });
+        Object.entries(names).forEach(([speaker, name]) => {
+          transcript = (<string>transcript).replaceAll(`[Speaker ${speaker}]`, `[${name}]`);
+        });
+        return transcript;
+      })
+    }
     return Object.assign(p, { abort });
   }
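
A usage sketch for the widened speaker option; the ai.audio accessor is an assumption (only the Audio class itself appears in this diff):

const text = await ai.audio.asr('meeting.mp3');                        // plain transcript
const diarized = await ai.audio.asr('meeting.mp3', { speaker: true }); // '[Speaker 1]: ...' lines
const named = await ai.audio.asr('meeting.mp3', { speaker: 'id' });    // '[Alice]: ...'; throws unless an LLM is configured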

View File: embedder.ts

@@ -3,9 +3,9 @@ import { parentPort } from 'worker_threads';

 let embedder: any;

-parentPort?.on('message', async ({ id, text, model, modelDir }) => {
+parentPort?.on('message', async ({ text, model, modelDir }) => {
   if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model, {quantized: true, cache_dir: modelDir});
   const output = await embedder(text, { pooling: 'mean', normalize: true });
   const embedding = Array.from(output.data);
-  parentPort?.postMessage({ id, embedding });
+  parentPort?.postMessage({ embedding });
 });

View File: llm.ts

@@ -75,8 +75,8 @@ export type LLMRequest = {
 }

 class LLM {
-  private models: {[model: string]: LLMProvider} = {};
-  private defaultModel!: string;
+  defaultModel!: string;
+  models: {[model: string]: LLMProvider} = {};

   constructor(public readonly ai: Ai) {
     if(!ai.options.llm?.models) return;
@@ -184,7 +184,12 @@ class LLM {
     const system = history[0].role == 'system' ? history[0] : null,
       recent = keep == 0 ? [] : history.slice(-keep),
       process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
-    const summary: any = await this.json(`Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn't already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI. Match this format: {summary: string, facts: [[subject, fact]]}\n\n${process.map(m => `${m.role}: ${m.content}`).join('\n\n')}`, {model: options?.model, temperature: options?.temperature || 0.3});
+    const summary: any = await this.json(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), '{summary: string, facts: [[subject, fact]]}', {
+      system: 'Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn\'t already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI.',
+      model: options?.model,
+      temperature: options?.temperature || 0.3
+    });
     const timestamp = new Date();
     const memory = await Promise.all((summary?.facts || [])?.map(async ([owner, fact]: [string, string]) => {
       const e = await Promise.all([this.embedding(owner), this.embedding(`${owner}: ${fact}`)]);
@@ -250,11 +255,11 @@ class LLM {
   /**
    * Create a vector representation of a string
    * @param {object | string} target Item that will be embedded (objects get converted)
-   * @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
-   * @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
+   * @param {maxTokens?: number, overlapTokens?: number} opts Options for embedding such as chunk sizes
    * @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
    */
-  embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
+  async embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number} = {}) {
+    let {maxTokens = 500, overlapTokens = 50} = opts;
     const embed = (text: string): Promise<number[]> => {
       return new Promise((resolve, reject) => {
         const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
@@ -271,16 +276,16 @@ class LLM {
         worker.on('exit', (code) => {
           if(code !== 0) reject(new Error(`Worker exited with code ${code}`));
         });
-        worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', path: this.ai.options.path});
+        worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', modelDir: this.ai.options.path});
       });
     };
-    const chunks = this.chunk(target, maxTokens, overlapTokens);
-    return Promise.all(chunks.map(async (text, index) => ({
-      index,
-      embedding: await embed(text),
-      text,
-      tokens: this.estimateTokens(text),
-    })));
+    const chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
+    for(let i = 0; i < chunks.length; i++) {
+      const text = chunks[i];
+      const embedding = await embed(text);
+      results.push({index: i, embedding, text, tokens: this.estimateTokens(text)});
+    }
+    return results;
   }

   /**
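Per the "parallel embedding cap" and "One embedding at a time" commits, chunks now embed sequentially (one worker at a time) instead of all at once via Promise.all, and the positional size arguments moved into an options object. A call sketch; the ai.language accessor is assumed from its use in audio.ts above:

const chunks = await ai.language.embedding(longDocument, { maxTokens: 500, overlapTokens: 50 });
// => [{ index: 0, embedding: number[], text: string, tokens: number }, ...]
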
@@ -312,12 +317,16 @@ class LLM {
   /**
    * Ask a question with JSON response
-   * @param {string} message Question
+   * @param {string} text Text to process
+   * @param {string} schema JSON schema the AI should match
    * @param {LLMRequest} options Configuration options and chat history
    * @returns {Promise<{} | {} | RegExpExecArray | null>}
    */
-  async json(message: string, options?: LLMRequest): Promise<any> {
-    let resp = await this.ask(message, {system: 'Respond using a JSON blob matching any provided examples', ...options});
+  async json(text: string, schema: string, options?: LLMRequest): Promise<any> {
+    let resp = await this.ask(text, {...options, system: (options?.system ? `${options.system}\n` : '') + `Only respond using a JSON code block matching this schema:
+\`\`\`json
+${schema}
+\`\`\``});
     if(!resp) return {};
     const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
     const jsonStr = codeBlock ? codeBlock[1].trim() : resp;
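
The hunk is truncated here, but the new signature is fully visible: json(text, schema, options) now takes a free-form schema example and appends it to any caller-supplied system prompt, then extracts the model's fenced JSON reply with the regex above. A hedged call sketch:

const parsed = await ai.language.json(
  'Alice met Bob in Toronto on Tuesday.',
  '{people: [string], place: string}', // free-form example blob, not a formal JSON Schema
  { temperature: 0 },
);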