diff --git a/package.json b/package.json
index 72d8c15..9e21d97 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
	"name": "@ztimson/ai-utils",
-	"version": "0.6.5",
+	"version": "0.6.6",
	"description": "AI Utility library",
	"author": "Zak Timson",
	"license": "MIT",
diff --git a/src/asr.ts b/src/asr.ts
new file mode 100644
index 0000000..d177a5f
--- /dev/null
+++ b/src/asr.ts
@@ -0,0 +1,124 @@
+import { pipeline } from '@xenova/transformers';
+import { parentPort } from 'worker_threads';
+import * as fs from 'node:fs';
+import wavefile from 'wavefile';
+import { spawn } from 'node:child_process';
+
+let whisperPipeline: any;
+
+export async function canDiarization(): Promise<boolean> {
+	return new Promise((resolve) => {
+		const proc = spawn('python3', ['-c', 'import pyannote.audio']);
+		proc.on('close', (code: number) => resolve(code === 0));
+		proc.on('error', () => resolve(false));
+	});
+}
+
+async function runDiarization(audioPath: string, torchHome: string): Promise<any[]> {
+	const script = `
+import sys
+import json
+import os
+from pyannote.audio import Pipeline
+
+os.environ['TORCH_HOME'] = "${torchHome}"
+pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
+diarization = pipeline(sys.argv[1])
+
+segments = []
+for turn, _, speaker in diarization.itertracks(yield_label=True):
+    segments.append({
+        "start": turn.start,
+        "end": turn.end,
+        "speaker": speaker
+    })
+
+print(json.dumps(segments))
+`;
+
+	return new Promise((resolve, reject) => {
+		let output = '';
+		const proc = spawn('python3', ['-c', script, audioPath]);
+		proc.stdout.on('data', (data: Buffer) => output += data.toString());
+		proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
+		proc.on('close', (code: number) => {
+			if(code === 0) {
+				try {
+					resolve(JSON.parse(output));
+				} catch (err) {
+					reject(new Error('Failed to parse diarization output'));
+				}
+			} else {
+				reject(new Error(`Python process exited with code ${code}`));
+			}
+		});
+		proc.on('error', reject);
+	});
+}
+
+function combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
+	const speakerMap = new Map();
+	let speakerCount = 0;
+	speakers.forEach((seg: any) => {
+		if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
+	});
+
+	const lines: string[] = [];
+	let currentSpeaker = -1;
+	let currentText = '';
+	chunks.forEach((chunk: any) => {
+		const time = chunk.timestamp[0];
+		const speaker = speakers.find((s: any) => time >= s.start && time <= s.end);
+		const speakerNum = speaker ? speakerMap.get(speaker.speaker) : 1;
+		if (speakerNum !== currentSpeaker) {
+			if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
+			currentSpeaker = speakerNum;
+			currentText = chunk.text;
+		} else {
+			currentText += chunk.text;
+		}
+	});
+	if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
+	return lines.join('\n');
+}
+
+parentPort?.on('message', async ({ path, model, speaker, torchHome }) => {
+	try {
+		if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: torchHome, quantized: true});
+
+		// Prepare audio file (convert to mono channel wave)
+		const wav = new wavefile.WaveFile(fs.readFileSync(path));
+		wav.toBitDepth('32f');
+		wav.toSampleRate(16000);
+		const samples = wav.getSamples();
+		let buffer;
+		if(Array.isArray(samples)) { // stereo to mono - average the channels
+			const left = samples[0];
+			const right = samples[1];
+			buffer = new Float32Array(left.length);
+			for (let i = 0; i < left.length; i++) buffer[i] = (left[i] + right[i]) / 2;
+		} else {
+			buffer = samples;
+		}
+
+		// Transcribe
+		const transcriptResult = await whisperPipeline(buffer, {return_timestamps: speaker ? 'word' : false});
+		if(!speaker) {
+			parentPort?.postMessage({ text: transcriptResult.text?.trim() || null });
+			return;
+		}
+
+		// Speaker Diarization
+		const hasDiarization = await canDiarization();
+		if(!hasDiarization) {
+			parentPort?.postMessage({ text: transcriptResult.text?.trim() || null, warning: 'Speaker diarization unavailable' });
+			return;
+		}
+
+		const speakers = await runDiarization(path, torchHome);
+		const combined = combineSpeakerTranscript(transcriptResult.chunks || [], speakers);
+		parentPort?.postMessage({ text: combined });
+	} catch (err) {
+		parentPort?.postMessage({ error: (err as Error).message });
+	}
+});
diff --git a/src/audio.ts b/src/audio.ts
index 50104e0..d41d99b 100644
--- a/src/audio.ts
+++ b/src/audio.ts
@@ -1,133 +1,40 @@
-import {spawn} from 'node:child_process';
-import {pipeline} from '@xenova/transformers';
-import * as fs from 'node:fs';
+import {Worker} from 'worker_threads';
+import path from 'node:path';
 import {AbortablePromise, Ai} from './ai.ts';
-import wavefile from 'wavefile';
+import {canDiarization} from './asr.ts';
 
 export class Audio {
-	private whisperPipeline: any;
+	constructor(private ai: Ai) {}
 
-	constructor(private ai: Ai) { }
-
-	private combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
-		const speakerMap = new Map();
-		let speakerCount = 0;
-		speakers.forEach((seg: any) => {
-			if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
-		});
-
-		const lines: string[] = [];
-		let currentSpeaker = -1;
-		let currentText = '';
-		chunks.forEach((chunk: any) => {
-			const time = chunk.timestamp[0];
-			const speaker = speakers.find((s: any) => time >= s.start && time <= s.end);
-			const speakerNum = speaker ? speakerMap.get(speaker.speaker) : 1;
-			if (speakerNum !== currentSpeaker) {
-				if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
-				currentSpeaker = speakerNum;
-				currentText = chunk.text;
-			} else {
-				currentText += chunk.text;
-			}
-		});
-		if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
-		return lines.join('\n');
-	}
-
-	async canDiarization(): Promise<boolean> {
-		return new Promise((resolve) => {
-			const proc = spawn('python3', ['-c', 'import pyannote.audio']);
-			proc.on('close', (code: number) => resolve(code === 0));
-			proc.on('error', () => resolve(false));
-		});
-	}
-
-	private async runDiarization(audioPath: string): Promise<any[]> {
-		if(!await this.canDiarization()) throw new Error('Pyannote is not installed: pip install pyannote.audio');
-		const script = `
-import sys
-import json
-from pyannote.audio import Pipeline
-
-os.environ['TORCH_HOME'] = "${this.ai.options.path}"
-pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
-diarization = pipeline(sys.argv[1])
-
-segments = []
-for turn, _, speaker in diarization.itertracks(yield_label=True):
-    segments.append({
-        "start": turn.start,
-        "end": turn.end,
-        "speaker": speaker
-    })
-
-print(json.dumps(segments))
-`;
-
-		return new Promise((resolve, reject) => {
-			let output = '';
-			const proc = spawn('python3', ['-c', script, audioPath]);
-			proc.stdout.on('data', (data: Buffer) => output += data.toString());
-			proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
-			proc.on('close', (code: number) => {
-				if(code === 0) {
-					try {
-						resolve(JSON.parse(output));
-					} catch (err) {
-						reject(new Error('Failed to parse diarization output'));
-					}
-				} else {
-					reject(new Error(`Python process exited with code ${code}`));
-				}
-			});
-
-			proc.on('error', reject);
-		});
-	}
-
-	asr(path: string, options: { model?: string; speaker?: boolean } = {}): AbortablePromise {
+	asr(filepath: string, options: { model?: string; speaker?: boolean } = {}): AbortablePromise {
 		const { model = this.ai.options.asr || 'whisper-base', speaker = false } = options;
 		let aborted = false;
 		const abort = () => { aborted = true; };
-		const p = new Promise(async (resolve, reject) => {
-			try {
-				if(aborted) return resolve(null);
-				if(!this.whisperPipeline) this.whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, { cache_dir: this.ai.options.path, quantized: true });
-
-				// Prepare audio file (convert to mono channel wave)
-				if(aborted) return resolve(null);
-				const wav = new wavefile.WaveFile(fs.readFileSync(path));
-				wav.toBitDepth('32f');
-				wav.toSampleRate(16000);
-				const samples = wav.getSamples();
-				let buffer;
-				if(Array.isArray(samples)) { // stereo to mono - average the channels
-					const left = samples[0];
-					const right = samples[1];
-					buffer = new Float32Array(left.length);
-					for (let i = 0; i < left.length; i++) buffer[i] = (left[i] + right[i]) / 2;
-				} else {
-					buffer = samples;
+		const p = new Promise((resolve, reject) => {
+			const worker = new Worker(path.join(import.meta.dirname, 'asr.js'));
+			const handleMessage = ({ text, warning, error }: any) => {
+				worker.terminate();
+				if(aborted) return;
+				if(error) reject(new Error(error));
+				else {
+					if(warning) console.warn(warning);
+					resolve(text);
 				}
-
-				// Transcribe
-				if(aborted) return resolve(null);
-				const transcriptResult = await this.whisperPipeline(buffer, {return_timestamps: speaker ? 'word' : false});
-				if(!speaker) return resolve(transcriptResult.text?.trim() || null);
-
-				// Speaker Diarization
-				if(aborted) return resolve(null);
-				const speakers = await this.runDiarization(path);
-				if(aborted) return resolve(null);
-				const combined = this.combineSpeakerTranscript(transcriptResult.chunks || [], speakers);
-				resolve(combined);
-			} catch (err) {
-				reject(err);
-			}
+			};
+			const handleError = (err: Error) => {
+				worker.terminate();
+				if(!aborted) reject(err);
+			};
+			worker.on('message', handleMessage);
+			worker.on('error', handleError);
+			worker.on('exit', (code) => {
+				if(code !== 0 && !aborted) reject(new Error(`Worker exited with code ${code}`));
+			});
+			worker.postMessage({path: filepath, model, speaker, torchHome: this.ai.options.path,});
 		});
-
 		return Object.assign(p, { abort });
 	}
+
+	canDiarization = canDiarization;
 }
 
diff --git a/src/index.ts b/src/index.ts
index b25cf61..a27ae96 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,5 +1,6 @@
 export * from './ai';
 export * from './antrhopic';
+export * from './asr';
 export * from './audio';
 export * from './embedder'
 export * from './llm';
diff --git a/src/llm.ts b/src/llm.ts
index 1b13e0b..fdc7e5c 100644
--- a/src/llm.ts
+++ b/src/llm.ts
@@ -75,22 +75,10 @@ export type LLMRequest = {
 }
 
 class LLM {
-	private embedWorker: Worker | null = null;
-	private embedQueue = new Map<number, { resolve: (embedding: number[]) => void; reject: (error: any) => void }>();
-	private embedId = 0;
 	private models: {[model: string]: LLMProvider} = {};
 	private defaultModel!: string;
 
 	constructor(public readonly ai: Ai) {
-		this.embedWorker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
-		this.embedWorker.on('message', ({ id, embedding }) => {
-			const pending = this.embedQueue.get(id);
-			if (pending) {
-				pending.resolve(embedding);
-				this.embedQueue.delete(id);
-			}
-		});
-
 		if(!ai.options.llm?.models) return;
 		Object.entries(ai.options.llm.models).forEach(([model, config]) => {
 			if(!this.defaultModel) this.defaultModel = model;
@@ -269,14 +257,21 @@ class LLM {
 	embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
 		const embed = (text: string): Promise<number[]> => {
 			return new Promise((resolve, reject) => {
-				const id = this.embedId++;
-				this.embedQueue.set(id, { resolve, reject });
-				this.embedWorker?.postMessage({
-					id,
-					text,
-					model: this.ai.options?.embedder || 'bge-small-en-v1.5',
-					path: this.ai.options.path
+				const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
+				const handleMessage = ({ embedding }: any) => {
+					worker.terminate();
+					resolve(embedding);
+				};
+				const handleError = (err: Error) => {
+					worker.terminate();
+					reject(err);
+				};
+				worker.on('message', handleMessage);
+				worker.on('error', handleError);
+				worker.on('exit', (code) => {
+					if(code !== 0) reject(new Error(`Worker exited with code ${code}`));
 				});
+				worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', path: this.ai.options.path});
 			});
 		};
 		const chunks = this.chunk(target, maxTokens, overlapTokens);
diff --git a/vite.config.ts b/vite.config.ts
index ef51e05..30b190f 100644
--- a/vite.config.ts
+++ b/vite.config.ts
@@ -6,6 +6,7 @@ export default defineConfig({
 	build: {
 		lib: {
 			entry: {
+				asr: './src/asr.ts',
 				index: './src/index.ts',
 				embedder: './src/embedder.ts',
 			},
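
Reviewer note: a minimal usage sketch of the reworked worker-based Audio.asr() API. Only options.path, options.asr, the {speaker} flag, and the abort() behaviour come from this diff; the Ai constructor shape and the ai.audio accessor are assumptions based on the existing ai.ts and are illustrative only.

	import { Ai } from '@ztimson/ai-utils';

	// Assumed constructor/options shape -- adjust to the real Ai API.
	const ai = new Ai({ path: './models', asr: 'whisper-base' });

	async function transcribe() {
		// asr() now spawns a worker thread per call (Whisper + optional pyannote diarization)
		// and resolves with the transcript text.
		const job = ai.audio.asr('./meeting.wav', { speaker: true });

		// The returned promise still exposes abort(); after abort() the worker's
		// eventual result is ignored rather than resolved.
		// setTimeout(() => job.abort(), 60_000);

		const transcript = await job;
		console.log(transcript); // "[speaker 1]: ..." lines when diarization is available
	}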