Improved whisper + pyannote, sentence diarization

2026-02-21 14:16:20 -05:00
parent cec892563e
commit ca66e8e304
3 changed files with 108 additions and 49 deletions

README.md

@@ -3,7 +3,7 @@
 <br />
 <!-- Logo -->
-<img src="https://git.zakscode.com/repo-avatars/a90851ca730480ec37a5c0c2c4f1b4609eee5eadf806eaf16c83ac4cb7493aa9" alt="Logo" width="200" height="200">
+<img alt="Logo" width="200" height="200" src="https://git.zakscode.com/repo-avatars/a82d423674763e7a0c1c945bdbb07e249b2bb786d3c9beae76d5b196a10f5c0f">
 <!-- Title -->
 ### @ztimson/ai-utils
@@ -53,13 +53,15 @@ A TypeScript library that provides a unified interface for working with multiple
 - **Provider Abstraction**: Switch between AI providers without changing your code
 ### Built With
-[![Anthropic](https://img.shields.io/badge/Anthropic-191919?style=for-the-badge&logo=anthropic&logoColor=white)](https://anthropic.com/)
-[![OpenAI](https://img.shields.io/badge/OpenAI-412991?style=for-the-badge&logo=openai&logoColor=white)](https://openai.com/)
-[![Ollama](https://img.shields.io/badge/Ollama-000000?style=for-the-badge&logo=ollama&logoColor=white)](https://ollama.com/)
-[![TensorFlow](https://img.shields.io/badge/TensorFlow-FF6F00?style=for-the-badge&logo=tensorflow&logoColor=white)](https://tensorflow.org/)
-[![Tesseract](https://img.shields.io/badge/Tesseract-3C8FC7?style=for-the-badge&logo=tesseract&logoColor=white)](https://tesseract-ocr.github.io/)
+[![Anthropic](https://img.shields.io/badge/Anthropic-de7356?style=for-the-badge&logo=anthropic&logoColor=white)](https://anthropic.com/)
+[![llama](https://img.shields.io/badge/llama.cpp-fff?style=for-the-badge&logo=ollama&logoColor=black)](https://github.com/ggml-org/llama.cpp)
+[![OpenAI](https://img.shields.io/badge/OpenAI-000?style=for-the-badge&logo=openai-gym&logoColor=white)](https://openai.com/)
+[![Pyannote](https://img.shields.io/badge/Pyannote-458864?style=for-the-badge&logo=python&logoColor=white)](https://github.com/pyannote)
+[![TensorFlow](https://img.shields.io/badge/TensorFlow-fff?style=for-the-badge&logo=tensorflow&logoColor=ff6f00)](https://tensorflow.org/)
+[![Tesseract](https://img.shields.io/badge/Tesseract-B874B2?style=for-the-badge&logo=hack-the-box&logoColor=white)](https://tesseract-ocr.github.io/)
+[![Transformers.js](https://img.shields.io/badge/Transformers.js-000?style=for-the-badge&logo=hugging-face&logoColor=yellow)](https://huggingface.co/docs/transformers.js/en/index)
 [![TypeScript](https://img.shields.io/badge/TypeScript-3178C6?style=for-the-badge&logo=typescript&logoColor=white)](https://typescriptlang.org/)
-[![Whisper](https://img.shields.io/badge/Whisper-412991?style=for-the-badge&logo=openai&logoColor=white)](https://github.com/ggerganov/whisper.cpp)
+[![Whisper](https://img.shields.io/badge/Whisper.cpp-000?style=for-the-badge&logo=openai-gym&logoColor=white)](https://github.com/ggerganov/whisper.cpp)
 ## Setup
@@ -88,6 +90,8 @@ A TypeScript library that provides a unified interface for working with multiple
 #### Prerequisites
 - [Node.js](https://nodejs.org/en/download)
+- _[Whisper.cpp](https://github.com/ggml-org/whisper.cpp/releases) (ASR)_
+- _[Pyannote](https://github.com/pyannote) (ASR Diarization):_ `pip install pyannote.audio`
 #### Instructions
 1. Install the dependencies: `npm i`
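
For context, a minimal usage sketch of the `asr()` API this commit reworks. Only the `whisper` option and the `asr()` signature appear in this diff; the `Ai` constructor wiring and the `ai.audio` accessor name are assumptions:

```typescript
import {Ai} from '@ztimson/ai-utils';

// Assumed wiring: only the `whisper` binary path option is visible in this commit.
const ai = new Ai({whisper: '/usr/local/bin/whisper-cli'});

// Signature from this commit: diarization can be false, true, or 'id'.
// `ai.audio` is a hypothetical accessor; the module's real name isn't shown here.
const job = ai.audio.asr('./meeting.mp3', {diarization: 'id'});
setTimeout(() => job.abort(), 60_000); // AbortablePromise carries an abort() handle
const transcript = await job;
console.log(transcript); // e.g. "[Zak]: Hello everyone. ..."
```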

package.json

@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.7.11",
+  "version": "0.8.0",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",

View File

@@ -1,7 +1,8 @@
 import {execSync, spawn} from 'node:child_process';
 import {mkdtempSync} from 'node:fs';
-import fs, {rm} from 'node:fs/promises';
+import fs from 'node:fs/promises';
 import {tmpdir} from 'node:os';
+import * as path from 'node:path';
 import Path, {join} from 'node:path';
 import {AbortablePromise, Ai} from './ai.ts';
@@ -38,23 +39,36 @@ print(json.dumps(segments))
 		let proc: any;
 		const p = new Promise<any>((resolve, reject) => {
 			this.downloadAsrModel(opts.model).then(m => {
-				let output = '';
-				const args = [opts.diarization ? '-owts' : '-nt', '-m', m, '-f', file];
-				proc = spawn(<string>this.ai.options.whisper, args, {stdio: ['ignore', 'pipe', 'ignore']});
-				proc.on('error', (err: Error) => reject(err));
-				proc.stdout.on('data', (data: Buffer) => output += data.toString());
-				proc.on('close', (code: number) => {
-					if(code === 0) {
-						if(opts.diarization) {
+				if(opts.diarization) {
+					let output = path.join(path.dirname(file), 'transcript');
+					proc = spawn(<string>this.ai.options.whisper,
+						['-m', m, '-f', file, '-np', '-ml', '1', '-oj', '-of', output],
+						{stdio: ['ignore', 'ignore', 'pipe']}
+					);
+					proc.on('error', (err: Error) => reject(err));
+					proc.on('close', async (code: number) => {
+						if(code === 0) {
+							output = await fs.readFile(output + '.json', 'utf-8');
+							fs.rm(output + '.json').catch(() => { });
 							try { resolve(JSON.parse(output)); }
 							catch(e) { reject(new Error('Failed to parse whisper JSON')); }
 						} else {
-							resolve(output.trim() || null);
+							reject(new Error(`Exit code ${code}`));
 						}
-					} else {
-						reject(new Error(`Exit code ${code}`));
-					}
-				});
+					});
+				} else {
+					let output = '';
+					proc = spawn(<string>this.ai.options.whisper, ['-m', m, '-f', file, '-np', '-nt']);
+					proc.on('error', (err: Error) => reject(err));
+					proc.stdout.on('data', (data: Buffer) => output += data.toString());
+					proc.on('close', async (code: number) => {
+						if(code === 0) {
+							resolve(output.trim() || null);
+						} else {
+							reject(new Error(`Exit code ${code}`));
+						}
+					});
+				}
 			});
 		});
 		return <any>Object.assign(p, {abort: () => proc?.kill('SIGTERM')});
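
In the diarization branch above, `-ml 1` caps the segment length so whisper.cpp emits roughly one word per segment, and `-oj`/`-of` write those segments to a JSON file next to the input, which is where the per-word offsets come from. A sketch of the slice of that JSON the code relies on, with field names taken from `combineSpeakerTranscript` below (everything else in the file is ignored):

```typescript
// Minimal view of whisper.cpp's -oj output as this module consumes it.
interface WhisperWord {
	text: string;
	offsets: {from: number; to: number}; // milliseconds into the audio
}

interface WhisperJson {
	transcription: WhisperWord[];
}

// The combining code converts offsets to seconds before matching speaker turns:
const toSeconds = (w: WhisperWord) => ({start: w.offsets.from / 1000, end: w.offsets.to / 1000});
```
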
@@ -64,7 +78,7 @@ print(json.dumps(segments))
 		let aborted = false, abort = () => { aborted = true; };
 		const checkPython = (cmd: string) => {
 			return new Promise<boolean>((resolve) => {
-				const proc = spawn(cmd, ['-c', 'import pyannote.audio']);
+				const proc = spawn(cmd, ['-W', 'ignore', '-c', 'import pyannote.audio']);
 				proc.on('close', (code: number) => resolve(code === 0));
 				proc.on('error', () => resolve(false));
 			});
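
The new `-W ignore` keeps pyannote's deprecation warnings off stderr, which matters in `runDiarization` below where stderr is forwarded to `console.error`. A standalone sketch of the same probe with an interpreter fallback (the candidate list is an assumption; this hunk only shows the check itself):

```typescript
import {spawn} from 'node:child_process';

// Resolve the first interpreter that can import pyannote.audio, or null if none can.
async function findPython(candidates = ['python3', 'python']): Promise<string | null> {
	for(const cmd of candidates) {
		const ok = await new Promise<boolean>(resolve => {
			const proc = spawn(cmd, ['-W', 'ignore', '-c', 'import pyannote.audio']);
			proc.on('close', code => resolve(code === 0));
			proc.on('error', () => resolve(false)); // binary missing entirely
		});
		if(ok) return cmd;
	}
	return null;
}
```
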
@@ -79,7 +93,7 @@ print(json.dumps(segments))
 		return new Promise((resolve, reject) => {
 			if(aborted) return;
 			let output = '';
-			const proc = spawn(binary, ['-c', this.pyannote, file]);
+			const proc = spawn(binary, ['-W', 'ignore', '-c', this.pyannote, file]);
 			proc.stdout.on('data', (data: Buffer) => output += data.toString());
 			proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
 			proc.on('close', (code: number) => {
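
The Python side ends with `print(json.dumps(segments))` (visible in the hunk headers), and the combining code reads `seg.start`, `seg.end`, and `seg.speaker` from the parsed stdout, so the payload presumably looks like this (inferred from usage, not shown verbatim in the diff):

```typescript
// Speaker turns as emitted by the embedded pyannote script; times in seconds.
interface SpeakerSegment {
	start: number;
	end: number;
	speaker: string; // pyannote labels like "SPEAKER_00"
}

const parseDiarization = (stdout: string): SpeakerSegment[] => JSON.parse(stdout);
```
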
@@ -97,30 +111,65 @@ print(json.dumps(segments))
 		return <any>Object.assign(p, {abort});
 	}
-	private combineSpeakerTranscript(transcript: any, speakers: any[]): string {
+	private async combineSpeakerTranscript(punctuatedText: string, timestampData: any, speakers: any[]): Promise<string> {
 		const speakerMap = new Map();
 		let speakerCount = 0;
 		speakers.forEach((seg: any) => {
 			if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
 		});
+		const sentences = punctuatedText.match(/[^.!?]+[.!?]+/g) || [punctuatedText];
 		const lines: string[] = [];
-		let currentSpeaker = -1;
-		let currentText = '';
-		transcript.transcription.forEach((word: any) => {
-			const time = word.offsets.from / 1000; // Convert ms to seconds
-			const speaker = speakers.find((s: any) => time >= s.start && time <= s.end);
-			const speakerNum = speaker ? speakerMap.get(speaker.speaker) : 1;
-			if (speakerNum !== currentSpeaker) {
-				if(currentText) lines.push(`[Speaker ${currentSpeaker}]: ${currentText.trim()}`);
-				currentSpeaker = speakerNum;
-				currentText = word.text;
-			} else {
-				currentText += ' ' + word.text;
-			}
+		sentences.forEach(sentence => {
+			sentence = sentence.trim();
+			if(!sentence) return;
+			const words = sentence.toLowerCase().replace(/[^\w\s]/g, '').split(/\s+/);
+			let startTime = Infinity, endTime = 0;
+			const wordTimings: {start: number, end: number}[] = [];
+			timestampData.transcription.forEach((word: any) => {
+				const wordText = word.text.trim().toLowerCase();
+				if(words.some(w => wordText.includes(w))) {
+					const start = word.offsets.from / 1000;
+					const end = word.offsets.to / 1000;
+					wordTimings.push({start, end});
+					if(start < startTime) startTime = start;
+					if(end > endTime) endTime = end;
+				}
+			});
+			if(startTime === Infinity) return;
+			// Weight by word-level overlap instead of sentence span
+			const speakerScores = new Map<number, number>();
+			wordTimings.forEach(wt => {
+				speakers.forEach((seg: any) => {
+					const overlap = Math.max(0, Math.min(wt.end, seg.end) - Math.max(wt.start, seg.start));
+					const duration = wt.end - wt.start;
+					if(duration > 0) {
+						const score = overlap / duration; // % of word covered
+						const spkNum = speakerMap.get(seg.speaker);
+						speakerScores.set(spkNum, (speakerScores.get(spkNum) || 0) + score);
+					}
+				});
+			});
+			let bestSpeaker = 1;
+			let maxScore = 0;
+			speakerScores.forEach((score, speaker) => {
+				if(score > maxScore) {
+					maxScore = score;
+					bestSpeaker = speaker;
+				}
+			});
+			lines.push(`[Speaker ${bestSpeaker}]: ${sentence}`);
 		});
-		if(currentText) lines.push(`[Speaker ${currentSpeaker}]: ${currentText.trim()}`);
-		return lines.join('\n');
+		return lines.join('\n').trim();
 	}
 	asr(file: string, options: { model?: string; diarization?: boolean | 'id' } = {}): AbortablePromise<string | null> {
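
The new attribution strategy in `combineSpeakerTranscript`: split the punctuated transcript into sentences, find each sentence's word timings in the `-ml 1` output, let every word vote for each speaker segment in proportion to how much of the word the segment covers, and assign the sentence to the top scorer. Reduced to a self-contained sketch with toy numbers (illustrative data, not from the commit):

```typescript
interface Segment {start: number; end: number; speaker: string}
interface Word {start: number; end: number}

// Score speakers by summed per-word coverage, as combineSpeakerTranscript does.
function bestSpeaker(words: Word[], segments: Segment[]): string {
	const scores = new Map<string, number>();
	for(const w of words) {
		const duration = w.end - w.start;
		if(duration <= 0) continue;
		for(const seg of segments) {
			const overlap = Math.max(0, Math.min(w.end, seg.end) - Math.max(w.start, seg.start));
			scores.set(seg.speaker, (scores.get(seg.speaker) || 0) + overlap / duration);
		}
	}
	let best = 'SPEAKER_00', max = 0;
	scores.forEach((score, speaker) => { if(score > max) { max = score; best = speaker; } });
	return best;
}

// Toy data: three words, the second straddling a speaker change at t=1.0s.
const segments: Segment[] = [
	{start: 0, end: 1.0, speaker: 'SPEAKER_00'},
	{start: 1.0, end: 3.0, speaker: 'SPEAKER_01'},
];
const words: Word[] = [{start: 0.1, end: 0.5}, {start: 0.9, end: 1.2}, {start: 1.3, end: 1.8}];
console.log(bestSpeaker(words, segments)); // SPEAKER_01 (scores ~1.67 vs ~1.33)
```

Compared with the previous word-by-word speaker switching, voting once per sentence keeps punctuation intact and avoids flip-flopping on words that straddle a segment boundary.
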
@@ -128,30 +177,36 @@ print(json.dumps(segments))
 		const tmp = join(mkdtempSync(join(tmpdir(), 'audio-')), 'converted.wav');
 		execSync(`ffmpeg -i "${file}" -ar 16000 -ac 1 -f wav "${tmp}"`, { stdio: 'ignore' });
-		const clean = () => rm(Path.dirname(tmp), { recursive: true, force: true }).catch(() => {});
-		const transcript = this.runAsr(tmp, {model: options.model, diarization: !!options.diarization});
-		const diarization: any = options.diarization ? this.runDiarization(tmp) : Promise.resolve(null);
+		const clean = () => fs.rm(Path.dirname(tmp), {recursive: true, force: true}).catch(() => {});
+		const transcript = this.runAsr(tmp, {model: options.model, diarization: false});
+		const timestamps: any = !options.diarization ? Promise.resolve(null) : this.runAsr(tmp, {model: options.model, diarization: true});
+		const diarization: any = !options.diarization ? Promise.resolve(null) : this.runDiarization(tmp);
 		let aborted = false, abort = () => {
 			aborted = true;
 			transcript.abort();
+			timestamps?.abort?.();
 			diarization?.abort?.();
 			clean();
 		};
-		const response = Promise.all([transcript, diarization]).then(async ([t, d]) => {
-			if(aborted || !options.diarization) return t;
-			t = this.combineSpeakerTranscript(t, d);
+		const response = Promise.allSettled([transcript, timestamps, diarization]).then(async ([t, ts, d]) => {
+			if(t.status == 'rejected') throw new Error('Whisper.cpp punctuated:\n' + t.reason);
+			if(ts.status == 'rejected') throw new Error('Whisper.cpp timestamps:\n' + ts.reason);
+			if(d.status == 'rejected') throw new Error('Pyannote:\n' + d.reason);
+			if(aborted || !options.diarization) return t.value;
+			let transcript = await this.combineSpeakerTranscript(t.value, ts.value, d.value);
 			if(!aborted && options.diarization === 'id') {
 				if(!this.ai.language.defaultModel) throw new Error('Configure an LLM for advanced ASR speaker detection');
-				let chunks = this.ai.language.chunk(t, 500, 0);
+				let chunks = this.ai.language.chunk(transcript, 500, 0);
 				if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
 				const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name", 2: "Second Name"}', {
 					system: 'Use the following transcript to identify speakers. Only identify speakers you are positive about, don\'t mention speakers you are unsure about in your response',
 					temperature: 0.1,
 				});
-				Object.entries(names).forEach(([speaker, name]) => t = t.replaceAll(`[Speaker ${speaker}]`, `[${name}]`));
+				Object.entries(names).forEach(([speaker, name]) => transcript = transcript.replaceAll(`[Speaker ${speaker}]`, `[${name}]`));
 			}
-			return t;
+			return transcript;
 		}).finally(() => clean());
 		return <any>Object.assign(response, {abort});
 	}
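
The reworked `asr()` runs three abortable jobs in parallel and switches from `Promise.all` to `Promise.allSettled` so a failure reports which stage broke instead of surfacing an anonymous rejection. The skeleton of that pattern, with generic task names (the real calls are `runAsr` and `runDiarization` above):

```typescript
type Abortable<T> = Promise<T> & {abort: () => void};

// Combine labeled abortable tasks into one abortable promise that names its failures.
function allLabeled(tasks: Record<string, Abortable<any>>): Abortable<any[]> {
	const entries = Object.entries(tasks);
	const p = Promise.allSettled(entries.map(([, task]) => task)).then(results =>
		results.map((r, i) => {
			if(r.status === 'rejected') throw new Error(`${entries[i][0]}:\n${r.reason}`);
			return r.value;
		})
	);
	return Object.assign(p, {abort: () => entries.forEach(([, task]) => task.abort())});
}
```

The commit inlines this as three explicit status checks, which is arguably clearer for a fixed trio of tasks; the sketch only shows the shape of the pattern.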