Compare commits

2 commits: 0.7.4 ... 0.7.6

SHA1        Message           Date                        Checks (Publish Library)
473424ae23  segfault fix      2026-02-20 17:31:49 -05:00  all successful: Build NPM Project 33s, Tag Version 6s
9b831f7d95  Better ASR IDing  2026-02-20 16:55:25 -05:00  all successful: Build NPM Project 34s, Tag Version 5s
3 changed files with 11 additions and 16 deletions

package.json

@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.7.4",
+  "version": "0.7.6",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",

asr.ts (filename inferred; the compiled worker is loaded as asr.js below)

@@ -7,8 +7,6 @@ import { join } from 'node:path';
 import { tmpdir } from 'node:os';
 import wavefile from 'wavefile';
 
-let whisperPipeline: any;
-
 export async function canDiarization(): Promise<string | null> {
 	const checkPython = (cmd: string) => {
 		return new Promise<boolean>((resolve) => {
@@ -110,30 +108,27 @@ function prepareAudioBuffer(file: string): [string, Float32Array] {
 }
 
 parentPort?.on('message', async ({ file, speaker, model, modelDir, token }) => {
+	let tempFile = null;
 	try {
-		if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
 		// Prepare audio file
+		const asr: any = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
 		const [f, buffer] = prepareAudioBuffer(file);
 		// Fetch transcript and speakers
+		tempFile = f !== file ? f : null;
 		const hasDiarization = await canDiarization();
 		const [transcript, speakers] = await Promise.all([
-			whisperPipeline(buffer, {return_timestamps: speaker ? 'word' : false}),
+			asr(buffer, {return_timestamps: speaker ? 'word' : false}),
 			(!speaker || !token || !hasDiarization) ? Promise.resolve(): runDiarization(hasDiarization, f, modelDir, token),
 		]);
-		if(file != f) rmSync(f, { recursive: true, force: true });
 		// Return any results / errors if no more processing required
 		const text = transcript.text?.trim() || null;
 		if(!speaker) return parentPort?.postMessage({ text });
 		if(!token) return parentPort?.postMessage({ text, error: 'HuggingFace token required' });
 		if(!hasDiarization) return parentPort?.postMessage({ text, error: 'Speaker diarization unavailable' });
 		// Combine transcript and speakers
 		const combined = combineSpeakerTranscript(transcript.chunks || [], speakers || []);
 		parentPort?.postMessage({ text: combined });
 	} catch (err: any) {
 		parentPort?.postMessage({ error: err.stack || err.message });
+	} finally {
+		if(tempFile) rmSync(tempFile, { recursive: true, force: true });
 	}
 });
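
Note: the net effect of the two hunks above is that the Whisper pipeline is no longer cached in a module-level variable between jobs (a plausible source of the segfault being fixed), and temp-file cleanup moved into a finally block so it also runs on errors. For context, a minimal sketch of a parent-thread caller, assuming only the message/response shape visible above ({ file, speaker, model, modelDir, token } in; { text } or { error } out); the function name and defaults here are illustrative, not part of the diff:

import { Worker } from 'node:worker_threads';
import { join, dirname } from 'node:path';
import { fileURLToPath } from 'node:url';

// Illustrative driver for the worker above; not part of this diff.
function transcribe(file: string, model: string, modelDir?: string): Promise<string | null> {
	return new Promise((resolve, reject) => {
		const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'asr.js'));
		worker.once('message', ({ text, error }: any) => {
			setTimeout(() => worker.terminate(), 1000); // same delayed teardown the Audio class uses below
			if(error) reject(new Error(error));
			else resolve(text);
		});
		worker.once('error', (err) => {
			setTimeout(() => worker.terminate(), 1000);
			reject(err);
		});
		// speaker: false => plain transcript; no diarization, so no HuggingFace token needed
		worker.postMessage({ file, speaker: false, model, modelDir });
	});
}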

audio.ts (filename inferred from the Audio class it exports)

@@ -15,7 +15,7 @@ export class Audio {
 		let p = new Promise<string | null>((resolve, reject) => {
 			const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'asr.js'));
 			const handleMessage = ({ text, warning, error }: any) => {
-				worker.terminate();
+				setTimeout(() => worker.terminate(), 1000);
 				if(aborted) return;
 				if(error) reject(new Error(error));
 				else {
@@ -24,7 +24,7 @@ export class Audio {
 				}
 			};
 			const handleError = (err: Error) => {
-				worker.terminate();
+				setTimeout(() => worker.terminate(), 1000);
 				if(!aborted) reject(err);
 			};
 			worker.on('message', handleMessage);
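
Note: both teardown paths now defer worker.terminate() by a second instead of killing the thread synchronously inside its own event handlers; presumably this gives the worker (and onnxruntime's native side) time to unwind before the thread is destroyed, which fits the segfault fix in 473424ae23.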
@@ -42,8 +42,8 @@ export class Audio {
 		if(!transcript) return transcript;
 		let chunks = this.ai.language.chunk(transcript, 500, 0);
 		if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
-		const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name"}', {
-			system: 'Use this following transcript to identify speakers. Only identify speakers you are sure about',
+		const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name", 2: "Second Name"}', {
+			system: 'Use the following transcript to identify speakers. Only identify speakers you are positive about, dont mention speakers you are unsure about in your response',
 			temperature: 0.1,
 		});
 		Object.entries(names).forEach(([speaker, name]) => {
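
The hunk ends mid-loop, so the forEach body is not shown. A plausible, purely illustrative continuation would swap the numeric diarization labels for the detected names (the real body may differ):

// Hypothetical body; the actual code is outside this hunk.
let named: string = transcript;
Object.entries(names).forEach(([speaker, name]) => {
	// e.g. "Speaker 1:" -> "Alice:"
	named = named.replaceAll(`Speaker ${speaker}:`, `${name}:`);
});
return named;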