Use one-off workers to process requests without blocking
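
The idea: each ASR or embedding request now spawns a fresh worker thread, receives a single reply, and terminates the worker, so model inference never blocks the main event loop. A rough caller-side sketch of the resulting API (the `Ai` constructor options and the `audio` accessor are assumptions inferred from the code below, not part of this diff):

    import {Ai} from '@ztimson/ai-utils';

    // Assumed setup; option names mirror the this.ai.options.* usages in src/audio.ts
    const ai = new Ai({path: './models', asr: 'whisper-base'});

    // Transcription runs in a one-off worker; the promise carries an abort() handle
    const job = ai.audio.asr('./meeting.wav', {speaker: true});
    const transcript = await job; // or call job.abort() to discard the result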

package.json
@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.6.5",
+  "version": "0.6.6",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",

src/asr.ts (new file)
@@ -0,0 +1,124 @@
+import { pipeline } from '@xenova/transformers';
+import { parentPort } from 'worker_threads';
+import * as fs from 'node:fs';
+import wavefile from 'wavefile';
+import { spawn } from 'node:child_process';
+
+let whisperPipeline: any;
+
+export async function canDiarization(): Promise<boolean> {
+    return new Promise((resolve) => {
+        const proc = spawn('python3', ['-c', 'import pyannote.audio']);
+        proc.on('close', (code: number) => resolve(code === 0));
+        proc.on('error', () => resolve(false));
+    });
+}
+
+async function runDiarization(audioPath: string, torchHome: string): Promise<any[]> {
+    const script = `
+import sys
+import json
+import os
+from pyannote.audio import Pipeline
+
+os.environ['TORCH_HOME'] = "${torchHome}"
+pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
+diarization = pipeline(sys.argv[1])
+
+segments = []
+for turn, _, speaker in diarization.itertracks(yield_label=True):
+    segments.append({
+        "start": turn.start,
+        "end": turn.end,
+        "speaker": speaker
+    })
+
+print(json.dumps(segments))
+`;
+
+    return new Promise((resolve, reject) => {
+        let output = '';
+        const proc = spawn('python3', ['-c', script, audioPath]);
+        proc.stdout.on('data', (data: Buffer) => output += data.toString());
+        proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
+        proc.on('close', (code: number) => {
+            if(code === 0) {
+                try {
+                    resolve(JSON.parse(output));
+                } catch (err) {
+                    reject(new Error('Failed to parse diarization output'));
+                }
+            } else {
+                reject(new Error(`Python process exited with code ${code}`));
+            }
+        });
+        proc.on('error', reject);
+    });
+}
+
+function combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
+    const speakerMap = new Map();
+    let speakerCount = 0;
+    speakers.forEach((seg: any) => {
+        if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
+    });
+
+    const lines: string[] = [];
+    let currentSpeaker = -1;
+    let currentText = '';
+    chunks.forEach((chunk: any) => {
+        const time = chunk.timestamp[0];
+        const speaker = speakers.find((s: any) => time >= s.start && time <= s.end);
+        const speakerNum = speaker ? speakerMap.get(speaker.speaker) : 1;
+        if (speakerNum !== currentSpeaker) {
+            if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
+            currentSpeaker = speakerNum;
+            currentText = chunk.text;
+        } else {
+            currentText += chunk.text;
+        }
+    });
+    if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
+    return lines.join('\n');
+}
+
+parentPort?.on('message', async ({ path, model, speaker, torchHome }) => {
+    try {
+        if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: torchHome, quantized: true});
+
+        // Prepare audio file (convert to mono channel wave)
+        const wav = new wavefile.WaveFile(fs.readFileSync(path));
+        wav.toBitDepth('32f');
+        wav.toSampleRate(16000);
+        const samples = wav.getSamples();
+        let buffer;
+        if(Array.isArray(samples)) { // stereo to mono - average the channels
+            const left = samples[0];
+            const right = samples[1];
+            buffer = new Float32Array(left.length);
+            for (let i = 0; i < left.length; i++) buffer[i] = (left[i] + right[i]) / 2;
+        } else {
+            buffer = samples;
+        }
+
+        // Transcribe
+        const transcriptResult = await whisperPipeline(buffer, {return_timestamps: speaker ? 'word' : false});
+        if(!speaker) {
+            parentPort?.postMessage({ text: transcriptResult.text?.trim() || null });
+            return;
+        }
+
+        // Speaker Diarization
+        const hasDiarization = await canDiarization();
+        if(!hasDiarization) {
+            parentPort?.postMessage({ text: transcriptResult.text?.trim() || null, warning: 'Speaker diarization unavailable' });
+            return;
+        }
+
+        const speakers = await runDiarization(path, torchHome);
+        const combined = combineSpeakerTranscript(transcriptResult.chunks || [], speakers);
+        parentPort?.postMessage({ text: combined });
+    } catch (err) {
+        parentPort?.postMessage({ error: (err as Error).message });
+    }
+});
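
The worker above speaks a single-message protocol: the parent posts {path, model, speaker, torchHome} and receives one {text, warning?, error?} reply. A minimal sketch of driving the compiled worker directly (the dist path is illustrative):

    import {Worker} from 'worker_threads';

    const worker = new Worker('./dist/asr.js'); // illustrative path to the built worker
    worker.once('message', ({text, warning, error}) => {
        if(error) console.error(error);        // the worker caught an exception
        else {
            if(warning) console.warn(warning); // e.g. diarization unavailable
            console.log(text);
        }
        worker.terminate();                    // one-off: done after a single reply
    });
    worker.postMessage({path: './sample.wav', model: 'whisper-base', speaker: false, torchHome: './models'});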

src/audio.ts
@@ -1,133 +1,40 @@
-import {spawn} from 'node:child_process';
-import {pipeline} from '@xenova/transformers';
-import * as fs from 'node:fs';
+import {Worker} from 'worker_threads';
+import path from 'node:path';
 import {AbortablePromise, Ai} from './ai.ts';
-import wavefile from 'wavefile';
+import {canDiarization} from './asr.ts';
 
 export class Audio {
-    private whisperPipeline: any;
-
     constructor(private ai: Ai) {}
 
-    private combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
-        const speakerMap = new Map();
-        let speakerCount = 0;
-        speakers.forEach((seg: any) => {
-            if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
-        });
-
-        const lines: string[] = [];
-        let currentSpeaker = -1;
-        let currentText = '';
-        chunks.forEach((chunk: any) => {
-            const time = chunk.timestamp[0];
-            const speaker = speakers.find((s: any) => time >= s.start && time <= s.end);
-            const speakerNum = speaker ? speakerMap.get(speaker.speaker) : 1;
-            if (speakerNum !== currentSpeaker) {
-                if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
-                currentSpeaker = speakerNum;
-                currentText = chunk.text;
-            } else {
-                currentText += chunk.text;
-            }
-        });
-        if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
-        return lines.join('\n');
-    }
-
-    async canDiarization(): Promise<boolean> {
-        return new Promise((resolve) => {
-            const proc = spawn('python3', ['-c', 'import pyannote.audio']);
-            proc.on('close', (code: number) => resolve(code === 0));
-            proc.on('error', () => resolve(false));
-        });
-    }
-
-    private async runDiarization(audioPath: string): Promise<any[]> {
-        if(!await this.canDiarization()) throw new Error('Pyannote is not installed: pip install pyannote.audio');
-        const script = `
-import sys
-import json
-from pyannote.audio import Pipeline
-
-os.environ['TORCH_HOME'] = "${this.ai.options.path}"
-pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
-diarization = pipeline(sys.argv[1])
-
-segments = []
-for turn, _, speaker in diarization.itertracks(yield_label=True):
-    segments.append({
-        "start": turn.start,
-        "end": turn.end,
-        "speaker": speaker
-    })
-
-print(json.dumps(segments))
-`;
-
-        return new Promise((resolve, reject) => {
-            let output = '';
-            const proc = spawn('python3', ['-c', script, audioPath]);
-            proc.stdout.on('data', (data: Buffer) => output += data.toString());
-            proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
-            proc.on('close', (code: number) => {
-                if(code === 0) {
-                    try {
-                        resolve(JSON.parse(output));
-                    } catch (err) {
-                        reject(new Error('Failed to parse diarization output'));
-                    }
-                } else {
-                    reject(new Error(`Python process exited with code ${code}`));
-                }
-            });
-
-            proc.on('error', reject);
-        });
-    }
-
-    asr(path: string, options: { model?: string; speaker?: boolean } = {}): AbortablePromise<string | null> {
+    asr(filepath: string, options: { model?: string; speaker?: boolean } = {}): AbortablePromise<string | null> {
         const { model = this.ai.options.asr || 'whisper-base', speaker = false } = options;
         let aborted = false;
         const abort = () => { aborted = true; };
 
-        const p = new Promise<string | null>(async (resolve, reject) => {
-            try {
-                if(aborted) return resolve(null);
-                if(!this.whisperPipeline) this.whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, { cache_dir: this.ai.options.path, quantized: true });
-
-                // Prepare audio file (convert to mono channel wave)
-                if(aborted) return resolve(null);
-                const wav = new wavefile.WaveFile(fs.readFileSync(path));
-                wav.toBitDepth('32f');
-                wav.toSampleRate(16000);
-                const samples = wav.getSamples();
-                let buffer;
-                if(Array.isArray(samples)) { // stereo to mono - average the channels
-                    const left = samples[0];
-                    const right = samples[1];
-                    buffer = new Float32Array(left.length);
-                    for (let i = 0; i < left.length; i++) buffer[i] = (left[i] + right[i]) / 2;
-                } else {
-                    buffer = samples;
-                }
-
-                // Transcribe
-                if(aborted) return resolve(null);
-                const transcriptResult = await this.whisperPipeline(buffer, {return_timestamps: speaker ? 'word' : false});
-                if(!speaker) return resolve(transcriptResult.text?.trim() || null);
-
-                // Speaker Diarization
-                if(aborted) return resolve(null);
-                const speakers = await this.runDiarization(path);
-                if(aborted) return resolve(null);
-                const combined = this.combineSpeakerTranscript(transcriptResult.chunks || [], speakers);
-                resolve(combined);
-            } catch (err) {
-                reject(err);
-            }
+        const p = new Promise<string | null>((resolve, reject) => {
+            const worker = new Worker(path.join(import.meta.dirname, 'asr.js'));
+            const handleMessage = ({ text, warning, error }: any) => {
+                worker.terminate();
+                if(aborted) return;
+                if(error) reject(new Error(error));
+                else {
+                    if(warning) console.warn(warning);
+                    resolve(text);
+                }
+            };
+            const handleError = (err: Error) => {
+                worker.terminate();
+                if(!aborted) reject(err);
+            };
+            worker.on('message', handleMessage);
+            worker.on('error', handleError);
+            worker.on('exit', (code) => {
+                if(code !== 0 && !aborted) reject(new Error(`Worker exited with code ${code}`));
+            });
+            worker.postMessage({path: filepath, model, speaker, torchHome: this.ai.options.path});
         });
 
         return Object.assign(p, { abort });
     }
+
+    canDiarization = canDiarization;
 }
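
Note that the abort semantics changed: the old inline implementation resolved null at each checkpoint, while the worker version simply drops the reply (handleMessage returns without settling) and suppresses errors once abort() has been called. The returned shape is a promise with an attached abort handle; a sketch of what AbortablePromise in src/ai.ts presumably looks like (its real definition is not in this diff):

    // Hypothetical shape, inferred from Object.assign(p, { abort }) above
    type AbortablePromise<T> = Promise<T> & { abort: () => void };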

src/index.ts
@@ -1,5 +1,6 @@
 export * from './ai';
 export * from './antrhopic';
+export * from './asr';
 export * from './audio';
 export * from './embedder'
 export * from './llm';

src/llm.ts
@@ -75,22 +75,10 @@ export type LLMRequest = {
 }
 
 class LLM {
-    private embedWorker: Worker | null = null;
-    private embedQueue = new Map<number, { resolve: (value: number[]) => void; reject: (error: any) => void }>();
-    private embedId = 0;
     private models: {[model: string]: LLMProvider} = {};
     private defaultModel!: string;
 
     constructor(public readonly ai: Ai) {
-        this.embedWorker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
-        this.embedWorker.on('message', ({ id, embedding }) => {
-            const pending = this.embedQueue.get(id);
-            if (pending) {
-                pending.resolve(embedding);
-                this.embedQueue.delete(id);
-            }
-        });
-
         if(!ai.options.llm?.models) return;
         Object.entries(ai.options.llm.models).forEach(([model, config]) => {
             if(!this.defaultModel) this.defaultModel = model;
@@ -269,14 +257,21 @@ class LLM {
     embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
         const embed = (text: string): Promise<number[]> => {
             return new Promise((resolve, reject) => {
-                const id = this.embedId++;
-                this.embedQueue.set(id, { resolve, reject });
-                this.embedWorker?.postMessage({
-                    id,
-                    text,
-                    model: this.ai.options?.embedder || 'bge-small-en-v1.5',
-                    path: this.ai.options.path
+                const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
+                const handleMessage = ({ embedding }: any) => {
+                    worker.terminate();
+                    resolve(embedding);
+                };
+                const handleError = (err: Error) => {
+                    worker.terminate();
+                    reject(err);
+                };
+                worker.on('message', handleMessage);
+                worker.on('error', handleError);
+                worker.on('exit', (code) => {
+                    if(code !== 0) reject(new Error(`Worker exited with code ${code}`));
                 });
+                worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', path: this.ai.options.path});
             });
         };
         const chunks = this.chunk(target, maxTokens, overlapTokens);
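
src/llm.ts follows the same pattern: the shared embedder worker and its id-keyed queue are gone, and each embed() call spins up a disposable worker, trading per-call startup cost for isolation. The worker's receiving end is not part of this diff; a hypothetical sketch of src/embedder.ts matching the {text, model, path} message and {embedding} reply:

    import {pipeline} from '@xenova/transformers';
    import {parentPort} from 'worker_threads';

    let embedder: any; // cached per worker; moot for one-off workers but harmless

    parentPort?.on('message', async ({text, model, path}) => {
        // Assumed task name and pooling options; errors are left uncaught so they
        // surface through the parent's worker.on('error') handler
        if(!embedder) embedder = await pipeline('feature-extraction', `Xenova/${model}`, {cache_dir: path, quantized: true});
        const output = await embedder(text, {pooling: 'mean', normalize: true});
        parentPort?.postMessage({embedding: Array.from(output.data)});
    });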

vite.config.ts
@@ -6,6 +6,7 @@ export default defineConfig({
     build: {
         lib: {
             entry: {
+                asr: './src/asr.ts',
                 index: './src/index.ts',
                 embedder: './src/embedder.ts',
             },
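
Adding asr as a library entry makes the build emit asr.js alongside index.js, which is what new Worker(path.join(import.meta.dirname, 'asr.js')) in src/audio.ts resolves at runtime.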