Added Hugging Face token
All checks were successful
Publish Library / Build NPM Project (push) Successful in 31s
Publish Library / Tag Version (push) Successful in 5s

2026-02-12 22:15:57 -05:00
parent 0172887877
commit 0360f2493d
4 changed files with 10 additions and 7 deletions

package.json

@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.6.9",
+  "version": "0.6.10",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",


@@ -8,6 +8,8 @@ export type AbortablePromise<T> = Promise<T> & {
 };
 export type AiOptions = {
+  /** Token to pull models from hugging face */
+  hfToken?: string;
   /** Path to models */
   path?: string;
   /** ASR model: whisper-tiny, whisper-base */

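For callers, nothing changes except the new optional field. A minimal usage sketch, assuming the library exposes an `Ai` entry class that accepts AiOptions (only the option shape above comes from this commit):

import { Ai } from '@ztimson/ai-utils'; // entry-point name is an assumption

const ai = new Ai({
  hfToken: process.env.HF_TOKEN, // token used to pull gated models from Hugging Face
  path: './models',              // local cache directory for downloaded models
});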

@@ -14,14 +14,15 @@ export async function canDiarization(): Promise<boolean> {
   });
 }
-async function runDiarization(audioPath: string, torchHome: string): Promise<any[]> {
+async function runDiarization(audioPath: string, dir: string, token: string): Promise<any[]> {
   const script = `
 import sys
 import json
 import os
 from pyannote.audio import Pipeline
-os.environ['TORCH_HOME'] = "${torchHome}"
+os.environ['TORCH_HOME'] = "${dir}"
+os.environ['HF_TOKEN'] = "${token}"
 pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
 diarization = pipeline(sys.argv[1])
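
The hunk cuts off before runDiarization ends; presumably the inline script is executed by a Python child process with the audio file as its argument. A hedged sketch of that continuation (the helper name and spawn details are assumptions, not part of this diff):

import { spawn } from 'node:child_process';

// Hypothetical continuation of runDiarization: execute the inline script with
// the audio file as sys.argv[1] and parse the JSON it is expected to print.
function execDiarization(script: string, audioPath: string): Promise<any[]> {
  return new Promise((resolve, reject) => {
    const proc = spawn('python3', ['-c', script, audioPath]);
    let out = '';
    proc.stdout.on('data', chunk => out += chunk);
    proc.on('close', code => {
      if(code === 0) resolve(JSON.parse(out));
      else reject(new Error(`Diarization exited with code ${code}`));
    });
  });
}
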
@@ -82,7 +83,7 @@ function combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
   return lines.join('\n');
 }
-parentPort?.on('message', async ({ file, speaker, model, modelDir }) => {
+parentPort?.on('message', async ({ file, speaker, model, modelDir, token }) => {
   try {
     console.log('worker', file);
     if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
@@ -111,12 +112,12 @@ parentPort?.on('message', async ({ file, speaker, model, modelDir }) => {
     // Speaker Diarization
     const hasDiarization = await canDiarization();
-    if(!hasDiarization) {
+    if(!token || !hasDiarization) {
       parentPort?.postMessage({ text: transcriptResult.text?.trim() || null, error: 'Speaker diarization unavailable' });
       return;
     }
-    const speakers = await runDiarization(file, modelDir);
+    const speakers = await runDiarization(file, modelDir, token);
     const combined = combineSpeakerTranscript(transcriptResult.chunks || [], speakers);
     parentPort?.postMessage({ text: combined });
   } catch (err) {


@@ -32,7 +32,7 @@ export class Audio {
       worker.on('exit', (code) => {
         if(code !== 0 && !aborted) reject(new Error(`Worker exited with code ${code}`));
       });
-      worker.postMessage({file, model, speaker, modelDir: this.ai.options.path});
+      worker.postMessage({file, model, speaker, modelDir: this.ai.options.path, token: this.ai.options.hfToken});
     });
     return Object.assign(p, { abort });
   }
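
End to end, the token flows from AiOptions through Audio's postMessage into the worker, and diarization degrades to a plain transcript when it is missing. A usage sketch (the transcribe method name is an assumption; the abortable return shape comes from AbortablePromise above):

// Hypothetical call site; only abort() and the fallback behaviour are implied by the diff.
const job = ai.audio.transcribe('meeting.wav');
// job.abort(); // AbortablePromise<T> also exposes abort()
const result = await job; // plain transcript when hfToken is unset, speaker-labelled otherwise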