Added Hugging Face token

Adds an optional hfToken to AiOptions and threads it through the ASR worker, where it is exported as HF_TOKEN so the pyannote speaker-diarization pipeline can authenticate with Hugging Face; diarization is skipped when no token is provided.
@@ -1,6 +1,6 @@
 {
     "name": "@ztimson/ai-utils",
-    "version": "0.6.9",
+    "version": "0.6.10",
     "description": "AI Utility library",
     "author": "Zak Timson",
     "license": "MIT",
@@ -8,6 +8,8 @@ export type AbortablePromise<T> = Promise<T> & {
 };
 
 export type AiOptions = {
+    /** Token to pull models from hugging face */
+    hfToken?: string;
     /** Path to models */
     path?: string;
     /** ASR model: whisper-tiny, whisper-base */
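For context, a minimal usage sketch of the new option. The Ai class name and constructor shape are assumptions for illustration; the diff itself only adds the hfToken field to AiOptions:

import { Ai } from '@ztimson/ai-utils';

// Hypothetical wiring: the constructor shape is assumed, not shown in this diff.
const ai = new Ai({
    path: './models',               // Path to models
    hfToken: process.env.HF_TOKEN,  // Hugging Face token, needed for gated models such as pyannote speaker diarization
});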
src/asr.ts
@@ -14,14 +14,15 @@ export async function canDiarization(): Promise<boolean> {
     });
 }
 
-async function runDiarization(audioPath: string, torchHome: string): Promise<any[]> {
+async function runDiarization(audioPath: string, dir: string, token: string): Promise<any[]> {
     const script = `
 import sys
 import json
 import os
 from pyannote.audio import Pipeline
 
-os.environ['TORCH_HOME'] = "${torchHome}"
+os.environ['TORCH_HOME'] = "${dir}"
+os.environ['HF_TOKEN'] = "${token}"
 pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
 diarization = pipeline(sys.argv[1])
 
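Note on the token flow: the worker writes the option into the HF_TOKEN environment variable before calling Pipeline.from_pretrained. huggingface_hub reads HF_TOKEN when downloading checkpoints, and pyannote/speaker-diarization-3.1 is a gated model, so the download fails without a valid token, which is why diarization is now skipped when no token is configured (see the @@ -111 hunk below).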
@@ -82,7 +83,7 @@ function combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
     return lines.join('\n');
 }
 
-parentPort?.on('message', async ({ file, speaker, model, modelDir }) => {
+parentPort?.on('message', async ({ file, speaker, model, modelDir, token }) => {
     try {
         console.log('worker', file);
         if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
@@ -111,12 +112,12 @@ parentPort?.on('message', async ({ file, speaker, model, modelDir }) => {
 
         // Speaker Diarization
         const hasDiarization = await canDiarization();
-        if(!hasDiarization) {
+        if(!token || !hasDiarization) {
             parentPort?.postMessage({ text: transcriptResult.text?.trim() || null, error: 'Speaker diarization unavailable' });
             return;
         }
 
-        const speakers = await runDiarization(file, modelDir);
+        const speakers = await runDiarization(file, modelDir, token);
         const combined = combineSpeakerTranscript(transcriptResult.chunks || [], speakers);
         parentPort?.postMessage({ text: combined });
     } catch (err) {
@@ -32,7 +32,7 @@ export class Audio {
             worker.on('exit', (code) => {
                 if(code !== 0 && !aborted) reject(new Error(`Worker exited with code ${code}`));
             });
-            worker.postMessage({file, model, speaker, modelDir: this.ai.options.path});
+            worker.postMessage({file, model, speaker, modelDir: this.ai.options.path, token: this.ai.options.hfToken});
         });
         return Object.assign(p, { abort });
     }
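For illustration, a hedged sketch of how the abortable result might be consumed by a caller. The transcribe method name and the ai.audio accessor are assumptions; the diff only shows the worker wiring and the Object.assign(p, { abort }) return:

// Hypothetical call site: method and property names are assumptions, not part of this commit.
const job = ai.audio.transcribe('meeting.wav', {speaker: true});
const timer = setTimeout(() => job.abort(), 60_000); // cancel if transcription runs too long
const text = await job;                              // the abortable promise resolves with the transcript
clearTimeout(timer);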