diff --git a/package.json b/package.json
index 6c52ba5..62efdd5 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.6.7",
+  "version": "0.6.7-rc.1",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",
diff --git a/src/asr.ts b/src/asr.ts
index 57a42f9..1bfa874 100644
--- a/src/asr.ts
+++ b/src/asr.ts
@@ -84,6 +84,7 @@ function combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
 
 parentPort?.on('message', async ({ file, speaker, model, modelDir }) => {
   try {
+    console.log('worker', file);
     if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
 
     // Prepare audio file (convert to mono channel wave)
diff --git a/src/audio.ts b/src/audio.ts
index 5dd523f..21cf027 100644
--- a/src/audio.ts
+++ b/src/audio.ts
@@ -7,6 +7,7 @@ export class Audio {
   constructor(private ai: Ai) {}
 
   asr(file: string, options: { model?: string; speaker?: boolean } = {}): AbortablePromise {
+    console.log('audio', file);
     const { model = this.ai.options.asr || 'whisper-base', speaker = false } = options;
     let aborted = false;
     const abort = () => { aborted = true; };