Files
ai-utils/src/ai.ts
ztimson 39537a4a8f
All checks were successful
Publish Library / Build NPM Project (push) Successful in 38s
Publish Library / Tag Version (push) Successful in 5s
Switching to processes and whisper.cpp to avoid transformers.js memory leaks
2026-02-20 21:50:01 -05:00

45 lines
1.2 KiB
TypeScript

import * as os from 'node:os';
import LLM, {AnthropicConfig, OllamaConfig, OpenAiConfig, LLMRequest} from './llm';
import { Audio } from './audio.ts';
import {Vision} from './vision.ts';
/**
 * A promise whose underlying operation can be cancelled before it settles.
 * How the promise settles after `abort()` is defined by its producer.
 */
export type AbortablePromise<T> = Promise<T> & {
	/** Cancel the in-flight operation; the return value is ignored. */
	abort: () => void;
};
/** Configuration shared by every AI subsystem created by {@link Ai}. */
export type AiOptions = {
	/** Hugging Face access token used when pulling models */
	hfToken?: string;
	/** Directory where models are cached (falls back to the OS temp dir) */
	path?: string;
	/** Whisper ASR model file, e.g. ggml-tiny.en.bin or ggml-base.en.bin */
	asr?: string;
	/** Embedding model name: all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5 */
	embedder?: string;
	/** LLM defaults plus the set of available models; the first entry is the default */
	llm?: Omit<LLMRequest, 'model'> & {
		models: {[model: string]: AnthropicConfig | OllamaConfig | OpenAiConfig};
	};
	/** Tesseract-style OCR model: eng, eng_best, eng_fast */
	ocr?: string;
	/** Path to the whisper binary */
	whisper?: string;
};
/**
 * Facade that wires together the audio, language and vision subsystems.
 * Constructing an `Ai` eagerly instantiates all three, each receiving a
 * back-reference to this instance so they can read the shared options.
 */
export class Ai {
	/** Audio processing AI */
	audio: Audio;
	/** Language processing AI */
	language: LLM;
	/** Vision processing AI */
	vision: Vision;

	constructor(public readonly options: AiOptions) {
		// Deliberately mutates the caller-supplied options object so every
		// subsystem reading options.path sees a concrete directory.
		if(!options.path) options.path = os.tmpdir();
		// Point the transformers cache at the model directory.
		process.env.TRANSFORMERS_CACHE = options.path;
		this.audio = new Audio(this);
		this.language = new LLM(this);
		this.vision = new Vision(this);
	}
}