import * as os from 'node:os';
import LLM, {AnthropicConfig, OllamaConfig, OpenAiConfig, LLMRequest} from './llm';
import {Audio} from './audio';
import {Vision} from './vision';

export type AbortablePromise<T = any> = Promise<T> & { abort: () => any };

export type AiOptions = {
  /** Path to the model cache directory */
  path?: string;
  /** ASR model: whisper-tiny, whisper-base */
  asr?: string;
  /** Embedding model: all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5 */
  embedder?: string;
  /** Large language models; the first entry is the default */
  llm?: Omit<LLMRequest, 'model'> & {
    models: {[model: string]: AnthropicConfig | OllamaConfig | OpenAiConfig};
  };
  /** OCR model: eng, eng_best, eng_fast */
  ocr?: string;
}

export class Ai {
  /** Audio processing AI */
  audio: Audio;
  /** Language processing AI */
  language: LLM;
  /** Vision processing AI */
  vision: Vision;

  constructor(public readonly options: AiOptions) {
    if (!options.path) options.path = os.tmpdir();
    process.env.TRANSFORMERS_CACHE = options.path;
    this.audio = new Audio(this);
    this.language = new LLM(this);
    this.vision = new Vision(this);
  }
}
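
// A minimal usage sketch. The ASR, embedder, and OCR names come from the option
// docs above; the path, the 'claude-3-haiku' key, and its config body are
// illustrative placeholders, not values this module prescribes.
//
// const ai = new Ai({
//   path: '/opt/models',          // hypothetical cache location; defaults to os.tmpdir()
//   asr: 'whisper-base',
//   embedder: 'all-MiniLM-L6-v2',
//   ocr: 'eng_fast',
//   llm: {
//     models: {
//       'claude-3-haiku': { /* AnthropicConfig fields for your deployment */ },
//     },
//   },
// });
//
// // The facade then exposes one entry point per modality:
// // ai.audio (Audio), ai.language (LLM), ai.vision (Vision).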