Compare commits


12 Commits
0.7.6 ... 0.8.6

Author SHA1 Message Date
6454548364 Fixed CLI tool
All checks were successful
Publish Library / Build NPM Project (push) Successful in 41s
Publish Library / Tag Version (push) Successful in 9s
2026-03-01 17:18:30 -05:00
936317f2f2 Better memory de-duplication
All checks were successful
Publish Library / Build NPM Project (push) Successful in 37s
Publish Library / Tag Version (push) Successful in 10s
2026-03-01 00:11:17 -05:00
cfde2ac4d3 Fixed open AI tool call streaming!
All checks were successful
Publish Library / Build NPM Project (push) Successful in 42s
Publish Library / Tag Version (push) Successful in 8s
2026-02-27 13:11:41 -05:00
e4ba89d3db Open ai tool call history fix?
All checks were successful
Publish Library / Build NPM Project (push) Successful in 35s
Publish Library / Tag Version (push) Successful in 29s
2026-02-27 13:00:49 -05:00
71a7e2a904 Better RAG memory
All checks were successful
Publish Library / Build NPM Project (push) Successful in 50s
Publish Library / Tag Version (push) Successful in 9s
2026-02-27 12:32:27 -05:00
abd290246c LLM ASR
All checks were successful
Publish Library / Build NPM Project (push) Successful in 43s
Publish Library / Tag Version (push) Successful in 13s
2026-02-22 09:29:31 -05:00
ca66e8e304 Improved whisper + pyannote, sentence diarization
All checks were successful
Publish Library / Build NPM Project (push) Successful in 49s
Publish Library / Tag Version (push) Successful in 7s
2026-02-21 14:16:20 -05:00
cec892563e Whisper ASR
All checks were successful
Publish Library / Build NPM Project (push) Successful in 33s
Publish Library / Tag Version (push) Successful in 5s
2026-02-21 01:03:25 -05:00
91066e070f WIP ASR
All checks were successful
Publish Library / Build NPM Project (push) Successful in 33s
Publish Library / Tag Version (push) Successful in 5s
2026-02-21 00:51:01 -05:00
a94b153c6d Fixed embedder autostart bug
All checks were successful
Publish Library / Build NPM Project (push) Successful in 36s
Publish Library / Tag Version (push) Successful in 5s
2026-02-21 00:30:38 -05:00
39537a4a8f Switching to processes and whisper.cpp to avoid transformers.js memory leaks
All checks were successful
Publish Library / Build NPM Project (push) Successful in 38s
Publish Library / Tag Version (push) Successful in 5s
2026-02-20 21:50:01 -05:00
790608f020 Queue OCR & ASR work
All checks were successful
Publish Library / Build NPM Project (push) Successful in 35s
Publish Library / Tag Version (push) Successful in 6s
2026-02-20 19:05:19 -05:00
13 changed files with 445 additions and 303 deletions

README.md

@@ -3,7 +3,7 @@
 <br />
 <!-- Logo -->
-<img src="https://git.zakscode.com/repo-avatars/a90851ca730480ec37a5c0c2c4f1b4609eee5eadf806eaf16c83ac4cb7493aa9" alt="Logo" width="200" height="200">
+<img alt="Logo" width="200" height="200" src="https://git.zakscode.com/repo-avatars/a82d423674763e7a0c1c945bdbb07e249b2bb786d3c9beae76d5b196a10f5c0f">
 <!-- Title -->
 ### @ztimson/ai-utils
@@ -53,13 +53,15 @@ A TypeScript library that provides a unified interface for working with multiple
 - **Provider Abstraction**: Switch between AI providers without changing your code
 ### Built With
-[![Anthropic](https://img.shields.io/badge/Anthropic-191919?style=for-the-badge&logo=anthropic&logoColor=white)](https://anthropic.com/)
-[![OpenAI](https://img.shields.io/badge/OpenAI-412991?style=for-the-badge&logo=openai&logoColor=white)](https://openai.com/)
-[![Ollama](https://img.shields.io/badge/Ollama-000000?style=for-the-badge&logo=ollama&logoColor=white)](https://ollama.com/)
-[![TensorFlow](https://img.shields.io/badge/TensorFlow-FF6F00?style=for-the-badge&logo=tensorflow&logoColor=white)](https://tensorflow.org/)
-[![Tesseract](https://img.shields.io/badge/Tesseract-3C8FC7?style=for-the-badge&logo=tesseract&logoColor=white)](https://tesseract-ocr.github.io/)
+[![Anthropic](https://img.shields.io/badge/Anthropic-de7356?style=for-the-badge&logo=anthropic&logoColor=white)](https://anthropic.com/)
+[![llama](https://img.shields.io/badge/llama.cpp-fff?style=for-the-badge&logo=ollama&logoColor=black)](https://github.com/ggml-org/llama.cpp)
+[![OpenAI](https://img.shields.io/badge/OpenAI-000?style=for-the-badge&logo=openai-gym&logoColor=white)](https://openai.com/)
+[![Pyannote](https://img.shields.io/badge/Pyannote-458864?style=for-the-badge&logo=python&logoColor=white)](https://github.com/pyannote)
+[![TensorFlow](https://img.shields.io/badge/TensorFlow-fff?style=for-the-badge&logo=tensorflow&logoColor=ff6f00)](https://tensorflow.org/)
+[![Tesseract](https://img.shields.io/badge/Tesseract-B874B2?style=for-the-badge&logo=hack-the-box&logoColor=white)](https://tesseract-ocr.github.io/)
+[![Transformers.js](https://img.shields.io/badge/Transformers.js-000?style=for-the-badge&logo=hugging-face&logoColor=yellow)](https://huggingface.co/docs/transformers.js/en/index)
 [![TypeScript](https://img.shields.io/badge/TypeScript-3178C6?style=for-the-badge&logo=typescript&logoColor=white)](https://typescriptlang.org/)
-[![Whisper](https://img.shields.io/badge/Whisper-412991?style=for-the-badge&logo=openai&logoColor=white)](https://github.com/ggerganov/whisper.cpp)
+[![Whisper](https://img.shields.io/badge/Whisper.cpp-000?style=for-the-badge&logo=openai-gym&logoColor=white)](https://github.com/ggerganov/whisper.cpp)
 ## Setup
@@ -88,6 +90,8 @@ A TypeScript library that provides a unified interface for working with multiple
 #### Prerequisites
 - [Node.js](https://nodejs.org/en/download)
+- _[Whisper.cpp](https://github.com/ggml-org/whisper.cpp/releases/tag) (ASR)_
+- _[Pyannote](https://github.com/pyannote) (ASR Diarization):_ `pip install pyannote.audio`
 #### Instructions
 1. Install the dependencies: `npm i`

package.json

@@ -1,6 +1,6 @@
 {
 	"name": "@ztimson/ai-utils",
-	"version": "0.7.6",
+	"version": "0.8.6",
 	"description": "AI Utility library",
 	"author": "Zak Timson",
 	"license": "MIT",
@@ -32,8 +32,7 @@
 		"@ztimson/utils": "^0.28.13",
 		"cheerio": "^1.2.0",
 		"openai": "^6.22.0",
-		"tesseract.js": "^7.0.0",
-		"wavefile": "^11.0.0"
+		"tesseract.js": "^7.0.0"
 	},
 	"devDependencies": {
 		"@types/node": "^24.8.1",

src/ai.ts

@@ -12,7 +12,7 @@ export type AiOptions = {
 	hfToken?: string;
 	/** Path to models */
 	path?: string;
-	/** ASR model: whisper-tiny, whisper-base */
+	/** Whisper ASR model: ggml-tiny.en.bin, ggml-base.en.bin */
 	asr?: string;
 	/** Embedding model: all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5 */
 	embedder?: string;
@@ -22,6 +22,8 @@ export type AiOptions = {
 	}
 	/** OCR model: eng, eng_best, eng_fast */
 	ocr?: string;
+	/** Whisper binary */
+	whisper?: string;
 }
 export class Ai {
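The new `whisper` option points the library at a local whisper.cpp binary, while `asr` now names a ggml model file. A minimal configuration sketch (the constructor shape and the binary path are assumptions inferred from this diff, not shown in it):

import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({
	path: './models',                      // cache directory for downloaded models
	whisper: '/usr/local/bin/whisper-cli', // assumed install location of a whisper.cpp binary
	asr: 'ggml-base.en.bin',               // fetched from HuggingFace on first use
	hfToken: process.env.HF_TOKEN,         // only needed for pyannote speaker diarization
});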

src/asr.ts (deleted)

@@ -1,134 +0,0 @@
-import { pipeline } from '@xenova/transformers';
-import { parentPort } from 'worker_threads';
-import { spawn } from 'node:child_process';
-import { execSync } from 'node:child_process';
-import { mkdtempSync, rmSync, readFileSync } from 'node:fs';
-import { join } from 'node:path';
-import { tmpdir } from 'node:os';
-import wavefile from 'wavefile';
-export async function canDiarization(): Promise<string | null> {
-	const checkPython = (cmd: string) => {
-		return new Promise<boolean>((resolve) => {
-			const proc = spawn(cmd, ['-c', 'import pyannote.audio']);
-			proc.on('close', (code: number) => resolve(code === 0));
-			proc.on('error', () => resolve(false));
-		});
-	};
-	if(await checkPython('python3')) return 'python3';
-	if(await checkPython('python')) return 'python';
-	return null;
-}
-async function runDiarization(binary: string, audioPath: string, dir: string, token: string): Promise<any[]> {
-	const script = `
-import sys
-import json
-import os
-from pyannote.audio import Pipeline
-os.environ['TORCH_HOME'] = r"${dir}"
-pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1", token="${token}")
-output = pipeline(sys.argv[1])
-segments = []
-for turn, speaker in output.speaker_diarization:
-    segments.append({"start": turn.start, "end": turn.end, "speaker": speaker})
-print(json.dumps(segments))
-`;
-	return new Promise((resolve, reject) => {
-		let output = '';
-		const proc = spawn(binary, ['-c', script, audioPath]);
-		proc.stdout.on('data', (data: Buffer) => output += data.toString());
-		proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
-		proc.on('close', (code: number) => {
-			if(code === 0) {
-				try {
-					resolve(JSON.parse(output));
-				} catch (err) {
-					reject(new Error('Failed to parse diarization output'));
-				}
-			} else {
-				reject(new Error(`Python process exited with code ${code}`));
-			}
-		});
-		proc.on('error', reject);
-	});
-}
-function combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
-	const speakerMap = new Map();
-	let speakerCount = 0;
-	speakers.forEach((seg: any) => {
-		if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
-	});
-	const lines: string[] = [];
-	let currentSpeaker = -1;
-	let currentText = '';
-	chunks.forEach((chunk: any) => {
-		const time = chunk.timestamp[0];
-		const speaker = speakers.find((s: any) => time >= s.start && time <= s.end);
-		const speakerNum = speaker ? speakerMap.get(speaker.speaker) : 1;
-		if (speakerNum !== currentSpeaker) {
-			if(currentText) lines.push(`[Speaker ${currentSpeaker}]: ${currentText.trim()}`);
-			currentSpeaker = speakerNum;
-			currentText = chunk.text;
-		} else {
-			currentText += chunk.text;
-		}
-	});
-	if(currentText) lines.push(`[Speaker ${currentSpeaker}]: ${currentText.trim()}`);
-	return lines.join('\n');
-}
-function prepareAudioBuffer(file: string): [string, Float32Array] {
-	let wav: any, tmp;
-	try {
-		wav = new wavefile.WaveFile(readFileSync(file));
-	} catch(err) {
-		tmp = join(mkdtempSync(join(tmpdir(), 'audio-')), 'converted.wav');
-		execSync(`ffmpeg -i "${file}" -ar 16000 -ac 1 -f wav "${tmp}"`, { stdio: 'ignore' });
-		wav = new wavefile.WaveFile(readFileSync(tmp));
-	} finally {
-		wav.toBitDepth('32f');
-		wav.toSampleRate(16000);
-		const samples = wav.getSamples();
-		if(Array.isArray(samples)) {
-			const left = samples[0];
-			const right = samples[1];
-			const buffer = new Float32Array(left.length);
-			for (let i = 0; i < left.length; i++) buffer[i] = (left[i] + right[i]) / 2;
-			return [tmp || file, buffer];
-		}
-		return [tmp || file, samples];
-	}
-}
-parentPort?.on('message', async ({ file, speaker, model, modelDir, token }) => {
-	let tempFile = null;
-	try {
-		const asr: any = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
-		const [f, buffer] = prepareAudioBuffer(file);
-		tempFile = f !== file ? f : null;
-		const hasDiarization = await canDiarization();
-		const [transcript, speakers] = await Promise.all([
-			asr(buffer, {return_timestamps: speaker ? 'word' : false}),
-			(!speaker || !token || !hasDiarization) ? Promise.resolve(): runDiarization(hasDiarization, f, modelDir, token),
-		]);
-		const text = transcript.text?.trim() || null;
-		if(!speaker) return parentPort?.postMessage({ text });
-		if(!token) return parentPort?.postMessage({ text, error: 'HuggingFace token required' });
-		if(!hasDiarization) return parentPort?.postMessage({ text, error: 'Speaker diarization unavailable' });
-		const combined = combineSpeakerTranscript(transcript.chunks || [], speakers || []);
-		parentPort?.postMessage({ text: combined });
-	} catch (err: any) {
-		parentPort?.postMessage({ error: err.stack || err.message });
-	} finally {
-		if(tempFile) rmSync(tempFile, { recursive: true, force: true });
-	}
-});

src/audio.ts

@@ -1,60 +1,270 @@
-import {fileURLToPath} from 'url';
-import {Worker} from 'worker_threads';
+import {execSync, spawn} from 'node:child_process';
+import {mkdtempSync} from 'node:fs';
+import fs from 'node:fs/promises';
+import {tmpdir} from 'node:os';
+import * as path from 'node:path';
+import Path, {join} from 'node:path';
 import {AbortablePromise, Ai} from './ai.ts';
-import {canDiarization} from './asr.ts';
-import {dirname, join} from 'path';
 export class Audio {
-	constructor(private ai: Ai) {}
-	asr(file: string, options: { model?: string; speaker?: boolean | 'id' } = {}): AbortablePromise<string | null> {
-		const { model = this.ai.options.asr || 'whisper-base', speaker = false } = options;
-		let aborted = false;
-		const abort = () => { aborted = true; };
-		let p = new Promise<string | null>((resolve, reject) => {
-			const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'asr.js'));
-			const handleMessage = ({ text, warning, error }: any) => {
-				setTimeout(() => worker.terminate(), 1000);
-				if(aborted) return;
-				if(error) reject(new Error(error));
-				else {
-					if(warning) console.warn(warning);
-					resolve(text);
-				}
-			};
-			const handleError = (err: Error) => {
-				setTimeout(() => worker.terminate(), 1000);
-				if(!aborted) reject(err);
-			};
-			worker.on('message', handleMessage);
-			worker.on('error', handleError);
-			worker.on('exit', (code) => {
-				if(code !== 0 && !aborted) reject(new Error(`Worker exited with code ${code}`));
-			});
-			worker.postMessage({file, model, speaker, modelDir: this.ai.options.path, token: this.ai.options.hfToken});
-		});
-		// Name speakers using AI
-		if(options.speaker == 'id') {
-			if(!this.ai.language.defaultModel) throw new Error('Configure an LLM for advanced ASR speaker detection');
-			p = p.then(async transcript => {
-				if(!transcript) return transcript;
-				let chunks = this.ai.language.chunk(transcript, 500, 0);
-				if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
-				const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name", 2: "Second Name"}', {
-					system: 'Use the following transcript to identify speakers. Only identify speakers you are positive about, dont mention speakers you are unsure about in your response',
-					temperature: 0.1,
-				});
-				Object.entries(names).forEach(([speaker, name]) => {
-					transcript = (<string>transcript).replaceAll(`[Speaker ${speaker}]`, `[${name}]`);
-				});
-				return transcript;
-			})
-		}
-		return Object.assign(p, { abort });
-	}
-	canDiarization = () => canDiarization().then(resp => !!resp);
+	private downloads: {[key: string]: Promise<string>} = {};
+	private pyannote!: string;
+	private whisperModel!: string;
+	constructor(private ai: Ai) {
+		if(ai.options.whisper) {
+			this.whisperModel = ai.options.asr || 'ggml-base.en.bin';
+			this.downloadAsrModel();
+		}
+		this.pyannote = `
+import sys
+import json
+import os
+from pyannote.audio import Pipeline
+os.environ['TORCH_HOME'] = r"${ai.options.path}"
+pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1", token="${ai.options.hfToken}")
+output = pipeline(sys.argv[1])
+segments = []
+for turn, speaker in output.speaker_diarization:
+    segments.append({"start": turn.start, "end": turn.end, "speaker": speaker})
+print(json.dumps(segments))
+`;
+	}
+	private async addPunctuation(timestampData: any, llm?: boolean, cadence = 150): Promise<string> {
+		const countSyllables = (word: string): number => {
+			word = word.toLowerCase().replace(/[^a-z]/g, '');
+			if(word.length <= 3) return 1;
+			const matches = word.match(/[aeiouy]+/g);
+			let count = matches ? matches.length : 1;
+			if(word.endsWith('e')) count--;
+			return Math.max(1, count);
+		};
+		let result = '';
+		timestampData.transcription.filter((word, i) => {
+			let skip = false;
+			const prevWord = timestampData.transcription[i - 1];
+			const nextWord = timestampData.transcription[i + 1];
+			if(!word.text && nextWord) {
+				nextWord.offsets.from = word.offsets.from;
+				nextWord.timestamps.from = word.offsets.from;
+			} else if(word.text && word.text[0] != ' ' && prevWord) {
+				prevWord.offsets.to = word.offsets.to;
+				prevWord.timestamps.to = word.timestamps.to;
+				prevWord.text += word.text;
+				skip = true;
+			}
+			return !!word.text && !skip;
+		}).forEach((word: any) => {
+			const capital = /^[A-Z]/.test(word.text.trim());
+			const length = word.offsets.to - word.offsets.from;
+			const syllables = countSyllables(word.text.trim());
+			const expected = syllables * cadence;
+			if(capital && length > expected * 2 && word.text[0] == ' ') result += '.';
+			result += word.text;
+		});
+		if(!llm) return result.trim();
+		return this.ai.language.ask(result, {
+			system: 'Remove any misplaced punctuation from the following ASR transcript using the replace tool. Avoid modifying words unless there is an obvious typo',
+			temperature: 0.1,
+			tools: [{
+				name: 'replace',
+				description: 'Use find and replace to fix errors',
+				args: {
+					find: {type: 'string', description: 'Text to find', required: true},
+					replace: {type: 'string', description: 'Text to replace', required: true}
+				},
+				fn: (args) => result = result.replace(args.find, args.replace)
+			}]
+		}).then(() => result);
+	}
+	private async diarizeTranscript(timestampData: any, speakers: any[], llm: boolean): Promise<string> {
+		const speakerMap = new Map();
+		let speakerCount = 0;
+		speakers.forEach((seg: any) => {
+			if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
+		});
+		const punctuatedText = await this.addPunctuation(timestampData, llm);
+		const sentences = punctuatedText.match(/[^.!?]+[.!?]+/g) || [punctuatedText];
+		const words = timestampData.transcription.filter((w: any) => w.text.trim());
+		// Assign speaker to each sentence
+		const sentencesWithSpeakers = sentences.map(sentence => {
+			sentence = sentence.trim();
+			if(!sentence) return null;
+			const sentenceWords = sentence.toLowerCase().replace(/[^\w\s]/g, '').split(/\s+/);
+			const speakerWordCount = new Map<number, number>();
+			sentenceWords.forEach(sw => {
+				const word = words.find((w: any) => sw === w.text.trim().toLowerCase().replace(/[^\w]/g, ''));
+				if(!word) return;
+				const wordTime = word.offsets.from / 1000;
+				const speaker = speakers.find((seg: any) => wordTime >= seg.start && wordTime <= seg.end);
+				if(speaker) {
+					const spkNum = speakerMap.get(speaker.speaker);
+					speakerWordCount.set(spkNum, (speakerWordCount.get(spkNum) || 0) + 1);
+				}
+			});
+			let bestSpeaker = 1;
+			let maxWords = 0;
+			speakerWordCount.forEach((count, speaker) => {
+				if(count > maxWords) {
+					maxWords = count;
+					bestSpeaker = speaker;
+				}
+			});
+			return {speaker: bestSpeaker, text: sentence};
+		}).filter(s => s !== null);
+		// Merge adjacent sentences from same speaker
+		const merged: Array<{speaker: number, text: string}> = [];
+		sentencesWithSpeakers.forEach(item => {
+			const last = merged[merged.length - 1];
+			if(last && last.speaker === item.speaker) {
+				last.text += ' ' + item.text;
+			} else {
+				merged.push({...item});
+			}
+		});
+		let transcript = merged.map(item => `[Speaker ${item.speaker}]: ${item.text}`).join('\n').trim();
+		if(!llm) return transcript;
+		let chunks = this.ai.language.chunk(transcript, 500, 0);
+		if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
+		const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name", 2: "Second Name"}', {
+			system: 'Use the following transcript to identify speakers. Only identify speakers you are positive about, dont mention speakers you are unsure about in your response',
+			temperature: 0.1,
+		});
+		Object.entries(names).forEach(([speaker, name]) => transcript = transcript.replaceAll(`[Speaker ${speaker}]`, `[${name}]`));
+		return transcript;
+	}
+	private runAsr(file: string, opts: {model?: string, diarization?: boolean} = {}): AbortablePromise<any> {
+		let proc: any;
+		const p = new Promise<any>((resolve, reject) => {
+			this.downloadAsrModel(opts.model).then(m => {
+				if(opts.diarization) {
+					let output = path.join(path.dirname(file), 'transcript');
+					proc = spawn(<string>this.ai.options.whisper,
+						['-m', m, '-f', file, '-np', '-ml', '1', '-oj', '-of', output],
+						{stdio: ['ignore', 'ignore', 'pipe']}
+					);
+					proc.on('error', (err: Error) => reject(err));
+					proc.on('close', async (code: number) => {
+						if(code === 0) {
+							output = await fs.readFile(output + '.json', 'utf-8');
+							fs.rm(output + '.json').catch(() => { });
+							try { resolve(JSON.parse(output)); }
+							catch(e) { reject(new Error('Failed to parse whisper JSON')); }
+						} else {
+							reject(new Error(`Exit code ${code}`));
+						}
+					});
+				} else {
+					let output = '';
+					proc = spawn(<string>this.ai.options.whisper, ['-m', m, '-f', file, '-np', '-nt']);
+					proc.on('error', (err: Error) => reject(err));
+					proc.stdout.on('data', (data: Buffer) => output += data.toString());
+					proc.on('close', async (code: number) => {
+						if(code === 0) {
+							resolve(output.trim() || null);
+						} else {
+							reject(new Error(`Exit code ${code}`));
+						}
+					});
+				}
+			});
+		});
+		return <any>Object.assign(p, {abort: () => proc?.kill('SIGTERM')});
+	}
+	private runDiarization(file: string): AbortablePromise<any> {
+		let aborted = false, abort = () => { aborted = true; };
+		const checkPython = (cmd: string) => {
+			return new Promise<boolean>((resolve) => {
+				const proc = spawn(cmd, ['-W', 'ignore', '-c', 'import pyannote.audio']);
+				proc.on('close', (code: number) => resolve(code === 0));
+				proc.on('error', () => resolve(false));
+			});
+		};
+		const p = Promise.all<any>([
+			checkPython('python'),
+			checkPython('python3'),
+		]).then(<any>(async ([p, p3]: [boolean, boolean]) => {
+			if(aborted) return;
+			if(!p && !p3) throw new Error('Pyannote is not installed: pip install pyannote.audio');
+			const binary = p3 ? 'python3' : 'python';
+			return new Promise((resolve, reject) => {
+				if(aborted) return;
+				let output = '';
+				const proc = spawn(binary, ['-W', 'ignore', '-c', this.pyannote, file]);
+				proc.stdout.on('data', (data: Buffer) => output += data.toString());
+				proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
+				proc.on('close', (code: number) => {
+					if(code === 0) {
+						try { resolve(JSON.parse(output)); }
+						catch (err) { reject(new Error('Failed to parse diarization output')); }
+					} else {
+						reject(new Error(`Python process exited with code ${code}`));
+					}
+				});
+				proc.on('error', reject);
+				abort = () => proc.kill('SIGTERM');
+			});
+		}));
+		return <any>Object.assign(p, {abort});
+	}
+	asr(file: string, options: { model?: string; diarization?: boolean | 'llm' } = {}): AbortablePromise<string | null> {
+		if(!this.ai.options.whisper) throw new Error('Whisper not configured');
+		const tmp = join(mkdtempSync(join(tmpdir(), 'audio-')), 'converted.wav');
+		execSync(`ffmpeg -i "${file}" -ar 16000 -ac 1 -f wav "${tmp}"`, { stdio: 'ignore' });
+		const clean = () => fs.rm(Path.dirname(tmp), {recursive: true, force: true}).catch(() => {});
+		if(!options.diarization) return this.runAsr(tmp, {model: options.model});
+		const timestamps = this.runAsr(tmp, {model: options.model, diarization: true});
+		const diarization = this.runDiarization(tmp);
+		let aborted = false, abort = () => {
+			aborted = true;
+			timestamps.abort();
+			diarization.abort();
+			clean();
+		};
+		const response = Promise.allSettled([timestamps, diarization]).then(async ([ts, d]) => {
+			if(ts.status == 'rejected') throw new Error('Whisper.cpp timestamps:\n' + ts.reason);
+			if(d.status == 'rejected') throw new Error('Pyannote:\n' + d.reason);
+			if(aborted || !options.diarization) return ts.value;
+			return this.diarizeTranscript(ts.value, d.value, options.diarization == 'llm');
+		}).finally(() => clean());
+		return <any>Object.assign(response, {abort});
+	}
+	async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
+		if(!this.ai.options.whisper) throw new Error('Whisper not configured');
+		if(!model.endsWith('.bin')) model += '.bin';
+		const p = Path.join(<string>this.ai.options.path, model);
+		if(await fs.stat(p).then(() => true).catch(() => false)) return p;
+		if(!!this.downloads[model]) return this.downloads[model];
+		this.downloads[model] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${model}`)
+			.then(resp => resp.arrayBuffer())
+			.then(arr => Buffer.from(arr)).then(async buffer => {
+				await fs.writeFile(p, buffer);
+				delete this.downloads[model];
+				return p;
+			});
+		return this.downloads[model];
+	}
 }
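With this wiring, `asr()` shells out to whisper.cpp (and pyannote for diarization) instead of a transformers.js worker. A rough usage sketch, reusing the `ai` instance configured above (the `ai.audio` accessor is assumed from the class layout, and the file path is a placeholder):

const text = await ai.audio.asr('./call.mp3');                     // plain transcript
const labelled = ai.audio.asr('./call.mp3', {diarization: 'llm'}); // '[Speaker 1]: ...' lines; 'llm' also cleans punctuation and names speakers
labelled.abort();                                                  // SIGTERMs whisper.cpp/pyannote and removes the temp WAV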

src/embedder.ts

@@ -1,11 +1,13 @@
 import { pipeline } from '@xenova/transformers';
-import { parentPort } from 'worker_threads';
-let embedder: any;
-parentPort?.on('message', async ({text, model, modelDir }) => {
-	if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model, {quantized: true, cache_dir: modelDir});
+const [modelDir, model] = process.argv.slice(2);
+let text = '';
+process.stdin.on('data', chunk => text += chunk);
+process.stdin.on('end', async () => {
+	const embedder = await pipeline('feature-extraction', 'Xenova/' + model, {quantized: true, cache_dir: modelDir});
 	const output = await embedder(text, { pooling: 'mean', normalize: true });
 	const embedding = Array.from(output.data);
-	parentPort?.postMessage({embedding});
+	console.log(JSON.stringify({embedding}));
+	process.exit();
 });
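Because the embedder now takes its model directory and model name as positional arguments and reads text from stdin, it can also be exercised outside the library by piping text into `node embedder.js <modelDir> <model>` and parsing the `{embedding}` JSON it prints.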

src/index.ts

@@ -1,8 +1,6 @@
 export * from './ai';
 export * from './antrhopic';
-export * from './asr';
 export * from './audio';
-export * from './embedder'
 export * from './llm';
 export * from './open-ai';
 export * from './provider';

src/llm.ts

@@ -4,9 +4,9 @@ import {Anthropic} from './antrhopic.ts';
 import {OpenAi} from './open-ai.ts';
 import {LLMProvider} from './provider.ts';
 import {AiTool} from './tools.ts';
-import {Worker} from 'worker_threads';
 import {fileURLToPath} from 'url';
 import {dirname, join} from 'path';
+import { spawn } from 'node:child_process';
 export type AnthropicConfig = {proto: 'anthropic', token: string};
 export type OllamaConfig = {proto: 'ollama', host: string};
@@ -44,8 +44,6 @@ export type LLMMemory = {
 	fact: string;
 	/** Owner and fact embedding vector */
 	embeddings: [number[], number[]];
-	/** Creation time */
-	timestamp: Date;
 }
 export type LLMRequest = {
@@ -95,75 +93,98 @@ class LLM {
 	 * @returns {{abort: () => void, response: Promise<string>}} Function to abort response and chat history
 	 */
 	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
+		options = <any>{
+			system: '',
+			temperature: 0.8,
+			...this.ai.options.llm,
+			models: undefined,
+			history: [],
+			...options,
+		}
 		const m = options.model || this.defaultModel;
 		if(!this.models[m]) throw new Error(`Model does not exist: ${m}`);
 		let abort = () => {};
 		return Object.assign(new Promise<string>(async res => {
 			if(!options.history) options.history = [];
-			// If memories were passed, find any relivant ones and add a tool for ADHOC lookups
+			// If memories were passed, find any relevant ones and add a tool for ADHOC lookups
 			if(options.memory) {
-				options.system = (options.system || '') + '\nYou have passive persistent memory never make any mention of your memory capabilities and what you can/cannot remember\n';
-				const search = async (query?: string | null, subject?: string | null, limit = 50) => {
+				const search = async (query?: string | null, subject?: string | null, limit = 10) => {
 					const [o, q] = await Promise.all([
 						subject ? this.embedding(subject) : Promise.resolve(null),
 						query ? this.embedding(query) : Promise.resolve(null),
 					]);
-					return (options.memory || [])
-						.map(m => ({...m, score: o ? this.cosineSimilarity(m.embeddings[0], o[0].embedding) : 1}))
-						.filter((m: any) => m.score >= 0.8)
-						.map((m: any) => ({...m, score: q ? this.cosineSimilarity(m.embeddings[1], q[0].embedding) : m.score}))
-						.filter((m: any) => m.score >= 0.2)
-						.toSorted((a: any, b: any) => a.score - b.score)
-						.slice(0, limit);
+					return (options.memory || []).map(m => {
+						const score = (o ? this.cosineSimilarity(m.embeddings[0], o[0].embedding) : 0)
+							+ (q ? this.cosineSimilarity(m.embeddings[1], q[0].embedding) : 0);
+						return {...m, score};
+					}).toSorted((a: any, b: any) => a.score - b.score).slice(0, limit);
 				}
+				options.system += '\nYou have RAG memory and will be given the top_k closest memories regarding the users query. Save anything new you have learned worth remembering from the user message using the remember tool and feel free to recall memories manually.\n';
 				const relevant = await search(message);
-				if(relevant.length) options.history.push({role: 'assistant', content: 'Things I remembered:\n' + relevant.map(m => `${m.owner}: ${m.fact}`).join('\n')});
-				options.tools = [...options.tools || [], {
-					name: 'read_memory',
-					description: 'Check your long-term memory for more information',
+				if(relevant.length) options.history.push({role: 'tool', name: 'recall', id: 'auto_recall_' + Math.random().toString(), args: {}, content: 'Things I remembered:\n' + relevant.map(m => `${m.owner}: ${m.fact}`).join('\n')});
+				options.tools = [{
+					name: 'recall',
+					description: 'Recall the closest memories you have regarding a query using RAG',
 					args: {
 						subject: {type: 'string', description: 'Find information by a subject topic, can be used with or without query argument'},
 						query: {type: 'string', description: 'Search memory based on a query, can be used with or without subject argument'},
-						limit: {type: 'number', description: 'Result limit, default 5'},
+						topK: {type: 'number', description: 'Result limit, default 5'},
 					},
 					fn: (args) => {
 						if(!args.subject && !args.query) throw new Error('Either a subject or query argument is required');
-						return search(args.query, args.subject, args.limit || 5);
+						return search(args.query, args.subject, args.topK);
 					}
-				}];
+				}, {
+					name: 'remember',
+					description: 'Store important facts user shares for future recall',
+					args: {
+						owner: {type: 'string', description: 'Subject/person this fact is about'},
+						fact: {type: 'string', description: 'The information to remember'}
+					},
+					fn: async (args) => {
+						if(!options.memory) return;
+						const e = await Promise.all([
+							this.embedding(args.owner),
+							this.embedding(`${args.owner}: ${args.fact}`)
+						]);
+						const newMem = {owner: args.owner, fact: args.fact, embeddings: <any>[e[0][0].embedding, e[1][0].embedding]};
+						options.memory.splice(0, options.memory.length, ...[
+							...options.memory.filter(m => {
+								return !(this.cosineSimilarity(newMem.embeddings[0], m.embeddings[0]) >= 0.9 && this.cosineSimilarity(newMem.embeddings[1], m.embeddings[1]) >= 0.8);
+							}),
+							newMem
+						]);
+						return 'Remembered!';
+					}
+				}, ...options.tools || []];
 			}
 			// Ask
 			const resp = await this.models[m].ask(message, options);
-			// Remove any memory calls
-			if(options.memory) {
-				const i = options.history?.findIndex((h: any) => h.role == 'assistant' && h.content.startsWith('Things I remembered:'));
-				if(i != null && i >= 0) options.history?.splice(i, 1);
-			}
-			// Handle compression and memory extraction
-			if(options.compress || options.memory) {
-				let compressed = null;
-				if(options.compress) {
-					compressed = await this.ai.language.compressHistory(options.history, options.compress.max, options.compress.min, options);
-					options.history.splice(0, options.history.length, ...compressed.history);
-				} else {
-					const i = options.history?.findLastIndex(m => m.role == 'user') ?? -1;
-					compressed = await this.ai.language.compressHistory(i != -1 ? options.history.slice(i) : options.history, 0, 0, options);
-				}
-				if(options.memory) {
-					const updated = options.memory
-						.filter(m => !compressed.memory.some(m2 => this.cosineSimilarity(m.embeddings[1], m2.embeddings[1]) > 0.8))
-						.concat(compressed.memory);
-					options.memory.splice(0, options.memory.length, ...updated);
-				}
-			}
+			// Remove any memory calls from history
+			if(options.memory) options.history.splice(0, options.history.length, ...options.history.filter(h => h.role != 'tool' || (h.name != 'recall' && h.name != 'remember')));
+			// Compress message history
+			if(options.compress) {
+				const compressed = await this.ai.language.compressHistory(options.history, options.compress.max, options.compress.min, options);
+				options.history.splice(0, options.history.length, ...compressed);
+			}
 			return res(resp);
 		}), {abort});
 	}
+	async code(message: string, options?: LLMRequest): Promise<any> {
+		const resp = await this.ask(message, {...options, system: [
+			options?.system,
+			'Return your response in a code block'
+		].filter(t => !!t).join(('\n'))});
+		const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
+		return codeBlock ? codeBlock[1].trim() : null;
+	}
 	/**
 	 * Compress chat history to reduce context size
 	 * @param {LLMMessage[]} history Chatlog that will be compressed
@@ -172,32 +193,24 @@
 	 * @param {LLMRequest} options LLM options
 	 * @returns {Promise<LLMMessage[]>} New chat history will summary at index 0
 	 */
-	async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<{history: LLMMessage[], memory: LLMMemory[]}> {
-		if(this.estimateTokens(history) < max) return {history, memory: []};
+	async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
+		if(this.estimateTokens(history) < max) return history;
 		let keep = 0, tokens = 0;
 		for(let m of history.toReversed()) {
 			tokens += this.estimateTokens(m.content);
 			if(tokens < min) keep++;
 			else break;
 		}
-		if(history.length <= keep) return {history, memory: []};
+		if(history.length <= keep) return history;
 		const system = history[0].role == 'system' ? history[0] : null,
 			recent = keep == 0 ? [] : history.slice(-keep),
 			process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
-		const summary: any = await this.json(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), '{summary: string, facts: [[subject, fact]]}', {
-			system: 'Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn\'t already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI.',
-			model: options?.model,
-			temperature: options?.temperature || 0.3
-		});
-		const timestamp = new Date();
-		const memory = await Promise.all((summary?.facts || [])?.map(async ([owner, fact]: [string, string]) => {
-			const e = await Promise.all([this.embedding(owner), this.embedding(`${owner}: ${fact}`)]);
-			return {owner, fact, embeddings: [e[0][0].embedding, e[1][0].embedding], timestamp};
-		}));
-		const h = [{role: 'assistant', content: `Conversation Summary: ${summary?.summary}`, timestamp: Date.now()}, ...recent];
+		const summary: any = await this.summarize(process.map(m => `[${m.role}]: ${m.content}`).join('\n\n'), 500, options);
+		const d = Date.now();
+		const h = [{role: <any>'tool', name: 'summary', id: `summary_` + d, args: {}, content: `Conversation Summary: ${summary?.summary}`, timestamp: d}, ...recent];
 		if(system) h.splice(0, 0, system);
-		return {history: <any>h, memory};
+		return h;
 	}
 	/**
@@ -234,7 +247,7 @@
 				return `${p}: ${Array.isArray(value) ? value.join(', ') : value}`;
 			});
 		};
-		const lines = typeof target === 'object' ? objString(target) : target.split('\n');
+		const lines = typeof target === 'object' ? objString(target) : target.toString().split('\n');
 		const tokens = lines.flatMap(l => [...l.split(/\s+/).filter(Boolean), '\n']);
 		const chunks: string[] = [];
 		for(let i = 0; i < tokens.length;) {
@@ -258,34 +271,54 @@
 	 * @param {maxTokens?: number, overlapTokens?: number} opts Options for embedding such as chunk sizes
 	 * @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
 	 */
-	async embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number} = {}) {
+	embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number} = {}): AbortablePromise<any[]> {
 		let {maxTokens = 500, overlapTokens = 50} = opts;
+		let aborted = false;
+		const abort = () => { aborted = true; };
 		const embed = (text: string): Promise<number[]> => {
 			return new Promise((resolve, reject) => {
-				const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
-				const handleMessage = ({ embedding }: any) => {
-					worker.terminate();
-					resolve(embedding);
-				};
-				const handleError = (err: Error) => {
-					worker.terminate();
-					reject(err);
-				};
-				worker.on('message', handleMessage);
-				worker.on('error', handleError);
-				worker.on('exit', (code) => {
-					if(code !== 0) reject(new Error(`Worker exited with code ${code}`));
-				});
-				worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', modelDir: this.ai.options.path});
+				if(aborted) return reject(new Error('Aborted'));
+				const args: string[] = [
+					join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'),
+					<string>this.ai.options.path,
+					this.ai.options?.embedder || 'bge-small-en-v1.5'
+				];
+				const proc = spawn('node', args, {stdio: ['pipe', 'pipe', 'ignore']});
+				proc.stdin.write(text);
+				proc.stdin.end();
+				let output = '';
+				proc.stdout.on('data', (data: Buffer) => output += data.toString());
+				proc.on('close', (code: number) => {
+					if(aborted) return reject(new Error('Aborted'));
+					if(code === 0) {
+						try {
+							const result = JSON.parse(output);
+							resolve(result.embedding);
+						} catch(err) {
+							reject(new Error('Failed to parse embedding output'));
+						}
+					} else {
+						reject(new Error(`Embedder process exited with code ${code}`));
+					}
+				});
+				proc.on('error', reject);
 			});
 		};
-		const chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
-		for(let i = 0; i < chunks.length; i++) {
-			const text= chunks[i];
-			const embedding = await embed(text);
-			results.push({index: i, embedding, text, tokens: this.estimateTokens(text)});
-		}
-		return results;
+		const p = (async () => {
+			const chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
+			for(let i = 0; i < chunks.length; i++) {
+				if(aborted) break;
+				const text = chunks[i];
+				const embedding = await embed(text);
+				results.push({index: i, embedding, text, tokens: this.estimateTokens(text)});
+			}
+			return results;
+		})();
+		return Object.assign(p, { abort });
 	}
 	/**
@@ -323,14 +356,11 @@
 	 * @returns {Promise<{} | {} | RegExpExecArray | null>}
 	 */
 	async json(text: string, schema: string, options?: LLMRequest): Promise<any> {
-		let resp = await this.ask(text, {...options, system: (options?.system ? `${options.system}\n` : '') + `Only respond using a JSON code block matching this schema:
-\`\`\`json
-${schema}
-\`\`\``});
-		if(!resp) return {};
-		const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
-		const jsonStr = codeBlock ? codeBlock[1].trim() : resp;
-		return JSONAttemptParse(jsonStr, {});
+		const code = await this.code(text, {...options, system: [
+			options?.system,
+			`Only respond using JSON matching this schema:\n\`\`\`json\n${schema}\n\`\`\``
+		].filter(t => !!t).join('\n')});
+		return code ? JSONAttemptParse(code, {}) : null;
 	}
 	/**
@@ -340,8 +370,8 @@
 	 * @param options LLM request options
 	 * @returns {Promise<string>} Summary
 	 */
-	summarize(text: string, tokens: number, options?: LLMRequest): Promise<string | null> {
-		return this.ask(text, {system: `Generate a brief summary <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options});
+	summarize(text: string, tokens: number = 500, options?: LLMRequest): Promise<string | null> {
+		return this.ask(text, {system: `Generate the shortest summary possible <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options});
 	}
 }

src/open-ai.ts

@@ -11,7 +11,7 @@ export class OpenAi extends LLMProvider {
 		super();
 		this.client = new openAI(clean({
 			baseURL: host,
-			apiKey: token
+			apiKey: token || host ? 'ignored' : undefined
 		}));
 	}
@@ -67,7 +67,10 @@ export class OpenAi extends LLMProvider {
 	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
 		const controller = new AbortController();
 		return Object.assign(new Promise<any>(async (res, rej) => {
-			if(options.system && options.history?.[0]?.role != 'system') options.history?.splice(0, 0, {role: 'system', content: options.system, timestamp: Date.now()});
+			if(options.system) {
+				if(options.history?.[0]?.role != 'system') options.history?.splice(0, 0, {role: 'system', content: options.system, timestamp: Date.now()});
+				else options.history[0].content = options.system;
+			}
 			let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
 			const tools = options.tools || this.ai.options.llm?.tools || [];
 			const requestParams: any = {
@@ -100,15 +103,37 @@ export class OpenAi extends LLMProvider {
 			if(options.stream) {
 				if(!isFirstMessage) options.stream({text: '\n\n'});
 				else isFirstMessage = false;
-				resp.choices = [{message: {content: '', tool_calls: []}}];
+				resp.choices = [{message: {role: 'assistant', content: '', tool_calls: []}}];
 				for await (const chunk of resp) {
 					if(controller.signal.aborted) break;
 					if(chunk.choices[0].delta.content) {
 						resp.choices[0].message.content += chunk.choices[0].delta.content;
 						options.stream({text: chunk.choices[0].delta.content});
 					}
 					if(chunk.choices[0].delta.tool_calls) {
-						resp.choices[0].message.tool_calls = chunk.choices[0].delta.tool_calls;
+						for(const deltaTC of chunk.choices[0].delta.tool_calls) {
+							const existing = resp.choices[0].message.tool_calls.find(tc => tc.index === deltaTC.index);
+							if(existing) {
+								if(deltaTC.id) existing.id = deltaTC.id;
+								if(deltaTC.type) existing.type = deltaTC.type;
+								if(deltaTC.function) {
+									if(!existing.function) existing.function = {};
+									if(deltaTC.function.name) existing.function.name = deltaTC.function.name;
+									if(deltaTC.function.arguments) existing.function.arguments = (existing.function.arguments || '') + deltaTC.function.arguments;
+								}
+							} else {
+								resp.choices[0].message.tool_calls.push({
+									index: deltaTC.index,
+									id: deltaTC.id || '',
+									type: deltaTC.type || 'function',
+									function: {
+										name: deltaTC.function?.name || '',
+										arguments: deltaTC.function?.arguments || ''
+									}
+								});
+							}
+						}
 					}
 				}
 			}
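This accumulation matters because OpenAI-compatible servers stream each tool call as fragments spread across many chunks; illustrative deltas (not captured output):

// chunk 1: {index: 0, id: 'call_1', type: 'function', function: {name: 'recall', arguments: ''}}
// chunk 2: {index: 0, function: {arguments: '{"query":'}}
// chunk 3: {index: 0, function: {arguments: '"dog"}'}}
// Concatenating `function.arguments` across chunks yields '{"query":"dog"}'; the old code kept only
// whichever fragment arrived last, so tool calls reached the argument parser as truncated JSON.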

src/tools.ts

@@ -1,9 +1,15 @@
 import * as cheerio from 'cheerio';
-import {$, $Sync} from '@ztimson/node-utils';
+import {$Sync} from '@ztimson/node-utils';
 import {ASet, consoleInterceptor, Http, fn as Fn} from '@ztimson/utils';
+import * as os from 'node:os';
 import {Ai} from './ai.ts';
 import {LLMRequest} from './llm.ts';
+const getShell = () => {
+	if(os.platform() == 'win32') return 'cmd';
+	return $Sync`echo $SHELL`?.split('/').pop() || 'bash';
+}
 export type AiToolArg = {[key: string]: {
 	/** Argument type */
 	type: 'array' | 'boolean' | 'number' | 'object' | 'string',
@@ -40,7 +46,7 @@ export const CliTool: AiTool = {
 	name: 'cli',
 	description: 'Use the command line interface, returns any output',
 	args: {command: {type: 'string', description: 'Command to run', required: true}},
-	fn: (args: {command: string}) => $`${args.command}`
+	fn: (args: {command: string}) => $Sync`${args.command}`
 }
 export const DateTimeTool: AiTool = {
@@ -54,13 +60,13 @@ export const ExecTool: AiTool = {
 	name: 'exec',
 	description: 'Run code/scripts',
 	args: {
-		language: {type: 'string', description: 'Execution language', enum: ['cli', 'node', 'python'], required: true},
+		language: {type: 'string', description: `Execution language (CLI: ${getShell()})`, enum: ['cli', 'node', 'python'], required: true},
 		code: {type: 'string', description: 'Code to execute', required: true}
 	},
 	fn: async (args, stream, ai) => {
 		try {
 			switch(args.type) {
-				case 'bash':
+				case 'cli':
 					return await CliTool.fn({command: args.code}, stream, ai);
 				case 'node':
 					return await JSTool.fn({code: args.code}, stream, ai);
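The same `AiTool` shape used by `CliTool` and `ExecTool` works for user-defined tools; an illustrative sketch (the weather endpoint is a placeholder, and the import assumes `AiTool` is re-exported from the package root):

import {AiTool} from '@ztimson/ai-utils';

export const WeatherTool: AiTool = {
	name: 'weather',
	description: 'Get the current weather for a city',
	args: {city: {type: 'string', description: 'City name', required: true}},
	fn: async (args) => fetch(`https://wttr.in/${encodeURIComponent(args.city)}?format=3`).then(r => r.text())
};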

src/vision.ts

@@ -3,7 +3,7 @@ import {AbortablePromise, Ai} from './ai.ts';
 export class Vision {
-	constructor(private ai: Ai) { }
+	constructor(private ai: Ai) {}
 	/**
 	 * Convert image to text using Optical Character Recognition

tsconfig.json

@@ -15,6 +15,7 @@
 		"noEmit": true,
 		/* Linting */
-		"strict": true
+		"strict": true,
+		"noImplicitAny": false
 	}
 }

vite.config.ts

@@ -5,7 +5,6 @@ export default defineConfig({
 	build: {
 		lib: {
 			entry: {
-				asr: './src/asr.ts',
 				index: './src/index.ts',
 				embedder: './src/embedder.ts',
 			},