Compare commits

...

28 Commits
0.5.4 ... 0.8.0

Author SHA1 Message Date
ca66e8e304 Improved whisper + pyannote, sentence diarization 2026-02-21 14:16:20 -05:00
cec892563e Whisper ASR 2026-02-21 01:03:25 -05:00
91066e070f WIP ASR 2026-02-21 00:51:01 -05:00
a94b153c6d Fixed embedder autostart bug 2026-02-21 00:30:38 -05:00
39537a4a8f Switching to processes and whisper.cpp to avoid transformers.js memory leaks 2026-02-20 21:50:01 -05:00
790608f020 Queue OCR & ASR work 2026-02-20 19:05:19 -05:00
473424ae23 segfault fix 2026-02-20 17:31:49 -05:00
9b831f7d95 Better ASR IDing 2026-02-20 16:55:25 -05:00
498b326e45 Bump 0.7.4 2026-02-20 14:19:17 -05:00
56e4efec94 Use either python or python3 or diarization 2026-02-20 14:14:30 -05:00
a07f069ad0 One embedding at a time 2026-02-19 22:58:53 -05:00
da15d299e6 parallel embedding cap 2026-02-19 21:37:58 -05:00
7ef7c3f676 Cap speaker ID transcript length to 2000 tokens 2026-02-14 09:48:12 -05:00
4143d00de7 Working speaker detection with advanced LLM identifying. Improved LLM json function 2026-02-14 09:39:17 -05:00
0360f2493d Added hugging face token 2026-02-12 22:15:57 -05:00
0172887877 audio worker fix 2026-02-12 20:24:12 -05:00
8f89f5e3cf embedding worker fix 2026-02-12 20:18:56 -05:00
5bd41f8c6a worker fix? 2026-02-12 20:17:31 -05:00
e4399e1b7b Updataes? 2026-02-12 20:14:00 -05:00
ad1ee48763 Use one-off workers to process requests without blocking 2026-02-12 19:45:17 -05:00
3ed206923f Fix ASR 2026-02-12 18:32:19 -05:00
22d5427e86 Fix ASR 2026-02-12 17:49:33 -05:00
43b53164c0 Bump 0.6.3 2026-02-12 17:24:15 -05:00
575fbac099 Fixed ASR 2026-02-12 13:31:30 -05:00
46ae0f7913 expose diarization support checking function 2026-02-12 11:55:29 -05:00
54730a2b9a Speaker diarization 2026-02-12 11:26:11 -05:00
27506d20af Fix anthropic message history 2026-02-11 22:45:30 -05:00
8c64129200 Removed log statement 2026-02-11 21:58:39 -05:00
12 changed files with 540 additions and 1077 deletions

README.md

@@ -3,7 +3,7 @@
<br />
<!-- Logo -->
<img src="https://git.zakscode.com/repo-avatars/a90851ca730480ec37a5c0c2c4f1b4609eee5eadf806eaf16c83ac4cb7493aa9" alt="Logo" width="200" height="200">
<img alt="Logo" width="200" height="200" src="https://git.zakscode.com/repo-avatars/a82d423674763e7a0c1c945bdbb07e249b2bb786d3c9beae76d5b196a10f5c0f">
<!-- Title -->
### @ztimson/ai-utils
@@ -53,13 +53,15 @@ A TypeScript library that provides a unified interface for working with multiple
- **Provider Abstraction**: Switch between AI providers without changing your code
### Built With
[![Anthropic](https://img.shields.io/badge/Anthropic-191919?style=for-the-badge&logo=anthropic&logoColor=white)](https://anthropic.com/)
[![OpenAI](https://img.shields.io/badge/OpenAI-412991?style=for-the-badge&logo=openai&logoColor=white)](https://openai.com/)
[![Ollama](https://img.shields.io/badge/Ollama-000000?style=for-the-badge&logo=ollama&logoColor=white)](https://ollama.com/)
[![TensorFlow](https://img.shields.io/badge/TensorFlow-FF6F00?style=for-the-badge&logo=tensorflow&logoColor=white)](https://tensorflow.org/)
[![Tesseract](https://img.shields.io/badge/Tesseract-3C8FC7?style=for-the-badge&logo=tesseract&logoColor=white)](https://tesseract-ocr.github.io/)
[![Anthropic](https://img.shields.io/badge/Anthropic-de7356?style=for-the-badge&logo=anthropic&logoColor=white)](https://anthropic.com/)
[![llama](https://img.shields.io/badge/llama.cpp-fff?style=for-the-badge&logo=ollama&logoColor=black)](https://github.com/ggml-org/llama.cpp)
[![OpenAI](https://img.shields.io/badge/OpenAI-000?style=for-the-badge&logo=openai-gym&logoColor=white)](https://openai.com/)
[![Pyannote](https://img.shields.io/badge/Pyannote-458864?style=for-the-badge&logo=python&logoColor=white)](https://github.com/pyannote)
[![TensorFlow](https://img.shields.io/badge/TensorFlow-fff?style=for-the-badge&logo=tensorflow&logoColor=ff6f00)](https://tensorflow.org/)
[![Tesseract](https://img.shields.io/badge/Tesseract-B874B2?style=for-the-badge&logo=hack-the-box&logoColor=white)](https://tesseract-ocr.github.io/)
[![Transformers.js](https://img.shields.io/badge/Transformers.js-000?style=for-the-badge&logo=hugging-face&logoColor=yellow)](https://huggingface.co/docs/transformers.js/en/index)
[![TypeScript](https://img.shields.io/badge/TypeScript-3178C6?style=for-the-badge&logo=typescript&logoColor=white)](https://typescriptlang.org/)
[![Whisper](https://img.shields.io/badge/Whisper-412991?style=for-the-badge&logo=openai&logoColor=white)](https://github.com/ggerganov/whisper.cpp)
[![Whisper](https://img.shields.io/badge/Whisper.cpp-000?style=for-the-badge&logo=openai-gym&logoColor=white)](https://github.com/ggerganov/whisper.cpp)
## Setup
@@ -75,6 +77,7 @@ A TypeScript library that provides a unified interface for working with multiple
#### Instructions
1. Install the package: `npm i @ztimson/ai-utils`
2. For speaker diarization: `pip install pyannote.audio`
</details>
@@ -87,11 +90,14 @@ A TypeScript library that provides a unified interface for working with multiple
#### Prerequisites
- [Node.js](https://nodejs.org/en/download)
- _[Whisper.cpp](https://github.com/ggml-org/whisper.cpp/releases/tag) (ASR)_
- _[Pyannote](https://github.com/pyannote) (ASR Diarization):_ `pip install pyannote.audio`
#### Instructions
1. Install the dependencies: `npm i`
2. Build library: `npm build`
3. Run unit tests: `npm test`
2. For speaker diarization: `pip install pyannote.audio`
3. Build library: `npm build`
4. Run unit tests: `npm test`
</details>

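The README's new instructions boil down to installing the npm package plus the optional native dependencies. A minimal quick-start sketch, assuming `Ai` accepts the `AiOptions` shape shown in the ai.ts diff further down (the constructor itself is not part of this diff, and the whisper binary path and model key are hypothetical):

```typescript
import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({
	path: './models',                       // cache directory for downloaded models
	hfToken: process.env.HF_TOKEN,          // needed to pull pyannote/speaker-diarization-3.1
	whisper: '/usr/local/bin/whisper-cli',  // hypothetical path to the whisper.cpp binary
	asr: 'ggml-base.en.bin',                // whisper model, downloaded to `path` on demand
	embedder: 'bge-small-en-v1.5',
	ocr: 'eng',
	llm: {models: {'claude-sonnet': {proto: 'anthropic', token: process.env.ANTHROPIC_API_KEY!}}},
});
```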
package-lock.json (generated): 1177 lines changed. Diff suppressed because it is too large.

package.json

@@ -1,6 +1,6 @@
{
"name": "@ztimson/ai-utils",
"version": "0.5.4",
"version": "0.8.0",
"description": "AI Utility library",
"author": "Zak Timson",
"license": "MIT",
@@ -25,14 +25,14 @@
"watch": "npx vite build --watch"
},
"dependencies": {
"@anthropic-ai/sdk": "^0.67.0",
"@anthropic-ai/sdk": "^0.78.0",
"@tensorflow/tfjs": "^4.22.0",
"@xenova/transformers": "^2.17.2",
"@ztimson/node-utils": "^1.0.4",
"@ztimson/utils": "^0.27.9",
"@ztimson/node-utils": "^1.0.7",
"@ztimson/utils": "^0.28.13",
"cheerio": "^1.2.0",
"openai": "^6.6.0",
"tesseract.js": "^6.0.1"
"openai": "^6.22.0",
"tesseract.js": "^7.0.0"
},
"devDependencies": {
"@types/node": "^24.8.1",

ai.ts

@@ -8,26 +8,22 @@ export type AbortablePromise<T> = Promise<T> & {
};
export type AiOptions = {
/** Token to pull models from hugging face */
hfToken?: string;
/** Path to models */
path?: string;
/** Embedding model */
embedder?: string; // all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5
/** Whisper ASR model: ggml-tiny.en.bin, ggml-base.en.bin */
asr?: string;
/** Embedding model: all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5 */
embedder?: string;
/** Large language models, first is default */
llm?: Omit<LLMRequest, 'model'> & {
models: {[model: string]: AnthropicConfig | OllamaConfig | OpenAiConfig};
}
/** Tesseract OCR configuration */
tesseract?: {
/** Model: eng, eng_best, eng_fast */
model?: string;
}
/** Whisper ASR configuration */
whisper?: {
/** Whisper binary location */
binary: string;
/** Model: `ggml-base.en.bin` */
model: string;
}
/** OCR model: eng, eng_best, eng_fast */
ocr?: string;
/** Whisper binary */
whisper?: string;
}
export class Ai {

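The same hunk flattens the options surface: `tesseract.model` becomes `ocr`, `whisper` collapses from an object to just the binary path, and the model name moves to the new top-level `asr`. A before/after sketch of an equivalent configuration (values illustrative):

```typescript
// 0.5.x (nested):
const before = {
	embedder: 'bge-small-en-v1.5',
	tesseract: {model: 'eng'},
	whisper: {binary: '/usr/local/bin/whisper-cli', model: 'ggml-base.en.bin'},
};

// 0.8.x (flat):
const after = {
	embedder: 'bge-small-en-v1.5',
	ocr: 'eng',
	whisper: '/usr/local/bin/whisper-cli',
	asr: 'ggml-base.en.bin',
};
```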
antrhopic.ts

@@ -13,25 +13,25 @@ export class Anthropic extends LLMProvider {
}
private toStandard(history: any[]): LLMMessage[] {
for(let i = 0; i < history.length; i++) {
const orgI = i;
if(typeof history[orgI].content != 'string') {
if(history[orgI].role == 'assistant') {
history[orgI].content.filter((c: any) => c.type =='tool_use').forEach((c: any) => {
history.splice(i + 1, 0, {role: 'tool', id: c.id, name: c.name, args: c.input, timestamp: Date.now()});
});
} else if(history[orgI].role == 'user') {
history[orgI].content.filter((c: any) => c.type =='tool_result').forEach((c: any) => {
const h = history.find((h: any) => h.id == c.tool_use_id);
h[c.is_error ? 'error' : 'content'] = c.content;
const timestamp = Date.now();
const messages: LLMMessage[] = [];
for(let h of history) {
if(typeof h.content == 'string') {
messages.push(<any>{timestamp, ...h});
} else {
const textContent = h.content?.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
if(textContent) messages.push({timestamp, role: h.role, content: textContent});
h.content.forEach((c: any) => {
if(c.type == 'tool_use') {
messages.push({timestamp, role: 'tool', id: c.id, name: c.name, args: c.input, content: undefined});
} else if(c.type == 'tool_result') {
const m: any = messages.findLast(m => (<any>m).id == c.tool_use_id);
if(m) m[c.is_error ? 'error' : 'content'] = c.content;
}
});
}
history[orgI].content = history[orgI].content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
if(!history[orgI].content) history.splice(orgI, 1);
}
if(!history[orgI].timestamp) history[orgI].timestamp = Date.now();
}
return history.filter(h => !!h.content);
return messages;
}
private fromStandard(history: LLMMessage[]): any[] {
@@ -50,8 +50,8 @@ export class Anthropic extends LLMProvider {
ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
const controller = new AbortController();
return Object.assign(new Promise<any>(async (res, rej) => {
const history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
return Object.assign(new Promise<any>(async (res) => {
let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
const tools = options.tools || this.ai.options.llm?.tools || [];
const requestParams: any = {
model: options.model || this.model,
@@ -73,7 +73,6 @@ export class Anthropic extends LLMProvider {
};
let resp: any, isFirstMessage = true;
const assistantMessages: string[] = [];
do {
resp = await this.client.messages.create(requestParams).catch(err => {
err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
@@ -119,7 +118,6 @@ export class Anthropic extends LLMProvider {
if(options.stream) options.stream({tool: toolCall.name});
if(!tool) return {tool_use_id: toolCall.id, is_error: true, content: 'Tool not found'};
try {
console.log(typeof tool.fn);
const result = await tool.fn(toolCall.input, options?.stream, this.ai);
return {type: 'tool_result', tool_use_id: toolCall.id, content: JSONSanitize(result)};
} catch (err: any) {
@@ -131,7 +129,7 @@ export class Anthropic extends LLMProvider {
}
} while (!controller.signal.aborted && resp.content.some((c: any) => c.type === 'tool_use'));
history.push({role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')});
this.toStandard(history);
history = this.toStandard(history);
if(options.stream) options.stream({done: true});
if(options.history) options.history.splice(0, options.history.length, ...history);

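The rewritten `toStandard` walks the Anthropic-native history once, splitting each content-block array into flat `LLMMessage` entries instead of mutating the array in place. A sketch of the mapping, with field names taken from the diff (values illustrative):

```typescript
// Anthropic-native assistant turn (content as blocks):
const turn = {role: 'assistant', content: [
	{type: 'text', text: 'Let me check the weather.'},
	{type: 'tool_use', id: 'tu_1', name: 'weather', input: {city: 'Toronto'}},
]};

// toStandard() flattens it into two messages; a later tool_result block
// with tool_use_id 'tu_1' fills in the tool message's content (or error):
// {timestamp, role: 'assistant', content: 'Let me check the weather.'}
// {timestamp, role: 'tool', id: 'tu_1', name: 'weather', args: {city: 'Toronto'}, content: undefined}
```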
audio.ts

@@ -1,39 +1,218 @@
import {spawn} from 'node:child_process';
import {execSync, spawn} from 'node:child_process';
import {mkdtempSync} from 'node:fs';
import fs from 'node:fs/promises';
import Path from 'node:path';
import {tmpdir} from 'node:os';
import * as path from 'node:path';
import Path, {join} from 'node:path';
import {AbortablePromise, Ai} from './ai.ts';
export class Audio {
private downloads: {[key: string]: Promise<string>} = {};
private pyannote!: string;
private whisperModel!: string;
constructor(private ai: Ai) {
if(ai.options.whisper?.binary) {
this.whisperModel = ai.options.whisper?.model.endsWith('.bin') ? ai.options.whisper?.model : ai.options.whisper?.model + '.bin';
if(ai.options.whisper) {
this.whisperModel = ai.options.asr || 'ggml-base.en.bin';
this.downloadAsrModel();
}
this.pyannote = `
import sys
import json
import os
from pyannote.audio import Pipeline
os.environ['TORCH_HOME'] = r"${ai.options.path}"
pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1", token="${ai.options.hfToken}")
output = pipeline(sys.argv[1])
segments = []
for turn, speaker in output.speaker_diarization:
segments.append({"start": turn.start, "end": turn.end, "speaker": speaker})
print(json.dumps(segments))
`;
}
asr(path: string, model: string = this.whisperModel): AbortablePromise<string | null> {
if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
let abort: any = () => {};
const p = new Promise<string | null>(async (resolve, reject) => {
const m = await this.downloadAsrModel(model);
private runAsr(file: string, opts: {model?: string, diarization?: boolean} = {}): AbortablePromise<any> {
let proc: any;
const p = new Promise<any>((resolve, reject) => {
this.downloadAsrModel(opts.model).then(m => {
if(opts.diarization) {
let output = path.join(path.dirname(file), 'transcript');
proc = spawn(<string>this.ai.options.whisper,
['-m', m, '-f', file, '-np', '-ml', '1', '-oj', '-of', output],
{stdio: ['ignore', 'ignore', 'pipe']}
);
proc.on('error', (err: Error) => reject(err));
proc.on('close', async (code: number) => {
if(code === 0) {
output = await fs.readFile(output + '.json', 'utf-8');
fs.rm(output + '.json').catch(() => { });
try { resolve(JSON.parse(output)); }
catch(e) { reject(new Error('Failed to parse whisper JSON')); }
} else {
reject(new Error(`Exit code ${code}`));
}
});
} else {
let output = '';
const proc = spawn(<string>this.ai.options.whisper?.binary, ['-nt', '-np', '-m', m, '-f', path], {stdio: ['ignore', 'pipe', 'ignore']});
abort = () => proc.kill('SIGTERM');
proc = spawn(<string>this.ai.options.whisper, ['-m', m, '-f', file, '-np', '-nt']);
proc.on('error', (err: Error) => reject(err));
proc.stdout.on('data', (data: Buffer) => output += data.toString());
proc.on('close', async (code: number) => {
if(code === 0) {
resolve(output.trim() || null);
} else {
reject(new Error(`Exit code ${code}`));
}
});
}
});
});
return <any>Object.assign(p, {abort: () => proc?.kill('SIGTERM')});
}
private runDiarization(file: string): AbortablePromise<any> {
let aborted = false, abort = () => { aborted = true; };
const checkPython = (cmd: string) => {
return new Promise<boolean>((resolve) => {
const proc = spawn(cmd, ['-W', 'ignore', '-c', 'import pyannote.audio']);
proc.on('close', (code: number) => resolve(code === 0));
proc.on('error', () => resolve(false));
});
};
const p = Promise.all<any>([
checkPython('python'),
checkPython('python3'),
]).then(<any>(async ([p, p3]: [boolean, boolean]) => {
if(aborted) return;
if(!p && !p3) throw new Error('Pyannote is not installed: pip install pyannote.audio');
const binary = p3 ? 'python3' : 'python';
return new Promise((resolve, reject) => {
if(aborted) return;
let output = '';
const proc = spawn(binary, ['-W', 'ignore', '-c', this.pyannote, file]);
proc.stdout.on('data', (data: Buffer) => output += data.toString());
proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
proc.on('close', (code: number) => {
if(code === 0) resolve(output.trim() || null);
else reject(new Error(`Exit code ${code}`));
if(code === 0) {
try { resolve(JSON.parse(output)); }
catch (err) { reject(new Error('Failed to parse diarization output')); }
} else {
reject(new Error(`Python process exited with code ${code}`));
}
});
proc.on('error', reject);
abort = () => proc.kill('SIGTERM');
});
}));
return <any>Object.assign(p, {abort});
}
private async combineSpeakerTranscript(punctuatedText: string, timestampData: any, speakers: any[]): Promise<string> {
const speakerMap = new Map();
let speakerCount = 0;
speakers.forEach((seg: any) => {
if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
});
const sentences = punctuatedText.match(/[^.!?]+[.!?]+/g) || [punctuatedText];
const lines: string[] = [];
sentences.forEach(sentence => {
sentence = sentence.trim();
if(!sentence) return;
const words = sentence.toLowerCase().replace(/[^\w\s]/g, '').split(/\s+/);
let startTime = Infinity, endTime = 0;
const wordTimings: {start: number, end: number}[] = [];
timestampData.transcription.forEach((word: any) => {
const wordText = word.text.trim().toLowerCase();
if(words.some(w => wordText.includes(w))) {
const start = word.offsets.from / 1000;
const end = word.offsets.to / 1000;
wordTimings.push({start, end});
if(start < startTime) startTime = start;
if(end > endTime) endTime = end;
}
});
if(startTime === Infinity) return;
// Weight by word-level overlap instead of sentence span
const speakerScores = new Map<number, number>();
wordTimings.forEach(wt => {
speakers.forEach((seg: any) => {
const overlap = Math.max(0, Math.min(wt.end, seg.end) - Math.max(wt.start, seg.start));
const duration = wt.end - wt.start;
if(duration > 0) {
const score = overlap / duration; // % of word covered
const spkNum = speakerMap.get(seg.speaker);
speakerScores.set(spkNum, (speakerScores.get(spkNum) || 0) + score);
}
});
});
return Object.assign(p, {abort});
let bestSpeaker = 1;
let maxScore = 0;
speakerScores.forEach((score, speaker) => {
if(score > maxScore) {
maxScore = score;
bestSpeaker = speaker;
}
});
lines.push(`[Speaker ${bestSpeaker}]: ${sentence}`);
});
return lines.join('\n').trim();
}
asr(file: string, options: { model?: string; diarization?: boolean | 'id' } = {}): AbortablePromise<string | null> {
if(!this.ai.options.whisper) throw new Error('Whisper not configured');
const tmp = join(mkdtempSync(join(tmpdir(), 'audio-')), 'converted.wav');
execSync(`ffmpeg -i "${file}" -ar 16000 -ac 1 -f wav "${tmp}"`, { stdio: 'ignore' });
const clean = () => fs.rm(Path.dirname(tmp), {recursive: true, force: true}).catch(() => {});
const transcript = this.runAsr(tmp, {model: options.model, diarization: false});
const timestamps: any = !options.diarization ? Promise.resolve(null) : this.runAsr(tmp, {model: options.model, diarization: true});
const diarization: any = !options.diarization ? Promise.resolve(null) : this.runDiarization(tmp);
let aborted = false, abort = () => {
aborted = true;
transcript.abort();
timestamps?.abort?.();
diarization?.abort?.();
clean();
};
const response = Promise.allSettled([transcript, timestamps, diarization]).then(async ([t, ts, d]) => {
if(t.status == 'rejected') throw new Error('Whisper.cpp punctuated:\n' + t.reason);
if(ts.status == 'rejected') throw new Error('Whisper.cpp timestamps:\n' + ts.reason);
if(d.status == 'rejected') throw new Error('Pyannote:\n' + d.reason);
if(aborted || !options.diarization) return t.value;
let transcript = await this.combineSpeakerTranscript(t.value, ts.value, d.value);
if(!aborted && options.diarization === 'id') {
if(!this.ai.language.defaultModel) throw new Error('Configure an LLM for advanced ASR speaker detection');
let chunks = this.ai.language.chunk(transcript, 500, 0);
if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name", 2: "Second Name"}', {
system: 'Use the following transcript to identify speakers. Only identify speakers you are positive about, dont mention speakers you are unsure about in your response',
temperature: 0.1,
});
Object.entries(names).forEach(([speaker, name]) => transcript = transcript.replaceAll(`[Speaker ${speaker}]`, `[${name}]`));
}
return transcript;
}).finally(() => clean());
return <any>Object.assign(response, {abort});
}
async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
if(!this.ai.options.whisper) throw new Error('Whisper not configured');
if(!model.endsWith('.bin')) model += '.bin';
const p = Path.join(<string>this.ai.options.path, model);
if(await fs.stat(p).then(() => true).catch(() => false)) return p;

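The new pipeline converts input to 16 kHz mono WAV with ffmpeg, then fans out: one whisper.cpp pass for punctuated text, one word-level pass (`-ml 1 -oj`) for timestamps, and pyannote for speaker turns, before `combineSpeakerTranscript` stitches them into labelled sentences. A usage sketch; `ai.audio` is an assumed accessor (only `ai.language` appears in this diff), and ffmpeg must be on the PATH:

```typescript
// Plain transcription (whisper.cpp only):
const text = await ai.audio.asr('./meeting.mp3');

// Sentence-level diarization: returns lines like "[Speaker 1]: Hello everyone."
const labelled = ai.audio.asr('./meeting.mp3', {diarization: true});

// 'id' additionally asks the configured LLM to swap speaker numbers for detected names.
const named = await ai.audio.asr('./meeting.mp3', {diarization: 'id'});

// The returned AbortablePromise can cancel all three child processes and clean up temp files.
labelled.abort();
```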
embedder.ts

@@ -1,14 +1,13 @@
import { pipeline } from '@xenova/transformers';
import { parentPort } from 'worker_threads';
let embedder: any;
const [modelDir, model] = process.argv.slice(2);
parentPort?.on('message', async ({ id, text, model, path }) => {
if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model, {
quantized: true,
cache_dir: path,
});
let text = '';
process.stdin.on('data', chunk => text += chunk);
process.stdin.on('end', async () => {
const embedder = await pipeline('feature-extraction', 'Xenova/' + model, {quantized: true, cache_dir: modelDir});
const output = await embedder(text, { pooling: 'mean', normalize: true });
const embedding = Array.from(output.data);
parentPort?.postMessage({ id, embedding });
console.log(JSON.stringify({embedding}));
process.exit();
});

index.ts

@@ -1,7 +1,6 @@
export * from './ai';
export * from './antrhopic';
export * from './audio';
export * from './embedder'
export * from './llm';
export * from './open-ai';
export * from './provider';

llm.ts

@@ -4,9 +4,9 @@ import {Anthropic} from './antrhopic.ts';
import {OpenAi} from './open-ai.ts';
import {LLMProvider} from './provider.ts';
import {AiTool} from './tools.ts';
import {Worker} from 'worker_threads';
import {fileURLToPath} from 'url';
import {dirname, join} from 'path';
import { spawn } from 'node:child_process';
export type AnthropicConfig = {proto: 'anthropic', token: string};
export type OllamaConfig = {proto: 'ollama', host: string};
@@ -75,22 +75,10 @@ export type LLMRequest = {
}
class LLM {
private embedWorker: Worker | null = null;
private embedQueue = new Map<number, { resolve: (value: number[]) => void; reject: (error: any) => void }>();
private embedId = 0;
private models: {[model: string]: LLMProvider} = {};
private defaultModel!: string;
defaultModel!: string;
models: {[model: string]: LLMProvider} = {};
constructor(public readonly ai: Ai) {
this.embedWorker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
this.embedWorker.on('message', ({ id, embedding }) => {
const pending = this.embedQueue.get(id);
if (pending) {
pending.resolve(embedding);
this.embedQueue.delete(id);
}
});
if(!ai.options.llm?.models) return;
Object.entries(ai.options.llm.models).forEach(([model, config]) => {
if(!this.defaultModel) this.defaultModel = model;
@@ -196,7 +184,12 @@ class LLM {
const system = history[0].role == 'system' ? history[0] : null,
recent = keep == 0 ? [] : history.slice(-keep),
process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
const summary: any = await this.json(`Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn't already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI. Match this format: {summary: string, facts: [[subject, fact]]}\n\n${process.map(m => `${m.role}: ${m.content}`).join('\n\n')}`, {model: options?.model, temperature: options?.temperature || 0.3});
const summary: any = await this.json(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), '{summary: string, facts: [[subject, fact]]}', {
system: 'Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn\'t already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI.',
model: options?.model,
temperature: options?.temperature || 0.3
});
const timestamp = new Date();
const memory = await Promise.all((summary?.facts || [])?.map(async ([owner, fact]: [string, string]) => {
const e = await Promise.all([this.embedding(owner), this.embedding(`${owner}: ${fact}`)]);
@@ -262,30 +255,57 @@ class LLM {
/**
* Create a vector representation of a string
* @param {object | string} target Item that will be embedded (objects get converted)
* @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
* @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
* @param {maxTokens?: number, overlapTokens?: number} opts Options for embedding such as chunk sizes
* @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
*/
embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number} = {}): AbortablePromise<any[]> {
let {maxTokens = 500, overlapTokens = 50} = opts;
let aborted = false;
const abort = () => { aborted = true; };
const embed = (text: string): Promise<number[]> => {
return new Promise((resolve, reject) => {
const id = this.embedId++;
this.embedQueue.set(id, { resolve, reject });
this.embedWorker?.postMessage({
id,
text,
model: this.ai.options?.embedder || 'bge-small-en-v1.5',
path: this.ai.options.path
if(aborted) return reject(new Error('Aborted'));
const args: string[] = [
join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'),
<string>this.ai.options.path,
this.ai.options?.embedder || 'bge-small-en-v1.5'
];
const proc = spawn('node', args, {stdio: ['pipe', 'pipe', 'ignore']});
proc.stdin.write(text);
proc.stdin.end();
let output = '';
proc.stdout.on('data', (data: Buffer) => output += data.toString());
proc.on('close', (code: number) => {
if(aborted) return reject(new Error('Aborted'));
if(code === 0) {
try {
const result = JSON.parse(output);
resolve(result.embedding);
} catch(err) {
reject(new Error('Failed to parse embedding output'));
}
} else {
reject(new Error(`Embedder process exited with code ${code}`));
}
});
proc.on('error', reject);
});
};
const chunks = this.chunk(target, maxTokens, overlapTokens);
return Promise.all(chunks.map(async (text, index) => ({
index,
embedding: await embed(text),
text,
tokens: this.estimateTokens(text),
})));
const p = (async () => {
const chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
for(let i = 0; i < chunks.length; i++) {
if(aborted) break;
const text = chunks[i];
const embedding = await embed(text);
results.push({index: i, embedding, text, tokens: this.estimateTokens(text)});
}
return results;
})();
return Object.assign(p, { abort });
}
/**
@@ -317,12 +337,16 @@ class LLM {
/**
* Ask a question with JSON response
* @param {string} message Question
* @param {string} text Text to process
* @param {string} schema JSON schema the AI should match
* @param {LLMRequest} options Configuration options and chat history
* @returns {Promise<{} | {} | RegExpExecArray | null>}
*/
async json(message: string, options?: LLMRequest): Promise<any> {
let resp = await this.ask(message, {system: 'Respond using a JSON blob matching any provided examples', ...options});
async json(text: string, schema: string, options?: LLMRequest): Promise<any> {
let resp = await this.ask(text, {...options, system: (options?.system ? `${options.system}\n` : '') + `Only respond using a JSON code block matching this schema:
\`\`\`json
${schema}
\`\`\``});
if(!resp) return {};
const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
const jsonStr = codeBlock ? codeBlock[1].trim() : resp;

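Both public signatures changed in this hunk: `embedding()` now takes an options object and returns an abortable promise that embeds chunks sequentially through one-off `embedder.js` processes, and `json()` takes the text and target schema as separate arguments. A sketch, continuing with the `ai` instance from the quick-start above:

```typescript
const longDocument = 'Some long text to index...';

const chunks = await ai.language.embedding(longDocument, {maxTokens: 500, overlapTokens: 50});
// => [{index, embedding: number[], text, tokens}, ...]

// The schema string is injected into the system prompt as a JSON code block.
const summary = await ai.language.json(
	'user: hello\n\nassistant: hi there',
	'{summary: string, facts: [[subject, fact]]}',
	{temperature: 0.3},
);
```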
open-ai.ts

@@ -68,7 +68,7 @@ export class OpenAi extends LLMProvider {
const controller = new AbortController();
return Object.assign(new Promise<any>(async (res, rej) => {
if(options.system && options.history?.[0]?.role != 'system') options.history?.splice(0, 0, {role: 'system', content: options.system, timestamp: Date.now()});
const history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
const tools = options.tools || this.ai.options.llm?.tools || [];
const requestParams: any = {
model: options.model || this.model,
@@ -133,7 +133,7 @@ export class OpenAi extends LLMProvider {
}
} while (!controller.signal.aborted && resp.choices?.[0]?.message?.tool_calls?.length);
history.push({role: 'assistant', content: resp.choices[0].message.content || ''});
this.toStandard(history);
history = this.toStandard(history);
if(options.stream) options.stream({done: true});
if(options.history) options.history.splice(0, options.history.length, ...history);

vision.ts

@@ -13,7 +13,7 @@ export class Vision {
ocr(path: string): AbortablePromise<string | null> {
let worker: any;
const p = new Promise<string | null>(async res => {
worker = await createWorker(this.ai.options.tesseract?.model || 'eng', 2, {cachePath: this.ai.options.path});
worker = await createWorker(this.ai.options.ocr || 'eng', 2, {cachePath: this.ai.options.path});
const {data} = await worker.recognize(path);
await worker.terminate();
res(data.text.trim() || null);

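OCR behaviour is unchanged apart from reading the flattened `ocr` option. A one-line sketch, assuming the `Vision` instance is exposed as `ai.vision` (the accessor is not shown in this diff):

```typescript
// Uses ai.options.ocr ('eng' by default); traineddata is cached under ai.options.path.
const scanned = await ai.vision.ocr('./invoice.png');
```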
vite.config.ts

@@ -1,6 +1,5 @@
import {defineConfig} from 'vite';
import dts from 'vite-plugin-dts';
import {resolve} from 'path';
export default defineConfig({
build: {