Compare commits

...

41 Commits

Author SHA1 Message Date
da15d299e6 parallel embedding cap
All checks were successful
Publish Library / Build NPM Project (push) Successful in 31s
Publish Library / Tag Version (push) Successful in 5s
2026-02-19 21:37:58 -05:00
7ef7c3f676 Cap speaker ID transcript length to 2000 tokens
All checks were successful
Publish Library / Build NPM Project (push) Successful in 34s
Publish Library / Tag Version (push) Successful in 6s
2026-02-14 09:48:12 -05:00
4143d00de7 Working speaker detection with advanced LLM identifying. Improved LLM json function
All checks were successful
Publish Library / Build NPM Project (push) Successful in 39s
Publish Library / Tag Version (push) Successful in 5s
2026-02-14 09:39:17 -05:00
0360f2493d Added hugging face token
All checks were successful
Publish Library / Build NPM Project (push) Successful in 31s
Publish Library / Tag Version (push) Successful in 5s
2026-02-12 22:15:57 -05:00
0172887877 audio worker fix
All checks were successful
Publish Library / Build NPM Project (push) Successful in 28s
Publish Library / Tag Version (push) Successful in 5s
2026-02-12 20:24:12 -05:00
8f89f5e3cf embedding worker fix
All checks were successful
Publish Library / Build NPM Project (push) Successful in 28s
Publish Library / Tag Version (push) Successful in 5s
2026-02-12 20:18:56 -05:00
5bd41f8c6a worker fix?
All checks were successful
Publish Library / Build NPM Project (push) Successful in 29s
Publish Library / Tag Version (push) Successful in 5s
2026-02-12 20:17:31 -05:00
e4399e1b7b Updataes?
All checks were successful
Publish Library / Build NPM Project (push) Successful in 26s
Publish Library / Tag Version (push) Successful in 5s
2026-02-12 20:14:00 -05:00
ad1ee48763 Use one-off workers to process requests without blocking
All checks were successful
Publish Library / Build NPM Project (push) Successful in 27s
Publish Library / Tag Version (push) Successful in 5s
2026-02-12 19:45:17 -05:00
3ed206923f Fix ASR
All checks were successful
Publish Library / Build NPM Project (push) Successful in 27s
Publish Library / Tag Version (push) Successful in 5s
2026-02-12 18:32:19 -05:00
22d5427e86 Fix ASR
All checks were successful
Publish Library / Build NPM Project (push) Successful in 28s
Publish Library / Tag Version (push) Successful in 5s
2026-02-12 17:49:33 -05:00
43b53164c0 Bump 0.6.3
All checks were successful
Publish Library / Build NPM Project (push) Successful in 29s
Publish Library / Tag Version (push) Successful in 4s
2026-02-12 17:24:15 -05:00
575fbac099 Fixed ASR
All checks were successful
Publish Library / Build NPM Project (push) Successful in 30s
Publish Library / Tag Version (push) Successful in 4s
2026-02-12 13:31:30 -05:00
46ae0f7913 expose diarization support checking function
All checks were successful
Publish Library / Build NPM Project (push) Successful in 25s
Publish Library / Tag Version (push) Successful in 5s
2026-02-12 11:55:29 -05:00
54730a2b9a Speaker diarization
All checks were successful
Publish Library / Build NPM Project (push) Successful in 31s
Publish Library / Tag Version (push) Successful in 5s
2026-02-12 11:26:11 -05:00
27506d20af Fix anthropic message history
All checks were successful
Publish Library / Build NPM Project (push) Successful in 30s
Publish Library / Tag Version (push) Successful in 5s
2026-02-11 22:45:30 -05:00
8c64129200 Removed log statement
All checks were successful
Publish Library / Build NPM Project (push) Successful in 27s
Publish Library / Tag Version (push) Successful in 5s
2026-02-11 21:58:39 -05:00
013aa942c0 Added save directory for embedder
All checks were successful
Publish Library / Build NPM Project (push) Successful in 33s
Publish Library / Tag Version (push) Successful in 4s
2026-02-11 21:45:54 -05:00
c8d5660b1a Enable quantized embedder for speed boost
All checks were successful
Publish Library / Build NPM Project (push) Successful in 23s
Publish Library / Tag Version (push) Successful in 5s
2026-02-11 20:28:14 -05:00
f2c66b0cb8 Updated default embedder
All checks were successful
Publish Library / Build NPM Project (push) Successful in 39s
Publish Library / Tag Version (push) Successful in 8s
2026-02-11 20:23:50 -05:00
cda7db4f45 Added memory system
All checks were successful
Publish Library / Build NPM Project (push) Successful in 30s
Publish Library / Tag Version (push) Successful in 5s
2026-02-08 19:52:02 -05:00
d71a6be120 Fixed timezones with date time tool
All checks were successful
Publish Library / Build NPM Project (push) Successful in 57s
Publish Library / Tag Version (push) Successful in 8s
2026-02-02 09:30:48 -05:00
7b57a0ded1 Updated LLM config and added read_webpage
All checks were successful
Publish Library / Build NPM Project (push) Successful in 46s
Publish Library / Tag Version (push) Successful in 6s
2026-02-01 13:16:08 -05:00
28904cddbe TTS
All checks were successful
Publish Library / Build NPM Project (push) Successful in 49s
Publish Library / Tag Version (push) Successful in 16s
2026-01-30 15:39:29 -05:00
d5bf1ec47e Pulled chunking out into its own exported function for easy access
All checks were successful
Publish Library / Build NPM Project (push) Successful in 41s
Publish Library / Tag Version (push) Successful in 7s
2026-01-30 10:38:51 -05:00
cb60a0b0c5 Moved embeddings to worker to prevent blocking
All checks were successful
Publish Library / Build NPM Project (push) Successful in 41s
Publish Library / Tag Version (push) Successful in 7s
2026-01-28 22:17:39 -05:00
1c59379c7d Set tesseract model
All checks were successful
Publish Library / Build NPM Project (push) Successful in 31s
Publish Library / Tag Version (push) Successful in 5s
2026-01-16 20:33:51 -05:00
6dce0e8954 Fixed tool calls
All checks were successful
Publish Library / Build NPM Project (push) Successful in 39s
Publish Library / Tag Version (push) Successful in 8s
2025-12-27 17:27:53 -05:00
98dd0bb323 Auto download teseract models
All checks were successful
Publish Library / Build NPM Project (push) Successful in 1m4s
Publish Library / Tag Version (push) Successful in 10s
2025-12-22 13:48:53 -05:00
ca5a2334bb bump 2.2.0
All checks were successful
Publish Library / Build NPM Project (push) Successful in 43s
Publish Library / Tag Version (push) Successful in 11s
2025-12-22 11:02:53 -05:00
3cd7b12f5f Configure model path for all libraries
Some checks failed
Publish Library / Tag Version (push) Has been cancelled
Publish Library / Build NPM Project (push) Has been cancelled
2025-12-22 11:02:24 -05:00
bb6933f0d5 Optimized cosineSimilarity
All checks were successful
Publish Library / Build NPM Project (push) Successful in 42s
Publish Library / Tag Version (push) Successful in 7s
2025-12-19 15:22:06 -05:00
435c6127b1 Re-organized functions and added semantic embeddings
All checks were successful
Publish Library / Build NPM Project (push) Successful in 46s
Publish Library / Tag Version (push) Successful in 8s
2025-12-19 11:16:05 -05:00
c896b585d0 Fixed LLM multi message responses
All checks were successful
Publish Library / Build NPM Project (push) Successful in 44s
Publish Library / Tag Version (push) Successful in 14s
2025-12-17 19:59:34 -05:00
1fe1e0cafe Fixing message combination on anthropic
All checks were successful
Publish Library / Build NPM Project (push) Successful in 35s
Publish Library / Tag Version (push) Successful in 8s
2025-12-16 16:11:13 -05:00
3aa4684923 Fixing message combination on anthropic
All checks were successful
Publish Library / Build NPM Project (push) Successful in 33s
Publish Library / Tag Version (push) Successful in 7s
2025-12-16 13:07:03 -05:00
0730f5f3f9 Fixed timestamp breaking api calls
All checks were successful
Publish Library / Build NPM Project (push) Successful in 34s
Publish Library / Tag Version (push) Successful in 8s
2025-12-16 12:56:56 -05:00
1a0351aeef Handle multiple AI responses in one question better.
All checks were successful
Publish Library / Build NPM Project (push) Successful in 33s
Publish Library / Tag Version (push) Successful in 8s
2025-12-16 12:46:44 -05:00
a5ed4076b7 Handle anthropic multiple responses better.
All checks were successful
Publish Library / Build NPM Project (push) Successful in 34s
Publish Library / Tag Version (push) Successful in 8s
2025-12-16 12:22:14 -05:00
0112c92505 Removed log statements
All checks were successful
Publish Library / Build NPM Project (push) Successful in 20s
Publish Library / Tag Version (push) Successful in 5s
2025-12-14 21:16:39 -05:00
2351f590b5 Removed ASR file intermediary
All checks were successful
Publish Library / Build NPM Project (push) Successful in 37s
Publish Library / Tag Version (push) Successful in 8s
2025-12-14 09:27:07 -05:00
16 changed files with 2260 additions and 849 deletions

README.md

@@ -75,6 +75,7 @@ A TypeScript library that provides a unified interface for working with multiple
 #### Instructions
 1. Install the package: `npm i @ztimson/ai-utils`
+2. For speaker diarization: `pip install pyannote.audio`
 </details>
@@ -90,8 +91,9 @@ A TypeScript library that provides a unified interface for working with multiple
 #### Instructions
 1. Install the dependencies: `npm i`
-2. Build library: `npm build`
-3. Run unit tests: `npm test`
+2. For speaker diarization: `pip install pyannote.audio`
+3. Build library: `npm build`
+4. Run unit tests: `npm test`
 </details>

package-lock.json (generated; 2045 changed lines)

File diff suppressed because it is too large

package.json

@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.1.13",
+  "version": "0.7.2",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",
@@ -27,11 +27,13 @@
   "dependencies": {
     "@anthropic-ai/sdk": "^0.67.0",
     "@tensorflow/tfjs": "^4.22.0",
+    "@xenova/transformers": "^2.17.2",
     "@ztimson/node-utils": "^1.0.4",
     "@ztimson/utils": "^0.27.9",
-    "ollama": "^0.6.0",
+    "cheerio": "^1.2.0",
     "openai": "^6.6.0",
-    "tesseract.js": "^6.0.1"
+    "tesseract.js": "^6.0.1",
+    "wavefile": "^11.0.0"
   },
   "devDependencies": {
     "@types/node": "^24.8.1",

src/ai.ts (151 changed lines)

@@ -1,127 +1,42 @@
-import {$} from '@ztimson/node-utils';
-import {createWorker} from 'tesseract.js';
-import {LLM, LLMOptions} from './llm';
-import fs from 'node:fs/promises';
-import Path from 'node:path';
-import * as tf from '@tensorflow/tfjs';
+import * as os from 'node:os';
+import LLM, {AnthropicConfig, OllamaConfig, OpenAiConfig, LLMRequest} from './llm';
+import { Audio } from './audio.ts';
+import {Vision} from './vision.ts';

-export type AiOptions = LLMOptions & {
-    whisper?: {
-        /** Whisper binary location */
-        binary: string;
-        /** Model: `ggml-base.en.bin` */
-        model: string;
-        /** Path to models */
-        path: string;
-        /** Path to storage location for temporary files */
-        temp?: string;
-    }
-}
+export type AbortablePromise<T> = Promise<T> & {
+    abort: () => any
+};
+
+export type AiOptions = {
+    /** Token to pull models from hugging face */
+    hfToken?: string;
+    /** Path to models */
+    path?: string;
+    /** ASR model: whisper-tiny, whisper-base */
+    asr?: string;
+    /** Embedding model: all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5 */
+    embedder?: string;
+    /** Large language models, first is default */
+    llm?: Omit<LLMRequest, 'model'> & {
+        models: {[model: string]: AnthropicConfig | OllamaConfig | OpenAiConfig};
+    }
+    /** OCR model: eng, eng_best, eng_fast */
+    ocr?: string;
+}

 export class Ai {
-    private downloads: {[key: string]: Promise<string>} = {};
-    private whisperModel!: string;
-
-    /** Large Language Models */
-    llm!: LLM;
+    /** Audio processing AI */
+    audio!: Audio;
+    /** Language processing AI */
+    language!: LLM;
+    /** Vision processing AI */
+    vision!: Vision;

     constructor(public readonly options: AiOptions) {
-        this.llm = new LLM(this, options);
-        if(this.options.whisper?.binary) {
-            this.whisperModel = this.options.whisper?.model.endsWith('.bin') ? this.options.whisper?.model : this.options.whisper?.model + '.bin';
-            console.log('constructor: ' + this.options.whisper.model + ' -> ' + this.whisperModel);
-            this.downloadAsrModel();
-        }
-    }
-
-    /**
-     * Convert audio to text using Auditory Speech Recognition
-     * @param {string} path Path to audio
-     * @param model Whisper model
-     * @returns {Promise<any>} Extracted text
-     */
-    async asr(path: string, model: string = this.whisperModel): Promise<string | null> {
-        if(!this.options.whisper?.binary) throw new Error('Whisper not configured');
-        const m = await this.downloadAsrModel(model);
-        const name = Math.random().toString(36).substring(2, 10) + '-' + path.split('/').pop() + '.txt';
-        const output = Path.join(this.options.whisper.temp || '/tmp', name);
-        console.log('ASR: ' + this.options.whisper.model + ' -> ' + this.whisperModel);
-        console.log(`rm -f ${output} && ${this.options.whisper.binary} -nt -np -m ${m} -f ${path} -otxt -of ${output}`);
-        await $`rm -f ${output} && ${this.options.whisper.binary} -nt -np -m ${m} -f ${path} -otxt -of ${output}`;
-        return fs.readFile(output, 'utf-8').then(text => text?.trim() || null)
-            .finally(() => fs.rm(output, {force: true}).catch(() => {}));
-    }
-
-    /**
-     * Downloads the specified Whisper model if it is not already present locally.
-     *
-     * @param {string} model Whisper model that will be downloaded
-     * @return {Promise<string>} Absolute path to model file, resolves once downloaded
-     */
-    async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
-        if(!this.options.whisper?.binary) throw new Error('Whisper not configured');
-        if(!model.endsWith('.bin')) model += '.bin';
-        const p = Path.join(this.options.whisper.path, model);
-        console.log('Download: ' + p);
-        if(await fs.stat(p).then(() => true).catch(() => false)) {
-            console.log('Exists!');
-            return p;
-        }
-        if(!!this.downloads[model]) return this.downloads[model];
-        this.downloads[model] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${model}`)
-            .then(resp => resp.arrayBuffer())
-            .then(arr => Buffer.from(arr)).then(async buffer => {
-                await fs.writeFile(p, buffer);
-                delete this.downloads[model];
-                return p;
-            });
-        return this.downloads[model];
-    }
-
-    /**
-     * Convert image to text using Optical Character Recognition
-     * @param {string} path Path to image
-     * @returns {{abort: Function, response: Promise<string | null>}} Abort function & Promise of extracted text
-     */
-    ocr(path: string): {abort: () => void, response: Promise<string | null>} {
-        let worker: any;
-        return {
-            abort: () => { worker?.terminate(); },
-            response: new Promise(async res => {
-                worker = await createWorker('eng');
-                const {data} = await worker.recognize(path);
-                await worker.terminate();
-                res(data.text.trim() || null);
-            })
-        }
-    }
-
-    /**
-     * Compare the difference between two strings using tensor math
-     * @param target Text that will checked
-     * @param {string} searchTerms Multiple search terms to check against target
-     * @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
-     */
-    semanticSimilarity(target: string, ...searchTerms: string[]) {
-        if(searchTerms.length < 2) throw new Error('Requires at least 2 strings to compare');
-        const vector = (text: string, dimensions: number = 10): number[] => {
-            return text.toLowerCase().split('').map((char, index) =>
-                (char.charCodeAt(0) * (index + 1)) % dimensions / dimensions).slice(0, dimensions);
-        }
-        const cosineSimilarity = (v1: number[], v2: number[]): number => {
-            if (v1.length !== v2.length) throw new Error('Vectors must be same length');
-            const tensor1 = tf.tensor1d(v1), tensor2 = tf.tensor1d(v2)
-            const dotProduct = tf.dot(tensor1, tensor2)
-            const magnitude1 = tf.norm(tensor1)
-            const magnitude2 = tf.norm(tensor2)
-            if(magnitude1.dataSync()[0] === 0 || magnitude2.dataSync()[0] === 0) return 0
-            return dotProduct.dataSync()[0] / (magnitude1.dataSync()[0] * magnitude2.dataSync()[0])
-        }
-        const v = vector(target);
-        const similarities = searchTerms.map(t => vector(t)).map(refVector => cosineSimilarity(v, refVector))
-        return {avg: similarities.reduce((acc, s) => acc + s, 0) / similarities.length, max: Math.max(...similarities), similarities}
-    }
-}
+        if(!options.path) options.path = os.tmpdir();
+        process.env.TRANSFORMERS_CACHE = options.path;
+        this.audio = new Audio(this);
+        this.language = new LLM(this);
+        this.vision = new Vision(this);
+    }
+}
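
For orientation, a minimal construction of the rewritten facade under the new `AiOptions` shape. This is a sketch: the model names, environment variables, and paths below are placeholders, not library defaults.

```ts
import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({
    hfToken: process.env.HF_TOKEN,   // Only needed for speaker diarization
    path: '/var/cache/ai-models',    // Falls back to os.tmpdir() when omitted
    asr: 'whisper-base',
    embedder: 'bge-small-en-v1.5',
    llm: {
        system: 'You are a helpful assistant',
        models: {
            // The first entry becomes the default model
            'claude-sonnet': {proto: 'anthropic', token: process.env.ANTHROPIC_KEY!},
            'gpt-4o': {proto: 'openai', token: process.env.OPENAI_KEY!},
        },
    },
});
```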

src/antrhopic.ts

@@ -1,8 +1,8 @@
 import {Anthropic as anthropic} from '@anthropic-ai/sdk';
 import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
-import {Ai} from './ai.ts';
+import {AbortablePromise, Ai} from './ai.ts';
 import {LLMMessage, LLMRequest} from './llm.ts';
-import {AbortablePromise, LLMProvider} from './provider.ts';
+import {LLMProvider} from './provider.ts';

 export class Anthropic extends LLMProvider {
     client!: anthropic;
@@ -13,24 +13,25 @@ export class Anthropic extends LLMProvider {
     }

     private toStandard(history: any[]): LLMMessage[] {
-        for(let i = 0; i < history.length; i++) {
-            const orgI = i;
-            if(typeof history[orgI].content != 'string') {
-                if(history[orgI].role == 'assistant') {
-                    history[orgI].content.filter((c: any) => c.type =='tool_use').forEach((c: any) => {
-                        i++;
-                        history.splice(i, 0, {role: 'tool', id: c.id, name: c.name, args: c.input});
-                    });
-                } else if(history[orgI].role == 'user') {
-                    history[orgI].content.filter((c: any) => c.type =='tool_result').forEach((c: any) => {
-                        const h = history.find((h: any) => h.id == c.tool_use_id);
-                        h[c.is_error ? 'error' : 'content'] = c.content;
-                    });
-                }
-                history[orgI].content = history[orgI].content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
+        const timestamp = Date.now();
+        const messages: LLMMessage[] = [];
+        for(let h of history) {
+            if(typeof h.content == 'string') {
+                messages.push(<any>{timestamp, ...h});
+            } else {
+                const textContent = h.content?.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
+                if(textContent) messages.push({timestamp, role: h.role, content: textContent});
+                h.content.forEach((c: any) => {
+                    if(c.type == 'tool_use') {
+                        messages.push({timestamp, role: 'tool', id: c.id, name: c.name, args: c.input, content: undefined});
+                    } else if(c.type == 'tool_result') {
+                        const m: any = messages.findLast(m => (<any>m).id == c.tool_use_id);
+                        if(m) m[c.is_error ? 'error' : 'content'] = c.content;
+                    }
+                });
             }
         }
-        return history.filter(h => !!h.content);
+        return messages;
     }

     private fromStandard(history: LLMMessage[]): any[] {
@@ -44,20 +45,20 @@ export class Anthropic extends LLMProvider {
                 i++;
             }
         }
-        return history;
+        return history.map(({timestamp, ...h}) => h);
     }

-    ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
+    ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
         const controller = new AbortController();
-        const response = new Promise<any>(async (res, rej) => {
-            let history = this.fromStandard([...options.history || [], {role: 'user', content: message}]);
-            if(options.compress) history = await this.ai.llm.compress(<any>history, options.compress.max, options.compress.min, options);
+        return Object.assign(new Promise<any>(async (res) => {
+            let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
+            const tools = options.tools || this.ai.options.llm?.tools || [];
             const requestParams: any = {
                 model: options.model || this.model,
-                max_tokens: options.max_tokens || this.ai.options.max_tokens || 4096,
-                system: options.system || this.ai.options.system || '',
-                temperature: options.temperature || this.ai.options.temperature || 0.7,
-                tools: (options.tools || this.ai.options.tools || []).map(t => ({
+                max_tokens: options.max_tokens || this.ai.options.llm?.max_tokens || 4096,
+                system: options.system || this.ai.options.llm?.system || '',
+                temperature: options.temperature || this.ai.options.llm?.temperature || 0.7,
+                tools: tools.map(t => ({
                     name: t.name,
                     description: t.description,
                     input_schema: {
@@ -71,13 +72,17 @@ export class Anthropic extends LLMProvider {
                 stream: !!options.stream,
             };

-            // Run tool changes
-            let resp: any;
+            let resp: any, isFirstMessage = true;
             do {
-                resp = await this.client.messages.create(requestParams);
+                resp = await this.client.messages.create(requestParams).catch(err => {
+                    err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
+                    throw err;
+                });

                 // Streaming mode
                 if(options.stream) {
+                    if(!isFirstMessage) options.stream({text: '\n\n'});
+                    else isFirstMessage = false;
                     resp.content = [];
                     for await (const chunk of resp) {
                         if(controller.signal.aborted) break;
@@ -109,10 +114,11 @@ export class Anthropic extends LLMProvider {
                 if(toolCalls.length && !controller.signal.aborted) {
                     history.push({role: 'assistant', content: resp.content});
                     const results = await Promise.all(toolCalls.map(async (toolCall: any) => {
-                        const tool = options.tools?.find(findByProp('name', toolCall.name));
+                        const tool = tools.find(findByProp('name', toolCall.name));
+                        if(options.stream) options.stream({tool: toolCall.name});
                         if(!tool) return {tool_use_id: toolCall.id, is_error: true, content: 'Tool not found'};
                         try {
-                            const result = await tool.fn(toolCall.input, this.ai);
+                            const result = await tool.fn(toolCall.input, options?.stream, this.ai);
                             return {type: 'tool_result', tool_use_id: toolCall.id, content: JSONSanitize(result)};
                         } catch (err: any) {
                             return {type: 'tool_result', tool_use_id: toolCall.id, is_error: true, content: err?.message || err?.toString() || 'Unknown'};
@@ -122,12 +128,12 @@ export class Anthropic extends LLMProvider {
                     requestParams.messages = history;
                 }
             } while (!controller.signal.aborted && resp.content.some((c: any) => c.type === 'tool_use'));
+            history.push({role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')});
+            history = this.toStandard(history);
             if(options.stream) options.stream({done: true});
-            res(this.toStandard([...history, {
-                role: 'assistant',
-                content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')
-            }]));
-        });
-        return Object.assign(response, {abort: () => controller.abort()});
+            if(options.history) options.history.splice(0, options.history.length, ...history);
+            res(history.at(-1)?.content);
+        }), {abort: () => controller.abort()});
     }
 }
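
The behavioural consequence of this hunk: `ask()` now resolves to the assistant's final text instead of a message array, and `options.history` is rewritten in place with the standardized transcript. A usage sketch (prompt text is illustrative; assumes an `ai` instance as above):

```ts
const history: LLMMessage[] = [];
const req = ai.language.ask('What changed in 0.7.x?', {
    history,
    stream: chunk => {
        if(chunk.text) process.stdout.write(chunk.text);
        if(chunk.tool) console.error(`\n[running tool: ${chunk.tool}]`);
    },
});
// req.abort(); // Stop generation early if needed
const answer: string = await req;
// `history` now holds the full exchange, including any tool calls
```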

src/asr.ts (new file, 134 lines)

@@ -0,0 +1,134 @@
import { pipeline } from '@xenova/transformers';
import { parentPort } from 'worker_threads';
import { spawn } from 'node:child_process';
import { execSync } from 'node:child_process';
import { mkdtempSync, rmSync, readFileSync } from 'node:fs';
import { join } from 'node:path';
import { tmpdir } from 'node:os';
import wavefile from 'wavefile';

let whisperPipeline: any;

export async function canDiarization(): Promise<boolean> {
    return new Promise((resolve) => {
        const proc = spawn('python', ['-c', 'import pyannote.audio']);
        proc.on('close', (code: number) => resolve(code === 0));
        proc.on('error', () => resolve(false));
    });
}

async function runDiarization(audioPath: string, dir: string, token: string): Promise<any[]> {
    const script = `
import sys
import json
import os
from pyannote.audio import Pipeline
os.environ['TORCH_HOME'] = r"${dir}"
pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1", token="${token}")
output = pipeline(sys.argv[1])
segments = []
for turn, speaker in output.speaker_diarization:
    segments.append({"start": turn.start, "end": turn.end, "speaker": speaker})
print(json.dumps(segments))
`;
    return new Promise((resolve, reject) => {
        let output = '';
        const proc = spawn('python', ['-c', script, audioPath]);
        proc.stdout.on('data', (data: Buffer) => output += data.toString());
        proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
        proc.on('close', (code: number) => {
            if(code === 0) {
                try {
                    resolve(JSON.parse(output));
                } catch (err) {
                    reject(new Error('Failed to parse diarization output'));
                }
            } else {
                reject(new Error(`Python process exited with code ${code}`));
            }
        });
        proc.on('error', reject);
    });
}

function combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
    const speakerMap = new Map();
    let speakerCount = 0;
    speakers.forEach((seg: any) => {
        if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
    });
    const lines: string[] = [];
    let currentSpeaker = -1;
    let currentText = '';
    chunks.forEach((chunk: any) => {
        const time = chunk.timestamp[0];
        const speaker = speakers.find((s: any) => time >= s.start && time <= s.end);
        const speakerNum = speaker ? speakerMap.get(speaker.speaker) : 1;
        if (speakerNum !== currentSpeaker) {
            if(currentText) lines.push(`[Speaker ${currentSpeaker}]: ${currentText.trim()}`);
            currentSpeaker = speakerNum;
            currentText = chunk.text;
        } else {
            currentText += chunk.text;
        }
    });
    if(currentText) lines.push(`[Speaker ${currentSpeaker}]: ${currentText.trim()}`);
    return lines.join('\n');
}

function prepareAudioBuffer(file: string): [string, Float32Array] {
    let wav: any, tmp;
    try {
        wav = new wavefile.WaveFile(readFileSync(file));
    } catch(err) {
        tmp = join(mkdtempSync(join(tmpdir(), 'audio-')), 'converted.wav');
        execSync(`ffmpeg -i "${file}" -ar 16000 -ac 1 -f wav "${tmp}"`, { stdio: 'ignore' });
        wav = new wavefile.WaveFile(readFileSync(tmp));
    } finally {
        wav.toBitDepth('32f');
        wav.toSampleRate(16000);
        const samples = wav.getSamples();
        if(Array.isArray(samples)) {
            const left = samples[0];
            const right = samples[1];
            const buffer = new Float32Array(left.length);
            for (let i = 0; i < left.length; i++) buffer[i] = (left[i] + right[i]) / 2;
            return [tmp || file, buffer];
        }
        return [tmp || file, samples];
    }
}

parentPort?.on('message', async ({ file, speaker, model, modelDir, token }) => {
    try {
        if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
        // Prepare audio file
        const [f, buffer] = prepareAudioBuffer(file);
        // Fetch transcript and speakers
        const hasDiarization = speaker && await canDiarization();
        const [transcript, speakers] = await Promise.all([
            whisperPipeline(buffer, {return_timestamps: speaker ? 'word' : false}),
            (!speaker || !token || !hasDiarization) ? Promise.resolve() : runDiarization(f, modelDir, token),
        ]);
        if(file != f) rmSync(f, { recursive: true, force: true });
        // Return any results / errors if no more processing required
        const text = transcript.text?.trim() || null;
        if(!speaker) return parentPort?.postMessage({ text });
        if(!token) return parentPort?.postMessage({ text, error: 'HuggingFace token required' });
        if(!hasDiarization) return parentPort?.postMessage({ text, error: 'Speaker diarization unavailable' });
        // Combine transcript and speakers
        const combined = combineSpeakerTranscript(transcript.chunks || [], speakers || []);
        parentPort?.postMessage({ text: combined });
    } catch (err: any) {
        parentPort?.postMessage({ error: err.stack || err.message });
    }
});
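
Since diarization shells out to Python, callers can probe for the dependency up front. A small sketch using the exported check:

```ts
import {canDiarization} from '@ztimson/ai-utils';

if(!await canDiarization()) {
    console.warn('pyannote.audio is not importable; transcripts will come back without speaker labels');
}
```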

src/audio.ts (new file, 60 lines)

@@ -0,0 +1,60 @@
import {fileURLToPath} from 'url';
import {Worker} from 'worker_threads';
import {AbortablePromise, Ai} from './ai.ts';
import {canDiarization} from './asr.ts';
import {dirname, join} from 'path';

export class Audio {
    constructor(private ai: Ai) {}

    asr(file: string, options: { model?: string; speaker?: boolean | 'id' } = {}): AbortablePromise<string | null> {
        const { model = this.ai.options.asr || 'whisper-base', speaker = false } = options;
        let aborted = false;
        const abort = () => { aborted = true; };
        let p = new Promise<string | null>((resolve, reject) => {
            const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'asr.js'));
            const handleMessage = ({ text, warning, error }: any) => {
                worker.terminate();
                if(aborted) return;
                if(error) reject(new Error(error));
                else {
                    if(warning) console.warn(warning);
                    resolve(text);
                }
            };
            const handleError = (err: Error) => {
                worker.terminate();
                if(!aborted) reject(err);
            };
            worker.on('message', handleMessage);
            worker.on('error', handleError);
            worker.on('exit', (code) => {
                if(code !== 0 && !aborted) reject(new Error(`Worker exited with code ${code}`));
            });
            worker.postMessage({file, model, speaker, modelDir: this.ai.options.path, token: this.ai.options.hfToken});
        });
        // Name speakers using AI
        if(options.speaker == 'id') {
            if(!this.ai.language.defaultModel) throw new Error('Configure an LLM for advanced ASR speaker detection');
            p = p.then(async transcript => {
                if(!transcript) return transcript;
                let chunks = this.ai.language.chunk(transcript, 500, 0);
                if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
                const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name"}', {
                    system: 'Use this following transcript to identify speakers. Only identify speakers you are sure about',
                    temperature: 0.1,
                });
                Object.entries(names).forEach(([speaker, name]) => {
                    transcript = (<string>transcript).replaceAll(`[Speaker ${speaker}]`, `[${name}]`);
                });
                return transcript;
            });
        }
        return Object.assign(p, { abort });
    }

    canDiarization = canDiarization;
}
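
A usage sketch of the new ASR entry point; the file path and output are illustrative. `speaker: true` labels turns as `[Speaker N]`, while `'id'` additionally asks the default LLM to swap the numbers for detected names:

```ts
const job = ai.audio.asr('./standup.wav', {speaker: 'id'});
// job.abort(); // Discards the result; the worker is torn down on its next message
const transcript = await job;
// e.g. "[Alice]: Morning everyone...\n[Bob]: Quick update on the build..."
```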

src/embedder.ts (new file, 11 lines)

@@ -0,0 +1,11 @@
import { pipeline } from '@xenova/transformers';
import { parentPort } from 'worker_threads';

let embedder: any;

parentPort?.on('message', async ({text, model, modelDir }) => {
    if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model, {quantized: true, cache_dir: modelDir});
    const output = await embedder(text, { pooling: 'mean', normalize: true });
    const embedding = Array.from(output.data);
    parentPort?.postMessage({embedding});
});
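
The worker's message contract in isolation, should anyone drive it directly; the compiled path here is an assumption, and `LLM.embedding()` below is the normal entry point:

```ts
import {Worker} from 'worker_threads';

const worker = new Worker('./dist/embedder.js');
worker.postMessage({text: 'Hello world', model: 'bge-small-en-v1.5', modelDir: '/tmp/models'});
worker.on('message', ({embedding}: {embedding: number[]}) => {
    console.log(embedding.length); // 384 dimensions for bge-small-en-v1.5
    worker.terminate();
});
```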

src/index.ts

@@ -1,4 +1,10 @@
 export * from './ai';
 export * from './antrhopic';
+export * from './asr';
+export * from './audio';
+export * from './embedder'
 export * from './llm';
+export * from './open-ai';
+export * from './provider';
 export * from './tools';
+export * from './vision';

src/llm.ts

@@ -1,16 +1,24 @@
 import {JSONAttemptParse} from '@ztimson/utils';
-import {Ai} from './ai.ts';
+import {AbortablePromise, Ai} from './ai.ts';
 import {Anthropic} from './antrhopic.ts';
-import {Ollama} from './ollama.ts';
 import {OpenAi} from './open-ai.ts';
-import {AbortablePromise, LLMProvider} from './provider.ts';
+import {LLMProvider} from './provider.ts';
 import {AiTool} from './tools.ts';
+import {Worker} from 'worker_threads';
+import {fileURLToPath} from 'url';
+import {dirname, join} from 'path';
+
+export type AnthropicConfig = {proto: 'anthropic', token: string};
+export type OllamaConfig = {proto: 'ollama', host: string};
+export type OpenAiConfig = {proto: 'openai', host?: string, token: string};

 export type LLMMessage = {
     /** Message originator */
     role: 'assistant' | 'system' | 'user';
     /** Message content */
     content: string | any;
+    /** Timestamp */
+    timestamp?: number;
 } | {
     /** Tool call */
     role: 'tool';
@@ -23,34 +31,22 @@
     /** Tool result */
     content: undefined | string;
     /** Tool error */
-    error: undefined | string;
+    error?: undefined | string;
+    /** Timestamp */
+    timestamp?: number;
 }

-export type LLMOptions = {
-    /** Anthropic settings */
-    anthropic?: {
-        /** API Token */
-        token: string;
-        /** Default model */
-        model: string;
-    },
-    /** Ollama settings */
-    ollama?: {
-        /** connection URL */
-        host: string;
-        /** Default model */
-        model: string;
-    },
-    /** Open AI settings */
-    openAi?: {
-        /** API Token */
-        token: string;
-        /** Default model */
-        model: string;
-    },
-    /** Default provider & model */
-    model: string | [string, string];
-} & Omit<LLMRequest, 'model'>;
+/** Background information the AI will be fed */
+export type LLMMemory = {
+    /** What entity is this fact about */
+    owner: string;
+    /** The information that will be remembered */
+    fact: string;
+    /** Owner and fact embedding vector */
+    embeddings: [number[], number[]];
+    /** Creation time */
+    timestamp: Date;
+}

 export type LLMRequest = {
     /** System prompt */
@@ -64,68 +60,239 @@ export type LLMRequest = {
     /** Available tools */
     tools?: AiTool[];
     /** LLM model */
-    model?: string | [string, string];
+    model?: string;
     /** Stream response */
-    stream?: (chunk: {text?: string, done?: true}) => any;
+    stream?: (chunk: {text?: string, tool?: string, done?: true}) => any;
     /** Compress old messages in the chat to free up context */
     compress?: {
         /** Trigger chat compression once context exceeds the token count */
         max: number;
         /** Compress chat until context size smaller than */
         min: number
-    }
+    },
+    /** Background information the AI will be fed */
+    memory?: LLMMemory[],
 }

-export class LLM {
-    private providers: {[key: string]: LLMProvider} = {};
+class LLM {
+    defaultModel!: string;
+    models: {[model: string]: LLMProvider} = {};

-    constructor(public readonly ai: Ai, public readonly options: LLMOptions) {
-        if(options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, options.anthropic.token, options.anthropic.model);
-        if(options.ollama?.host) this.providers.ollama = new Ollama(this.ai, options.ollama.host, options.ollama.model);
-        if(options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, options.openAi.token, options.openAi.model);
+    constructor(public readonly ai: Ai) {
+        if(!ai.options.llm?.models) return;
+        Object.entries(ai.options.llm.models).forEach(([model, config]) => {
+            if(!this.defaultModel) this.defaultModel = model;
+            if(config.proto == 'anthropic') this.models[model] = new Anthropic(this.ai, config.token, model);
+            else if(config.proto == 'ollama') this.models[model] = new OpenAi(this.ai, config.host, 'not-needed', model);
+            else if(config.proto == 'openai') this.models[model] = new OpenAi(this.ai, config.host || null, config.token, model);
+        });
     }

     /**
      * Chat with LLM
      * @param {string} message Question
      * @param {LLMRequest} options Configuration options and chat history
-     * @returns {{abort: () => void, response: Promise<LLMMessage[]>}} Function to abort response and chat history
+     * @returns {{abort: () => void, response: Promise<string>}} Function to abort response and chat history
      */
-    ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
-        let model: any = [null, null];
-        if(options.model) {
-            if(typeof options.model == 'object') model = options.model;
-            else model = [options.model, (<any>this.options)[options.model]?.model];
-        }
-        if(!options.model || model[1] == null) {
-            if(typeof this.options.model == 'object') model = this.options.model;
-            else model = [this.options.model, (<any>this.options)[this.options.model]?.model];
-        }
-        if(!model[0] || !model[1]) throw new Error(`Unknown LLM provider or model: ${model[0]} / ${model[1]}`);
-        return this.providers[model[0]].ask(message, {...options, model: model[1]});
+    ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
+        const m = options.model || this.defaultModel;
+        if(!this.models[m]) throw new Error(`Model does not exist: ${m}`);
+        let abort = () => {};
+        return Object.assign(new Promise<string>(async res => {
+            if(!options.history) options.history = [];
+            // If memories were passed, find any relivant ones and add a tool for ADHOC lookups
+            if(options.memory) {
+                options.system = (options.system || '') + '\nYou have passive persistent memory never make any mention of your memory capabilities and what you can/cannot remember\n';
+                const search = async (query?: string | null, subject?: string | null, limit = 50) => {
+                    const [o, q] = await Promise.all([
+                        subject ? this.embedding(subject) : Promise.resolve(null),
+                        query ? this.embedding(query) : Promise.resolve(null),
+                    ]);
+                    return (options.memory || [])
+                        .map(m => ({...m, score: o ? this.cosineSimilarity(m.embeddings[0], o[0].embedding) : 1}))
+                        .filter((m: any) => m.score >= 0.8)
+                        .map((m: any) => ({...m, score: q ? this.cosineSimilarity(m.embeddings[1], q[0].embedding) : m.score}))
+                        .filter((m: any) => m.score >= 0.2)
+                        .toSorted((a: any, b: any) => a.score - b.score)
+                        .slice(0, limit);
+                }
+                const relevant = await search(message);
+                if(relevant.length) options.history.push({role: 'assistant', content: 'Things I remembered:\n' + relevant.map(m => `${m.owner}: ${m.fact}`).join('\n')});
+                options.tools = [...options.tools || [], {
+                    name: 'read_memory',
+                    description: 'Check your long-term memory for more information',
+                    args: {
+                        subject: {type: 'string', description: 'Find information by a subject topic, can be used with or without query argument'},
+                        query: {type: 'string', description: 'Search memory based on a query, can be used with or without subject argument'},
+                        limit: {type: 'number', description: 'Result limit, default 5'},
+                    },
+                    fn: (args) => {
+                        if(!args.subject && !args.query) throw new Error('Either a subject or query argument is required');
+                        return search(args.query, args.subject, args.limit || 5);
+                    }
+                }];
+            }
+            // Ask
+            const resp = await this.models[m].ask(message, options);
+            // Remove any memory calls
+            if(options.memory) {
+                const i = options.history?.findIndex((h: any) => h.role == 'assistant' && h.content.startsWith('Things I remembered:'));
+                if(i != null && i >= 0) options.history?.splice(i, 1);
+            }
+            // Handle compression and memory extraction
+            if(options.compress || options.memory) {
+                let compressed = null;
+                if(options.compress) {
+                    compressed = await this.ai.language.compressHistory(options.history, options.compress.max, options.compress.min, options);
+                    options.history.splice(0, options.history.length, ...compressed.history);
+                } else {
+                    const i = options.history?.findLastIndex(m => m.role == 'user') ?? -1;
+                    compressed = await this.ai.language.compressHistory(i != -1 ? options.history.slice(i) : options.history, 0, 0, options);
+                }
+                if(options.memory) {
+                    const updated = options.memory
+                        .filter(m => !compressed.memory.some(m2 => this.cosineSimilarity(m.embeddings[1], m2.embeddings[1]) > 0.8))
+                        .concat(compressed.memory);
+                    options.memory.splice(0, options.memory.length, ...updated);
+                }
+            }
+            return res(resp);
+        }), {abort});
     }

     /**
      * Compress chat history to reduce context size
      * @param {LLMMessage[]} history Chatlog that will be compressed
      * @param max Trigger compression once context is larger than max
-     * @param min Summarize until context size is less than min
+     * @param min Leave messages less than the token minimum, summarize the rest
      * @param {LLMRequest} options LLM options
      * @returns {Promise<LLMMessage[]>} New chat history will summary at index 0
      */
-    async compress(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
-        if(this.estimateTokens(history) < max) return history;
+    async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<{history: LLMMessage[], memory: LLMMemory[]}> {
+        if(this.estimateTokens(history) < max) return {history, memory: []};
         let keep = 0, tokens = 0;
         for(let m of history.toReversed()) {
             tokens += this.estimateTokens(m.content);
             if(tokens < min) keep++;
             else break;
         }
-        if(history.length <= keep) return history;
-        const recent = keep == 0 ? [] : history.slice(-keep),
+        if(history.length <= keep) return {history, memory: []};
+        const system = history[0].role == 'system' ? history[0] : null,
+            recent = keep == 0 ? [] : history.slice(-keep),
             process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
-        const summary = await this.summarize(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), 250, options);
-        return [{role: 'assistant', content: `Conversation Summary: ${summary}`}, ...recent];
+        const summary: any = await this.json(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), '{summary: string, facts: [[subject, fact]]}', {
+            system: 'Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn\'t already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI.',
+            model: options?.model,
+            temperature: options?.temperature || 0.3
+        });
+        const timestamp = new Date();
+        const memory = await Promise.all((summary?.facts || [])?.map(async ([owner, fact]: [string, string]) => {
+            const e = await Promise.all([this.embedding(owner), this.embedding(`${owner}: ${fact}`)]);
+            return {owner, fact, embeddings: [e[0][0].embedding, e[1][0].embedding], timestamp};
+        }));
+        const h = [{role: 'assistant', content: `Conversation Summary: ${summary?.summary}`, timestamp: Date.now()}, ...recent];
+        if(system) h.splice(0, 0, system);
+        return {history: <any>h, memory};
+    }
+    /**
+     * Compare the difference between embeddings (calculates the angle between two vectors)
+     * @param {number[]} v1 First embedding / vector comparison
+     * @param {number[]} v2 Second embedding / vector for comparison
+     * @returns {number} Similarity values 0-1: 0 = unique, 1 = identical
+     */
+    cosineSimilarity(v1: number[], v2: number[]): number {
+        if (v1.length !== v2.length) throw new Error('Vectors must be same length');
+        let dotProduct = 0, normA = 0, normB = 0;
+        for (let i = 0; i < v1.length; i++) {
+            dotProduct += v1[i] * v2[i];
+            normA += v1[i] * v1[i];
+            normB += v2[i] * v2[i];
+        }
+        const denominator = Math.sqrt(normA) * Math.sqrt(normB);
+        return denominator === 0 ? 0 : dotProduct / denominator;
+    }
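
Paired with `embedding()` below, this plain-loop implementation replaces the old TensorFlow-backed comparison (see the "Optimized cosineSimilarity" commit). A quick sketch; scores are model-dependent:

```ts
const [a] = await ai.language.embedding('The cat sat on the mat');
const [b] = await ai.language.embedding('A feline rested on a rug');
const score = ai.language.cosineSimilarity(a.embedding, b.embedding);
// score near 1 = semantically similar, near 0 = unrelated
```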
+    /**
+     * Chunk text into parts for AI digestion
+     * @param {object | string} target Item that will be chunked (objects get converted)
+     * @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
+     * @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
+     * @returns {string[]} Chunked strings
+     */
+    chunk(target: object | string, maxTokens = 500, overlapTokens = 50): string[] {
+        const objString = (obj: any, path = ''): string[] => {
+            if(!obj) return [];
+            return Object.entries(obj).flatMap(([key, value]) => {
+                const p = path ? `${path}${isNaN(+key) ? `.${key}` : `[${key}]`}` : key;
+                if(typeof value === 'object' && !Array.isArray(value)) return objString(value, p);
+                return `${p}: ${Array.isArray(value) ? value.join(', ') : value}`;
+            });
+        };
+        const lines = typeof target === 'object' ? objString(target) : target.split('\n');
+        const tokens = lines.flatMap(l => [...l.split(/\s+/).filter(Boolean), '\n']);
+        const chunks: string[] = [];
+        for(let i = 0; i < tokens.length;) {
+            let text = '', j = i;
+            while(j < tokens.length) {
+                const next = text + (text ? ' ' : '') + tokens[j];
+                if(this.estimateTokens(next.replace(/\s*\n\s*/g, '\n')) > maxTokens && text) break;
+                text = next;
+                j++;
+            }
+            const clean = text.replace(/\s*\n\s*/g, '\n').trim();
+            if(clean) chunks.push(clean);
+            i = Math.max(j - overlapTokens, j === i ? i + 1 : j);
+        }
+        return chunks;
+    }
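
This is the same chunker `Audio.asr` uses to trim transcripts before speaker identification. A sketch of both input forms (values illustrative):

```ts
// Plain text is split on whitespace into ~500-token chunks with a 50-token overlap
const chunks = ai.language.chunk(transcript, 500, 50);

// Objects are flattened to "path: value" lines first
ai.language.chunk({user: {name: 'Zak', roles: ['admin', 'dev']}});
// => ['user.name: Zak\nuser.roles: admin, dev']
```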
+    /**
+     * Create a vector representation of a string
+     * @param {object | string} target Item that will be embedded (objects get converted)
+     * @param {maxTokens?: number, overlapTokens?: number, parellel?: number} opts Options for embedding such as chunk sizes and parallel processing
+     * @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
+     */
+    async embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number, parallel?: number} = {}) {
+        let {maxTokens = 500, overlapTokens = 50, parallel = 1} = opts;
+        const embed = (text: string): Promise<number[]> => {
+            return new Promise((resolve, reject) => {
+                const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
+                const handleMessage = ({ embedding }: any) => {
+                    worker.terminate();
+                    resolve(embedding);
+                };
+                const handleError = (err: Error) => {
+                    worker.terminate();
+                    reject(err);
+                };
+                worker.on('message', handleMessage);
+                worker.on('error', handleError);
+                worker.on('exit', (code) => {
+                    if(code !== 0) reject(new Error(`Worker exited with code ${code}`));
+                });
+                worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', modelDir: this.ai.options.path});
+            });
+        };
+        let i = 0, chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
+        const next: Function = () => {
+            const index = i++;
+            if(index >= chunks.length) return;
+            const text = chunks[index];
+            return embed(text).then(embedding => {
+                results.push({index, embedding, text, tokens: this.estimateTokens(text)});
+                return next();
+            })
+        }
+        await Promise.all(Array(parallel).fill(null).map(() => next()));
+        return results.toSorted((a, b) => a.index - b.index);
+    }
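
This is the method the "parallel embedding cap" commit tunes: `parallel` bounds how many one-off workers run at once, each picking up the next chunk as it finishes. A sketch, assuming `documentText` is any long string:

```ts
const vectors = await ai.language.embedding(documentText, {maxTokens: 500, overlapTokens: 50, parallel: 4});
// Each result carries enough to build a vector index
vectors.forEach(v => console.log(v.index, v.tokens, v.embedding.length));
```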
     /**
@@ -138,19 +305,39 @@ export class LLM {
         return Math.ceil((text.length / 4) * 1.2);
     }

+    /**
+     * Compare the difference between two strings using tensor math
+     * @param target Text that will be checked
+     * @param {string} searchTerms Multiple search terms to check against target
+     * @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
+     */
+    fuzzyMatch(target: string, ...searchTerms: string[]) {
+        if(searchTerms.length < 2) throw new Error('Requires at least 2 strings to compare');
+        const vector = (text: string, dimensions: number = 10): number[] => {
+            return text.toLowerCase().split('').map((char, index) =>
+                (char.charCodeAt(0) * (index + 1)) % dimensions / dimensions).slice(0, dimensions);
+        }
+        const v = vector(target);
+        const similarities = searchTerms.map(t => vector(t)).map(refVector => this.cosineSimilarity(v, refVector))
+        return {avg: similarities.reduce((acc, s) => acc + s, 0) / similarities.length, max: Math.max(...similarities), similarities}
+    }
+
     /**
      * Ask a question with JSON response
-     * @param {string} message Question
+     * @param {string} text Text to process
+     * @param {string} schema JSON schema the AI should match
      * @param {LLMRequest} options Configuration options and chat history
      * @returns {Promise<{} | {} | RegExpExecArray | null>}
      */
-    async json(message: string, options?: LLMRequest) {
-        let resp = await this.ask(message, {
-            system: 'Respond using a JSON blob',
-            ...options
-        });
-        if(!resp?.[0]?.content) return {};
-        return JSONAttemptParse(new RegExp('\{[\s\S]*\}').exec(resp[0].content), {});
+    async json(text: string, schema: string, options?: LLMRequest): Promise<any> {
+        let resp = await this.ask(text, {...options, system: (options?.system ? `${options.system}\n` : '') + `Only respond using a JSON code block matching this schema:
+\`\`\`json
+${schema}
+\`\`\``});
+        if(!resp) return {};
+        const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
+        const jsonStr = codeBlock ? codeBlock[1].trim() : resp;
+        return JSONAttemptParse(jsonStr, {});
     }
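
The improved contract (see the "Improved LLM json function" commit) takes an explicit schema and unwraps fenced responses before parsing. A sketch; the schema and input are illustrative:

```ts
const parsed = await ai.language.json(
    emailBody,
    '{people: [{name: string, email: string}]}',
    {temperature: 0.1},
);
// => e.g. {people: [{name: 'Zak Timson', email: 'zak@example.com'}]}
```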
     /**
@@ -161,7 +348,8 @@ export class LLM {
      * @returns {Promise<string>} Summary
      */
     summarize(text: string, tokens: number, options?: LLMRequest): Promise<string | null> {
-        return this.ask(text, {system: `Generate a brief summary <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options})
-            .then(history => <string>history.pop()?.content || null);
+        return this.ask(text, {system: `Generate a brief summary <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options});
     }
 }
+
+export default LLM;
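
Putting the new memory plumbing together: facts distilled by `compressHistory()` accumulate in a caller-owned `LLMMemory[]` and are surfaced again on later calls, both passively and through the injected `read_memory` tool. A sketch (extracted wording is model-dependent):

```ts
const memory: LLMMemory[] = [];
await ai.language.ask('My dog is named Biscuit', {history: [], memory});
// compressHistory() has distilled facts such as ['user', 'has a dog named Biscuit'] into `memory`

const reply = await ai.language.ask('What is my dog called?', {history: [], memory});
// Relevant memories are fed back in, and the model can call read_memory for more
```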

src/ollama.ts (deleted)

@@ -1,113 +0,0 @@
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {LLMMessage, LLMRequest} from './llm.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
import {Ollama as ollama} from 'ollama';

export class Ollama extends LLMProvider {
    client!: ollama;

    constructor(public readonly ai: Ai, public host: string, public model: string) {
        super();
        this.client = new ollama({host});
    }

    private toStandard(history: any[]): LLMMessage[] {
        for(let i = 0; i < history.length; i++) {
            if(history[i].role == 'assistant' && history[i].tool_calls) {
                if(history[i].content) delete history[i].tool_calls;
                else {
                    history.splice(i, 1);
                    i--;
                }
            } else if(history[i].role == 'tool') {
                const error = history[i].content.startsWith('{"error":');
                history[i] = {role: 'tool', name: history[i].tool_name, args: history[i].args, [error ? 'error' : 'content']: history[i].content};
            }
        }
        return history;
    }

    private fromStandard(history: LLMMessage[]): any[] {
        return history.map((h: any) => {
            if(h.role != 'tool') return h;
            return {role: 'tool', tool_name: h.name, content: h.error || h.content}
        });
    }

    ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
        const controller = new AbortController();
        const response = new Promise<any>(async (res, rej) => {
            let system = options.system || this.ai.options.system;
            let history = this.fromStandard([...options.history || [], {role: 'user', content: message}]);
            if(history[0].roll == 'system') {
                if(!system) system = history.shift();
                else history.shift();
            }
            if(options.compress) history = await this.ai.llm.compress(<any>history, options.compress.max, options.compress.min);
            if(options.system) history.unshift({role: 'system', content: system})
            const requestParams: any = {
                model: options.model || this.model,
                messages: history,
                stream: !!options.stream,
                signal: controller.signal,
                options: {
                    temperature: options.temperature || this.ai.options.temperature || 0.7,
                    num_predict: options.max_tokens || this.ai.options.max_tokens || 4096,
                },
                tools: (options.tools || this.ai.options.tools || []).map(t => ({
                    type: 'function',
                    function: {
                        name: t.name,
                        description: t.description,
                        parameters: {
                            type: 'object',
                            properties: t.args ? objectMap(t.args, (key, value) => ({...value, required: undefined})) : {},
                            required: t.args ? Object.entries(t.args).filter(t => t[1].required).map(t => t[0]) : []
                        }
                    }
                }))
            }
            // Run tool chains
            let resp: any;
            do {
                resp = await this.client.chat(requestParams);
                if(options.stream) {
                    resp.message = {role: 'assistant', content: '', tool_calls: []};
                    for await (const chunk of resp) {
                        if(controller.signal.aborted) break;
                        if(chunk.message?.content) {
                            resp.message.content += chunk.message.content;
                            options.stream({text: chunk.message.content});
                        }
                        if(chunk.message?.tool_calls) resp.message.tool_calls = chunk.message.tool_calls;
                        if(chunk.done) break;
                    }
                }
                // Run tools
                if(resp.message?.tool_calls?.length && !controller.signal.aborted) {
                    history.push(resp.message);
                    const results = await Promise.all(resp.message.tool_calls.map(async (toolCall: any) => {
                        const tool = (options.tools || this.ai.options.tools)?.find(findByProp('name', toolCall.function.name));
                        if(!tool) return {role: 'tool', tool_name: toolCall.function.name, content: '{"error": "Tool not found"}'};
                        const args = typeof toolCall.function.arguments === 'string' ? JSONAttemptParse(toolCall.function.arguments, {}) : toolCall.function.arguments;
                        try {
                            const result = await tool.fn(args, this.ai);
                            return {role: 'tool', tool_name: toolCall.function.name, args, content: JSONSanitize(result)};
                        } catch (err: any) {
                            return {role: 'tool', tool_name: toolCall.function.name, args, content: JSONSanitize({error: err?.message || err?.toString() || 'Unknown'})};
                        }
                    }));
                    history.push(...results);
                    requestParams.messages = history;
                }
            } while (!controller.signal.aborted && resp.message?.tool_calls?.length);
            if(options.stream) options.stream({done: true});
            res(this.toStandard([...history, {role: 'assistant', content: resp.message?.content}]));
        });
        return Object.assign(response, {abort: () => controller.abort()});
    }
}
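
Note that only the provider is gone; the `proto: 'ollama'` config remains, and `LLM`'s constructor now routes it through the `OpenAi` provider, i.e. Ollama's OpenAI-compatible endpoint. A sketch; the host value is an assumption, and it likely needs to point at the `/v1` path of your Ollama server:

```ts
const ai = new Ai({
    llm: {
        models: {
            'llama3.2': {proto: 'ollama', host: 'http://localhost:11434/v1'},
        },
    },
});
```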

src/open-ai.ts

@@ -1,15 +1,18 @@
import {OpenAI as openAI} from 'openai'; import {OpenAI as openAI} from 'openai';
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils'; import {findByProp, objectMap, JSONSanitize, JSONAttemptParse, clean} from '@ztimson/utils';
import {Ai} from './ai.ts'; import {AbortablePromise, Ai} from './ai.ts';
import {LLMMessage, LLMRequest} from './llm.ts'; import {LLMMessage, LLMRequest} from './llm.ts';
import {AbortablePromise, LLMProvider} from './provider.ts'; import {LLMProvider} from './provider.ts';
export class OpenAi extends LLMProvider { export class OpenAi extends LLMProvider {
client!: openAI; client!: openAI;
constructor(public readonly ai: Ai, public readonly apiToken: string, public model: string) { constructor(public readonly ai: Ai, public readonly host: string | null, public readonly token: string, public model: string) {
super(); super();
this.client = new openAI({apiKey: apiToken}); this.client = new openAI(clean({
baseURL: host,
apiKey: token
}));
} }
private toStandard(history: any[]): LLMMessage[] { private toStandard(history: any[]): LLMMessage[] {
@@ -20,7 +23,8 @@ export class OpenAi extends LLMProvider {
role: 'tool', role: 'tool',
id: tc.id, id: tc.id,
name: tc.function.name, name: tc.function.name,
args: JSONAttemptParse(tc.function.arguments, {}) args: JSONAttemptParse(tc.function.arguments, {}),
timestamp: h.timestamp
})); }));
history.splice(i, 1, ...tools); history.splice(i, 1, ...tools);
i += tools.length - 1; i += tools.length - 1;
@@ -33,7 +37,7 @@ export class OpenAi extends LLMProvider {
history.splice(i, 1); history.splice(i, 1);
i--; i--;
} }
if(!history[i]?.timestamp) history[i].timestamp = Date.now();
} }
return history; return history;
} }
@@ -46,32 +50,33 @@ export class OpenAi extends LLMProvider {
 					content: null,
 					tool_calls: [{ id: h.id, type: 'function', function: { name: h.name, arguments: JSON.stringify(h.args) } }],
 					refusal: null,
-					annotations: [],
+					annotations: []
 				}, {
 					role: 'tool',
 					tool_call_id: h.id,
 					content: h.error || h.content
 				});
 			} else {
-				result.push(h);
+				const {timestamp, ...rest} = h;
+				result.push(rest);
 			}
 			return result;
 		}, [] as any[]);
 	}
-	ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
+	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
 		const controller = new AbortController();
-		const response = new Promise<any>(async (res, rej) => {
-			let history = this.fromStandard([...options.history || [], {role: 'user', content: message}]);
-			if(options.compress) history = await this.ai.llm.compress(<any>history, options.compress.max, options.compress.min, options);
+		return Object.assign(new Promise<any>(async (res, rej) => {
+			if(options.system && options.history?.[0]?.role != 'system') options.history?.splice(0, 0, {role: 'system', content: options.system, timestamp: Date.now()});
+			let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
+			const tools = options.tools || this.ai.options.llm?.tools || [];
 			const requestParams: any = {
 				model: options.model || this.model,
 				messages: history,
 				stream: !!options.stream,
-				max_tokens: options.max_tokens || this.ai.options.max_tokens || 4096,
-				temperature: options.temperature || this.ai.options.temperature || 0.7,
-				tools: (options.tools || this.ai.options.tools || []).map(t => ({
+				max_tokens: options.max_tokens || this.ai.options.llm?.max_tokens || 4096,
+				temperature: options.temperature || this.ai.options.llm?.temperature || 0.7,
+				tools: tools.map(t => ({
 					type: 'function',
 					function: {
 						name: t.name,
@@ -85,32 +90,39 @@ export class OpenAi extends LLMProvider {
 				}))
 			};
-			// Tool call and streaming logic similar to other providers
-			let resp: any;
+			let resp: any, isFirstMessage = true;
 			do {
-				resp = await this.client.chat.completions.create(requestParams);
+				resp = await this.client.chat.completions.create(requestParams).catch(err => {
+					err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
+					throw err;
+				});
-				// Implement streaming and tool call handling
 				if(options.stream) {
-					resp.choices = [];
+					if(!isFirstMessage) options.stream({text: '\n\n'});
+					else isFirstMessage = false;
+					resp.choices = [{message: {content: '', tool_calls: []}}];
 					for await (const chunk of resp) {
 						if(controller.signal.aborted) break;
 						if(chunk.choices[0].delta.content) {
+							resp.choices[0].message.content += chunk.choices[0].delta.content;
 							options.stream({text: chunk.choices[0].delta.content});
 						}
+						if(chunk.choices[0].delta.tool_calls) {
+							resp.choices[0].message.tool_calls = chunk.choices[0].delta.tool_calls;
+						}
 					}
 				}
-				// Run tools
 				const toolCalls = resp.choices[0].message.tool_calls || [];
 				if(toolCalls.length && !controller.signal.aborted) {
 					history.push(resp.choices[0].message);
 					const results = await Promise.all(toolCalls.map(async (toolCall: any) => {
-						const tool = options.tools?.find(findByProp('name', toolCall.function.name));
+						const tool = tools?.find(findByProp('name', toolCall.function.name));
+						if(options.stream) options.stream({tool: toolCall.function.name});
 						if(!tool) return {role: 'tool', tool_call_id: toolCall.id, content: '{"error": "Tool not found"}'};
 						try {
 							const args = JSONAttemptParse(toolCall.function.arguments, {});
-							const result = await tool.fn(args, this.ai);
+							const result = await tool.fn(args, options.stream, this.ai);
 							return {role: 'tool', tool_call_id: toolCall.id, content: JSONSanitize(result)};
 						} catch (err: any) {
 							return {role: 'tool', tool_call_id: toolCall.id, content: JSONSanitize({error: err?.message || err?.toString() || 'Unknown'})};
@@ -120,11 +132,12 @@ export class OpenAi extends LLMProvider {
 					requestParams.messages = history;
 				}
 			} while (!controller.signal.aborted && resp.choices?.[0]?.message?.tool_calls?.length);
+			history.push({role: 'assistant', content: resp.choices[0].message.content || ''});
+			history = this.toStandard(history);
 			if(options.stream) options.stream({done: true});
-			res(this.toStandard([...history, {role: 'assistant', content: resp.choices[0].message.content || ''}]));
-		});
-		return Object.assign(response, {abort: () => controller.abort()});
+			if(options.history) options.history.splice(0, options.history.length, ...history);
+			res(history.at(-1)?.content);
+		}), {abort: () => controller.abort()});
 	}
 }
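ask() now resolves to the assistant's final reply as a string, and when a history array is supplied it is rewritten in place with the full normalized conversation so it can be fed straight into the next call. A usage sketch; the prompt is a placeholder and an existing OpenAi instance named openai is assumed:

import {DateTimeTool} from './tools.ts';
import {LLMMessage} from './llm.ts';

const history: LLMMessage[] = [];
const answer = await openai.ask('What time is it in UTC?', {
	history,                                   // updated in place with user, tool and assistant turns
	tools: [DateTimeTool],
	stream: chunk => {
		if(chunk.tool) console.log(`→ ${chunk.tool}`);
		if(chunk.text) process.stdout.write(chunk.text);
	}
});
console.log(answer);                           // final assistant text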

View File

@@ -1,7 +1,6 @@
-import {LLMMessage, LLMOptions, LLMRequest} from './llm.ts';
+import {AbortablePromise} from './ai.ts';
+import {LLMMessage, LLMRequest} from './llm.ts';
-export type AbortablePromise<T> = Promise<T> & {abort: () => void};
 export abstract class LLMProvider {
-	abstract ask(message: string, options: LLMRequest): AbortablePromise<LLMMessage[]>;
+	abstract ask(message: string, options: LLMRequest): AbortablePromise<string>;
 }
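With AbortablePromise re-exported from ai.ts and ask() narrowed to resolve a string, a custom backend only has to satisfy this one method. A minimal sketch of a third-party provider; the echo behaviour is purely illustrative and it assumes the stream callback accepts the same {text}/{done} chunks the built-in providers emit:

import {AbortablePromise} from './ai.ts';
import {LLMRequest} from './llm.ts';
import {LLMProvider} from './provider.ts';

export class EchoProvider extends LLMProvider {
	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
		const controller = new AbortController();
		const p = new Promise<string>(res => {
			if(!controller.signal.aborted && options.stream) {
				options.stream({text: message});
				options.stream({done: true});
			}
			res(message);
		});
		return Object.assign(p, {abort: () => controller.abort()});
	}
}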

View File

@@ -1,6 +1,8 @@
+import * as cheerio from 'cheerio';
 import {$, $Sync} from '@ztimson/node-utils';
 import {ASet, consoleInterceptor, Http, fn as Fn} from '@ztimson/utils';
 import {Ai} from './ai.ts';
+import {LLMRequest} from './llm.ts';
 export type AiToolArg = {[key: string]: {
 	/** Argument type */
@@ -31,7 +33,7 @@ export type AiTool = {
 	/** Tool arguments */
 	args?: AiToolArg,
 	/** Callback function */
-	fn: (args: any, ai: Ai) => any | Promise<any>,
+	fn: (args: any, stream: LLMRequest['stream'], ai: Ai) => any | Promise<any>,
 };
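Because every tool callback now receives the active stream handler as its second argument, a tool can surface progress to the client while it runs. A hedged sketch of a custom tool written against the new signature; the tool itself is hypothetical and it reuses the {tool} progress chunk the providers already emit:

import {AiTool} from './tools.ts';

export const SleepTool: AiTool = {
	name: 'sleep',
	description: 'Wait a number of milliseconds before answering',
	args: {
		ms: {type: 'string', description: 'Milliseconds to wait', required: true}
	},
	fn: async (args, stream) => {
		stream?.({tool: 'sleep'});                        // progress update pushed to the caller
		await new Promise(res => setTimeout(res, +args.ms));
		return {slept: +args.ms};
	}
};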
 export const CliTool: AiTool = {
@@ -43,9 +45,9 @@ export const CliTool: AiTool = {
 export const DateTimeTool: AiTool = {
 	name: 'get_datetime',
-	description: 'Get current date and time',
+	description: 'Get current UTC date / time',
 	args: {},
-	fn: async () => new Date().toISOString()
+	fn: async () => new Date().toUTCString()
 }
 export const ExecTool: AiTool = {
@@ -55,15 +57,15 @@ export const ExecTool: AiTool = {
 		language: {type: 'string', description: 'Execution language', enum: ['cli', 'node', 'python'], required: true},
 		code: {type: 'string', description: 'Code to execute', required: true}
 	},
-	fn: async (args, ai) => {
+	fn: async (args, stream, ai) => {
 		try {
 			switch(args.type) {
 				case 'bash':
-					return await CliTool.fn({command: args.code}, ai);
+					return await CliTool.fn({command: args.code}, stream, ai);
 				case 'node':
-					return await JSTool.fn({code: args.code}, ai);
+					return await JSTool.fn({code: args.code}, stream, ai);
 				case 'python': {
-					return await PythonTool.fn({code: args.code}, ai);
+					return await PythonTool.fn({code: args.code}, stream, ai);
 				}
 			}
 		} catch(err: any) {
@@ -111,9 +113,43 @@ export const PythonTool: AiTool = {
 	fn: async (args: {code: string}) => ({result: $Sync`python -c "${args.code}"`})
 }
-export const SearchTool: AiTool = {
-	name: 'search',
-	description: 'Use a search engine to find relevant URLs, should be changed with fetch to scrape sources',
+export const ReadWebpageTool: AiTool = {
+	name: 'read_webpage',
+	description: 'Extract clean, structured content from a webpage. Use after web_search to read specific URLs',
+	args: {
+		url: {type: 'string', description: 'URL to extract content from', required: true},
+		focus: {type: 'string', description: 'Optional: What aspect to focus on (e.g., "pricing", "features", "contact info")'}
+	},
+	fn: async (args: {url: string; focus?: string}) => {
+		const html = await fetch(args.url, {headers: {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}})
+			.then(r => r.text()).catch(err => {throw new Error(`Failed to fetch: ${err.message}`)});
+		const $ = cheerio.load(html);
+		$('script, style, nav, footer, header, aside, iframe, noscript, [role="navigation"], [role="banner"], .ad, .ads, .cookie, .popup').remove();
+		const metadata = {
+			title: $('meta[property="og:title"]').attr('content') || $('title').text() || '',
+			description: $('meta[name="description"]').attr('content') || $('meta[property="og:description"]').attr('content') || '',
+		};
+		let content = '';
+		const contentSelectors = ['article', 'main', '[role="main"]', '.content', '.post', '.entry', 'body'];
+		for (const selector of contentSelectors) {
+			const el = $(selector).first();
+			if (el.length && el.text().trim().length > 200) {
+				content = el.text();
+				break;
+			}
+		}
+		if (!content) content = $('body').text();
+		content = content.replace(/\s+/g, ' ').trim().slice(0, 8000);
+		return {url: args.url, title: metadata.title.trim(), description: metadata.description.trim(), content, focus: args.focus};
+	}
+}
+export const WebSearchTool: AiTool = {
+	name: 'web_search',
+	description: 'Use duckduckgo (anonymous) to find relevant online resources. Returns a list of URLs that works great with the `read_webpage` tool',
 	args: {
 		query: {type: 'string', description: 'Search string', required: true},
 		length: {type: 'string', description: 'Number of results to return', default: 5},
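These two tools are designed to be chained: web_search returns candidate URLs and read_webpage pulls the cleaned content from whichever ones the model picks. A sketch of registering both on a request; the prompt is a placeholder and an existing OpenAi instance named openai is assumed:

import {ReadWebpageTool, WebSearchTool} from './tools.ts';

const answer = await openai.ask('Find the current Node.js LTS version and cite the page you used', {
	tools: [WebSearchTool, ReadWebpageTool],
	stream: chunk => { if(chunk.tool) console.log(`→ ${chunk.tool}`); }
});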

src/vision.ts (new file, +23 lines)
View File

@@ -0,0 +1,23 @@
import {createWorker} from 'tesseract.js';
import {AbortablePromise, Ai} from './ai.ts';

export class Vision {
	constructor(private ai: Ai) { }

	/**
	 * Convert image to text using Optical Character Recognition
	 * @param {string} path Path to image
	 * @returns {AbortablePromise<string | null>} Promise of extracted text with abort method
	 */
	ocr(path: string): AbortablePromise<string | null> {
		let worker: any;
		const p = new Promise<string | null>(async res => {
			worker = await createWorker(this.ai.options.ocr || 'eng', 2, {cachePath: this.ai.options.path});
			const {data} = await worker.recognize(path);
			await worker.terminate();
			res(data.text.trim() || null);
		});
		return Object.assign(p, {abort: () => worker?.terminate()});
	}
}
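Usage is a single call, and abandoning the promise early simply terminates the Tesseract worker. A sketch; the image path is a placeholder, an existing Ai instance named ai is assumed, and whether the library also exposes this through the Ai class is not shown in this diff:

import {Vision} from './vision.ts';

const ocr = new Vision(ai).ocr('./scans/receipt.png');
// ocr.abort();                        // optional: stop early and kill the worker
const text = await ocr;
console.log(text ?? 'No text detected');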

View File

@@ -1,12 +1,20 @@
 import {defineConfig} from 'vite';
 import dts from 'vite-plugin-dts';
+import {resolve} from 'path';
 export default defineConfig({
 	build: {
 		lib: {
-			entry: './src/index.ts',
+			entry: {
+				asr: './src/asr.ts',
+				index: './src/index.ts',
+				embedder: './src/embedder.ts',
+			},
 			name: 'utils',
-			fileName: (format) => (format === 'es' ? 'index.mjs' : 'index.js'),
+			fileName: (format, entryName) => {
+				if (entryName === 'embedder') return 'embedder.js';
+				return format === 'es' ? 'index.mjs' : 'index.js';
+			},
 		},
 		ssr: true,
 		emptyOutDir: true,
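The custom fileName callback pins the embedder bundle to a stable embedder.js name while the main entry keeps its index.mjs/index.js naming, presumably so the embedder can be loaded in its own thread. A hedged sketch of loading that bundle from a consumer; the package name and dist path are assumptions, since the actual exports map is not part of this diff:

import {createRequire} from 'node:module';
import {Worker} from 'node:worker_threads';

const require = createRequire(import.meta.url);
// Hypothetical package name; resolve the built embedder bundle and run it off the main thread
const embedder = new Worker(require.resolve('@ztimson/ai/dist/embedder.js'));
embedder.on('error', console.error);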