21 Commits

SHA1 | Message | Date | Publish Library checks (push)
28904cddbe | TTS | 2026-01-30 15:39:29 -05:00 | Passed (Build NPM Project 49s, Tag Version 16s)
d5bf1ec47e | Pulled chunking out into its own exported function for easy access | 2026-01-30 10:38:51 -05:00 | Passed (Build NPM Project 41s, Tag Version 7s)
cb60a0b0c5 | Moved embeddings to worker to prevent blocking | 2026-01-28 22:17:39 -05:00 | Passed (Build NPM Project 41s, Tag Version 7s)
1c59379c7d | Set tesseract model | 2026-01-16 20:33:51 -05:00 | Passed (Build NPM Project 31s, Tag Version 5s)
6dce0e8954 | Fixed tool calls | 2025-12-27 17:27:53 -05:00 | Passed (Build NPM Project 39s, Tag Version 8s)
98dd0bb323 | Auto download teseract models | 2025-12-22 13:48:53 -05:00 | Passed (Build NPM Project 1m4s, Tag Version 10s)
ca5a2334bb | bump 2.2.0 | 2025-12-22 11:02:53 -05:00 | Passed (Build NPM Project 43s, Tag Version 11s)
3cd7b12f5f | Configure model path for all libraries | 2025-12-22 11:02:24 -05:00 | Some checks failed (Build NPM Project cancelled, Tag Version cancelled)
bb6933f0d5 | Optimized cosineSimilarity | 2025-12-19 15:22:06 -05:00 | Passed (Build NPM Project 42s, Tag Version 7s)
435c6127b1 | Re-organized functions and added semantic embeddings | 2025-12-19 11:16:05 -05:00 | Passed (Build NPM Project 46s, Tag Version 8s)
c896b585d0 | Fixed LLM multi message responses | 2025-12-17 19:59:34 -05:00 | Passed (Build NPM Project 44s, Tag Version 14s)
1fe1e0cafe | Fixing message combination on anthropic | 2025-12-16 16:11:13 -05:00 | Passed (Build NPM Project 35s, Tag Version 8s)
3aa4684923 | Fixing message combination on anthropic | 2025-12-16 13:07:03 -05:00 | Passed (Build NPM Project 33s, Tag Version 7s)
0730f5f3f9 | Fixed timestamp breaking api calls | 2025-12-16 12:56:56 -05:00 | Passed (Build NPM Project 34s, Tag Version 8s)
1a0351aeef | Handle multiple AI responses in one question better. | 2025-12-16 12:46:44 -05:00 | Passed (Build NPM Project 33s, Tag Version 8s)
a5ed4076b7 | Handle anthropic multiple responses better. | 2025-12-16 12:22:14 -05:00 | Passed (Build NPM Project 34s, Tag Version 8s)
0112c92505 | Removed log statements | 2025-12-14 21:16:39 -05:00 | Passed (Build NPM Project 20s, Tag Version 5s)
2351f590b5 | Removed ASR file intermediary | 2025-12-14 09:27:07 -05:00 | Passed (Build NPM Project 37s, Tag Version 8s)
2c2acef84e | ASR logging | 2025-12-14 08:49:02 -05:00 | Passed (Build NPM Project 37s, Tag Version 8s)
a6de121551 | Fixed ASR command | 2025-12-13 23:19:30 -05:00 | Passed (Build NPM Project 26s, Tag Version 7s)
31d9ee4390 | ASR Debugging | 2025-12-13 22:59:23 -05:00 | Passed (Build NPM Project 43s, Tag Version 17s)
12 changed files with 1630 additions and 948 deletions

package-lock.json (generated, 2036 lines changed)
File diff suppressed because it is too large.

package.json

@@ -1,6 +1,6 @@
{
"name": "@ztimson/ai-utils",
"version": "0.1.10",
"version": "0.3.0",
"description": "AI Utility library",
"author": "Zak Timson",
"license": "MIT",
@@ -27,6 +27,7 @@
"dependencies": {
"@anthropic-ai/sdk": "^0.67.0",
"@tensorflow/tfjs": "^4.22.0",
"@xenova/transformers": "^2.17.2",
"@ztimson/node-utils": "^1.0.4",
"@ztimson/utils": "^0.27.9",
"ollama": "^0.6.0",
@@ -41,6 +42,7 @@
"vite-plugin-dts": "^4.5.3"
},
"files": [
"bin",
"dist"
]
}

src/ai.ts (139 lines changed)

@@ -1,122 +1,45 @@
import {$} from '@ztimson/node-utils';
import {createWorker} from 'tesseract.js';
import * as os from 'node:os';
import {LLM, LLMOptions} from './llm';
import fs from 'node:fs/promises';
import Path from 'node:path';
import * as tf from '@tensorflow/tfjs';
import { Audio } from './audio.ts';
import {Vision} from './vision.ts';
export type AbortablePromise<T> = Promise<T> & {abort: () => any};
export type AiOptions = LLMOptions & {
/** Path to models */
path?: string;
/** Piper TTS configuration */
piper?: {
/** Model URL: `https://huggingface.co/rhasspy/piper-voices/tree/main/.../model.onnx` */
model: string;
},
/** Tesseract OCR configuration */
tesseract?: {
/** Model: eng, eng_best, eng_fast */
model?: string;
}
/** Whisper ASR configuration */
whisper?: {
/** Whisper binary location */
binary: string;
/** Model */
model: WhisperModel;
/** Path to models */
path: string;
/** Path to storage location for temporary files */
temp?: string;
/** Model: `ggml-base.en.bin` */
model: string;
}
}
export type WhisperModel = 'tiny' | 'base' | 'small' | 'medium' | 'large';
export class Ai {
private downloads: {[key: string]: Promise<string>} = {};
private whisperModel!: string;
/** Large Language Models */
llm!: LLM;
/** Audio processing AI */
audio!: Audio;
/** Language processing AI */
language!: LLM;
/** Vision processing AI */
vision!: Vision;
constructor(public readonly options: AiOptions) {
this.llm = new LLM(this, options);
if(this.options.whisper?.binary) {
this.whisperModel = Path.join(<string>this.options.whisper?.path, this.options.whisper?.model + this.options.whisper?.model.endsWith('.bin') ? '' : '.bin');
this.downloadAsrModel();
}
}
/**
* Convert audio to text using Auditory Speech Recognition
* @param {string} path Path to audio
* @param model Whisper model
* @returns {Promise<any>} Extracted text
*/
async asr(path: string, model?: WhisperModel): Promise<string | null> {
if(!this.options.whisper?.binary) throw new Error('Whisper not configured');
const m = await this.downloadAsrModel(model);
const name = Math.random().toString(36).substring(2, 10) + '-' + path.split('/').pop() + '.txt';
const output = Path.join(this.options.whisper.temp || '/tmp', name);
await $`rm -f ${output} && ${this.options.whisper.binary} -nt -np -m ${m} -f ${path} -otxt -of ${output}`;
return fs.readFile(output, 'utf-8').then(text => text?.trim() || null)
.finally(() => fs.rm(output, {force: true}).catch(() => {}));
}
/**
* Downloads the specified Whisper model if it is not already present locally.
*
* @param {string} model Whisper model that will be downloaded
* @return {Promise<string>} Absolute path to model file, resolves once downloaded
*/
async downloadAsrModel(model?: string): Promise<string> {
if(!this.options.whisper?.binary) throw new Error('Whisper not configured');
const m = model ? (model.endsWith('.bin') ? model : model + '.bin') : this.whisperModel.split('/').pop()!;
const p = Path.join(this.options.whisper.path, m);
if(await fs.stat(p).then(() => true).catch(() => false)) return p;
if(!!this.downloads[m]) return this.downloads[m];
this.downloads[m] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${m}`)
.then(resp => resp.arrayBuffer())
.then(arr => Buffer.from(arr)).then(async buffer => {
await fs.writeFile(p, buffer);
delete this.downloads[m];
return p;
});
return this.downloads[m];
}
/**
* Convert image to text using Optical Character Recognition
* @param {string} path Path to image
* @returns {{abort: Function, response: Promise<string | null>}} Abort function & Promise of extracted text
*/
ocr(path: string): {abort: () => void, response: Promise<string | null>} {
let worker: any;
return {
abort: () => { worker?.terminate(); },
response: new Promise(async res => {
worker = await createWorker('eng');
const {data} = await worker.recognize(path);
await worker.terminate();
res(data.text.trim() || null);
})
}
}
/**
* Compare the difference between two strings using tensor math
* @param target Text that will checked
* @param {string} searchTerms Multiple search terms to check against target
* @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
*/
semanticSimilarity(target: string, ...searchTerms: string[]) {
if(searchTerms.length < 2) throw new Error('Requires at least 2 strings to compare');
const vector = (text: string, dimensions: number = 10): number[] => {
return text.toLowerCase().split('').map((char, index) =>
(char.charCodeAt(0) * (index + 1)) % dimensions / dimensions).slice(0, dimensions);
}
const cosineSimilarity = (v1: number[], v2: number[]): number => {
if (v1.length !== v2.length) throw new Error('Vectors must be same length');
const tensor1 = tf.tensor1d(v1), tensor2 = tf.tensor1d(v2)
const dotProduct = tf.dot(tensor1, tensor2)
const magnitude1 = tf.norm(tensor1)
const magnitude2 = tf.norm(tensor2)
if(magnitude1.dataSync()[0] === 0 || magnitude2.dataSync()[0] === 0) return 0
return dotProduct.dataSync()[0] / (magnitude1.dataSync()[0] * magnitude2.dataSync()[0])
}
const v = vector(target);
const similarities = searchTerms.map(t => vector(t)).map(refVector => cosineSimilarity(v, refVector))
return {avg: similarities.reduce((acc, s) => acc + s, 0) / similarities.length, max: Math.max(...similarities), similarities}
if(!options.path) options.path = os.tmpdir();
process.env.TRANSFORMERS_CACHE = options.path;
this.audio = new Audio(this);
this.language = new LLM(this);
this.vision = new Vision(this);
}
}
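
For context on how the reworked entry point is meant to be wired up, here is a minimal construction sketch based only on the option keys visible in this diff; every concrete value (paths, host, model names) is illustrative rather than taken from the repository, and the Piper URL keeps the elided form from the comment above.

import {Ai} from '@ztimson/ai-utils';

// All values are placeholders; only the option keys come from AiOptions above.
const ai = new Ai({
	path: '/opt/ai-models',                                   // shared model cache, falls back to os.tmpdir()
	model: 'ollama',                                          // default LLM provider
	ollama: {host: 'http://localhost:11434', model: 'llama3'},
	whisper: {binary: '/usr/local/bin/whisper-cli', model: 'ggml-base.en.bin'},
	piper: {model: 'https://huggingface.co/rhasspy/piper-voices/tree/main/.../model.onnx'},
	tesseract: {model: 'eng'}
});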

src/antrhopic.ts

@@ -1,8 +1,8 @@
import {Anthropic as anthropic} from '@anthropic-ai/sdk';
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse, deepCopy} from '@ztimson/utils';
import {AbortablePromise, Ai} from './ai.ts';
import {LLMMessage, LLMRequest} from './llm.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
import {LLMProvider} from './provider.ts';
export class Anthropic extends LLMProvider {
client!: anthropic;
@@ -19,7 +19,7 @@ export class Anthropic extends LLMProvider {
if(history[orgI].role == 'assistant') {
history[orgI].content.filter((c: any) => c.type =='tool_use').forEach((c: any) => {
i++;
history.splice(i, 0, {role: 'tool', id: c.id, name: c.name, args: c.input});
history.splice(i, 0, {role: 'tool', id: c.id, name: c.name, args: c.input, timestamp: Date.now()});
});
} else if(history[orgI].role == 'user') {
history[orgI].content.filter((c: any) => c.type =='tool_result').forEach((c: any) => {
@@ -29,6 +29,7 @@ export class Anthropic extends LLMProvider {
}
history[orgI].content = history[orgI].content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
}
if(!history[orgI].timestamp) history[orgI].timestamp = Date.now();
}
return history.filter(h => !!h.content);
}
@@ -44,20 +45,23 @@ export class Anthropic extends LLMProvider {
i++;
}
}
return history;
return history.map(({timestamp, ...h}) => h);
}
ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
const controller = new AbortController();
const response = new Promise<any>(async (res, rej) => {
let history = this.fromStandard([...options.history || [], {role: 'user', content: message}]);
if(options.compress) history = await this.ai.llm.compress(<any>history, options.compress.max, options.compress.min, options);
let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
const original = deepCopy(history);
if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
const tools = options.tools || this.ai.options.tools || [];
const requestParams: any = {
model: options.model || this.model,
max_tokens: options.max_tokens || this.ai.options.max_tokens || 4096,
system: options.system || this.ai.options.system || '',
temperature: options.temperature || this.ai.options.temperature || 0.7,
tools: (options.tools || this.ai.options.tools || []).map(t => ({
tools: tools.map(t => ({
name: t.name,
description: t.description,
input_schema: {
@@ -71,13 +75,18 @@ export class Anthropic extends LLMProvider {
stream: !!options.stream,
};
// Run tool changes
let resp: any;
let resp: any, isFirstMessage = true;
const assistantMessages: string[] = [];
do {
resp = await this.client.messages.create(requestParams);
resp = await this.client.messages.create(requestParams).catch(err => {
err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
throw err;
});
// Streaming mode
if(options.stream) {
if(!isFirstMessage) options.stream({text: '\n\n'});
else isFirstMessage = false;
resp.content = [];
for await (const chunk of resp) {
if(controller.signal.aborted) break;
@@ -108,8 +117,10 @@ export class Anthropic extends LLMProvider {
const toolCalls = resp.content.filter((c: any) => c.type === 'tool_use');
if(toolCalls.length && !controller.signal.aborted) {
history.push({role: 'assistant', content: resp.content});
original.push({role: 'assistant', content: resp.content});
const results = await Promise.all(toolCalls.map(async (toolCall: any) => {
const tool = options.tools?.find(findByProp('name', toolCall.name));
const tool = tools.find(findByProp('name', toolCall.name));
if(options.stream) options.stream({tool: toolCall.name});
if(!tool) return {tool_use_id: toolCall.id, is_error: true, content: 'Tool not found'};
try {
const result = await tool.fn(toolCall.input, this.ai);
@@ -122,12 +133,11 @@ export class Anthropic extends LLMProvider {
requestParams.messages = history;
}
} while (!controller.signal.aborted && resp.content.some((c: any) => c.type === 'tool_use'));
if(options.stream) options.stream({done: true});
res(this.toStandard([...history, {
role: 'assistant',
content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')
}]));
res(this.toStandard([...history, {role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')}]));
});
return Object.assign(response, {abort: () => controller.abort()});
}
}
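
A hedged usage sketch of the updated ask() flow, showing the new stream callback fields ({text, tool, done}) and the abort() handle; the prompt, the 'anthropic' provider key and the timeout are assumptions, while the callback and promise shapes come from this diff and from src/llm.ts below.

// Sketch only: 'anthropic' must be a configured provider; prompt and timeout are made up.
const req = ai.language.ask('Summarize the conversation so far', {
	model: 'anthropic',
	stream: chunk => {
		if(chunk.text) process.stdout.write(chunk.text);
		if(chunk.tool) console.error(`\n[calling tool: ${chunk.tool}]`);
		if(chunk.done) console.error('\n[done]');
	}
});
const timeout = setTimeout(() => req.abort(), 30_000);   // AbortablePromise exposes abort()
const history = await req;                               // resolves to the standardized LLMMessage[]
clearTimeout(timeout);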

src/audio.ts (new file, 113 lines)

@@ -0,0 +1,113 @@
import {spawn} from 'node:child_process';
import * as os from 'node:os';
import {platform, arch} from 'node:os';
import fs from 'node:fs/promises';
import Path from 'node:path';
import {AbortablePromise, Ai} from './ai.ts';
export class Audio {
private downloads: {[key: string]: Promise<string>} = {};
private whisperModel!: string;
private piperBinary?: string;
constructor(private ai: Ai) {
if(ai.options.whisper?.binary) {
this.whisperModel = ai.options.whisper?.model.endsWith('.bin') ? ai.options.whisper?.model : ai.options.whisper?.model + '.bin';
this.downloadAsrModel();
}
if(ai.options.piper?.model) {
if(!ai.options.piper.model.startsWith('http') || !ai.options.piper.model.endsWith('.onnx'))
throw new Error('Piper model should be a URL to an onnx file to download');
if(platform() != 'linux' || (arch() != 'x64' && arch() != 'arm64'))
throw new Error('Piper TTS only supported on Linux x64/arm64');
this.piperBinary = Path.join(import.meta.dirname, '../bin/piper');
this.downloadTtsModel();
}
}
asr(path: string, model: string = this.whisperModel): AbortablePromise<string | null> {
if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
let abort: any = () => {};
const p = new Promise<string | null>(async (resolve, reject) => {
const m = await this.downloadAsrModel(model);
let output = '';
const proc = spawn(<string>this.ai.options.whisper?.binary, ['-nt', '-np', '-m', m, '-f', path], {stdio: ['ignore', 'pipe', 'ignore']});
abort = () => proc.kill('SIGTERM');
proc.on('error', (err: Error) => reject(err));
proc.stdout.on('data', (data: Buffer) => output += data.toString());
proc.on('close', (code: number) => {
if(code === 0) resolve(output.trim() || null);
else reject(new Error(`Exit code ${code}`));
});
});
return Object.assign(p, {abort});
}
tts(text: string, outputPath?: string, model: string = <string>this.ai.options.piper?.model): AbortablePromise<Buffer | string> {
if(!this.piperBinary) throw new Error('Piper not configured');
if(!model) throw new Error('Invalid Piper model');
let abort: any = () => {};
const p = new Promise<Buffer | string>(async (resolve, reject) => {
const modelPath = await this.downloadTtsModel(model);
const tmpFile = outputPath || Path.join(os.tmpdir(), `piper_${Date.now()}.wav`);
const proc = spawn(<string>this.piperBinary, ['--model', modelPath, '--output_file', tmpFile], {
stdio: ['pipe', 'ignore', 'ignore'],
env: {...process.env, LD_LIBRARY_PATH: Path.dirname(<string>this.piperBinary)}
});
abort = () => proc.kill('SIGTERM');
proc.stdin.write(text);
proc.stdin.end();
proc.on('error', (err: Error) => reject(err));
proc.on('close', async (code: number) => {
if(code === 0) {
if(outputPath) {
resolve(outputPath);
} else {
const buffer = await fs.readFile(tmpFile);
await fs.unlink(tmpFile).catch(() => {});
resolve(buffer);
}
} else {
reject(new Error(`Exit code ${code}`));
}
});
});
return Object.assign(p, {abort});
}
async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
if(!model.endsWith('.bin')) model += '.bin';
const p = Path.join(<string>this.ai.options.path, model);
if(await fs.stat(p).then(() => true).catch(() => false)) return p;
if(!!this.downloads[model]) return this.downloads[model];
this.downloads[model] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${model}`)
.then(resp => resp.arrayBuffer())
.then(arr => Buffer.from(arr)).then(async buffer => {
await fs.writeFile(p, buffer);
delete this.downloads[model];
return p;
});
return this.downloads[model];
}
async downloadTtsModel(model: string = <string>this.ai.options.piper?.model): Promise<string> {
if(!model) throw new Error('Invalid Piper model');
const m = <string>model.split('/').pop();
const p = Path.join(<string>this.ai.options.path, m);
const [onnxExists, jsonExists] = await Promise.all([
fs.stat(p).then(() => true).catch(() => false),
fs.stat(p + '.json').then(() => true).catch(() => false)
]);
if(onnxExists && jsonExists) return p;
if(!!this.downloads[m]) return this.downloads[m];
this.downloads[m] = Promise.all([
onnxExists ? Promise.resolve() : fetch(model).then(r => r.arrayBuffer()).then(b => fs.writeFile(p, Buffer.from(b))),
jsonExists ? Promise.resolve() : fetch(model + '.json').then(r => r.arrayBuffer()).then(b => fs.writeFile(p + '.json', Buffer.from(b)))
]).then(() => {
delete this.downloads[m];
return p;
});
return this.downloads[m];
}
}
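
A short sketch of the new Audio class in use, assuming the Ai instance from the earlier sketch; the file paths and phrases are placeholders, while the signatures (asr(path, model?) and tts(text, outputPath?, model?)) are the ones added above.

// asr() shells out to whisper.cpp and resolves with the transcript (or null).
const transcript = await ai.audio.asr('./recordings/meeting.wav');

// tts() pipes text through Piper; with no output path it resolves to a WAV Buffer...
const wav = await ai.audio.tts('Build finished successfully');

// ...and with a path it writes the file and resolves to that path instead.
const saved = await ai.audio.tts('Saved to disk', './out/status.wav');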

src/embedder.ts (new file, 11 lines)

@@ -0,0 +1,11 @@
import { pipeline } from '@xenova/transformers';
import { parentPort } from 'worker_threads';
let model: any;
parentPort?.on('message', async ({ id, text }) => {
if(!model) model = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
const output = await model(text, { pooling: 'mean', normalize: true });
const embedding = Array.from(output.data);
parentPort?.postMessage({ id, embedding });
});
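
The worker above answers {id, text} messages with {id, embedding}. The LLM class below drives it through a queue, but a stripped-down sketch of the same protocol looks like this; the dist/embedder.js path and the random id scheme are assumptions.

import {Worker} from 'worker_threads';

const worker = new Worker('./dist/embedder.js');              // built as a separate entry by the vite config below
const embed = (text: string) => new Promise<number[]>(resolve => {
	const id = Math.random();                                 // correlate request and response
	const onMessage = (msg: {id: number, embedding: number[]}) => {
		if(msg.id !== id) return;
		worker.off('message', onMessage);
		resolve(msg.embedding);
	};
	worker.on('message', onMessage);
	worker.postMessage({id, text});
});

const vector = await embed('hello world');                    // MiniLM feature vector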

src/llm.ts

@@ -1,16 +1,21 @@
import {JSONAttemptParse} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {AbortablePromise, Ai} from './ai.ts';
import {Anthropic} from './antrhopic.ts';
import {Ollama} from './ollama.ts';
import {OpenAi} from './open-ai.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
import {LLMProvider} from './provider.ts';
import {AiTool} from './tools.ts';
import {Worker} from 'worker_threads';
import {fileURLToPath} from 'url';
import {dirname, join} from 'path';
export type LLMMessage = {
/** Message originator */
role: 'assistant' | 'system' | 'user';
/** Message content */
content: string | any;
/** Timestamp */
timestamp?: number;
} | {
/** Tool call */
role: 'tool';
@@ -24,6 +29,8 @@ export type LLMMessage = {
content: undefined | string;
/** Tool error */
error: undefined | string;
/** Timestamp */
timestamp?: number;
}
export type LLMOptions = {
@@ -66,7 +73,7 @@ export type LLMRequest = {
/** LLM model */
model?: string | [string, string];
/** Stream response */
stream?: (chunk: {text?: string, done?: true}) => any;
stream?: (chunk: {text?: string, tool?: string, done?: true}) => any;
/** Compress old messages in the chat to free up context */
compress?: {
/** Trigger chat compression once context exceeds the token count */
@@ -77,12 +84,25 @@ export type LLMRequest = {
}
export class LLM {
private embedWorker: Worker | null = null;
private embedQueue = new Map<number, { resolve: (value: number[]) => void; reject: (error: any) => void }>();
private embedId = 0;
private providers: {[key: string]: LLMProvider} = {};
constructor(public readonly ai: Ai, public readonly options: LLMOptions) {
if(options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, options.anthropic.token, options.anthropic.model);
if(options.ollama?.host) this.providers.ollama = new Ollama(this.ai, options.ollama.host, options.ollama.model);
if(options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, options.openAi.token, options.openAi.model);
constructor(public readonly ai: Ai) {
this.embedWorker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
this.embedWorker.on('message', ({ id, embedding }) => {
const pending = this.embedQueue.get(id);
if (pending) {
pending.resolve(embedding);
this.embedQueue.delete(id);
}
});
if(ai.options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, ai.options.anthropic.token, ai.options.anthropic.model);
if(ai.options.ollama?.host) this.providers.ollama = new Ollama(this.ai, ai.options.ollama.host, ai.options.ollama.model);
if(ai.options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, ai.options.openAi.token, ai.options.openAi.model);
}
/**
@@ -95,11 +115,11 @@ export class LLM {
let model: any = [null, null];
if(options.model) {
if(typeof options.model == 'object') model = options.model;
else model = [options.model, (<any>this.options)[options.model]?.model];
else model = [options.model, (<any>this.ai.options)[options.model]?.model];
}
if(!options.model || model[1] == null) {
if(typeof this.options.model == 'object') model = this.options.model;
else model = [this.options.model, (<any>this.options)[this.options.model]?.model];
if(typeof this.ai.options.model == 'object') model = this.ai.options.model;
else model = [this.ai.options.model, (<any>this.ai.options)[this.ai.options.model]?.model];
}
if(!model[0] || !model[1]) throw new Error(`Unknown LLM provider or model: ${model[0]} / ${model[1]}`);
return this.providers[model[0]].ask(message, {...options, model: model[1]});
@@ -113,7 +133,7 @@ export class LLM {
* @param {LLMRequest} options LLM options
* @returns {Promise<LLMMessage[]>} New chat history with summary at index 0
*/
async compress(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
if(this.estimateTokens(history) < max) return history;
let keep = 0, tokens = 0;
for(let m of history.toReversed()) {
@@ -125,7 +145,65 @@ export class LLM {
const recent = keep == 0 ? [] : history.slice(-keep),
process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
const summary = await this.summarize(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), 250, options);
return [{role: 'assistant', content: `Conversation Summary: ${summary}`}, ...recent];
return [{role: 'assistant', content: `Conversation Summary: ${summary}`, timestamp: Date.now()}, ...recent];
}
cosineSimilarity(v1: number[], v2: number[]): number {
if (v1.length !== v2.length) throw new Error('Vectors must be same length');
let dotProduct = 0, normA = 0, normB = 0;
for (let i = 0; i < v1.length; i++) {
dotProduct += v1[i] * v2[i];
normA += v1[i] * v1[i];
normB += v2[i] * v2[i];
}
const denominator = Math.sqrt(normA) * Math.sqrt(normB);
return denominator === 0 ? 0 : dotProduct / denominator;
}
chunk(target: object | string, maxTokens = 500, overlapTokens = 50): string[] {
const objString = (obj: any, path = ''): string[] => {
if(!obj) return [];
return Object.entries(obj).flatMap(([key, value]) => {
const p = path ? `${path}${isNaN(+key) ? `.${key}` : `[${key}]`}` : key;
if(typeof value === 'object' && !Array.isArray(value)) return objString(value, p);
return `${p}: ${Array.isArray(value) ? value.join(', ') : value}`;
});
};
const lines = typeof target === 'object' ? objString(target) : target.split('\n');
const tokens = lines.flatMap(l => [...l.split(/\s+/).filter(Boolean), '\n']);
const chunks: string[] = [];
for(let i = 0; i < tokens.length;) {
let text = '', j = i;
while(j < tokens.length) {
const next = text + (text ? ' ' : '') + tokens[j];
if(this.estimateTokens(next.replace(/\s*\n\s*/g, '\n')) > maxTokens && text) break;
text = next;
j++;
}
const clean = text.replace(/\s*\n\s*/g, '\n').trim();
if(clean) chunks.push(clean);
i = Math.max(j - overlapTokens, j === i ? i + 1 : j);
}
return chunks;
}
embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
const embed = (text: string): Promise<number[]> => {
return new Promise((resolve, reject) => {
const id = this.embedId++;
this.embedQueue.set(id, { resolve, reject });
this.embedWorker?.postMessage({ id, text });
});
};
const chunks = this.chunk(target, maxTokens, overlapTokens);
return Promise.all(chunks.map(async (text, index) => ({
index,
embedding: await embed(text),
text,
tokens: this.estimateTokens(text),
})));
}
/**
@@ -138,6 +216,23 @@ export class LLM {
return Math.ceil((text.length / 4) * 1.2);
}
/**
* Compare the similarity of two strings using character-vector cosine similarity
* @param target Text that will be checked
* @param {string} searchTerms Multiple search terms to check against target
* @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
*/
fuzzyMatch(target: string, ...searchTerms: string[]) {
if(searchTerms.length < 2) throw new Error('Requires at least 2 strings to compare');
const vector = (text: string, dimensions: number = 10): number[] => {
return text.toLowerCase().split('').map((char, index) =>
(char.charCodeAt(0) * (index + 1)) % dimensions / dimensions).slice(0, dimensions);
}
const v = vector(target);
const similarities = searchTerms.map(t => vector(t)).map(refVector => this.cosineSimilarity(v, refVector))
return {avg: similarities.reduce((acc, s) => acc + s, 0) / similarities.length, max: Math.max(...similarities), similarities}
}
/**
* Ask a question with JSON response
* @param {string} message Question
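
Taken together, chunk(), embedding() and cosineSimilarity() enable a simple semantic search. A minimal sketch, assuming the Ai instance from the earlier example and an arbitrary documentText string (both assumptions; the method signatures are the ones in this file):

// Chunk and embed a document in the worker, then rank chunks against a query.
const chunks = await ai.language.embedding(documentText);                   // [{index, embedding, text, tokens}, ...]
const [query] = await ai.language.embedding('How do I configure Whisper?');
const ranked = chunks
	.map(c => ({...c, score: ai.language.cosineSimilarity(query.embedding, c.embedding)}))
	.sort((a, b) => b.score - a.score);
console.log(ranked[0].text);                                                // best matching chunk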

src/ollama.ts

@@ -1,7 +1,7 @@
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {AbortablePromise, Ai} from './ai.ts';
import {LLMMessage, LLMRequest} from './llm.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
import {LLMProvider} from './provider.ts';
import {Ollama as ollama} from 'ollama';
export class Ollama extends LLMProvider {
@@ -22,15 +22,17 @@ export class Ollama extends LLMProvider {
}
} else if(history[i].role == 'tool') {
const error = history[i].content.startsWith('{"error":');
history[i] = {role: 'tool', name: history[i].tool_name, args: history[i].args, [error ? 'error' : 'content']: history[i].content};
history[i] = {role: 'tool', name: history[i].tool_name, args: history[i].args, [error ? 'error' : 'content']: history[i].content, timestamp: history[i].timestamp};
}
if(!history[i]?.timestamp) history[i].timestamp = Date.now();
}
return history;
}
private fromStandard(history: LLMMessage[]): any[] {
return history.map((h: any) => {
if(h.role != 'tool') return h;
const {timestamp, ...rest} = h;
if(h.role != 'tool') return rest;
return {role: 'tool', tool_name: h.name, content: h.error || h.content}
});
}
@@ -39,14 +41,15 @@ export class Ollama extends LLMProvider {
const controller = new AbortController();
const response = new Promise<any>(async (res, rej) => {
let system = options.system || this.ai.options.system;
let history = this.fromStandard([...options.history || [], {role: 'user', content: message}]);
let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
if(history[0].roll == 'system') {
if(!system) system = history.shift();
else history.shift();
}
if(options.compress) history = await this.ai.llm.compress(<any>history, options.compress.max, options.compress.min);
if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min);
if(options.system) history.unshift({role: 'system', content: system})
const tools = options.tools || this.ai.options.tools || [];
const requestParams: any = {
model: options.model || this.model,
messages: history,
@@ -56,7 +59,7 @@ export class Ollama extends LLMProvider {
temperature: options.temperature || this.ai.options.temperature || 0.7,
num_predict: options.max_tokens || this.ai.options.max_tokens || 4096,
},
tools: (options.tools || this.ai.options.tools || []).map(t => ({
tools: tools.map(t => ({
type: 'function',
function: {
name: t.name,
@@ -70,11 +73,16 @@ export class Ollama extends LLMProvider {
}))
}
// Run tool chains
let resp: any;
let resp: any, isFirstMessage = true;
do {
resp = await this.client.chat(requestParams);
resp = await this.client.chat(requestParams).catch(err => {
err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
throw err;
});
if(options.stream) {
if(!isFirstMessage) options.stream({text: '\n\n'});
else isFirstMessage = false;
resp.message = {role: 'assistant', content: '', tool_calls: []};
for await (const chunk of resp) {
if(controller.signal.aborted) break;
@@ -87,11 +95,11 @@ export class Ollama extends LLMProvider {
}
}
// Run tools
if(resp.message?.tool_calls?.length && !controller.signal.aborted) {
history.push(resp.message);
const results = await Promise.all(resp.message.tool_calls.map(async (toolCall: any) => {
const tool = (options.tools || this.ai.options.tools)?.find(findByProp('name', toolCall.function.name));
const tool = tools.find(findByProp('name', toolCall.function.name));
if(options.stream) options.stream({tool: toolCall.function.name});
if(!tool) return {role: 'tool', tool_name: toolCall.function.name, content: '{"error": "Tool not found"}'};
const args = typeof toolCall.function.arguments === 'string' ? JSONAttemptParse(toolCall.function.arguments, {}) : toolCall.function.arguments;
try {
@@ -105,9 +113,11 @@ export class Ollama extends LLMProvider {
requestParams.messages = history;
}
} while (!controller.signal.aborted && resp.message?.tool_calls?.length);
if(options.stream) options.stream({done: true});
res(this.toStandard([...history, {role: 'assistant', content: resp.message?.content}]));
});
return Object.assign(response, {abort: () => controller.abort()});
}
}

src/open-ai.ts

@@ -1,8 +1,8 @@
import {OpenAI as openAI} from 'openai';
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {AbortablePromise, Ai} from './ai.ts';
import {LLMMessage, LLMRequest} from './llm.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
import {LLMProvider} from './provider.ts';
export class OpenAi extends LLMProvider {
client!: openAI;
@@ -20,7 +20,8 @@ export class OpenAi extends LLMProvider {
role: 'tool',
id: tc.id,
name: tc.function.name,
args: JSONAttemptParse(tc.function.arguments, {})
args: JSONAttemptParse(tc.function.arguments, {}),
timestamp: h.timestamp
}));
history.splice(i, 1, ...tools);
i += tools.length - 1;
@@ -33,7 +34,7 @@ export class OpenAi extends LLMProvider {
history.splice(i, 1);
i--;
}
if(!history[i]?.timestamp) history[i].timestamp = Date.now();
}
return history;
}
@@ -46,14 +47,15 @@ export class OpenAi extends LLMProvider {
content: null,
tool_calls: [{ id: h.id, type: 'function', function: { name: h.name, arguments: JSON.stringify(h.args) } }],
refusal: null,
annotations: [],
annotations: []
}, {
role: 'tool',
tool_call_id: h.id,
content: h.error || h.content
});
} else {
result.push(h);
const {timestamp, ...rest} = h;
result.push(rest);
}
return result;
}, [] as any[]);
@@ -62,16 +64,17 @@ export class OpenAi extends LLMProvider {
ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
const controller = new AbortController();
const response = new Promise<any>(async (res, rej) => {
let history = this.fromStandard([...options.history || [], {role: 'user', content: message}]);
if(options.compress) history = await this.ai.llm.compress(<any>history, options.compress.max, options.compress.min, options);
let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
const tools = options.tools || this.ai.options.tools || [];
const requestParams: any = {
model: options.model || this.model,
messages: history,
stream: !!options.stream,
max_tokens: options.max_tokens || this.ai.options.max_tokens || 4096,
temperature: options.temperature || this.ai.options.temperature || 0.7,
tools: (options.tools || this.ai.options.tools || []).map(t => ({
tools: tools.map(t => ({
type: 'function',
function: {
name: t.name,
@@ -85,28 +88,35 @@ export class OpenAi extends LLMProvider {
}))
};
// Tool call and streaming logic similar to other providers
let resp: any;
let resp: any, isFirstMessage = true;
do {
resp = await this.client.chat.completions.create(requestParams);
resp = await this.client.chat.completions.create(requestParams).catch(err => {
err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
throw err;
});
// Implement streaming and tool call handling
if(options.stream) {
resp.choices = [];
if(!isFirstMessage) options.stream({text: '\n\n'});
else isFirstMessage = false;
resp.choices = [{message: {content: '', tool_calls: []}}];
for await (const chunk of resp) {
if(controller.signal.aborted) break;
if(chunk.choices[0].delta.content) {
resp.choices[0].message.content += chunk.choices[0].delta.content;
options.stream({text: chunk.choices[0].delta.content});
}
if(chunk.choices[0].delta.tool_calls) {
resp.choices[0].message.tool_calls = chunk.choices[0].delta.tool_calls;
}
}
}
// Run tools
const toolCalls = resp.choices[0].message.tool_calls || [];
if(toolCalls.length && !controller.signal.aborted) {
history.push(resp.choices[0].message);
const results = await Promise.all(toolCalls.map(async (toolCall: any) => {
const tool = options.tools?.find(findByProp('name', toolCall.function.name));
const tool = tools?.find(findByProp('name', toolCall.function.name));
if(options.stream) options.stream({tool: toolCall.function.name});
if(!tool) return {role: 'tool', tool_call_id: toolCall.id, content: '{"error": "Tool not found"}'};
try {
const args = JSONAttemptParse(toolCall.function.arguments, {});
@@ -124,7 +134,6 @@ export class OpenAi extends LLMProvider {
if(options.stream) options.stream({done: true});
res(this.toStandard([...history, {role: 'assistant', content: resp.choices[0].message.content || ''}]));
});
return Object.assign(response, {abort: () => controller.abort()});
}
}

src/provider.ts

@@ -1,7 +1,6 @@
import {AbortablePromise} from './ai.ts';
import {LLMMessage, LLMOptions, LLMRequest} from './llm.ts';
export type AbortablePromise<T> = Promise<T> & {abort: () => void};
export abstract class LLMProvider {
abstract ask(message: string, options: LLMRequest): AbortablePromise<LLMMessage[]>;
}

src/vision.ts (new file, 23 lines)

@@ -0,0 +1,23 @@
import {createWorker} from 'tesseract.js';
import {AbortablePromise, Ai} from './ai.ts';
export class Vision {
constructor(private ai: Ai) { }
/**
* Convert image to text using Optical Character Recognition
* @param {string} path Path to image
* @returns {AbortablePromise<string | null>} Promise of extracted text with abort method
*/
ocr(path: string): AbortablePromise<string | null> {
let worker: any;
const p = new Promise<string | null>(async res => {
worker = await createWorker(this.ai.options.tesseract?.model || 'eng', 2, {cachePath: this.ai.options.path});
const {data} = await worker.recognize(path);
await worker.terminate();
res(data.text.trim() || null);
});
return Object.assign(p, {abort: () => worker?.terminate()});
}
}
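
A sketch of the relocated OCR helper, assuming the Ai instance from the earlier example; the image path and timeout are placeholders, while the AbortablePromise shape matches the code above.

const ocr = ai.vision.ocr('./scans/invoice.png');
const timer = setTimeout(() => ocr.abort(), 60_000);   // terminate the tesseract worker if it runs long
const text = await ocr;
clearTimeout(timer);
console.log(text ?? 'No text recognized');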

vite.config.ts

@@ -1,12 +1,19 @@
import {defineConfig} from 'vite';
import dts from 'vite-plugin-dts';
import {resolve} from 'path';
export default defineConfig({
build: {
lib: {
entry: './src/index.ts',
entry: {
index: './src/index.ts',
embedder: './src/embedder.ts',
},
name: 'utils',
fileName: (format) => (format === 'es' ? 'index.mjs' : 'index.js'),
fileName: (format, entryName) => {
if (entryName === 'embedder') return 'embedder.js';
return format === 'es' ? 'index.mjs' : 'index.js';
},
},
ssr: true,
emptyOutDir: true,