3 Commits

SHA1        Date                        Message
435c6127b1  2025-12-19 11:16:05 -05:00  Re-organized functions and added semantic embeddings
            All checks successful (Publish Library / Build NPM Project (push): 46s, Tag Version (push): 8s)
c896b585d0  2025-12-17 19:59:34 -05:00  Fixed LLM multi message responses
            All checks successful (Publish Library / Build NPM Project (push): 44s, Tag Version (push): 14s)
1fe1e0cafe  2025-12-16 16:11:13 -05:00  Fixing message combination on anthropic
            All checks successful (Publish Library / Build NPM Project (push): 35s, Tag Version (push): 8s)
9 changed files with 1006 additions and 146 deletions

package-lock.json (generated): 796 changed lines
File diff suppressed because it is too large.


@@ -1,6 +1,6 @@
{
"name": "@ztimson/ai-utils",
"version": "0.1.20",
"version": "0.2.0",
"description": "AI Utility library",
"author": "Zak Timson",
"license": "MIT",
@@ -27,6 +27,7 @@
"dependencies": {
"@anthropic-ai/sdk": "^0.67.0",
"@tensorflow/tfjs": "^4.22.0",
"@xenova/transformers": "^2.17.2",
"@ztimson/node-utils": "^1.0.4",
"@ztimson/utils": "^0.27.9",
"ollama": "^0.6.0",

src/ai.ts: 117 changed lines

@@ -1,9 +1,6 @@
import {createWorker} from 'tesseract.js';
import {LLM, LLMOptions} from './llm';
import fs from 'node:fs/promises';
import Path from 'node:path';
import * as tf from '@tensorflow/tfjs';
import {spawn} from 'node:child_process';
import { Audio } from './audio.ts';
import {Vision} from './vision.ts';
export type AiOptions = LLMOptions & {
whisper?: {
@@ -20,108 +17,16 @@ export class Ai {
private downloads: {[key: string]: Promise<string>} = {};
private whisperModel!: string;
/** Large Language Models */
llm!: LLM;
/** Audio processing AI */
audio!: Audio;
/** Language processing AI */
language!: LLM;
/** Vision processing AI */
vision!: Vision;
constructor(public readonly options: AiOptions) {
this.llm = new LLM(this, options);
if(this.options.whisper?.binary) {
this.whisperModel = this.options.whisper?.model.endsWith('.bin') ? this.options.whisper?.model : this.options.whisper?.model + '.bin';
this.downloadAsrModel();
}
}
/**
* Convert audio to text using Auditory Speech Recognition
* @param {string} path Path to audio
* @param model Whisper model
* @returns {Promise<any>} Extracted text
*/
asr(path: string, model: string = this.whisperModel): {abort: () => void, response: Promise<string | null>} {
if(!this.options.whisper?.binary) throw new Error('Whisper not configured');
let abort: any = () => {};
const response = new Promise<string | null>((resolve, reject) => {
this.downloadAsrModel(model).then(m => {
let output = '';
const proc = spawn(<string>this.options.whisper?.binary, ['-nt', '-np', '-m', m, '-f', path], {stdio: ['ignore', 'pipe', 'ignore']});
abort = () => proc.kill('SIGTERM');
proc.on('error', (err: Error) => reject(err));
proc.stdout.on('data', (data: Buffer) => output += data.toString());
proc.on('close', (code: number) => {
if(code === 0) resolve(output.trim() || null);
else reject(new Error(`Exit code ${code}`));
});
});
});
return {response, abort};
}
/**
* Downloads the specified Whisper model if it is not already present locally.
*
* @param {string} model Whisper model that will be downloaded
* @return {Promise<string>} Absolute path to model file, resolves once downloaded
*/
async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
if(!this.options.whisper?.binary) throw new Error('Whisper not configured');
if(!model.endsWith('.bin')) model += '.bin';
const p = Path.join(this.options.whisper.path, model);
if(await fs.stat(p).then(() => true).catch(() => false)) return p;
if(!!this.downloads[model]) return this.downloads[model];
this.downloads[model] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${model}`)
.then(resp => resp.arrayBuffer())
.then(arr => Buffer.from(arr)).then(async buffer => {
await fs.writeFile(p, buffer);
delete this.downloads[model];
return p;
});
return this.downloads[model];
}
/**
* Convert image to text using Optical Character Recognition
* @param {string} path Path to image
* @returns {{abort: Function, response: Promise<string | null>}} Abort function & Promise of extracted text
*/
ocr(path: string): {abort: () => void, response: Promise<string | null>} {
let worker: any;
return {
abort: () => { worker?.terminate(); },
response: new Promise(async res => {
worker = await createWorker('eng');
const {data} = await worker.recognize(path);
await worker.terminate();
res(data.text.trim() || null);
})
}
}
/**
* Compare the difference between two strings using tensor math
* @param target Text that will checked
* @param {string} searchTerms Multiple search terms to check against target
* @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
*/
semanticSimilarity(target: string, ...searchTerms: string[]) {
if(searchTerms.length < 2) throw new Error('Requires at least 2 strings to compare');
const vector = (text: string, dimensions: number = 10): number[] => {
return text.toLowerCase().split('').map((char, index) =>
(char.charCodeAt(0) * (index + 1)) % dimensions / dimensions).slice(0, dimensions);
}
const cosineSimilarity = (v1: number[], v2: number[]): number => {
if (v1.length !== v2.length) throw new Error('Vectors must be same length');
const tensor1 = tf.tensor1d(v1), tensor2 = tf.tensor1d(v2)
const dotProduct = tf.dot(tensor1, tensor2)
const magnitude1 = tf.norm(tensor1)
const magnitude2 = tf.norm(tensor2)
if(magnitude1.dataSync()[0] === 0 || magnitude2.dataSync()[0] === 0) return 0
return dotProduct.dataSync()[0] / (magnitude1.dataSync()[0] * magnitude2.dataSync()[0])
}
const v = vector(target);
const similarities = searchTerms.map(t => vector(t)).map(refVector => cosineSimilarity(v, refVector))
return {avg: similarities.reduce((acc, s) => acc + s, 0) / similarities.length, max: Math.max(...similarities), similarities}
this.audio = new Audio(this);
this.language = new LLM(this);
this.vision = new Vision(this);
}
}
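
After this refactor the Ai class is a thin facade: Whisper/ASR lives on audio, tesseract OCR on vision, and the LLM plumbing (including the relocated similarity helpers) on language. A rough usage sketch, with option values, file paths, and the package export assumed, and the provider option shape inferred from AiOptions above:

import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({
  anthropic: {token: process.env.ANTHROPIC_TOKEN ?? ''},   // provider config shape assumed
  whisper: {binary: '/usr/local/bin/whisper-cli', path: '/tmp/whisper', model: 'ggml-base.en'},
});

const speech = ai.audio.asr('./meeting.wav');    // previously ai.asr()
const scan = ai.vision.ocr('./receipt.png');     // previously ai.ocr()
console.log(await speech.response, await scan.response);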

src/antrhopic.ts

@@ -1,5 +1,5 @@
import {Anthropic as anthropic} from '@anthropic-ai/sdk';
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse, deepCopy} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {LLMMessage, LLMRequest} from './llm.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
@@ -52,7 +52,8 @@ export class Anthropic extends LLMProvider {
const controller = new AbortController();
const response = new Promise<any>(async (res, rej) => {
let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
if(options.compress) history = await this.ai.llm.compress(<any>history, options.compress.max, options.compress.min, options);
const original = deepCopy(history);
if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
const requestParams: any = {
model: options.model || this.model,
max_tokens: options.max_tokens || this.ai.options.max_tokens || 4096,
@@ -72,12 +73,15 @@ export class Anthropic extends LLMProvider {
stream: !!options.stream,
};
let resp: any;
let resp: any, isFirstMessage = true;
const assistantMessages: string[] = [];
do {
resp = await this.client.messages.create(requestParams);
// Streaming mode
if(options.stream) {
if(assistantMessages.length) options.stream({text: '\n\n'});
if(!isFirstMessage) options.stream({text: '\n\n'});
else isFirstMessage = false;
resp.content = [];
for await (const chunk of resp) {
if(controller.signal.aborted) break;
@@ -104,11 +108,11 @@ export class Anthropic extends LLMProvider {
}
}
const textContent = resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
if(textContent) assistantMessages.push(textContent);
// Run tools
const toolCalls = resp.content.filter((c: any) => c.type === 'tool_use');
if(toolCalls.length && !controller.signal.aborted) {
history.push({role: 'assistant', content: resp.content});
original.push({role: 'assistant', content: resp.content});
const results = await Promise.all(toolCalls.map(async (toolCall: any) => {
const tool = options.tools?.find(findByProp('name', toolCall.name));
if(!tool) return {tool_use_id: toolCall.id, is_error: true, content: 'Tool not found'};
@@ -123,8 +127,9 @@ export class Anthropic extends LLMProvider {
requestParams.messages = history;
}
} while (!controller.signal.aborted && resp.content.some((c: any) => c.type === 'tool_use'));
if(options.stream) options.stream({done: true});
res(this.toStandard([...history, {role: 'assistant', content: assistantMessages.join('\n\n'), timestamp: Date.now()}]));
res(this.toStandard([...history, {role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')}]));
});
return Object.assign(response, {abort: () => controller.abort()});
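
The provider still returns the library's abortable-promise shape: a promise over the updated history with an abort() attached via Object.assign. A consumption sketch, assuming ai.language.ask() from the earlier sketch forwards these options unchanged (history array and compress budget are illustrative):

// Stream callback receives {text} chunks, '\n\n' between multi-message turns, then {done: true}
const request = ai.language.ask('Summarize the conversation so far', {
  history: [],                                   // prior LLMMessage[] entries, if any
  compress: {max: 4000, min: 1000},              // assumed budget; triggers compressHistory()
  stream: chunk => {
    if (chunk.text) process.stdout.write(chunk.text);
    if (chunk.done) process.stdout.write('\n');
  },
});

const timeout = setTimeout(() => request.abort(), 30_000);  // cancel the Anthropic request if it hangs
const updatedHistory = await request;                       // standardized LLMMessage history
clearTimeout(timeout);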

src/audio.ts (new file): 63 changed lines

@@ -0,0 +1,63 @@
import {spawn} from 'node:child_process';
import fs from 'node:fs/promises';
import Path from 'node:path';
import {Ai} from './ai.ts';
export class Audio {
private downloads: {[key: string]: Promise<string>} = {};
private whisperModel!: string;
constructor(private ai: Ai) {
if(ai.options.whisper?.binary) {
this.whisperModel = ai.options.whisper?.model.endsWith('.bin') ? ai.options.whisper?.model : ai.options.whisper?.model + '.bin';
this.downloadAsrModel();
}
}
/**
* Convert audio to text using Automatic Speech Recognition
* @param {string} path Path to audio
* @param model Whisper model
* @returns {Promise<any>} Extracted text
*/
asr(path: string, model: string = this.whisperModel): {abort: () => void, response: Promise<string | null>} {
if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
let abort: any = () => {};
const response = new Promise<string | null>((resolve, reject) => {
this.downloadAsrModel(model).then(m => {
let output = '';
const proc = spawn(<string>this.ai.options.whisper?.binary, ['-nt', '-np', '-m', m, '-f', path], {stdio: ['ignore', 'pipe', 'ignore']});
abort = () => proc.kill('SIGTERM');
proc.on('error', (err: Error) => reject(err));
proc.stdout.on('data', (data: Buffer) => output += data.toString());
proc.on('close', (code: number) => {
if(code === 0) resolve(output.trim() || null);
else reject(new Error(`Exit code ${code}`));
});
});
});
return {response, abort};
}
/**
* Downloads the specified Whisper model if it is not already present locally.
*
* @param {string} model Whisper model that will be downloaded
* @return {Promise<string>} Absolute path to model file, resolves once downloaded
*/
async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
if(!model.endsWith('.bin')) model += '.bin';
const p = Path.join(this.ai.options.whisper.path, model);
if(await fs.stat(p).then(() => true).catch(() => false)) return p;
if(!!this.downloads[model]) return this.downloads[model];
this.downloads[model] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${model}`)
.then(resp => resp.arrayBuffer())
.then(arr => Buffer.from(arr)).then(async buffer => {
await fs.writeFile(p, buffer);
delete this.downloads[model];
return p;
});
return this.downloads[model];
}
}
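
A usage sketch for the relocated ASR helpers, assuming whisper.cpp was configured when Ai was constructed (the model name follows the ggml naming used by ggerganov/whisper.cpp; the file path is illustrative):

// Pre-fetch a model once; it is cached under options.whisper.path and '.bin' is appended automatically
await ai.audio.downloadAsrModel('ggml-base.en');

// Transcribe, keeping a handle to kill the whisper.cpp child process if needed
const {response, abort} = ai.audio.asr('./voicemail.wav', 'ggml-base.en');
const timer = setTimeout(abort, 60_000);         // SIGTERM the process after a minute
const text = await response;                     // transcription, or null when nothing was recognized
clearTimeout(timer);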

src/llm.ts

@@ -1,3 +1,4 @@
import {pipeline} from '@xenova/transformers';
import {JSONAttemptParse} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {Anthropic} from './antrhopic.ts';
@@ -5,6 +6,7 @@ import {Ollama} from './ollama.ts';
import {OpenAi} from './open-ai.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
import {AiTool} from './tools.ts';
import * as tf from '@tensorflow/tfjs';
export type LLMMessage = {
/** Message originator */
@@ -81,12 +83,14 @@ export type LLMRequest = {
}
export class LLM {
private embedModel: any;
private providers: {[key: string]: LLMProvider} = {};
constructor(public readonly ai: Ai, public readonly options: LLMOptions) {
if(options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, options.anthropic.token, options.anthropic.model);
if(options.ollama?.host) this.providers.ollama = new Ollama(this.ai, options.ollama.host, options.ollama.model);
if(options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, options.openAi.token, options.openAi.model);
constructor(public readonly ai: Ai) {
this.embedModel = pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
if(ai.options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, ai.options.anthropic.token, ai.options.anthropic.model);
if(ai.options.ollama?.host) this.providers.ollama = new Ollama(this.ai, ai.options.ollama.host, ai.options.ollama.model);
if(ai.options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, ai.options.openAi.token, ai.options.openAi.model);
}
/**
@@ -99,11 +103,11 @@ export class LLM {
let model: any = [null, null];
if(options.model) {
if(typeof options.model == 'object') model = options.model;
else model = [options.model, (<any>this.options)[options.model]?.model];
else model = [options.model, (<any>this.ai.options)[options.model]?.model];
}
if(!options.model || model[1] == null) {
if(typeof this.options.model == 'object') model = this.options.model;
else model = [this.options.model, (<any>this.options)[this.options.model]?.model];
if(typeof this.ai.options.model == 'object') model = this.ai.options.model;
else model = [this.ai.options.model, (<any>this.ai.options)[this.ai.options.model]?.model];
}
if(!model[0] || !model[1]) throw new Error(`Unknown LLM provider or model: ${model[0]} / ${model[1]}`);
return this.providers[model[0]].ask(message, {...options, model: model[1]});
@@ -117,7 +121,7 @@ export class LLM {
* @param {LLMRequest} options LLM options
* @returns {Promise<LLMMessage[]>} New chat history with the summary at index 0
*/
async compress(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
if(this.estimateTokens(history) < max) return history;
let keep = 0, tokens = 0;
for(let m of history.toReversed()) {
@@ -132,6 +136,57 @@ export class LLM {
return [{role: 'assistant', content: `Conversation Summary: ${summary}`, timestamp: Date.now()}, ...recent];
}
embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
const objString = (obj: any, path = ''): string[] => {
if(obj === null || obj === undefined) return [];
return Object.entries(obj).flatMap(([key, value]) => {
const p = path ? `${path}${isNaN(+key) ? `.${key}` : `[${key}]`}` : key;
if(typeof value === 'object' && value !== null && !Array.isArray(value)) return objString(value, p);
const valueStr = Array.isArray(value) ? value.join(', ') : String(value);
return `${p}: ${valueStr}`;
});
};
const embed = async (text: string): Promise<number[]> => {
const model = await this.embedModel;
const output = await model(text, {pooling: 'mean', normalize: true});
return Array.from(output.data);
};
// Tokenize
const lines = typeof target === 'object' ? objString(target) : target.split('\n');
const tokens = lines.flatMap(line => [...line.split(/\s+/).filter(w => w.trim()), '\n']);
// Chunk
const chunks: string[] = [];
let start = 0;
while (start < tokens.length) {
let end = start;
let text = '';
// Build chunk
while (end < tokens.length) {
const nextToken = tokens[end];
const testText = text + (text ? ' ' : '') + nextToken;
const testTokens = this.estimateTokens(testText.replace(/\s*\n\s*/g, '\n'));
if (testTokens > maxTokens && text) break;
text = testText;
end++;
}
// Save chunk
const cleanText = text.replace(/\s*\n\s*/g, '\n').trim();
if(cleanText) chunks.push(cleanText);
start = end - overlapTokens;
if (start <= end - tokens.length + end) start = end; // Safety: prevent infinite loop
}
return Promise.all(chunks.map(async (text, index) => ({
index,
embedding: await embed(text),
text,
tokens: this.estimateTokens(text),
})));
}
/**
* Estimate variable as tokens
* @param history Object to size
@@ -142,6 +197,35 @@ export class LLM {
return Math.ceil((text.length / 4) * 1.2);
}
/**
* Compare the difference between two strings using tensor math
* @param target Text that will be checked
* @param {string} searchTerms Multiple search terms to check against target
* @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
*/
fuzzyMatch(target: string, ...searchTerms: string[]) {
if(searchTerms.length < 2) throw new Error('Requires at least 2 strings to compare');
const vector = (text: string, dimensions: number = 10): number[] => {
return text.toLowerCase().split('').map((char, index) =>
(char.charCodeAt(0) * (index + 1)) % dimensions / dimensions).slice(0, dimensions);
}
const cosineSimilarity = (v1: number[], v2: number[]): number => {
if (v1.length !== v2.length) throw new Error('Vectors must be same length');
const tensor1 = tf.tensor1d(v1), tensor2 = tf.tensor1d(v2)
const dotProduct = tf.dot(tensor1, tensor2)
const magnitude1 = tf.norm(tensor1)
const magnitude2 = tf.norm(tensor2)
if(magnitude1.dataSync()[0] === 0 || magnitude2.dataSync()[0] === 0) return 0
return dotProduct.dataSync()[0] / (magnitude1.dataSync()[0] * magnitude2.dataSync()[0])
}
const v = vector(target);
const similarities = searchTerms.map(t => vector(t)).map(refVector => cosineSimilarity(v, refVector))
return {avg: similarities.reduce((acc, s) => acc + s, 0) / similarities.length, max: Math.max(...similarities), similarities}
}
/**
* Ask a question with JSON response
* @param {string} message Question
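
The new helpers compose into a small retrieval flow: embedding() splits an object or string into overlapping chunks and embeds each with MiniLM, estimateTokens() sizes text at roughly length / 4 * 1.2, and fuzzyMatch() is a cheap non-semantic fallback. A ranking sketch, assuming the ai instance from the earlier sketch (dot() is a local helper, not part of the library; because the embeddings are normalized, a dot product equals cosine similarity):

const doc = 'Install the whisper binary.\nPoint options.whisper.path at a writable model directory.';

// Chunk and embed (defaults: 500 tokens per chunk, 50 tokens of overlap)
const chunks = await ai.language.embedding(doc);
const [query] = await ai.language.embedding('how do I configure whisper?');

const dot = (a: number[], b: number[]) => a.reduce((sum, v, i) => sum + v * b[i], 0);
const ranked = chunks
  .map(c => ({...c, score: dot(query.embedding, c.embedding)}))
  .sort((a, b) => b.score - a.score);
console.log(ranked[0].text, ranked[0].tokens);

// Cheap character-hash comparison; requires at least two search terms
const {max, similarities} = ai.language.fuzzyMatch('whisper model download', 'whisper', 'download');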

src/ollama.ts

@@ -46,7 +46,7 @@ export class Ollama extends LLMProvider {
if(!system) system = history.shift();
else history.shift();
}
if(options.compress) history = await this.ai.llm.compress(<any>history, options.compress.max, options.compress.min);
if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min);
if(options.system) history.unshift({role: 'system', content: system})
const requestParams: any = {
@@ -72,12 +72,12 @@ export class Ollama extends LLMProvider {
}))
}
let resp: any;
const loopMessages: any[] = [];
let resp: any, isFirstMessage = true;
do {
resp = await this.client.chat(requestParams);
if(options.stream) {
if(loopMessages.length) options.stream({text: '\n\n'});
if(!isFirstMessage) options.stream({text: '\n\n'});
else isFirstMessage = false;
resp.message = {role: 'assistant', content: '', tool_calls: []};
for await (const chunk of resp) {
if(controller.signal.aborted) break;
@@ -90,7 +90,6 @@ export class Ollama extends LLMProvider {
}
}
loopMessages.push({role: 'assistant', content: resp.message?.content, timestamp: Date.now()});
if(resp.message?.tool_calls?.length && !controller.signal.aborted) {
history.push(resp.message);
const results = await Promise.all(resp.message.tool_calls.map(async (toolCall: any) => {
@@ -105,15 +104,12 @@ export class Ollama extends LLMProvider {
}
}));
history.push(...results);
loopMessages.push(...results.map(r => ({...r, timestamp: Date.now()})));
requestParams.messages = history;
}
} while (!controller.signal.aborted && resp.message?.tool_calls?.length);
const combinedContent = loopMessages.filter(m => m.role === 'assistant')
.map(m => m.content).filter(c => c).join('\n\n');
if(options.stream) options.stream({done: true});
res(this.toStandard([...history, {role: 'assistant', content: combinedContent, timestamp: Date.now()}]));
res(this.toStandard([...history, {role: 'assistant', content: resp.message?.content}]));
});
return Object.assign(response, {abort: () => controller.abort()});

src/open-ai.ts

@@ -65,7 +65,7 @@ export class OpenAi extends LLMProvider {
const controller = new AbortController();
const response = new Promise<any>(async (res, rej) => {
let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
if(options.compress) history = await this.ai.llm.compress(<any>history, options.compress.max, options.compress.min, options);
if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
const requestParams: any = {
model: options.model || this.model,
@@ -87,12 +87,12 @@ export class OpenAi extends LLMProvider {
}))
};
let resp: any;
const loopMessages: any[] = [];
let resp: any, isFirstMessage = true;
do {
resp = await this.client.chat.completions.create(requestParams);
if(options.stream) {
if(loopMessages.length) options.stream({text: '\n\n'});
if(!isFirstMessage) options.stream({text: '\n\n'});
else isFirstMessage = false;
resp.choices = [{message: {content: '', tool_calls: []}}];
for await (const chunk of resp) {
if(controller.signal.aborted) break;
@@ -106,8 +106,6 @@ export class OpenAi extends LLMProvider {
}
}
loopMessages.push({role: 'assistant', content: resp.choices[0].message.content || '', timestamp: Date.now()});
const toolCalls = resp.choices[0].message.tool_calls || [];
if(toolCalls.length && !controller.signal.aborted) {
history.push(resp.choices[0].message);
@@ -123,15 +121,12 @@ export class OpenAi extends LLMProvider {
}
}));
history.push(...results);
loopMessages.push(...results.map(r => ({...r, timestamp: Date.now()})));
requestParams.messages = history;
}
} while (!controller.signal.aborted && resp.choices?.[0]?.message?.tool_calls?.length);
const combinedContent = loopMessages.filter(m => m.role === 'assistant')
.map(m => m.content).filter(c => c).join('\n\n');
if(options.stream) options.stream({done: true});
res(this.toStandard([...history, {role: 'assistant', content: combinedContent, timestamp: Date.now()}]));
res(this.toStandard([...history, {role: 'assistant', content: resp.choices[0].message.content || ''}]));
});
return Object.assign(response, {abort: () => controller.abort()});
}

src/vision.ts (new file): 25 changed lines

@@ -0,0 +1,25 @@
import {createWorker} from 'tesseract.js';
import {Ai} from './ai.ts';
export class Vision {
constructor(private ai: Ai) { }
/**
* Convert image to text using Optical Character Recognition
* @param {string} path Path to image
* @returns {{abort: Function, response: Promise<string | null>}} Abort function & Promise of extracted text
*/
ocr(path: string): {abort: () => void, response: Promise<string | null>} {
let worker: any;
return {
abort: () => { worker?.terminate(); },
response: new Promise(async res => {
worker = await createWorker('eng');
const {data} = await worker.recognize(path);
await worker.terminate();
res(data.text.trim() || null);
})
}
}
}
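
And the matching sketch for the relocated OCR helper, which wraps a tesseract.js worker with the English model (file path illustrative, ai instance from the earlier sketch):

const {response, abort} = ai.vision.ocr('./scan.png');
// abort() terminates the tesseract.js worker if recognition is still in flight
const text = await response;                     // recognized text, or null for an empty page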