Compare commits
18 Commits
| SHA1 |
|---|
| 435c6127b1 |
| c896b585d0 |
| 1fe1e0cafe |
| 3aa4684923 |
| 0730f5f3f9 |
| 1a0351aeef |
| a5ed4076b7 |
| 0112c92505 |
| 2351f590b5 |
| 2c2acef84e |
| a6de121551 |
| 31d9ee4390 |
| d69bea3b38 |
| af4b09173c |
| 904cc10639 |
| 07f9593b6a |
| af42506174 |
| 08e105b033 |
package-lock.json (1168 changed lines, generated)
File diff suppressed because it is too large.
package.json

@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.1.5",
+  "version": "0.2.0",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",
@@ -16,7 +16,7 @@
     ".": {
       "types": "./dist/index.d.ts",
       "import": "./dist/index.mjs",
-      "require": "./dist/index.cjs"
+      "require": "./dist/index.js"
     }
   },
   "scripts": {
@@ -27,6 +27,7 @@
   "dependencies": {
     "@anthropic-ai/sdk": "^0.67.0",
     "@tensorflow/tfjs": "^4.22.0",
+    "@xenova/transformers": "^2.17.2",
     "@ztimson/node-utils": "^1.0.4",
     "@ztimson/utils": "^0.27.9",
     "ollama": "^0.6.0",
@@ -37,7 +38,7 @@
     "@types/node": "^24.8.1",
     "typedoc": "^0.26.7",
     "typescript": "^5.3.3",
-    "vite": "^5.0.12",
+    "vite": "^7.2.7",
     "vite-plugin-dts": "^4.5.3"
   },
   "files": [
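The `exports` change keeps ESM consumers on `./dist/index.mjs` but points `require` at the renamed `./dist/index.js` emitted by the updated Vite config at the bottom of this diff. A minimal consumer sketch; that the package entry point re-exports the `Ai` class is an assumption:

```ts
// ESM — resolves ./dist/index.mjs through the "import" condition
import {Ai} from '@ztimson/ai-utils';

// CommonJS — now resolves ./dist/index.js through the "require" condition
// const {Ai} = require('@ztimson/ai-utils');

const ai = new Ai({}); // options shape is AiOptions; see src/ai.ts below
```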
src/ai.ts (113 changed lines)

@@ -1,115 +1,32 @@
-import {$} from '@ztimson/node-utils';
-import {createWorker} from 'tesseract.js';
 import {LLM, LLMOptions} from './llm';
-import fs from 'node:fs/promises';
-import Path from 'node:path';
-import * as tf from '@tensorflow/tfjs';
+import { Audio } from './audio.ts';
+import {Vision} from './vision.ts';
 
 export type AiOptions = LLMOptions & {
   whisper?: {
     /** Whisper binary location */
     binary: string;
-    /** Model */
-    model: WhisperModel;
-    /** Working directory for models and temporary files */
+    /** Model: `ggml-base.en.bin` */
+    model: string;
+    /** Path to models */
    path: string;
   }
 }
 
-export type WhisperModel = 'tiny' | 'base' | 'small' | 'medium' | 'large';
-
 export class Ai {
-  private downloads: {[key: string]: Promise<void>} = {};
+  private downloads: {[key: string]: Promise<string>} = {};
   private whisperModel!: string;
 
-  /** Large Language Models */
-  llm!: LLM;
+  /** Audio processing AI */
+  audio!: Audio;
+  /** Language processing AI */
+  language!: LLM;
+  /** Vision processing AI */
+  vision!: Vision;
 
   constructor(public readonly options: AiOptions) {
-    this.llm = new LLM(this, options);
-    if(this.options.whisper?.binary) this.downloadAsrModel(this.options.whisper.model);
-  }
-
-  /**
-   * Convert audio to text using Auditory Speech Recognition
-   * @param {string} path Path to audio
-   * @param model Whisper model
-   * @returns {Promise<any>} Extracted text
-   */
-  async asr(path: string, model?: WhisperModel): Promise<string | null> {
-    if(!this.options.whisper?.binary) throw new Error('Whisper not configured');
-    if(!model) model = this.options.whisper.model;
-    await this.downloadAsrModel(<string>model);
-    const name = Math.random().toString(36).substring(2, 10) + '-' + path.split('/').pop();
-    const output = Path.join(this.options.whisper.path || '/tmp', name);
-    await $`rm -f /tmp/${name}.txt && ${this.options.whisper.binary} -nt -np -m ${this.whisperModel} -f ${path} -otxt -of ${output}`;
-    return fs.readFile(output, 'utf-8').then(text => text?.trim() || null)
-      .finally(() => fs.rm(output, {force: true}).catch(() => {}));
-  }
-
-  /**
-   * Downloads the specified Whisper model if it is not already present locally.
-   *
-   * @param {string} model Whisper model that will be downloaded
-   * @return {Promise<void>} A promise that resolves once the model is downloaded and saved locally.
-   */
-  async downloadAsrModel(model: string): Promise<void> {
-    if(!this.options.whisper?.binary) throw new Error('Whisper not configured');
-    this.whisperModel = Path.join(<string>this.options.whisper?.path, this.options.whisper?.model + '.bin');
-    if(await fs.stat(this.whisperModel).then(() => true).catch(() => false)) return;
-    if(!!this.downloads[model]) return this.downloads[model];
-    this.downloads[model] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${this.options.whisper?.model}.bin`)
-      .then(resp => resp.arrayBuffer()).then(arr => Buffer.from(arr)).then(async buffer => {
-        await fs.writeFile(this.whisperModel, buffer);
-        delete this.downloads[model];
-      });
-    return this.downloads[model];
-  }
-
-  /**
-   * Convert image to text using Optical Character Recognition
-   * @param {string} path Path to image
-   * @returns {{abort: Function, response: Promise<string | null>}} Abort function & Promise of extracted text
-   */
-  ocr(path: string): {abort: () => void, response: Promise<string | null>} {
-    let worker: any;
-    return {
-      abort: () => { worker?.terminate(); },
-      response: new Promise(async res => {
-        worker = await createWorker('eng');
-        const {data} = await worker.recognize(path);
-        await worker.terminate();
-        res(data.text.trim() || null);
-      })
-    }
-  }
-
-  /**
-   * Compare the difference between two strings using tensor math
-   * @param target Text that will checked
-   * @param {string} searchTerms Multiple search terms to check against target
-   * @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
-   */
-  semanticSimilarity(target: string, ...searchTerms: string[]) {
-    if(searchTerms.length < 2) throw new Error('Requires at least 2 strings to compare');
-
-    const vector = (text: string, dimensions: number = 10): number[] => {
-      return text.toLowerCase().split('').map((char, index) =>
-        (char.charCodeAt(0) * (index + 1)) % dimensions / dimensions).slice(0, dimensions);
-    }
-
-    const cosineSimilarity = (v1: number[], v2: number[]): number => {
-      if (v1.length !== v2.length) throw new Error('Vectors must be same length');
-      const tensor1 = tf.tensor1d(v1), tensor2 = tf.tensor1d(v2)
-      const dotProduct = tf.dot(tensor1, tensor2)
-      const magnitude1 = tf.norm(tensor1)
-      const magnitude2 = tf.norm(tensor2)
-      if(magnitude1.dataSync()[0] === 0 || magnitude2.dataSync()[0] === 0) return 0
-      return dotProduct.dataSync()[0] / (magnitude1.dataSync()[0] * magnitude2.dataSync()[0])
-    }
-
-    const v = vector(target);
-    const similarities = searchTerms.map(t => vector(t)).map(refVector => cosineSimilarity(v, refVector))
-    return {avg: similarities.reduce((acc, s) => acc + s, 0) / similarities.length, max: Math.max(...similarities), similarities}
+    this.audio = new Audio(this);
+    this.language = new LLM(this);
+    this.vision = new Vision(this);
   }
 }
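Net effect of the rewrite: `Ai` becomes a thin facade. ASR moves to `Audio`, OCR to `Vision`, and the LLM plumbing (plus the old `semanticSimilarity`, which resurfaces as `fuzzyMatch` in src/llm.ts below) lands under `ai.language`. A usage sketch of the new surface; paths and model name are illustrative:

```ts
import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({
  whisper: {
    binary: '/usr/local/bin/whisper-cli', // illustrative install location
    model: 'ggml-base.en',                // '.bin' is appended automatically
    path: '/tmp/whisper-models',          // downloaded models are cached here
  },
});

async function main() {
  // Previously ai.asr(...) and ai.ocr(...); now namespaced:
  const speech = await ai.audio.asr('./meeting.wav').response;
  const text = await ai.vision.ocr('./receipt.png').response;
  console.log(speech, text);
}
main();
```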
src/antrhopic.ts

@@ -1,5 +1,5 @@
 import {Anthropic as anthropic} from '@anthropic-ai/sdk';
-import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
+import {findByProp, objectMap, JSONSanitize, JSONAttemptParse, deepCopy} from '@ztimson/utils';
 import {Ai} from './ai.ts';
 import {LLMMessage, LLMRequest} from './llm.ts';
 import {AbortablePromise, LLMProvider} from './provider.ts';
@@ -19,7 +19,7 @@ export class Anthropic extends LLMProvider {
       if(history[orgI].role == 'assistant') {
         history[orgI].content.filter((c: any) => c.type =='tool_use').forEach((c: any) => {
           i++;
-          history.splice(i, 0, {role: 'tool', id: c.id, name: c.name, args: c.input});
+          history.splice(i, 0, {role: 'tool', id: c.id, name: c.name, args: c.input, timestamp: Date.now()});
         });
       } else if(history[orgI].role == 'user') {
         history[orgI].content.filter((c: any) => c.type =='tool_result').forEach((c: any) => {
@@ -29,6 +29,7 @@ export class Anthropic extends LLMProvider {
         }
         history[orgI].content = history[orgI].content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
       }
+      if(!history[orgI].timestamp) history[orgI].timestamp = Date.now();
     }
     return history.filter(h => !!h.content);
   }
@@ -44,14 +45,15 @@ export class Anthropic extends LLMProvider {
         i++;
       }
     }
-    return history;
+    return history.map(({timestamp, ...h}) => h);
   }
 
   ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
     const controller = new AbortController();
     const response = new Promise<any>(async (res, rej) => {
-      let history = this.fromStandard([...options.history || [], {role: 'user', content: message}]);
-      if(options.compress) history = await this.ai.llm.compress(<any>history, options.compress.max, options.compress.min, options);
+      let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
+      const original = deepCopy(history);
+      if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
       const requestParams: any = {
         model: options.model || this.model,
         max_tokens: options.max_tokens || this.ai.options.max_tokens || 4096,
@@ -71,13 +73,15 @@ export class Anthropic extends LLMProvider {
         stream: !!options.stream,
       };
 
       // Run tool changes
-      let resp: any;
+      let resp: any, isFirstMessage = true;
+      const assistantMessages: string[] = [];
       do {
         resp = await this.client.messages.create(requestParams);
 
         // Streaming mode
         if(options.stream) {
+          if(!isFirstMessage) options.stream({text: '\n\n'});
+          else isFirstMessage = false;
           resp.content = [];
           for await (const chunk of resp) {
             if(controller.signal.aborted) break;
@@ -108,6 +112,7 @@ export class Anthropic extends LLMProvider {
         const toolCalls = resp.content.filter((c: any) => c.type === 'tool_use');
         if(toolCalls.length && !controller.signal.aborted) {
           history.push({role: 'assistant', content: resp.content});
+          original.push({role: 'assistant', content: resp.content});
           const results = await Promise.all(toolCalls.map(async (toolCall: any) => {
             const tool = options.tools?.find(findByProp('name', toolCall.name));
             if(!tool) return {tool_use_id: toolCall.id, is_error: true, content: 'Tool not found'};
@@ -122,12 +127,11 @@ export class Anthropic extends LLMProvider {
           requestParams.messages = history;
         }
       } while (!controller.signal.aborted && resp.content.some((c: any) => c.type === 'tool_use'));
 
       if(options.stream) options.stream({done: true});
-      res(this.toStandard([...history, {
-        role: 'assistant',
-        content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')
-      }]));
+      res(this.toStandard([...history, {role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')}]));
     });
 
     return Object.assign(response, {abort: () => controller.abort()});
   }
 }
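The pattern here repeats across all three providers: `toStandard` back-fills `timestamp: Date.now()` on any message missing one, and `fromStandard` strips the field again before the payload goes out, presumably because provider APIs reject unexpected message keys. A standalone sketch of the strip step, with the message shape simplified:

```ts
type StampedMessage = {role: string; content: string; timestamp?: number};

// Outbound: drop the library-internal timestamp via rest destructuring,
// mirroring `history.map(({timestamp, ...h}) => h)` above
function toWire(history: StampedMessage[]): Omit<StampedMessage, 'timestamp'>[] {
  return history.map(({timestamp, ...h}) => h);
}

const wire = toWire([{role: 'user', content: 'Hello', timestamp: Date.now()}]);
console.log(wire); // [{role: 'user', content: 'Hello'}]
```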
src/audio.ts (new file, 63 lines)

@@ -0,0 +1,63 @@
+import {spawn} from 'node:child_process';
+import fs from 'node:fs/promises';
+import Path from 'node:path';
+import {Ai} from './ai.ts';
+
+export class Audio {
+  private downloads: {[key: string]: Promise<string>} = {};
+  private whisperModel!: string;
+
+  constructor(private ai: Ai) {
+    if(ai.options.whisper?.binary) {
+      this.whisperModel = ai.options.whisper?.model.endsWith('.bin') ? ai.options.whisper?.model : ai.options.whisper?.model + '.bin';
+      this.downloadAsrModel();
+    }
+  }
+
+  /**
+   * Convert audio to text using Auditory Speech Recognition
+   * @param {string} path Path to audio
+   * @param model Whisper model
+   * @returns {Promise<any>} Extracted text
+   */
+  asr(path: string, model: string = this.whisperModel): {abort: () => void, response: Promise<string | null>} {
+    if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
+    let abort: any = () => {};
+    const response = new Promise<string | null>((resolve, reject) => {
+      this.downloadAsrModel(model).then(m => {
+        let output = '';
+        const proc = spawn(<string>this.ai.options.whisper?.binary, ['-nt', '-np', '-m', m, '-f', path], {stdio: ['ignore', 'pipe', 'ignore']});
+        abort = () => proc.kill('SIGTERM');
+        proc.on('error', (err: Error) => reject(err));
+        proc.stdout.on('data', (data: Buffer) => output += data.toString());
+        proc.on('close', (code: number) => {
+          if(code === 0) resolve(output.trim() || null);
+          else reject(new Error(`Exit code ${code}`));
+        });
+      });
+    });
+    return {response, abort};
+  }
+
+  /**
+   * Downloads the specified Whisper model if it is not already present locally.
+   *
+   * @param {string} model Whisper model that will be downloaded
+   * @return {Promise<string>} Absolute path to model file, resolves once downloaded
+   */
+  async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
+    if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
+    if(!model.endsWith('.bin')) model += '.bin';
+    const p = Path.join(this.ai.options.whisper.path, model);
+    if(await fs.stat(p).then(() => true).catch(() => false)) return p;
+    if(!!this.downloads[model]) return this.downloads[model];
+    this.downloads[model] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${model}`)
+      .then(resp => resp.arrayBuffer())
+      .then(arr => Buffer.from(arr)).then(async buffer => {
+        await fs.writeFile(p, buffer);
+        delete this.downloads[model];
+        return p;
+      });
+    return this.downloads[model];
+  }
+}
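Compared with the deleted `Ai.asr`, this version drops the `$` shell-out and the `-otxt` temp file, reads the transcript straight from whisper's stdout, and exposes an abort handle like `ocr` already did. A usage sketch, continuing from the `Ai` instance in the src/ai.ts sketch above:

```ts
// `ai` is the Ai instance from the src/ai.ts sketch above
const {response, abort} = ai.audio.asr('./call-recording.wav');

// Kill the whisper process if transcription takes more than 30s
const timer = setTimeout(() => abort(), 30_000);

response
  .then(text => console.log(text ?? 'No speech detected'))
  .catch(err => console.error('ASR failed:', err))
  .finally(() => clearTimeout(timer));
```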
src/llm.ts (106 changed lines)

@@ -1,3 +1,4 @@
+import {pipeline} from '@xenova/transformers';
 import {JSONAttemptParse} from '@ztimson/utils';
 import {Ai} from './ai.ts';
 import {Anthropic} from './antrhopic.ts';
@@ -5,12 +6,15 @@ import {Ollama} from './ollama.ts';
 import {OpenAi} from './open-ai.ts';
 import {AbortablePromise, LLMProvider} from './provider.ts';
 import {AiTool} from './tools.ts';
+import * as tf from '@tensorflow/tfjs';
 
 export type LLMMessage = {
   /** Message originator */
   role: 'assistant' | 'system' | 'user';
   /** Message content */
   content: string | any;
+  /** Timestamp */
+  timestamp?: number;
 } | {
   /** Tool call */
   role: 'tool';
@@ -24,6 +28,8 @@ export type LLMMessage = {
   content: undefined | string;
   /** Tool error */
   error: undefined | string;
+  /** Timestamp */
+  timestamp?: number;
 }
 
 export type LLMOptions = {
@@ -77,12 +83,14 @@ export type LLMRequest = {
 }
 
 export class LLM {
+  private embedModel: any;
   private providers: {[key: string]: LLMProvider} = {};
 
-  constructor(public readonly ai: Ai, public readonly options: LLMOptions) {
-    if(options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, options.anthropic.token, options.anthropic.model);
-    if(options.ollama?.host) this.providers.ollama = new Ollama(this.ai, options.ollama.host, options.ollama.model);
-    if(options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, options.openAi.token, options.openAi.model);
+  constructor(public readonly ai: Ai) {
+    this.embedModel = pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
+    if(ai.options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, ai.options.anthropic.token, ai.options.anthropic.model);
+    if(ai.options.ollama?.host) this.providers.ollama = new Ollama(this.ai, ai.options.ollama.host, ai.options.ollama.model);
+    if(ai.options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, ai.options.openAi.token, ai.options.openAi.model);
   }
 
   /**
@@ -95,11 +103,11 @@ export class LLM {
     let model: any = [null, null];
     if(options.model) {
       if(typeof options.model == 'object') model = options.model;
-      else model = [options.model, (<any>this.options)[options.model]?.model];
+      else model = [options.model, (<any>this.ai.options)[options.model]?.model];
     }
     if(!options.model || model[1] == null) {
-      if(typeof this.options.model == 'object') model = this.options.model;
-      else model = [this.options.model, (<any>this.options)[this.options.model]?.model];
+      if(typeof this.ai.options.model == 'object') model = this.ai.options.model;
+      else model = [this.ai.options.model, (<any>this.ai.options)[this.ai.options.model]?.model];
     }
     if(!model[0] || !model[1]) throw new Error(`Unknown LLM provider or model: ${model[0]} / ${model[1]}`);
     return this.providers[model[0]].ask(message, {...options, model: model[1]});
@@ -113,7 +121,7 @@ export class LLM {
   * @param {LLMRequest} options LLM options
   * @returns {Promise<LLMMessage[]>} New chat history will summary at index 0
   */
-  async compress(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
+  async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
     if(this.estimateTokens(history) < max) return history;
     let keep = 0, tokens = 0;
     for(let m of history.toReversed()) {
@@ -125,7 +133,58 @@ export class LLM {
     const recent = keep == 0 ? [] : history.slice(-keep),
       process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
     const summary = await this.summarize(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), 250, options);
-    return [{role: 'assistant', content: `Conversation Summary: ${summary}`}, ...recent];
+    return [{role: 'assistant', content: `Conversation Summary: ${summary}`, timestamp: Date.now()}, ...recent];
   }
 
+  embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
+    const objString = (obj: any, path = ''): string[] => {
+      if(obj === null || obj === undefined) return [];
+      return Object.entries(obj).flatMap(([key, value]) => {
+        const p = path ? `${path}${isNaN(+key) ? `.${key}` : `[${key}]`}` : key;
+        if(typeof value === 'object' && value !== null && !Array.isArray(value)) return objString(value, p);
+        const valueStr = Array.isArray(value) ? value.join(', ') : String(value);
+        return `${p}: ${valueStr}`;
+      });
+    };
+
+    const embed = async (text: string): Promise<number[]> => {
+      const model = await this.embedModel;
+      const output = await model(text, {pooling: 'mean', normalize: true});
+      return Array.from(output.data);
+    };
+
+    // Tokenize
+    const lines = typeof target === 'object' ? objString(target) : target.split('\n');
+    const tokens = lines.flatMap(line => [...line.split(/\s+/).filter(w => w.trim()), '\n']);
+
+    // Chunk
+    const chunks: string[] = [];
+    let start = 0;
+    while (start < tokens.length) {
+      let end = start;
+      let text = '';
+      // Build chunk
+      while (end < tokens.length) {
+        const nextToken = tokens[end];
+        const testText = text + (text ? ' ' : '') + nextToken;
+        const testTokens = this.estimateTokens(testText.replace(/\s*\n\s*/g, '\n'));
+        if (testTokens > maxTokens && text) break;
+        text = testText;
+        end++;
+      }
+      // Save chunk
+      const cleanText = text.replace(/\s*\n\s*/g, '\n').trim();
+      if(cleanText) chunks.push(cleanText);
+      start = end - overlapTokens;
+      if (start <= end - tokens.length + end) start = end; // Safety: prevent infinite loop
+    }
+
+    return Promise.all(chunks.map(async (text, index) => ({
+      index,
+      embedding: await embed(text),
+      text,
+      tokens: this.estimateTokens(text),
+    })));
+  }
+
   /**
@@ -138,6 +197,35 @@ export class LLM {
     return Math.ceil((text.length / 4) * 1.2);
   }
 
+  /**
+   * Compare the difference between two strings using tensor math
+   * @param target Text that will checked
+   * @param {string} searchTerms Multiple search terms to check against target
+   * @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
+   */
+  fuzzyMatch(target: string, ...searchTerms: string[]) {
+    if(searchTerms.length < 2) throw new Error('Requires at least 2 strings to compare');
+
+    const vector = (text: string, dimensions: number = 10): number[] => {
+      return text.toLowerCase().split('').map((char, index) =>
+        (char.charCodeAt(0) * (index + 1)) % dimensions / dimensions).slice(0, dimensions);
+    }
+
+    const cosineSimilarity = (v1: number[], v2: number[]): number => {
+      if (v1.length !== v2.length) throw new Error('Vectors must be same length');
+      const tensor1 = tf.tensor1d(v1), tensor2 = tf.tensor1d(v2)
+      const dotProduct = tf.dot(tensor1, tensor2)
+      const magnitude1 = tf.norm(tensor1)
+      const magnitude2 = tf.norm(tensor2)
+      if(magnitude1.dataSync()[0] === 0 || magnitude2.dataSync()[0] === 0) return 0
+      return dotProduct.dataSync()[0] / (magnitude1.dataSync()[0] * magnitude2.dataSync()[0])
+    }
+
+    const v = vector(target);
+    const similarities = searchTerms.map(t => vector(t)).map(refVector => cosineSimilarity(v, refVector))
+    return {avg: similarities.reduce((acc, s) => acc + s, 0) / similarities.length, max: Math.max(...similarities), similarities}
+  }
+
   /**
   * Ask a question with JSON response
   * @param {string} message Question
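Two notable additions here: `embedding()` flattens objects into `path: value` lines, greedily packs whitespace tokens into chunks of at most `maxTokens` (estimated) with `overlapTokens` of overlap, then embeds each chunk through the lazily-loaded `Xenova/all-MiniLM-L6-v2` pipeline; `fuzzyMatch` is the old `Ai.semanticSimilarity` relocated verbatim. A usage sketch:

```ts
// `ai` is the Ai instance from the src/ai.ts sketch above
// Chunked embeddings for retrieval; all-MiniLM-L6-v2 yields 384-dim vectors
const chunks = await ai.language.embedding({
  user: {name: 'Ada', roles: ['admin', 'ops']},
  note: 'Prefers short replies',
});
for(const c of chunks) console.log(c.index, c.tokens, c.embedding.length);

// Character-code similarity in [0, 1] against two or more search terms
const {avg, max, similarities} = ai.language.fuzzyMatch('kubernetes', 'k8s', 'cabinet');
```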
src/ollama.ts

@@ -22,15 +22,17 @@ export class Ollama extends LLMProvider {
         }
       } else if(history[i].role == 'tool') {
         const error = history[i].content.startsWith('{"error":');
-        history[i] = {role: 'tool', name: history[i].tool_name, args: history[i].args, [error ? 'error' : 'content']: history[i].content};
+        history[i] = {role: 'tool', name: history[i].tool_name, args: history[i].args, [error ? 'error' : 'content']: history[i].content, timestamp: history[i].timestamp};
       }
+      if(!history[i]?.timestamp) history[i].timestamp = Date.now();
     }
     return history;
   }
 
   private fromStandard(history: LLMMessage[]): any[] {
     return history.map((h: any) => {
-      if(h.role != 'tool') return h;
+      const {timestamp, ...rest} = h;
+      if(h.role != 'tool') return rest;
       return {role: 'tool', tool_name: h.name, content: h.error || h.content}
     });
   }
@@ -39,12 +41,12 @@ export class Ollama extends LLMProvider {
     const controller = new AbortController();
     const response = new Promise<any>(async (res, rej) => {
       let system = options.system || this.ai.options.system;
-      let history = this.fromStandard([...options.history || [], {role: 'user', content: message}]);
+      let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
       if(history[0].roll == 'system') {
         if(!system) system = history.shift();
         else history.shift();
       }
-      if(options.compress) history = await this.ai.llm.compress(<any>history, options.compress.max, options.compress.min);
+      if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min);
       if(options.system) history.unshift({role: 'system', content: system})
 
       const requestParams: any = {
@@ -70,11 +72,12 @@ export class Ollama extends LLMProvider {
         }))
       }
 
       // Run tool chains
-      let resp: any;
+      let resp: any, isFirstMessage = true;
       do {
         resp = await this.client.chat(requestParams);
         if(options.stream) {
+          if(!isFirstMessage) options.stream({text: '\n\n'});
+          else isFirstMessage = false;
           resp.message = {role: 'assistant', content: '', tool_calls: []};
           for await (const chunk of resp) {
             if(controller.signal.aborted) break;
@@ -87,7 +90,6 @@ export class Ollama extends LLMProvider {
           }
         }
 
-        // Run tools
         if(resp.message?.tool_calls?.length && !controller.signal.aborted) {
           history.push(resp.message);
           const results = await Promise.all(resp.message.tool_calls.map(async (toolCall: any) => {
@@ -105,9 +107,11 @@ export class Ollama extends LLMProvider {
           requestParams.messages = history;
         }
       } while (!controller.signal.aborted && resp.message?.tool_calls?.length);
 
+      if(options.stream) options.stream({done: true});
       res(this.toStandard([...history, {role: 'assistant', content: resp.message?.content}]));
     });
 
     return Object.assign(response, {abort: () => controller.abort()});
   }
 }
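The shared `isFirstMessage` flag means that when a tool chain produces several assistant turns, streamed output gets a `\n\n` separator between turns instead of running together. A streaming sketch; it assumes the resolver method on `LLM` is `ask`, matching the `providers[model[0]].ask(...)` dispatch in src/llm.ts:

```ts
// `ai` is the Ai instance from the src/ai.ts sketch above
const reply = ai.language.ask('Summarize the meeting notes', {
  model: 'ollama', // select provider by name; model comes from ai.options.ollama.model
  stream: chunk => {
    if(chunk.text) process.stdout.write(chunk.text); // '\n\n' separates tool-chain turns
    if(chunk.done) process.stdout.write('\n');
  },
});

// ask() returns an AbortablePromise; abort() cancels mid-stream
setTimeout(() => reply.abort(), 60_000);
const history = await reply; // normalized LLMMessage[] with timestamps
```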
src/open-ai.ts

@@ -20,7 +20,8 @@ export class OpenAi extends LLMProvider {
         role: 'tool',
         id: tc.id,
         name: tc.function.name,
-        args: JSONAttemptParse(tc.function.arguments, {})
+        args: JSONAttemptParse(tc.function.arguments, {}),
+        timestamp: h.timestamp
       }));
       history.splice(i, 1, ...tools);
       i += tools.length - 1;
@@ -33,7 +34,7 @@ export class OpenAi extends LLMProvider {
         history.splice(i, 1);
         i--;
       }
-
+      if(!history[i]?.timestamp) history[i].timestamp = Date.now();
     }
     return history;
   }
@@ -46,14 +47,15 @@ export class OpenAi extends LLMProvider {
           content: null,
           tool_calls: [{ id: h.id, type: 'function', function: { name: h.name, arguments: JSON.stringify(h.args) } }],
           refusal: null,
-          annotations: [],
+          annotations: []
         }, {
           role: 'tool',
           tool_call_id: h.id,
           content: h.error || h.content
         });
       } else {
-        result.push(h);
+        const {timestamp, ...rest} = h;
+        result.push(rest);
       }
       return result;
     }, [] as any[]);
@@ -62,8 +64,8 @@ export class OpenAi extends LLMProvider {
   ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
     const controller = new AbortController();
     const response = new Promise<any>(async (res, rej) => {
-      let history = this.fromStandard([...options.history || [], {role: 'user', content: message}]);
-      if(options.compress) history = await this.ai.llm.compress(<any>history, options.compress.max, options.compress.min, options);
+      let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
+      if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
 
       const requestParams: any = {
         model: options.model || this.model,
@@ -85,23 +87,25 @@ export class OpenAi extends LLMProvider {
         }))
       };
 
       // Tool call and streaming logic similar to other providers
-      let resp: any;
+      let resp: any, isFirstMessage = true;
       do {
         resp = await this.client.chat.completions.create(requestParams);
 
         // Implement streaming and tool call handling
         if(options.stream) {
-          resp.choices = [];
+          if(!isFirstMessage) options.stream({text: '\n\n'});
+          else isFirstMessage = false;
+          resp.choices = [{message: {content: '', tool_calls: []}}];
           for await (const chunk of resp) {
             if(controller.signal.aborted) break;
             if(chunk.choices[0].delta.content) {
               resp.choices[0].message.content += chunk.choices[0].delta.content;
               options.stream({text: chunk.choices[0].delta.content});
             }
             if(chunk.choices[0].delta.tool_calls) {
               resp.choices[0].message.tool_calls = chunk.choices[0].delta.tool_calls;
             }
           }
         }
 
         // Run tools
         const toolCalls = resp.choices[0].message.tool_calls || [];
         if(toolCalls.length && !controller.signal.aborted) {
           history.push(resp.choices[0].message);
@@ -124,7 +128,6 @@ export class OpenAi extends LLMProvider {
       if(options.stream) options.stream({done: true});
       res(this.toStandard([...history, {role: 'assistant', content: resp.choices[0].message.content || ''}]));
     });
 
     return Object.assign(response, {abort: () => controller.abort()});
   }
 }
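Tool plumbing keeps its shape: `options.tools` is mapped into OpenAI's function-calling format, arguments come back through `JSONAttemptParse`, and the synthesized tool messages now inherit the parent message's timestamp. A tool-call sketch; `AiTool` lives in `src/tools.ts`, which is outside this diff, so the field names below are assumptions:

```ts
// `ai` is the Ai instance from the src/ai.ts sketch above
const answer = await ai.language.ask('What is 7 * 6?', {
  tools: [<any>{
    name: 'multiply',                     // assumed AiTool fields
    description: 'Multiply two integers',
    parameters: {a: {type: 'number'}, b: {type: 'number'}},
    fn: ({a, b}: {a: number, b: number}) => a * b,
  }],
});
console.log(answer.at(-1)?.content); // "42", after one tool round-trip
```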
src/vision.ts (new file, 25 lines)

@@ -0,0 +1,25 @@
+import {createWorker} from 'tesseract.js';
+import {Ai} from './ai.ts';
+
+export class Vision {
+
+  constructor(private ai: Ai) { }
+
+  /**
+   * Convert image to text using Optical Character Recognition
+   * @param {string} path Path to image
+   * @returns {{abort: Function, response: Promise<string | null>}} Abort function & Promise of extracted text
+   */
+  ocr(path: string): {abort: () => void, response: Promise<string | null>} {
+    let worker: any;
+    return {
+      abort: () => { worker?.terminate(); },
+      response: new Promise(async res => {
+        worker = await createWorker('eng');
+        const {data} = await worker.recognize(path);
+        await worker.terminate();
+        res(data.text.trim() || null);
+      })
+    }
+  }
+}
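`Vision.ocr` is a straight lift of the deleted `Ai.ocr`: a tesseract.js worker wrapped in an abortable response. A usage sketch:

```ts
// `ai` is the Ai instance from the src/ai.ts sketch above
const {response, abort} = ai.vision.ocr('./scanned-invoice.png');

// Terminate the tesseract worker if OCR takes more than 10s
const timer = setTimeout(() => abort(), 10_000);
const text = await response;
clearTimeout(timer);
console.log(text ?? 'No text recognized');
```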
vite.config.ts

@@ -1,20 +1,16 @@
-import {resolve} from 'path';
 import {defineConfig} from 'vite';
 import dts from 'vite-plugin-dts';
 
 export default defineConfig({
   build: {
     lib: {
-      entry: resolve(process.cwd(), 'src/index.ts'),
+      entry: './src/index.ts',
       name: 'utils',
-      fileName: (module, entryName) => {
-        if(module == 'es') return 'index.mjs';
-        if(module == 'umd') return 'index.cjs';
-      }
+      fileName: (format) => (format === 'es' ? 'index.mjs' : 'index.js'),
     },
     ssr: true,
     emptyOutDir: true,
-    minify: false,
+    minify: true,
     sourcemap: true
   },
   plugins: [dts()],
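The old `fileName` returned `undefined` for any format other than `es`/`umd`; the replacement always yields a name, and together with `minify: true` it produces exactly the artifacts the new `exports` map references. Assuming `es` plus `cjs` (or `umd`) outputs, the mapping is:

```ts
const fileName = (format: string) => (format === 'es' ? 'index.mjs' : 'index.js');

console.log(fileName('es'));  // index.mjs → package.json "import" condition
console.log(fileName('cjs')); // index.js  → package.json "require" condition
```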