Compare commits
10 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 575fbac099 | |
| | 46ae0f7913 | |
| | 54730a2b9a | |
| | 27506d20af | |
| | 8c64129200 | |
| | 013aa942c0 | |
| | c8d5660b1a | |
| | f2c66b0cb8 | |
| | cda7db4f45 | |
| | d71a6be120 | |
README.md

```diff
@@ -75,6 +75,7 @@ A TypeScript library that provides a unified interface for working with multiple

 #### Instructions
 1. Install the package: `npm i @ztimson/ai-utils`
+2. For speaker diarization: `pip install pyannote.audio`

 </details>

@@ -90,8 +91,9 @@ A TypeScript library that provides a unified interface for working with multiple

 #### Instructions
 1. Install the dependencies: `npm i`
-2. Build library: `npm build`
-3. Run unit tests: `npm test`
+2. For speaker diarization: `pip install pyannote.audio`
+3. Build library: `npm build`
+4. Run unit tests: `npm test`

 </details>
```
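Taken together with the source changes below, the new install steps map onto a usage flow roughly like the following. This is a minimal sketch, assuming `Ai` is constructed from the package root with an `AiOptions` object and that the LLM facade is reachable as `ai.language` (the accessor the diffs below use internally); the model key is a placeholder:

```ts
import {Ai} from '@ztimson/ai-utils';

// Placeholder configuration; see the AiOptions changes in src/ai.ts below.
const ai = new Ai({
	path: './models',
	llm: {models: {'claude-sonnet': {/* AnthropicConfig */}}}
});

// After this changeset ask() resolves to the assistant's reply as a plain string
// and splices the full conversation back into the history array you pass in.
const history: any[] = [];
const reply = await ai.language.ask('Summarize this repo in one line', {history});
console.log(reply);
```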
package.json

```diff
@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.4.0",
+  "version": "0.6.2",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",
```
src/ai.ts (24 changes)

```diff
@@ -1,29 +1,25 @@
 import * as os from 'node:os';
-import {LLM, AnthropicConfig, OllamaConfig, OpenAiConfig, LLMRequest} from './llm';
+import LLM, {AnthropicConfig, OllamaConfig, OpenAiConfig, LLMRequest} from './llm';
 import { Audio } from './audio.ts';
 import {Vision} from './vision.ts';

-export type AbortablePromise<T> = Promise<T> & {abort: () => any};
+export type AbortablePromise<T> = Promise<T> & {
+	abort: () => any
+};

 export type AiOptions = {
 	/** Path to models */
 	path?: string;
+	/** ASR model: whisper-tiny, whisper-base */
+	asr?: string;
+	/** Embedding model: all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5 */
+	embedder?: string;
 	/** Large language models, first is default */
 	llm?: Omit<LLMRequest, 'model'> & {
 		models: {[model: string]: AnthropicConfig | OllamaConfig | OpenAiConfig};
 	}
-	/** Tesseract OCR configuration */
-	tesseract?: {
-		/** Model: eng, eng_best, eng_fast */
-		model?: string;
-	}
-	/** Whisper ASR configuration */
-	whisper?: {
-		/** Whisper binary location */
-		binary: string;
-		/** Model: `ggml-base.en.bin` */
-		model: string;
-	}
+	/** OCR model: eng, eng_best, eng_fast */
+	ocr?: string;
 }

 export class Ai {
```
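A sketch of a configuration under the new `AiOptions` shape; the option names and suggested model values come directly from the type above, while everything else (the re-export from the package root, the placeholder values) is illustrative:

```ts
import {AiOptions} from '@ztimson/ai-utils'; // assumes the type is re-exported from the root

const options: AiOptions = {
	path: './models',              // cache directory for downloaded models
	asr: 'whisper-base',           // replaces whisper.binary / whisper.model
	embedder: 'bge-small-en-v1.5', // model handed to the embedding worker
	ocr: 'eng',                    // replaces tesseract.model
	llm: {models: {/* AnthropicConfig | OllamaConfig | OpenAiConfig entries */}}
};
```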
```diff
@@ -13,25 +13,25 @@ export class Anthropic extends LLMProvider {
 	}

 	private toStandard(history: any[]): LLMMessage[] {
-		for(let i = 0; i < history.length; i++) {
-			const orgI = i;
-			if(typeof history[orgI].content != 'string') {
-				if(history[orgI].role == 'assistant') {
-					history[orgI].content.filter((c: any) => c.type =='tool_use').forEach((c: any) => {
-						i++;
-						history.splice(i, 0, {role: 'tool', id: c.id, name: c.name, args: c.input, timestamp: Date.now()});
-					});
-				} else if(history[orgI].role == 'user') {
-					history[orgI].content.filter((c: any) => c.type =='tool_result').forEach((c: any) => {
-						const h = history.find((h: any) => h.id == c.tool_use_id);
-						h[c.is_error ? 'error' : 'content'] = c.content;
-					});
-				}
-				history[orgI].content = history[orgI].content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
-			}
-			if(!history[orgI].timestamp) history[orgI].timestamp = Date.now();
-		}
-		return history.filter(h => !!h.content);
+		const timestamp = Date.now();
+		const messages: LLMMessage[] = [];
+		for(let h of history) {
+			if(typeof h.content == 'string') {
+				messages.push(<any>{timestamp, ...h});
+			} else {
+				const textContent = h.content?.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
+				if(textContent) messages.push({timestamp, role: h.role, content: textContent});
+				h.content.forEach((c: any) => {
+					if(c.type == 'tool_use') {
+						messages.push({timestamp, role: 'tool', id: c.id, name: c.name, args: c.input, content: undefined});
+					} else if(c.type == 'tool_result') {
+						const m: any = messages.findLast(m => (<any>m).id == c.tool_use_id);
+						if(m) m[c.is_error ? 'error' : 'content'] = c.content;
+					}
+				});
+			}
+		}
+		return messages;
 	}

 	private fromStandard(history: LLMMessage[]): any[] {
```
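To make the rewrite concrete, here is roughly how the new `toStandard()` flattens an Anthropic content-block history into provider-neutral messages. The input shape follows Anthropic's message format; the expected output is inferred from the code above:

```ts
// Hypothetical Anthropic-style history with content as an array of blocks:
const anthropicHistory = [
	{role: 'assistant', content: [
		{type: 'text', text: 'Let me check the weather.'},
		{type: 'tool_use', id: 'tu_1', name: 'get_weather', input: {city: 'Toronto'}}
	]},
	{role: 'user', content: [
		{type: 'tool_result', tool_use_id: 'tu_1', content: '{"temp": 21}'}
	]}
];

// toStandard(anthropicHistory) should now yield, roughly:
// [
//   {timestamp, role: 'assistant', content: 'Let me check the weather.'},
//   {timestamp, role: 'tool', id: 'tu_1', name: 'get_weather', args: {city: 'Toronto'}, content: '{"temp": 21}'}
// ]
```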
```diff
@@ -48,13 +48,10 @@ export class Anthropic extends LLMProvider {
 		return history.map(({timestamp, ...h}) => h);
 	}

-	ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
+	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
 		const controller = new AbortController();
-		const response = new Promise<any>(async (res, rej) => {
-			let history = [...options.history || [], {role: 'user', content: message, timestamp: Date.now()}];
-			if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
-			history = this.fromStandard(<any>history);
-
+		return Object.assign(new Promise<any>(async (res) => {
+			let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
 			const tools = options.tools || this.ai.options.llm?.tools || [];
 			const requestParams: any = {
 				model: options.model || this.model,
@@ -76,7 +73,6 @@ export class Anthropic extends LLMProvider {
 			};

 			let resp: any, isFirstMessage = true;
-			const assistantMessages: string[] = [];
 			do {
 				resp = await this.client.messages.create(requestParams).catch(err => {
 					err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
@@ -122,7 +118,7 @@ export class Anthropic extends LLMProvider {
 					if(options.stream) options.stream({tool: toolCall.name});
 					if(!tool) return {tool_use_id: toolCall.id, is_error: true, content: 'Tool not found'};
 					try {
-						const result = await tool.fn(toolCall.input, this.ai);
+						const result = await tool.fn(toolCall.input, options?.stream, this.ai);
 						return {type: 'tool_result', tool_use_id: toolCall.id, content: JSONSanitize(result)};
 					} catch (err: any) {
 						return {type: 'tool_result', tool_use_id: toolCall.id, is_error: true, content: err?.message || err?.toString() || 'Unknown'};
@@ -132,11 +128,12 @@ export class Anthropic extends LLMProvider {
 				requestParams.messages = history;
 			}
 		} while (!controller.signal.aborted && resp.content.some((c: any) => c.type === 'tool_use'));
+		history.push({role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')});
+		history = this.toStandard(history);

 		if(options.stream) options.stream({done: true});
-		res(this.toStandard([...history, {role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')}]));
-		});
-
-		return Object.assign(response, {abort: () => controller.abort()});
+		if(options.history) options.history.splice(0, options.history.length, ...history);
+		res(history.at(-1)?.content);
+		}), {abort: () => controller.abort()});
 	}
 }
```
src/audio.ts (142 changes)

```diff
@@ -1,50 +1,116 @@
 import {spawn} from 'node:child_process';
 import fs from 'node:fs/promises';
 import Path from 'node:path';
+import {pipeline, read_audio} from '@xenova/transformers';
 import {AbortablePromise, Ai} from './ai.ts';

 export class Audio {
-	private downloads: {[key: string]: Promise<string>} = {};
-	private whisperModel!: string;
-
-	constructor(private ai: Ai) {
-		if(ai.options.whisper?.binary) {
-			this.whisperModel = ai.options.whisper?.model.endsWith('.bin') ? ai.options.whisper?.model : ai.options.whisper?.model + '.bin';
-			this.downloadAsrModel();
-		}
-	}
-
-	asr(path: string, model: string = this.whisperModel): AbortablePromise<string | null> {
-		if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
-		let abort: any = () => {};
-		const p = new Promise<string | null>(async (resolve, reject) => {
-			const m = await this.downloadAsrModel(model);
-			let output = '';
-			const proc = spawn(<string>this.ai.options.whisper?.binary, ['-nt', '-np', '-m', m, '-f', path], {stdio: ['ignore', 'pipe', 'ignore']});
-			abort = () => proc.kill('SIGTERM');
-			proc.on('error', (err: Error) => reject(err));
-			proc.stdout.on('data', (data: Buffer) => output += data.toString());
-			proc.on('close', (code: number) => {
-				if(code === 0) resolve(output.trim() || null);
-				else reject(new Error(`Exit code ${code}`));
-			});
-		});
-		return Object.assign(p, {abort});
-	}
-
-	async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
-		if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
-		if(!model.endsWith('.bin')) model += '.bin';
-		const p = Path.join(<string>this.ai.options.path, model);
-		if(await fs.stat(p).then(() => true).catch(() => false)) return p;
-		if(!!this.downloads[model]) return this.downloads[model];
-		this.downloads[model] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${model}`)
-			.then(resp => resp.arrayBuffer())
-			.then(arr => Buffer.from(arr)).then(async buffer => {
-				await fs.writeFile(p, buffer);
-				delete this.downloads[model];
-				return p;
-			});
-		return this.downloads[model];
-	}
+	private whisperPipeline: any;
+
+	constructor(private ai: Ai) {}
+
+	private combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
+		const speakerMap = new Map();
+		let speakerCount = 0;
+		speakers.forEach((seg: any) => {
+			if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
+		});
+
+		const lines: string[] = [];
+		let currentSpeaker = -1;
+		let currentText = '';
+		chunks.forEach((chunk: any) => {
+			const time = chunk.timestamp[0];
+			const speaker = speakers.find((s: any) => time >= s.start && time <= s.end);
+			const speakerNum = speaker ? speakerMap.get(speaker.speaker) : 1;
+			if (speakerNum !== currentSpeaker) {
+				if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
+				currentSpeaker = speakerNum;
+				currentText = chunk.text;
+			} else {
+				currentText += chunk.text;
+			}
+		});
+		if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
+		return lines.join('\n');
+	}
+
+	async canDiarization(): Promise<boolean> {
+		return new Promise((resolve) => {
+			const proc = spawn('python3', ['-c', 'import pyannote.audio']);
+			proc.on('close', (code: number) => resolve(code === 0));
+			proc.on('error', () => resolve(false));
+		});
+	}
+
+	private async runDiarization(audioPath: string): Promise<any[]> {
+		if(!await this.canDiarization()) throw new Error('Pyannote is not installed: pip install pyannote.audio');
+		const script = `
+import sys
+import json
+from pyannote.audio import Pipeline
+
+os.environ['TORCH_HOME'] = "${this.ai.options.path}"
+pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
+diarization = pipeline(sys.argv[1])
+
+segments = []
+for turn, _, speaker in diarization.itertracks(yield_label=True):
+	segments.append({
+		"start": turn.start,
+		"end": turn.end,
+		"speaker": speaker
+	})
+
+print(json.dumps(segments))
+`;
+
+		return new Promise((resolve, reject) => {
+			let output = '';
+			const proc = spawn('python3', ['-c', script, audioPath]);
+			proc.stdout.on('data', (data: Buffer) => output += data.toString());
+			proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
+			proc.on('close', (code: number) => {
+				if(code === 0) {
+					try {
+						resolve(JSON.parse(output));
+					} catch (err) {
+						reject(new Error('Failed to parse diarization output'));
+					}
+				} else {
+					reject(new Error(`Python process exited with code ${code}`));
+				}
+			});
+			proc.on('error', reject);
+		});
+	}
+
+	asr(path: string, options: { model?: string; speaker?: boolean } = {}): AbortablePromise<string | null> {
+		const { model = this.ai.options.asr || 'whisper-base', speaker = false } = options;
+		let aborted = false;
+		const abort = () => { aborted = true; };
+
+		const p = new Promise<string | null>(async (resolve, reject) => {
+			try {
+				if(aborted) return resolve(null);
+				if(!this.whisperPipeline) this.whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, { cache_dir: this.ai.options.path, quantized: true });

+				// Transcript
+				if(aborted) return resolve(null);
+				const audio = await read_audio(path, 16000);
+				const transcriptResult = await this.whisperPipeline(audio, {return_timestamps: speaker ? 'word' : false, chunk_length_s: 30,});
+				if(!speaker) return resolve(transcriptResult.text?.trim() || null);

+				// Speaker Diarization
+				if(aborted) return resolve(null);
+				const speakers = await this.runDiarization(path);
+				if(aborted) return resolve(null);
+				const combined = this.combineSpeakerTranscript(transcriptResult.chunks || [], speakers);
+				resolve(combined);
+			} catch (err) {
+				reject(err);
+			}
+		});

+		return Object.assign(p, { abort });
+	}
 }
```
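A usage sketch for the rewritten `asr()`, assuming the `Audio` instance is exposed on the `Ai` facade as `ai.audio` (the accessor name is not shown in this diff). `speaker: true` requires `python3` with `pyannote.audio` installed, per the README change above:

```ts
// Plain transcription through the @xenova/transformers Whisper pipeline:
const text = await ai.audio.asr('./meeting.wav');

// With diarization; the output format comes from combineSpeakerTranscript():
const labeled = await ai.audio.asr('./meeting.wav', {model: 'whisper-base', speaker: true});
// e.g. "[speaker 1]: Good morning.\n[speaker 2]: Hi, thanks for joining."
```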
```diff
@@ -1,11 +1,14 @@
 import { pipeline } from '@xenova/transformers';
 import { parentPort } from 'worker_threads';

-let model: any;
+let embedder: any;

-parentPort?.on('message', async ({ id, text }) => {
-	if(!model) model = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
-	const output = await model(text, { pooling: 'mean', normalize: true });
+parentPort?.on('message', async ({ id, text, model, path }) => {
+	if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model, {
+		quantized: true,
+		cache_dir: path,
+	});
+	const output = await embedder(text, { pooling: 'mean', normalize: true });
 	const embedding = Array.from(output.data);
 	parentPort?.postMessage({ id, embedding });
 });
```
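The worker's message contract now carries the model name and cache path with every request; a sketch of the sending side (mirrored by `LLM.embedding()` in the src/llm.ts diff below; the worker file path here is illustrative):

```ts
import {Worker} from 'node:worker_threads';

const worker = new Worker('./embed.worker.js'); // hypothetical compiled path
worker.postMessage({id: 0, text: 'hello world', model: 'bge-small-en-v1.5', path: './models'});
worker.on('message', ({id, embedding}) => console.log(id, embedding.length)); // replies with {id, embedding}
```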
src/llm.ts (161 changes)

```diff
@@ -31,11 +31,23 @@ export type LLMMessage = {
 	/** Tool result */
 	content: undefined | string;
 	/** Tool error */
-	error: undefined | string;
+	error?: undefined | string;
 	/** Timestamp */
 	timestamp?: number;
 }

+/** Background information the AI will be fed */
+export type LLMMemory = {
+	/** What entity is this fact about */
+	owner: string;
+	/** The information that will be remembered */
+	fact: string;
+	/** Owner and fact embedding vector */
+	embeddings: [number[], number[]];
+	/** Creation time */
+	timestamp: Date;
+}
+
 export type LLMRequest = {
 	/** System prompt */
 	system?: string;
```
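For reference, a populated `LLMMemory` record might be built like this; the double embedding (owner alone, then `owner: fact`) mirrors what `compressHistory()` does further down, and `embedding()` resolving to chunk objects carrying an `embedding` vector comes from its JSDoc below:

```ts
const [ownerRes, factRes] = await Promise.all([
	ai.language.embedding('Zak'),
	ai.language.embedding('Zak: Prefers TypeScript for new projects')
]);

const remembered: LLMMemory = {
	owner: 'Zak',
	fact: 'Prefers TypeScript for new projects',
	embeddings: [ownerRes[0].embedding, factRes[0].embedding], // [owner vector, "owner: fact" vector]
	timestamp: new Date()
};
```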
```diff
@@ -57,10 +69,12 @@ export type LLMRequest = {
 			max: number;
 			/** Compress chat until context size smaller than */
 			min: number
 		}
 	},
+	/** Background information the AI will be fed */
+	memory?: LLMMemory[],
 }

-export class LLM {
+class LLM {
 	private embedWorker: Worker | null = null;
 	private embedQueue = new Map<number, { resolve: (value: number[]) => void; reject: (error: any) => void }>();
 	private embedId = 0;
@@ -90,37 +104,115 @@ export class LLM {
 	 * Chat with LLM
 	 * @param {string} message Question
 	 * @param {LLMRequest} options Configuration options and chat history
-	 * @returns {{abort: () => void, response: Promise<LLMMessage[]>}} Function to abort response and chat history
+	 * @returns {{abort: () => void, response: Promise<string>}} Function to abort response and chat history
 	 */
-	ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
+	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
 		const m = options.model || this.defaultModel;
 		if(!this.models[m]) throw new Error(`Model does not exist: ${m}`);
-		return this.models[m].ask(message, options);
+		let abort = () => {};
+		return Object.assign(new Promise<string>(async res => {
+			if(!options.history) options.history = [];
+			// If memories were passed, find any relivant ones and add a tool for ADHOC lookups
+			if(options.memory) {
+				options.system = (options.system || '') + '\nYou have passive persistent memory never make any mention of your memory capabilities and what you can/cannot remember\n';
+				const search = async (query?: string | null, subject?: string | null, limit = 50) => {
+					const [o, q] = await Promise.all([
+						subject ? this.embedding(subject) : Promise.resolve(null),
+						query ? this.embedding(query) : Promise.resolve(null),
+					]);
+					return (options.memory || [])
+						.map(m => ({...m, score: o ? this.cosineSimilarity(m.embeddings[0], o[0].embedding) : 1}))
+						.filter((m: any) => m.score >= 0.8)
+						.map((m: any) => ({...m, score: q ? this.cosineSimilarity(m.embeddings[1], q[0].embedding) : m.score}))
+						.filter((m: any) => m.score >= 0.2)
+						.toSorted((a: any, b: any) => a.score - b.score)
+						.slice(0, limit);
+				}
+
+				const relevant = await search(message);
+				if(relevant.length) options.history.push({role: 'assistant', content: 'Things I remembered:\n' + relevant.map(m => `${m.owner}: ${m.fact}`).join('\n')});
+				options.tools = [...options.tools || [], {
+					name: 'read_memory',
+					description: 'Check your long-term memory for more information',
+					args: {
+						subject: {type: 'string', description: 'Find information by a subject topic, can be used with or without query argument'},
+						query: {type: 'string', description: 'Search memory based on a query, can be used with or without subject argument'},
+						limit: {type: 'number', description: 'Result limit, default 5'},
+					},
+					fn: (args) => {
+						if(!args.subject && !args.query) throw new Error('Either a subject or query argument is required');
+						return search(args.query, args.subject, args.limit || 5);
+					}
+				}];
+			}
+
+			// Ask
+			const resp = await this.models[m].ask(message, options);
+
+			// Remove any memory calls
+			if(options.memory) {
+				const i = options.history?.findIndex((h: any) => h.role == 'assistant' && h.content.startsWith('Things I remembered:'));
+				if(i != null && i >= 0) options.history?.splice(i, 1);
+			}
+
+			// Handle compression and memory extraction
+			if(options.compress || options.memory) {
+				let compressed = null;
+				if(options.compress) {
+					compressed = await this.ai.language.compressHistory(options.history, options.compress.max, options.compress.min, options);
+					options.history.splice(0, options.history.length, ...compressed.history);
+				} else {
+					const i = options.history?.findLastIndex(m => m.role == 'user') ?? -1;
+					compressed = await this.ai.language.compressHistory(i != -1 ? options.history.slice(i) : options.history, 0, 0, options);
+				}
+				if(options.memory) {
+					const updated = options.memory
+						.filter(m => !compressed.memory.some(m2 => this.cosineSimilarity(m.embeddings[1], m2.embeddings[1]) > 0.8))
+						.concat(compressed.memory);
+					options.memory.splice(0, options.memory.length, ...updated);
+				}
+			}
+			return res(resp);
+		}), {abort});
 	}

 	/**
 	 * Compress chat history to reduce context size
 	 * @param {LLMMessage[]} history Chatlog that will be compressed
 	 * @param max Trigger compression once context is larger than max
-	 * @param min Summarize until context size is less than min
+	 * @param min Leave messages less than the token minimum, summarize the rest
 	 * @param {LLMRequest} options LLM options
 	 * @returns {Promise<LLMMessage[]>} New chat history will summary at index 0
 	 */
-	async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
-		if(this.estimateTokens(history) < max) return history;
+	async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<{history: LLMMessage[], memory: LLMMemory[]}> {
+		if(this.estimateTokens(history) < max) return {history, memory: []};
 		let keep = 0, tokens = 0;
 		for(let m of history.toReversed()) {
 			tokens += this.estimateTokens(m.content);
 			if(tokens < min) keep++;
 			else break;
 		}
-		if(history.length <= keep) return history;
-		const recent = keep == 0 ? [] : history.slice(-keep),
+		if(history.length <= keep) return {history, memory: []};
+		const system = history[0].role == 'system' ? history[0] : null,
+			recent = keep == 0 ? [] : history.slice(-keep),
 			process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
-		const summary = await this.summarize(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), 250, options);
-		return [{role: 'assistant', content: `Conversation Summary: ${summary}`, timestamp: Date.now()}, ...recent];
+		const summary: any = await this.json(`Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn't already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI. Match this format: {summary: string, facts: [[subject, fact]]}\n\n${process.map(m => `${m.role}: ${m.content}`).join('\n\n')}`, {model: options?.model, temperature: options?.temperature || 0.3});
+		const timestamp = new Date();
+		const memory = await Promise.all((summary?.facts || [])?.map(async ([owner, fact]: [string, string]) => {
+			const e = await Promise.all([this.embedding(owner), this.embedding(`${owner}: ${fact}`)]);
+			return {owner, fact, embeddings: [e[0][0].embedding, e[1][0].embedding], timestamp};
+		}));
+		const h = [{role: 'assistant', content: `Conversation Summary: ${summary?.summary}`, timestamp: Date.now()}, ...recent];
+		if(system) h.splice(0, 0, system);
+		return {history: <any>h, memory};
 	}

 	/**
 	 * Compare the difference between embeddings (calculates the angle between two vectors)
 	 * @param {number[]} v1 First embedding / vector comparison
 	 * @param {number[]} v2 Second embedding / vector for comparison
 	 * @returns {number} Similarity values 0-1: 0 = unique, 1 = identical
 	 */
 	cosineSimilarity(v1: number[], v2: number[]): number {
 		if (v1.length !== v2.length) throw new Error('Vectors must be same length');
 		let dotProduct = 0, normA = 0, normB = 0;
```
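Putting the new `ask()` and `compressHistory()` together, the intended memory loop looks roughly like this; persistence of the `memory` array between sessions is left to the caller:

```ts
const memory: LLMMemory[] = [];   // load from disk or a database in a real application
const history: LLMMessage[] = [];

// ask() injects relevant facts, exposes the read_memory tool, strips the injected
// "Things I remembered" message afterwards, and merges newly extracted facts into `memory`.
await ai.language.ask('My dog is named Merlin', {history, memory});
await ai.language.ask('What is my dog called?', {history, memory, compress: {max: 8000, min: 2000}});
```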
```diff
@@ -133,6 +225,13 @@ export class LLM {
 		return denominator === 0 ? 0 : dotProduct / denominator;
 	}

+	/**
+	 * Chunk text into parts for AI digestion
+	 * @param {object | string} target Item that will be chunked (objects get converted)
+	 * @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
+	 * @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
+	 * @returns {string[]} Chunked strings
+	 */
 	chunk(target: object | string, maxTokens = 500, overlapTokens = 50): string[] {
 		const objString = (obj: any, path = ''): string[] => {
 			if(!obj) return [];
@@ -142,7 +241,6 @@ export class LLM {
 				return `${p}: ${Array.isArray(value) ? value.join(', ') : value}`;
 			});
 		};
-
 		const lines = typeof target === 'object' ? objString(target) : target.split('\n');
 		const tokens = lines.flatMap(l => [...l.split(/\s+/).filter(Boolean), '\n']);
 		const chunks: string[] = [];
@@ -161,15 +259,26 @@ export class LLM {
 		return chunks;
 	}

+	/**
+	 * Create a vector representation of a string
+	 * @param {object | string} target Item that will be embedded (objects get converted)
+	 * @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
+	 * @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
+	 * @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
+	 */
 	embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
 		const embed = (text: string): Promise<number[]> => {
 			return new Promise((resolve, reject) => {
 				const id = this.embedId++;
 				this.embedQueue.set(id, { resolve, reject });
-				this.embedWorker?.postMessage({ id, text });
+				this.embedWorker?.postMessage({
+					id,
+					text,
+					model: this.ai.options?.embedder || 'bge-small-en-v1.5',
+					path: this.ai.options.path
+				});
 			});
 		};

 		const chunks = this.chunk(target, maxTokens, overlapTokens);
 		return Promise.all(chunks.map(async (text, index) => ({
 			index,
@@ -191,7 +300,7 @@ export class LLM {
 	/**
 	 * Compare the difference between two strings using tensor math
-	 * @param target Text that will checked
+	 * @param target Text that will be checked
 	 * @param {string} searchTerms Multiple search terms to check against target
 	 * @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
 	 */
@@ -212,13 +321,12 @@ export class LLM {
 	 * @param {LLMRequest} options Configuration options and chat history
 	 * @returns {Promise<{} | {} | RegExpExecArray | null>}
 	 */
-	async json(message: string, options?: LLMRequest) {
-		let resp = await this.ask(message, {
-			system: 'Respond using a JSON blob',
-			...options
-		});
-		if(!resp?.[0]?.content) return {};
-		return JSONAttemptParse(new RegExp('\{[\s\S]*\}').exec(resp[0].content), {});
+	async json(message: string, options?: LLMRequest): Promise<any> {
+		let resp = await this.ask(message, {system: 'Respond using a JSON blob matching any provided examples', ...options});
+		if(!resp) return {};
+		const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
+		const jsonStr = codeBlock ? codeBlock[1].trim() : resp;
+		return JSONAttemptParse(jsonStr, {});
 	}

 	/**
```
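The reworked `json()` accepts the plain-string reply and unwraps a fenced code block before parsing, so either response style works (sketch):

```ts
const result = await ai.language.json('Respond with {"ok": boolean} only');
// A reply of '```json\n{"ok": true}\n```' or a bare '{"ok": true}' both parse to {ok: true}
```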
```diff
@@ -229,7 +337,8 @@ export class LLM {
 	 * @returns {Promise<string>} Summary
 	 */
 	summarize(text: string, tokens: number, options?: LLMRequest): Promise<string | null> {
-		return this.ask(text, {system: `Generate a brief summary <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options})
-			.then(history => <string>history.pop()?.content || null);
+		return this.ask(text, {system: `Generate a brief summary <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options});
 	}
 }
+
+export default LLM;
```
```diff
@@ -64,13 +64,11 @@ export class OpenAi extends LLMProvider {
 		}, [] as any[]);
 	}

-	ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
+	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
 		const controller = new AbortController();
-		const response = new Promise<any>(async (res, rej) => {
-			let history = [...options.history || [], {role: 'user', content: message, timestamp: Date.now()}];
-			if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
-			history = this.fromStandard(<any>history);
-
+		return Object.assign(new Promise<any>(async (res, rej) => {
+			if(options.system && options.history?.[0]?.role != 'system') options.history?.splice(0, 0, {role: 'system', content: options.system, timestamp: Date.now()});
+			let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
 			const tools = options.tools || this.ai.options.llm?.tools || [];
 			const requestParams: any = {
 				model: options.model || this.model,
@@ -124,7 +122,7 @@ export class OpenAi extends LLMProvider {
 				if(!tool) return {role: 'tool', tool_call_id: toolCall.id, content: '{"error": "Tool not found"}'};
 				try {
 					const args = JSONAttemptParse(toolCall.function.arguments, {});
-					const result = await tool.fn(args, this.ai);
+					const result = await tool.fn(args, options.stream, this.ai);
 					return {role: 'tool', tool_call_id: toolCall.id, content: JSONSanitize(result)};
 				} catch (err: any) {
 					return {role: 'tool', tool_call_id: toolCall.id, content: JSONSanitize({error: err?.message || err?.toString() || 'Unknown'})};
@@ -134,10 +132,12 @@ export class OpenAi extends LLMProvider {
 				requestParams.messages = history;
 			}
 		} while (!controller.signal.aborted && resp.choices?.[0]?.message?.tool_calls?.length);
+		history.push({role: 'assistant', content: resp.choices[0].message.content || ''});
+		history = this.toStandard(history);

 		if(options.stream) options.stream({done: true});
-		res(this.toStandard([...history, {role: 'assistant', content: resp.choices[0].message.content || ''}]));
-		});
-		return Object.assign(response, {abort: () => controller.abort()});
+		if(options.history) options.history.splice(0, options.history.length, ...history);
+		res(history.at(-1)?.content);
+		}), {abort: () => controller.abort()});
 	}
 }
```
```diff
@@ -2,5 +2,5 @@ import {AbortablePromise} from './ai.ts';
 import {LLMMessage, LLMRequest} from './llm.ts';

 export abstract class LLMProvider {
-	abstract ask(message: string, options: LLMRequest): AbortablePromise<LLMMessage[]>;
+	abstract ask(message: string, options: LLMRequest): AbortablePromise<string>;
 }
```
src/tools.ts (15 changes)

```diff
@@ -2,6 +2,7 @@ import * as cheerio from 'cheerio';
 import {$, $Sync} from '@ztimson/node-utils';
 import {ASet, consoleInterceptor, Http, fn as Fn} from '@ztimson/utils';
 import {Ai} from './ai.ts';
+import {LLMRequest} from './llm.ts';

 export type AiToolArg = {[key: string]: {
 	/** Argument type */
```
```diff
@@ -32,7 +33,7 @@ export type AiTool = {
 	/** Tool arguments */
 	args?: AiToolArg,
 	/** Callback function */
-	fn: (args: any, ai: Ai) => any | Promise<any>,
+	fn: (args: any, stream: LLMRequest['stream'], ai: Ai) => any | Promise<any>,
 };

 export const CliTool: AiTool = {
```
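Under the new `(args, stream, ai)` callback signature, a custom tool can emit progress through the same stream callback the providers use; a hypothetical example:

```ts
export const WordCountTool: AiTool = {
	name: 'word_count',
	description: 'Count the words in a piece of text',
	args: {
		text: {type: 'string', description: 'Text to count', required: true}
	},
	fn: async (args, stream, ai) => {
		stream?.({tool: 'word_count'}); // optional progress event, mirroring the providers
		return args.text.split(/\s+/).filter(Boolean).length;
	}
};
```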
```diff
@@ -44,9 +45,9 @@ export const CliTool: AiTool = {

 export const DateTimeTool: AiTool = {
 	name: 'get_datetime',
-	description: 'Get current date and time',
+	description: 'Get current UTC date / time',
 	args: {},
-	fn: async () => new Date().toISOString()
+	fn: async () => new Date().toUTCString()
 }

 export const ExecTool: AiTool = {
@@ -56,15 +57,15 @@ export const ExecTool: AiTool = {
 		language: {type: 'string', description: 'Execution language', enum: ['cli', 'node', 'python'], required: true},
 		code: {type: 'string', description: 'Code to execute', required: true}
 	},
-	fn: async (args, ai) => {
+	fn: async (args, stream, ai) => {
 		try {
 			switch(args.type) {
 				case 'bash':
-					return await CliTool.fn({command: args.code}, ai);
+					return await CliTool.fn({command: args.code}, stream, ai);
 				case 'node':
-					return await JSTool.fn({code: args.code}, ai);
+					return await JSTool.fn({code: args.code}, stream, ai);
 				case 'python': {
-					return await PythonTool.fn({code: args.code}, ai);
+					return await PythonTool.fn({code: args.code}, stream, ai);
 				}
 			}
 		} catch(err: any) {
```
```diff
@@ -13,7 +13,7 @@ export class Vision {
 	ocr(path: string): AbortablePromise<string | null> {
 		let worker: any;
 		const p = new Promise<string | null>(async res => {
-			worker = await createWorker(this.ai.options.tesseract?.model || 'eng', 2, {cachePath: this.ai.options.path});
+			worker = await createWorker(this.ai.options.ocr || 'eng', 2, {cachePath: this.ai.options.path});
 			const {data} = await worker.recognize(path);
 			await worker.terminate();
 			res(data.text.trim() || null);
```