Added memory system
All checks were successful
Publish Library / Build NPM Project (push) Successful in 30s
Publish Library / Tag Version (push) Successful in 5s

2026-02-08 19:52:02 -05:00
parent d71a6be120
commit cda7db4f45
7 changed files with 163 additions and 57 deletions

View File

@@ -1,9 +1,11 @@
import * as os from 'node:os';
import {LLM, AnthropicConfig, OllamaConfig, OpenAiConfig, LLMRequest} from './llm';
import LLM, {AnthropicConfig, OllamaConfig, OpenAiConfig, LLMRequest} from './llm';
import { Audio } from './audio.ts';
import {Vision} from './vision.ts';
export type AbortablePromise<T> = Promise<T> & {abort: () => any};
export type AbortablePromise<T> = Promise<T> & {
abort: () => any
};
export type AiOptions = {
/** Path to models */
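A minimal usage sketch of the AbortablePromise shape defined above, assuming an `Ai` instance whose `language.ask()` returns one (the instance and model name are illustrative):

// Illustrative: ask() resolves to the reply text and also exposes abort()
const req: AbortablePromise<string> = ai.language.ask('Summarize the project', {model: 'example-model'});
const timer = setTimeout(() => req.abort(), 30_000); // Give up after 30 seconds
const reply = await req;
clearTimeout(timer);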

View File

@@ -18,8 +18,7 @@ export class Anthropic extends LLMProvider {
if(typeof history[orgI].content != 'string') {
if(history[orgI].role == 'assistant') {
history[orgI].content.filter((c: any) => c.type =='tool_use').forEach((c: any) => {
i++;
history.splice(i, 0, {role: 'tool', id: c.id, name: c.name, args: c.input, timestamp: Date.now()});
history.splice(i + 1, 0, {role: 'tool', id: c.id, name: c.name, args: c.input, timestamp: Date.now()});
});
} else if(history[orgI].role == 'user') {
history[orgI].content.filter((c: any) => c.type =='tool_result').forEach((c: any) => {
@@ -28,6 +27,7 @@ export class Anthropic extends LLMProvider {
});
}
history[orgI].content = history[orgI].content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
if(!history[orgI].content) history.splice(orgI, 1);
}
if(!history[orgI].timestamp) history[orgI].timestamp = Date.now();
}
@@ -48,13 +48,10 @@ export class Anthropic extends LLMProvider {
return history.map(({timestamp, ...h}) => h);
}
ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
const controller = new AbortController();
const response = new Promise<any>(async (res, rej) => {
let history = [...options.history || [], {role: 'user', content: message, timestamp: Date.now()}];
if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
history = this.fromStandard(<any>history);
return Object.assign(new Promise<any>(async (res, rej) => {
const history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
const tools = options.tools || this.ai.options.llm?.tools || [];
const requestParams: any = {
model: options.model || this.model,
@@ -122,7 +119,8 @@ export class Anthropic extends LLMProvider {
if(options.stream) options.stream({tool: toolCall.name});
if(!tool) return {type: 'tool_result', tool_use_id: toolCall.id, is_error: true, content: 'Tool not found'};
try {
const result = await tool.fn(toolCall.input, this.ai);
const result = await tool.fn(toolCall.input, options?.stream, this.ai);
return {type: 'tool_result', tool_use_id: toolCall.id, content: JSONSanitize(result)};
} catch (err: any) {
return {type: 'tool_result', tool_use_id: toolCall.id, is_error: true, content: err?.message || err?.toString() || 'Unknown'};
@@ -132,11 +130,12 @@ export class Anthropic extends LLMProvider {
requestParams.messages = history;
}
} while (!controller.signal.aborted && resp.content.some((c: any) => c.type === 'tool_use'));
history.push({role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')});
this.toStandard(history);
if(options.stream) options.stream({done: true});
res(this.toStandard([...history, {role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')}]));
});
return Object.assign(response, {abort: () => controller.abort()});
if(options.history) options.history.splice(0, options.history.length, ...history);
res(history.at(-1)?.content);
}), {abort: () => controller.abort()});
}
}
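A rough usage sketch for the reworked provider ask(): it now resolves to the assistant's final text, rewrites options.history in place, and forwards the stream callback to tool functions (the provider instance and tool are illustrative):

const history: LLMMessage[] = [];
const reply = await provider.ask('What time is it in UTC?', {
  history, // Replaced in place with the full exchange once the call resolves
  stream: update => console.log(update), // e.g. {tool: 'current_time'} or {done: true}
  tools: [{
    name: 'current_time',
    description: 'Get the current UTC time',
    fn: (_args, _stream, _ai) => new Date().toISOString(), // New (args, stream, ai) signature
  }],
});
console.log(reply); // Same value as history.at(-1)?.content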

View File

@@ -31,11 +31,23 @@ export type LLMMessage = {
/** Tool result */
content: undefined | string;
/** Tool error */
error: undefined | string;
error?: undefined | string;
/** Timestamp */
timestamp?: number;
}
/** Background information the AI will be fed */
export type LLMMemory = {
/** What entity is this fact about */
owner: string;
/** The information that will be remembered */
fact: string;
/** Owner and fact embedding vector */
embeddings: [number[], number[]];
/** Creation time */
timestamp: Date;
}
export type LLMRequest = {
/** System prompt */
system?: string;
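For reference, an illustrative LLMMemory entry; real vectors come from embedding() and are far longer than the placeholders shown here:

const remembered: LLMMemory = {
  owner: 'Alice',
  fact: 'Prefers TypeScript for new projects',
  // embeddings[0] embeds the owner, embeddings[1] embeds "owner: fact"
  embeddings: [[0.12, -0.03 /* ... */], [0.08, 0.41 /* ... */]],
  timestamp: new Date(),
};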
@@ -57,10 +69,12 @@ export type LLMRequest = {
max: number;
/** Compress chat until context size smaller than */
min: number
}
},
/** Background information the AI will be fed */
memory?: LLMMemory[],
}
export class LLM {
class LLM {
private embedWorker: Worker | null = null;
private embedQueue = new Map<number, { resolve: (value: number[]) => void; reject: (error: any) => void }>();
private embedId = 0;
@@ -90,37 +104,115 @@ export class LLM {
* Chat with LLM
* @param {string} message Question
* @param {LLMRequest} options Configuration options and chat history
* @returns {{abort: () => void, response: Promise<LLMMessage[]>}} Function to abort response and chat history
* @returns {AbortablePromise<string>} Abortable promise that resolves to the assistant's final reply
*/
ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
const m = options.model || this.defaultModel;
if(!this.models[m]) throw new Error(`Model does not exist: ${m}`);
return this.models[m].ask(message, options);
let abort = () => {};
return Object.assign(new Promise<string>(async res => {
if(!options.history) options.history = [];
// If memories were passed, find any relevant ones and add a tool for ad hoc lookups
if(options.memory) {
options.system = (options.system || '') + '\nYou have passive, persistent memory. Never mention your memory capabilities or what you can/cannot remember.\n';
const search = async (query?: string | null, subject?: string | null, limit = 50) => {
const [o, q] = await Promise.all([
subject ? this.embedding(subject) : Promise.resolve(null),
query ? this.embedding(query) : Promise.resolve(null),
]);
return (options.memory || [])
.map(m => ({...m, score: o ? this.cosineSimilarity(m.embeddings[0], o[0].embedding) : 1}))
.filter((m: any) => m.score >= 0.8)
.map((m: any) => ({...m, score: q ? this.cosineSimilarity(m.embeddings[1], q[0].embedding) : m.score}))
.filter((m: any) => m.score >= 0.2)
.toSorted((a: any, b: any) => b.score - a.score) // Highest similarity first
.slice(0, limit);
}
const relevant = await search(message);
if(relevant.length) options.history.push({role: 'assistant', content: 'Things I remembered:\n' + relevant.map(m => `${m.owner}: ${m.fact}`).join('\n')});
options.tools = [...options.tools || [], {
name: 'read_memory',
description: 'Check your long-term memory for more information',
args: {
subject: {type: 'string', description: 'Find information by a subject topic, can be used with or without query argument'},
query: {type: 'string', description: 'Search memory based on a query, can be used with or without subject argument'},
limit: {type: 'number', description: 'Result limit, default 5'},
},
fn: (args) => {
if(!args.subject && !args.query) throw new Error('Either a subject or query argument is required');
return search(args.query, args.subject, args.limit || 5);
}
}];
}
// Ask
const req = this.models[m].ask(message, options);
abort = () => req.abort(); // Wire the outer abort to the provider's abortable request
const resp = await req;
// Remove any memory calls
if(options.memory) {
const i = options.history?.findIndex((h: any) => h.role == 'assistant' && h.content.startsWith('Things I remembered:'));
if(i != null && i >= 0) options.history?.splice(i, 1);
}
// Handle compression and memory extraction
if(options.compress || options.memory) {
let compressed = null;
if(options.compress) {
compressed = await this.ai.language.compressHistory(options.history, options.compress.max, options.compress.min, options);
options.history.splice(0, options.history.length, ...compressed.history);
} else {
const i = options.history?.findLastIndex(m => m.role == 'user') ?? -1;
compressed = await this.ai.language.compressHistory(i != -1 ? options.history.slice(i) : options.history, 0, 0, options);
}
if(options.memory) {
const updated = options.memory
.filter(m => !compressed.memory.some(m2 => this.cosineSimilarity(m.embeddings[1], m2.embeddings[1]) > 0.8))
.concat(compressed.memory);
options.memory.splice(0, options.memory.length, ...updated);
}
}
return res(resp);
}), {abort: () => abort()});
}
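A sketch of a persistent-memory conversation built on this ask() flow; the memory array is searched for relevant facts before the request and extended with newly extracted ones afterwards (the llm instance, prompt, and token limits are illustrative):

const memory: LLMMemory[] = [];     // Long-lived, e.g. persisted between sessions
const history: LLMMessage[] = [];   // Per-conversation chat log
const answer = await llm.ask('My name is Alice and I work on embedded firmware', {
  history,
  memory,                           // Relevant facts are injected; new facts are appended
  compress: {max: 8000, min: 2000}, // Optional: also summarizes once context passes 8k tokens
});
// Once extraction runs, memory may hold entries like {owner: 'Alice', fact: 'works on embedded firmware', ...}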
/**
* Compress chat history to reduce context size
* @param {LLMMessage[]} history Chatlog that will be compressed
* @param max Trigger compression once context is larger than max
* @param min Summarize until context size is less than min
* @param min Keep trailing messages whose combined size is under this token minimum; summarize the rest
* @param {LLMRequest} options LLM options
* @returns {Promise<{history: LLMMessage[], memory: LLMMemory[]}>} New chat history with the summary up front (after the system message, if any), plus any extracted memories
*/
async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
if(this.estimateTokens(history) < max) return history;
async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<{history: LLMMessage[], memory: LLMMemory[]}> {
if(this.estimateTokens(history) < max) return {history, memory: []};
let keep = 0, tokens = 0;
for(let m of history.toReversed()) {
tokens += this.estimateTokens(m.content);
if(tokens < min) keep++;
else break;
}
if(history.length <= keep) return history;
const recent = keep == 0 ? [] : history.slice(-keep),
if(history.length <= keep) return {history, memory: []};
const system = history[0].role == 'system' ? history[0] : null,
recent = keep == 0 ? [] : history.slice(-keep),
process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
const summary = await this.summarize(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), 250, options);
return [{role: 'assistant', content: `Conversation Summary: ${summary}`, timestamp: Date.now()}, ...recent];
const summary: any = await this.json(`Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn't already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI. Match this format: {summary: string, facts: [[subject, fact]]}\n\n${process.map(m => `${m.role}: ${m.content}`).join('\n\n')}`, {model: options?.model, temperature: options?.temperature || 0.3});
const timestamp = new Date();
const memory = await Promise.all((summary?.facts || [])?.map(async ([owner, fact]: [string, string]) => {
const e = await Promise.all([this.embedding(owner), this.embedding(`${owner}: ${fact}`)]);
return {owner, fact, embeddings: [e[0][0].embedding, e[1][0].embedding], timestamp};
}));
const h = [{role: 'assistant', content: `Conversation Summary: ${summary?.summary}`, timestamp: Date.now()}, ...recent];
if(system) h.splice(0, 0, system);
return {history: <any>h, memory};
}
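A sketch of consuming the new compressHistory() return shape, which yields both the rewritten history and any facts extracted along the way (llm and longTermMemory are assumed to exist):

const {history: compacted, memory: newFacts} = await llm.compressHistory(history, 8000, 2000);
history.splice(0, history.length, ...compacted); // Summary near the front, recent messages kept verbatim
longTermMemory.push(...newFacts);                // Each entry: {owner, fact, embeddings, timestamp}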
/**
* Compare the difference between embeddings (calculates the angle between two vectors)
* @param {number[]} v1 First embedding / vector comparison
* @param {number[]} v2 Second embedding / vector for comparison
* @returns {number} Similarity values 0-1: 0 = unique, 1 = identical
*/
cosineSimilarity(v1: number[], v2: number[]): number {
if (v1.length !== v2.length) throw new Error('Vectors must be same length');
let dotProduct = 0, normA = 0, normB = 0;
@@ -133,6 +225,13 @@ export class LLM {
return denominator === 0 ? 0 : dotProduct / denominator;
}
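A tiny worked example: for v1 = [1, 0] and v2 = [0.6, 0.8] the dot product is 0.6 and both norms are 1, so the similarity is 0.6.

llm.cosineSimilarity([1, 0], [0.6, 0.8]); // 0.6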
/**
* Chunk text into parts for AI digestion
* @param {object | string} target Item that will be chunked (objects get converted)
* @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
* @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
* @returns {string[]} Chunked strings
*/
chunk(target: object | string, maxTokens = 500, overlapTokens = 50): string[] {
const objString = (obj: any, path = ''): string[] => {
if(!obj) return [];
@@ -142,7 +241,6 @@ export class LLM {
return `${p}: ${Array.isArray(value) ? value.join(', ') : value}`;
});
};
const lines = typeof target === 'object' ? objString(target) : target.split('\n');
const tokens = lines.flatMap(l => [...l.split(/\s+/).filter(Boolean), '\n']);
const chunks: string[] = [];
@@ -161,6 +259,13 @@ export class LLM {
return chunks;
}
/**
* Create a vector representation of a string
* @param {object | string} target Item that will be embedded (objects get converted)
* @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
* @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
* @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
*/
embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
const embed = (text: string): Promise<number[]> => {
return new Promise((resolve, reject) => {
@@ -169,7 +274,6 @@ export class LLM {
this.embedWorker?.postMessage({ id, text });
});
};
const chunks = this.chunk(target, maxTokens, overlapTokens);
return Promise.all(chunks.map(async (text, index) => ({
index,
@@ -191,7 +295,7 @@ export class LLM {
/**
* Compare the difference between two strings using tensor math
* @param target Text that will checked
* @param target Text that will be checked
* @param {string} searchTerms Multiple search terms to check against target
* @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
*/
@@ -212,13 +316,12 @@ export class LLM {
* @param {LLMRequest} options Configuration options and chat history
* @returns {Promise<any>} Parsed JSON object, or an empty object if parsing fails
*/
async json(message: string, options?: LLMRequest) {
let resp = await this.ask(message, {
system: 'Respond using a JSON blob',
...options
});
if(!resp?.[0]?.content) return {};
return JSONAttemptParse(new RegExp('\{[\s\S]*\}').exec(resp[0].content), {});
async json(message: string, options?: LLMRequest): Promise<any> {
let resp = await this.ask(message, {system: 'Respond using a JSON blob matching any provided examples', ...options});
if(!resp) return {};
const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
const jsonStr = codeBlock ? codeBlock[1].trim() : resp;
return JSONAttemptParse(jsonStr, {});
}
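A sketch of the updated json() helper in use: it now unwraps a fenced code block when the model returns one and otherwise parses the raw reply (the prompt is illustrative):

const parsed = await llm.json('List three colors. Example: {"colors": ["red"]}');
// Works whether the model answers with a bare {...} blob or wraps it in a ``` code fence
console.log(parsed.colors);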
/**
@@ -229,7 +332,8 @@ export class LLM {
* @returns {Promise<string>} Summary
*/
summarize(text: string, tokens: number, options?: LLMRequest): Promise<string | null> {
return this.ask(text, {system: `Generate a brief summary <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options})
.then(history => <string>history.pop()?.content || null);
return this.ask(text, {system: `Generate a brief summary <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options});
}
}
export default LLM;
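A short sketch of the embedding helper the memory system builds on: the input is chunked and each chunk gets its own vector (llm and queryVector are assumed):

const chunks = await llm.embedding('Some long document text ...', 500, 50);
// Each entry: {index, embedding: number[], text, tokens}
const best = chunks
  .map(c => ({...c, score: llm.cosineSimilarity(c.embedding, queryVector)}))
  .toSorted((a, b) => b.score - a.score)[0];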

View File

@@ -64,13 +64,11 @@ export class OpenAi extends LLMProvider {
}, [] as any[]);
}
ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
const controller = new AbortController();
const response = new Promise<any>(async (res, rej) => {
let history = [...options.history || [], {role: 'user', content: message, timestamp: Date.now()}];
if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
history = this.fromStandard(<any>history);
return Object.assign(new Promise<any>(async (res, rej) => {
if(options.system && options.history?.[0]?.role != 'system') options.history?.splice(0, 0, {role: 'system', content: options.system, timestamp: Date.now()});
const history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
const tools = options.tools || this.ai.options.llm?.tools || [];
const requestParams: any = {
model: options.model || this.model,
@@ -124,7 +122,7 @@ export class OpenAi extends LLMProvider {
if(!tool) return {role: 'tool', tool_call_id: toolCall.id, content: '{"error": "Tool not found"}'};
try {
const args = JSONAttemptParse(toolCall.function.arguments, {});
const result = await tool.fn(args, this.ai);
const result = await tool.fn(args, options.stream, this.ai);
return {role: 'tool', tool_call_id: toolCall.id, content: JSONSanitize(result)};
} catch (err: any) {
return {role: 'tool', tool_call_id: toolCall.id, content: JSONSanitize({error: err?.message || err?.toString() || 'Unknown'})};
@@ -134,10 +132,12 @@ export class OpenAi extends LLMProvider {
requestParams.messages = history;
}
} while (!controller.signal.aborted && resp.choices?.[0]?.message?.tool_calls?.length);
history.push({role: 'assistant', content: resp.choices[0].message.content || ''});
this.toStandard(history);
if(options.stream) options.stream({done: true});
res(this.toStandard([...history, {role: 'assistant', content: resp.choices[0].message.content || ''}]));
});
return Object.assign(response, {abort: () => controller.abort()});
if(options.history) options.history.splice(0, options.history.length, ...history);
res(history.at(-1)?.content);
}), {abort: () => controller.abort()});
}
}

View File

@@ -2,5 +2,5 @@ import {AbortablePromise} from './ai.ts';
import {LLMMessage, LLMRequest} from './llm.ts';
export abstract class LLMProvider {
abstract ask(message: string, options: LLMRequest): AbortablePromise<LLMMessage[]>;
abstract ask(message: string, options: LLMRequest): AbortablePromise<string>;
}
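A bare-bones sketch of what a custom provider now implements under the updated contract (the echo logic is a placeholder for a real API call):

class EchoProvider extends LLMProvider {
  ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
    // Real providers issue the request here and wire abort() to an AbortController
    return Object.assign(Promise.resolve(`echo: ${message}`), {abort: () => {}});
  }
}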

View File

@@ -2,6 +2,7 @@ import * as cheerio from 'cheerio';
import {$, $Sync} from '@ztimson/node-utils';
import {ASet, consoleInterceptor, Http, fn as Fn} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {LLMRequest} from './llm.ts';
export type AiToolArg = {[key: string]: {
/** Argument type */
@@ -32,7 +33,7 @@ export type AiTool = {
/** Tool arguments */
args?: AiToolArg,
/** Callback function */
fn: (args: any, ai: Ai) => any | Promise<any>,
fn: (args: any, stream: LLMRequest['stream'], ai: Ai) => any | Promise<any>,
};
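An illustrative tool showing the new stream parameter in action, surfacing progress while the tool runs (the tool name, progress field, and logic are made up):

export const CountdownTool: AiTool = {
  name: 'countdown',
  description: 'Counts down from a number, reporting progress as it goes',
  args: {from: {type: 'number', description: 'Starting number', required: true}},
  fn: (args, stream, _ai) => {
    for(let i = args.from; i > 0; i--) {
      if(stream) stream({tool: 'countdown', progress: `${i}...`}); // Shape of stream updates is an assumption
    }
    return 'Done';
  }
};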
export const CliTool: AiTool = {
@@ -56,15 +57,15 @@ export const ExecTool: AiTool = {
language: {type: 'string', description: 'Execution language', enum: ['cli', 'node', 'python'], required: true},
code: {type: 'string', description: 'Code to execute', required: true}
},
fn: async (args, ai) => {
fn: async (args, stream, ai) => {
try {
switch(args.language) {
case 'cli':
return await CliTool.fn({command: args.code}, ai);
return await CliTool.fn({command: args.code}, stream, ai);
case 'node':
return await JSTool.fn({code: args.code}, ai);
return await JSTool.fn({code: args.code}, stream, ai);
case 'python': {
return await PythonTool.fn({code: args.code}, ai);
return await PythonTool.fn({code: args.code}, stream, ai);
}
}
} catch(err: any) {