import {pipeline} from '@xenova/transformers';
import {JSONAttemptParse} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {Anthropic} from './antrhopic.ts';
import {Ollama} from './ollama.ts';
import {OpenAi} from './open-ai.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
import {AiTool} from './tools.ts';
import * as tf from '@tensorflow/tfjs';

export type LLMMessage = {
	/** Message originator */
	role: 'assistant' | 'system' | 'user';
	/** Message content */
	content: string | any;
	/** Timestamp */
	timestamp?: number;
} | {
	/** Tool call */
	role: 'tool';
	/** Unique ID for call */
	id: string;
	/** Tool that was run */
	name: string;
	/** Tool arguments */
	args: any;
	/** Tool result */
	content: undefined | string;
	/** Tool error */
	error: undefined | string;
	/** Timestamp */
	timestamp?: number;
}

export type LLMOptions = {
	/** Anthropic settings */
	anthropic?: {
		/** API token */
		token: string;
		/** Default model */
		model: string;
	},
	/** Ollama settings */
	ollama?: {
		/** Connection URL */
		host: string;
		/** Default model */
		model: string;
	},
	/** OpenAI settings */
	openAi?: {
		/** API token */
		token: string;
		/** Default model */
		model: string;
	},
	/** Default provider & model */
	model: string | [string, string];
} & Omit<LLMRequest, 'model'>;

export type LLMRequest = {
	/** System prompt */
	system?: string;
	/** Message history */
	history?: LLMMessage[];
	/** Max tokens for request */
	max_tokens?: number;
	/** 0 = rigid logic, 1 = balanced, 2 = hyper creative */
	temperature?: number;
	/** Available tools */
	tools?: AiTool[];
	/** LLM model */
	model?: string | [string, string];
	/** Stream response */
	stream?: (chunk: {text?: string, done?: true}) => any;
	/** Compress old messages in the chat to free up context */
	compress?: {
		/** Trigger chat compression once the context exceeds this token count */
		max: number;
		/** Compress the chat until the context size is smaller than this token count */
		min: number;
	}
}

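/*
 * Illustrative sketch (not part of this module): one way an LLMOptions value might be
 * assembled. The host, token and model names below are placeholders, not values taken
 * from this codebase.
 *
 * const options: LLMOptions = {
 *     ollama: {host: 'http://localhost:11434', model: 'llama3'},
 *     openAi: {token: '<api token>', model: '<model name>'},
 *     model: ['ollama', 'llama3'], // or just 'ollama' to fall back to that provider's default model
 *     temperature: 1,
 *     max_tokens: 1024
 * };
 */
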
export class LLM {
	/** Promise resolving to the local feature-extraction pipeline used for embeddings */
	private embedModel: any;
	/** Configured providers keyed by name */
	private providers: {[key: string]: LLMProvider} = {};

	constructor(public readonly ai: Ai) {
		this.embedModel = pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
		if(ai.options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, ai.options.anthropic.token, ai.options.anthropic.model);
		if(ai.options.ollama?.host) this.providers.ollama = new Ollama(this.ai, ai.options.ollama.host, ai.options.ollama.model);
		if(ai.options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, ai.options.openAi.token, ai.options.openAi.model);
	}

	/**
	 * Chat with LLM
	 * @param {string} message Question
	 * @param {LLMRequest} options Configuration options and chat history
	 * @returns {AbortablePromise<LLMMessage[]>} Abortable promise resolving to the updated chat history
	 */
	ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
		let model: any = [null, null];
		if(options.model) {
			if(typeof options.model == 'object') model = options.model;
			else model = [options.model, (<any>this.ai.options)[options.model]?.model];
		}
		if(!options.model || model[1] == null) {
			if(typeof this.ai.options.model == 'object') model = this.ai.options.model;
			else model = [this.ai.options.model, (<any>this.ai.options)[this.ai.options.model]?.model];
		}
		if(!model[0] || !model[1]) throw new Error(`Unknown LLM provider or model: ${model[0]} / ${model[1]}`);
		return this.providers[model[0]].ask(message, {...options, model: model[1]});
	}

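	/*
	 * Illustrative usage (a sketch, not taken from this codebase): `ai` stands in for a
	 * configured Ai instance and the question text is arbitrary.
	 *
	 * const llm = new LLM(ai);
	 * const req = llm.ask('Summarize the release notes', {temperature: 0.5});
	 * const history = await req; // resolves to the updated LLMMessage[]
	 * // req can also be cancelled early via the AbortablePromise mechanism defined in provider.ts.
	 */
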
	/**
	 * Compress chat history to reduce context size
	 * @param {LLMMessage[]} history Chatlog that will be compressed
	 * @param max Trigger compression once context is larger than max
	 * @param min Summarize until context size is less than min
	 * @param {LLMRequest} options LLM options
	 * @returns {Promise<LLMMessage[]>} New chat history with the summary at index 0
	 */
	async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
		if(this.estimateTokens(history) < max) return history;
		let keep = 0, tokens = 0;
		for(let m of history.toReversed()) {
			tokens += this.estimateTokens(m.content);
			if(tokens < min) keep++;
			else break;
		}
		if(history.length <= keep) return history;
		const recent = keep == 0 ? [] : history.slice(-keep),
			process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
		const summary = await this.summarize(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), 250, options);
		return [{role: 'assistant', content: `Conversation Summary: ${summary}`, timestamp: Date.now()}, ...recent];
	}

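	/*
	 * Illustrative usage (a sketch; the thresholds are arbitrary): once the history estimates
	 * above ~4000 tokens, everything except roughly the last 1000 tokens is folded into a summary.
	 *
	 * const compacted = await llm.compressHistory(history, 4000, 1000);
	 * // compacted[0] is an assistant message starting with "Conversation Summary: ..."
	 */
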
	/**
	 * Split text or an object into overlapping chunks and embed each one
	 * @param target Text or object to embed
	 * @param maxTokens Maximum estimated tokens per chunk
	 * @param overlapTokens Estimated tokens shared between consecutive chunks
	 * @returns {Promise<{index: number, embedding: number[], text: string, tokens: number}[]>} Embedded chunks
	 */
	embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
		const objString = (obj: any, path = ''): string[] => {
			if(obj === null || obj === undefined) return [];
			return Object.entries(obj).flatMap(([key, value]) => {
				const p = path ? `${path}${isNaN(+key) ? `.${key}` : `[${key}]`}` : key;
				if(typeof value === 'object' && value !== null && !Array.isArray(value)) return objString(value, p);
				const valueStr = Array.isArray(value) ? value.join(', ') : String(value);
				return `${p}: ${valueStr}`;
			});
		};

		const embed = async (text: string): Promise<number[]> => {
			const model = await this.embedModel;
			const output = await model(text, {pooling: 'mean', normalize: true});
			return Array.from(output.data);
		};

		// Tokenize
		const lines = typeof target === 'object' ? objString(target) : target.split('\n');
		const tokens = lines.flatMap(line => [...line.split(/\s+/).filter(w => w.trim()), '\n']);

		// Chunk
		const chunks: string[] = [];
		let start = 0;
		while (start < tokens.length) {
			let end = start;
			let text = '';
			// Build chunk
			while (end < tokens.length) {
				const nextToken = tokens[end];
				const testText = text + (text ? ' ' : '') + nextToken;
				const testTokens = this.estimateTokens(testText.replace(/\s*\n\s*/g, '\n'));
				if (testTokens > maxTokens && text) break;
				text = testText;
				end++;
			}
			// Save chunk
			const cleanText = text.replace(/\s*\n\s*/g, '\n').trim();
			if(cleanText) chunks.push(cleanText);
			// Step back by the overlap; if that would not advance, jump to the end (prevents an infinite loop)
			start = end - overlapTokens > start ? end - overlapTokens : end;
		}

		return Promise.all(chunks.map(async (text, index) => ({
			index,
			embedding: await embed(text),
			text,
			tokens: this.estimateTokens(text),
		})));
	}

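	/*
	 * Illustrative usage (a sketch): chunk and embed a document; storing the resulting vectors
	 * in whatever index the application uses is out of scope here.
	 *
	 * const chunks = await llm.embedding(longDocumentText, 500, 50);
	 * for(const c of chunks) console.log(c.index, c.tokens, c.embedding.length);
	 */
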
	/**
	 * Roughly estimate the token count of a value
	 * @param history Value to size (serialized with JSON.stringify)
	 * @returns {number} Rough token count (~4 characters per token, padded by 20%)
	 */
	estimateTokens(history: any): number {
		const text = JSON.stringify(history);
		return Math.ceil((text.length / 4) * 1.2);
	}

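	/*
	 * Worked example of the heuristic above: a 100-character JSON string estimates to
	 * ceil(100 / 4 * 1.2) = 30 tokens. The 1.2 multiplier pads the estimate, so it errs on the
	 * side of over-counting rather than under-counting context usage.
	 */
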
	/**
	 * Compare the similarity of a target string against one or more search terms using tensor math
	 * @param target Text that will be checked
	 * @param {string} searchTerms Search terms to check against target
	 * @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
	 */
	fuzzyMatch(target: string, ...searchTerms: string[]) {
		if(searchTerms.length < 1) throw new Error('Requires at least 2 strings to compare');

		const vector = (text: string, dimensions: number = 10): number[] => {
			const v = text.toLowerCase().split('').map((char, index) =>
				(char.charCodeAt(0) * (index + 1)) % dimensions / dimensions).slice(0, dimensions);
			while(v.length < dimensions) v.push(0); // Pad short strings so all vectors share the same length
			return v;
		};

		const cosineSimilarity = (v1: number[], v2: number[]): number => {
			if (v1.length !== v2.length) throw new Error('Vectors must be same length');
			const tensor1 = tf.tensor1d(v1), tensor2 = tf.tensor1d(v2);
			const dotProduct = tf.dot(tensor1, tensor2);
			const magnitude1 = tf.norm(tensor1);
			const magnitude2 = tf.norm(tensor2);
			if(magnitude1.dataSync()[0] === 0 || magnitude2.dataSync()[0] === 0) return 0;
			return dotProduct.dataSync()[0] / (magnitude1.dataSync()[0] * magnitude2.dataSync()[0]);
		};

		const v = vector(target);
		const similarities = searchTerms.map(t => vector(t)).map(refVector => cosineSimilarity(v, refVector));
		return {avg: similarities.reduce((acc, s) => acc + s, 0) / similarities.length, max: Math.max(...similarities), similarities};
	}

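	/*
	 * Illustrative usage (a sketch): values closer to 1 indicate more similar strings, and the
	 * 0.8 threshold is arbitrary.
	 *
	 * const {avg, max, similarities} = llm.fuzzyMatch('database error', 'db error', 'network timeout');
	 * if(max > 0.8) console.log('Close match found');
	 */
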
	/**
	 * Ask a question with JSON response
	 * @param {string} message Question
	 * @param {LLMRequest} options Configuration options and chat history
	 * @returns {Promise<object>} Parsed JSON object, or an empty object if none was found
	 */
	async json(message: string, options?: LLMRequest) {
		let resp = await this.ask(message, {
			system: 'Respond using a JSON blob',
			...options
		});
		if(!resp?.[0]?.content) return {};
		// Extract the first JSON blob from the response & attempt to parse it
		return JSONAttemptParse(/\{[\s\S]*\}/.exec(resp[0].content)?.[0], {});
	}

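	/*
	 * Illustrative usage (a sketch): the prompt and expected keys are made up for the example.
	 *
	 * const parsed = await llm.json('List three colors as {"colors": [...]}');
	 * console.log(parsed); // e.g. {colors: ['red', 'green', 'blue']}, or {} if no JSON was returned
	 */
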
	/**
	 * Create a summary of some text
	 * @param {string} text Text to summarize
	 * @param {number} tokens Max number of tokens
	 * @param options LLM request options
	 * @returns {Promise<string | null>} Summary, or null if no response was produced
	 */
	summarize(text: string, tokens: number, options?: LLMRequest): Promise<string | null> {
		return this.ask(text, {system: `Generate a brief summary <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options})
			.then(history => <string>history.pop()?.content || null);
	}
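
	/*
	 * Illustrative usage (a sketch): the article text is a placeholder.
	 *
	 * const summary = await llm.summarize(articleText, 100);
	 * if(summary) console.log(summary);
	 */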
}