init
161
src/llm.ts
Normal file
@@ -0,0 +1,161 @@
import {JSONAttemptParse} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {Anthropic} from './antrhopic.ts';
import {Ollama} from './ollama.ts';
import {OpenAi} from './open-ai.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
import {AiTool} from './tools.ts';

export type LLMMessage = {
	/** Message originator */
	role: 'assistant' | 'system' | 'user';
	/** Message content */
	content: string | any;
} | {
	/** Tool call */
	role: 'tool';
	/** Unique ID for call */
	id: string;
	/** Tool that was run */
	name: string;
	/** Tool arguments */
	args: any;
	/** Tool result */
	content: undefined | string;
	/** Tool error */
	error: undefined | string;
}
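
// Illustration only (not part of the original file): example values for the two message
// shapes above; the tool name, ID and contents are made-up placeholders.
// const chat: LLMMessage[] = [
// 	{role: 'user', content: 'What is the weather in Toronto?'},
// 	{role: 'tool', id: 'call_1', name: 'weather', args: {city: 'Toronto'}, content: '12°C and sunny', error: undefined},
// 	{role: 'assistant', content: 'It is currently 12°C and sunny in Toronto.'}
// ];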

export type LLMOptions = {
	/** Anthropic settings */
	anthropic?: {
		/** API Token */
		token: string;
		/** Default model */
		model: string;
	},
	/** Ollama settings */
	ollama?: {
		/** Connection URL */
		host: string;
		/** Default model */
		model: string;
	},
	/** Open AI settings */
	openAi?: {
		/** API Token */
		token: string;
		/** Default model */
		model: string;
	},
	/** Default provider & model */
	model: string | [string, string];
} & Omit<LLMRequest, 'model'>;

export type LLMRequest = {
	/** System prompt */
	system?: string;
	/** Message history */
	history?: LLMMessage[];
	/** Max tokens for request */
	max_tokens?: number;
	/** 0 = Rigid Logic, 1 = Balanced, 2 = Hyper Creative */
	temperature?: number;
	/** Available tools */
	tools?: AiTool[];
	/** LLM model */
	model?: string | [string, string];
	/** Stream response */
	stream?: (chunk: {text?: string, done?: true}) => any;
	/** Compress old messages in the chat to free up context */
	compress?: {
		/** Trigger chat compression once context exceeds this token count */
		max: number;
		/** Compress chat until the context size drops below this token count */
		min: number;
	}
}

export class LLM {
	private providers: {[key: string]: LLMProvider} = {};

	constructor(public readonly ai: Ai, public readonly options: LLMOptions) {
		if(options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, options.anthropic.token, options.anthropic.model);
		if(options.ollama?.host) this.providers.ollama = new Ollama(this.ai, options.ollama.host, options.ollama.model);
		if(options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, options.openAi.token, options.openAi.model);
	}

	/**
	 * Chat with LLM
	 * @param {string} message Question
	 * @param {LLMRequest} options Configuration options and chat history
	 * @returns {AbortablePromise<LLMMessage[]>} Abortable promise resolving to the updated chat history
	 */
	ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
		let model: any = [null, null];
		if(options.model) {
			if(typeof options.model == 'object') model = options.model;
			else model = [options.model, (<any>this.options)[options.model]?.model];
		}
		if(!options.model || model[1] == null) {
			if(typeof this.options.model == 'object') model = this.options.model;
			else model = [this.options.model, (<any>this.options)[this.options.model]?.model];
		}
		if(!model[0] || !model[1]) throw new Error(`Unknown LLM provider or model: ${model[0]} / ${model[1]}`);
		return this.providers[model[0]].ask(message, {...options, model: model[1]});
	}

	/**
	 * Compress chat history to reduce context size
	 * @param {LLMMessage[]} history Chatlog that will be compressed
	 * @param max Trigger compression once context is larger than max
	 * @param min Summarize until context size is less than min
	 * @param {LLMRequest} options LLM options
	 * @returns {Promise<LLMMessage[]>} New chat history with summary at index 0
	 */
	async compress(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
		if(this.estimateTokens(history) < max) return history;
		let keep = 0, tokens = 0;
		for(let m of history.toReversed()) {
			tokens += this.estimateTokens(m.content);
			if(tokens < min) keep++;
			else break;
		}
		if(history.length <= keep) return history;
		const recent = keep == 0 ? [] : history.slice(-keep),
			process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
		const summary = await this.summarize(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), 250, options);
		return [{role: 'assistant', content: `Conversation Summary: ${summary}`}, ...recent];
	}

	/**
	 * Estimate how many tokens a value will consume
	 * @param history Object to size
	 * @returns {number} Rough token count
	 */
	estimateTokens(history: any): number {
		const text = JSON.stringify(history);
		return Math.ceil((text.length / 4) * 1.2);
	}

	/**
	 * Ask the LLM and attempt to parse a JSON object out of the response
	 * @param {string} message Question
	 * @param {LLMRequest} options Configuration options and chat history
	 * @returns {Promise<any>} Parsed object, or an empty object when no JSON is found
	 */
	async json(message: string, options: LLMRequest) {
		let resp = await this.ask(message, {
			system: '',
			...options
		});
		if(!resp?.[0]?.content) return {};
		// Extract the first {...} block from the reply and attempt to parse it
		const match = /\{[\s\S]*\}/.exec(resp[0].content)?.[0];
		return match ? JSONAttemptParse(match, {}) : {};
	}

	/**
	 * Create a summary of some text
	 * @param {string} text Text to summarize
	 * @param {number} tokens Max number of tokens
	 * @param options LLM request options
	 * @returns {Promise<string | null>} Summary, or null if no response was produced
	 */
	summarize(text: string, tokens: number, options?: LLMRequest): Promise<string | null> {
		return this.ask(text, {system: `Generate a brief summary <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options})
			.then(history => <string>history.pop()?.content || null);
	}
}
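
A rough usage sketch (not part of this commit): the provider token, model name, and the surrounding setup are illustrative assumptions based on the signatures above.

import {Ai} from './ai.ts';
import {LLM} from './llm.ts';

declare const ai: Ai; // Provided by the host application; its construction is not shown in this commit

const llm = new LLM(ai, {
	openAi: {token: process.env.OPENAI_KEY ?? '', model: 'gpt-4o-mini'},
	model: 'openAi',
	temperature: 0.7
});

// Ask a question; the abortable promise resolves to the updated chat history
const history = await llm.ask('Summarize the difference between TCP and UDP', {max_tokens: 256});
console.log(history.at(-1)?.content);

// Coerce a structured reply; resolves to {} when no JSON object is found in the response
const data = await llm.json('Respond with {"ok": true} and nothing else', {});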