Updated LLM config and added read_webpage
package-lock.json (generated, 878 lines changed)
Diff suppressed because it is too large.
package.json
@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.3.0",
+  "version": "0.4.0",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",
@@ -30,7 +30,7 @@
     "@xenova/transformers": "^2.17.2",
     "@ztimson/node-utils": "^1.0.4",
     "@ztimson/utils": "^0.27.9",
-    "ollama": "^0.6.0",
+    "cheerio": "^1.2.0",
     "openai": "^6.6.0",
     "tesseract.js": "^6.0.1"
   },
@@ -42,7 +42,6 @@
     "vite-plugin-dts": "^4.5.3"
   },
   "files": [
     "bin",
     "dist"
   ]
 }

src/ai.ts (13 lines changed)
@@ -1,18 +1,17 @@
 import * as os from 'node:os';
-import {LLM, LLMOptions} from './llm';
+import {LLM, AnthropicConfig, OllamaConfig, OpenAiConfig, LLMRequest} from './llm';
 import { Audio } from './audio.ts';
 import {Vision} from './vision.ts';

 export type AbortablePromise<T> = Promise<T> & {abort: () => any};

-export type AiOptions = LLMOptions & {
+export type AiOptions = {
   /** Path to models */
   path?: string;
   /** Piper TTS configuration */
   piper?: {
     /** Model URL: `https://huggingface.co/rhasspy/piper-voices/tree/main/.../model.onnx` */
     model: string;
   },
+  /** Large language models, first is default */
+  llm?: Omit<LLMRequest, 'model'> & {
+    models: {[model: string]: AnthropicConfig | OllamaConfig | OpenAiConfig};
+  }
   /** Tesseract OCR configuration */
   tesseract?: {
     /** Model: eng, eng_best, eng_fast */
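
For illustration, the reworked AiOptions might be wired up like this (a sketch; the model names, env vars, and the Ai constructor signature are assumptions not shown in this diff):

import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({
  path: './models',
  llm: {
    system: 'You are a helpful assistant.',
    models: {
      // First entry doubles as the default model
      'claude-sonnet': {proto: 'anthropic', token: process.env.ANTHROPIC_KEY!},
      'llama3': {proto: 'ollama', host: 'http://localhost:11434/v1'},
      'gpt-4o': {proto: 'openai', token: process.env.OPENAI_KEY!}
    }
  }
});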

src/antrhopic.ts
@@ -1,5 +1,5 @@
 import {Anthropic as anthropic} from '@anthropic-ai/sdk';
-import {findByProp, objectMap, JSONSanitize, JSONAttemptParse, deepCopy} from '@ztimson/utils';
+import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
 import {AbortablePromise, Ai} from './ai.ts';
 import {LLMMessage, LLMRequest} from './llm.ts';
 import {LLMProvider} from './provider.ts';
@@ -51,16 +51,16 @@ export class Anthropic extends LLMProvider {
   ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
     const controller = new AbortController();
     const response = new Promise<any>(async (res, rej) => {
-      let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
-      const original = deepCopy(history);
+      let history = [...options.history || [], {role: 'user', content: message, timestamp: Date.now()}];
       if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
+      history = this.fromStandard(<any>history);

-      const tools = options.tools || this.ai.options.tools || [];
+      const tools = options.tools || this.ai.options.llm?.tools || [];
       const requestParams: any = {
         model: options.model || this.model,
-        max_tokens: options.max_tokens || this.ai.options.max_tokens || 4096,
-        system: options.system || this.ai.options.system || '',
-        temperature: options.temperature || this.ai.options.temperature || 0.7,
+        max_tokens: options.max_tokens || this.ai.options.llm?.max_tokens || 4096,
+        system: options.system || this.ai.options.llm?.system || '',
+        temperature: options.temperature || this.ai.options.llm?.temperature || 0.7,
         tools: tools.map(t => ({
           name: t.name,
           description: t.description,
@@ -117,7 +117,6 @@ export class Anthropic extends LLMProvider {
       const toolCalls = resp.content.filter((c: any) => c.type === 'tool_use');
       if(toolCalls.length && !controller.signal.aborted) {
         history.push({role: 'assistant', content: resp.content});
-        original.push({role: 'assistant', content: resp.content});
         const results = await Promise.all(toolCalls.map(async (toolCall: any) => {
           const tool = tools.find(findByProp('name', toolCall.name));
           if(options.stream) options.stream({tool: toolCall.name});
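
The stream callback declared on LLMRequest reports plain text, tool invocations, and completion. A consumer sketch (the ai.llm accessor is an assumption based on the surrounding code):

await ai.llm.ask('What does example.com sell?', {
  stream: chunk => {
    if(chunk.text) process.stdout.write(chunk.text);            // incremental response text
    else if(chunk.tool) console.error(`[tool: ${chunk.tool}]`); // a tool is being invoked
    else if(chunk.done) console.error('[done]');                // response finished
  }
});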

src/audio.ts (63 lines changed)
@@ -1,6 +1,4 @@
 import {spawn} from 'node:child_process';
-import * as os from 'node:os';
-import {platform, arch} from 'node:os';
 import fs from 'node:fs/promises';
 import Path from 'node:path';
 import {AbortablePromise, Ai} from './ai.ts';
@@ -8,21 +6,12 @@ import {AbortablePromise, Ai} from './ai.ts';
 export class Audio {
   private downloads: {[key: string]: Promise<string>} = {};
   private whisperModel!: string;
-  private piperBinary?: string;

   constructor(private ai: Ai) {
     if(ai.options.whisper?.binary) {
       this.whisperModel = ai.options.whisper?.model.endsWith('.bin') ? ai.options.whisper?.model : ai.options.whisper?.model + '.bin';
       this.downloadAsrModel();
     }
-    if(ai.options.piper?.model) {
-      if(!ai.options.piper.model.startsWith('http') || !ai.options.piper.model.endsWith('.onnx'))
-        throw new Error('Piper model should be a URL to an onnx file to download');
-      if(platform() != 'linux' || (arch() != 'x64' && arch() != 'arm64'))
-        throw new Error('Piper TTS only supported on Linux x64/arm64');
-      this.piperBinary = Path.join(import.meta.dirname, '../bin/piper');
-      this.downloadTtsModel();
-    }
   }

   asr(path: string, model: string = this.whisperModel): AbortablePromise<string | null> {
@@ -43,38 +32,6 @@ export class Audio {
     return Object.assign(p, {abort});
   }

-  tts(text: string, outputPath?: string, model: string = <string>this.ai.options.piper?.model): AbortablePromise<Buffer | string> {
-    if(!this.piperBinary) throw new Error('Piper not configured');
-    if(!model) throw new Error('Invalid Piper model');
-    let abort: any = () => {};
-    const p = new Promise<Buffer | string>(async (resolve, reject) => {
-      const modelPath = await this.downloadTtsModel(model);
-      const tmpFile = outputPath || Path.join(os.tmpdir(), `piper_${Date.now()}.wav`);
-      const proc = spawn(<string>this.piperBinary, ['--model', modelPath, '--output_file', tmpFile], {
-        stdio: ['pipe', 'ignore', 'ignore'],
-        env: {...process.env, LD_LIBRARY_PATH: Path.dirname(<string>this.piperBinary)}
-      });
-      abort = () => proc.kill('SIGTERM');
-      proc.stdin.write(text);
-      proc.stdin.end();
-      proc.on('error', (err: Error) => reject(err));
-      proc.on('close', async (code: number) => {
-        if(code === 0) {
-          if(outputPath) {
-            resolve(outputPath);
-          } else {
-            const buffer = await fs.readFile(tmpFile);
-            await fs.unlink(tmpFile).catch(() => {});
-            resolve(buffer);
-          }
-        } else {
-          reject(new Error(`Exit code ${code}`));
-        }
-      });
-    });
-    return Object.assign(p, {abort});
-  }
-
   async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
     if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
     if(!model.endsWith('.bin')) model += '.bin';
@@ -90,24 +47,4 @@ export class Audio {
     });
     return this.downloads[model];
   }
-
-  async downloadTtsModel(model: string = <string>this.ai.options.piper?.model): Promise<string> {
-    if(!model) throw new Error('Invalid Piper model');
-    const m = <string>model.split('/').pop();
-    const p = Path.join(<string>this.ai.options.path, m);
-    const [onnxExists, jsonExists] = await Promise.all([
-      fs.stat(p).then(() => true).catch(() => false),
-      fs.stat(p + '.json').then(() => true).catch(() => false)
-    ]);
-    if(onnxExists && jsonExists) return p;
-    if(!!this.downloads[m]) return this.downloads[m];
-    this.downloads[m] = Promise.all([
-      onnxExists ? Promise.resolve() : fetch(model).then(r => r.arrayBuffer()).then(b => fs.writeFile(p, Buffer.from(b))),
-      jsonExists ? Promise.resolve() : fetch(model + '.json').then(r => r.arrayBuffer()).then(b => fs.writeFile(p + '.json', Buffer.from(b)))
-    ]).then(() => {
-      delete this.downloads[m];
-      return p;
-    });
-    return this.downloads[m];
-  }
 }
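
Piper TTS is stripped out of Audio entirely; only Whisper ASR remains. A usage sketch (the file path and the ai.audio accessor are assumptions based on the surrounding code):

const req = ai.audio.asr('./meeting.wav');   // uses the configured whisper model by default
setTimeout(() => req.abort(), 120_000);      // AbortablePromise: cancel long transcriptions
const text = await req;                      // transcript string, or null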

src/index.ts
@@ -1,4 +1,9 @@
 export * from './ai';
 export * from './antrhopic';
 export * from './audio';
 export * from './embedder'
 export * from './llm';
 export * from './open-ai';
 export * from './provider';
 export * from './tools';
 export * from './vision';

src/llm.ts (61 lines changed)
@@ -1,7 +1,6 @@
 import {JSONAttemptParse} from '@ztimson/utils';
 import {AbortablePromise, Ai} from './ai.ts';
 import {Anthropic} from './antrhopic.ts';
-import {Ollama} from './ollama.ts';
 import {OpenAi} from './open-ai.ts';
 import {LLMProvider} from './provider.ts';
 import {AiTool} from './tools.ts';
@@ -9,6 +8,10 @@ import {Worker} from 'worker_threads';
 import {fileURLToPath} from 'url';
 import {dirname, join} from 'path';

+export type AnthropicConfig = {proto: 'anthropic', token: string};
+export type OllamaConfig = {proto: 'ollama', host: string};
+export type OpenAiConfig = {proto: 'openai', host?: string, token: string};
+
 export type LLMMessage = {
   /** Message originator */
   role: 'assistant' | 'system' | 'user';
@@ -33,32 +36,6 @@
   timestamp?: number;
 }

-export type LLMOptions = {
-  /** Anthropic settings */
-  anthropic?: {
-    /** API Token */
-    token: string;
-    /** Default model */
-    model: string;
-  },
-  /** Ollama settings */
-  ollama?: {
-    /** connection URL */
-    host: string;
-    /** Default model */
-    model: string;
-  },
-  /** Open AI settings */
-  openAi?: {
-    /** API Token */
-    token: string;
-    /** Default model */
-    model: string;
-  },
-  /** Default provider & model */
-  model: string | [string, string];
-} & Omit<LLMRequest, 'model'>;
-
 export type LLMRequest = {
   /** System prompt */
   system?: string;
@@ -71,7 +48,7 @@ export type LLMRequest = {
   /** Available tools */
   tools?: AiTool[];
   /** LLM model */
-  model?: string | [string, string];
+  model?: string;
   /** Stream response */
   stream?: (chunk: {text?: string, tool?: string, done?: true}) => any;
   /** Compress old messages in the chat to free up context */
@@ -87,8 +64,8 @@ export class LLM {
   private embedWorker: Worker | null = null;
   private embedQueue = new Map<number, { resolve: (value: number[]) => void; reject: (error: any) => void }>();
   private embedId = 0;
-  private providers: {[key: string]: LLMProvider} = {};
+  private models: {[model: string]: LLMProvider} = {};
+  private defaultModel!: string;

   constructor(public readonly ai: Ai) {
     this.embedWorker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
@@ -100,9 +77,13 @@
       }
     });

-    if(ai.options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, ai.options.anthropic.token, ai.options.anthropic.model);
-    if(ai.options.ollama?.host) this.providers.ollama = new Ollama(this.ai, ai.options.ollama.host, ai.options.ollama.model);
-    if(ai.options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, ai.options.openAi.token, ai.options.openAi.model);
+    if(!ai.options.llm?.models) return;
+    Object.entries(ai.options.llm.models).forEach(([model, config]) => {
+      if(!this.defaultModel) this.defaultModel = model;
+      if(config.proto == 'anthropic') this.models[model] = new Anthropic(this.ai, config.token, model);
+      else if(config.proto == 'ollama') this.models[model] = new OpenAi(this.ai, config.host, 'not-needed', model);
+      else if(config.proto == 'openai') this.models[model] = new OpenAi(this.ai, config.host || null, config.token, model);
+    });
   }

   /**
@@ -112,17 +93,9 @@
    * @returns {{abort: () => void, response: Promise<LLMMessage[]>}} Function to abort response and chat history
    */
   ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
-    let model: any = [null, null];
-    if(options.model) {
-      if(typeof options.model == 'object') model = options.model;
-      else model = [options.model, (<any>this.ai.options)[options.model]?.model];
-    }
-    if(!options.model || model[1] == null) {
-      if(typeof this.ai.options.model == 'object') model = this.ai.options.model;
-      else model = [this.ai.options.model, (<any>this.ai.options)[this.ai.options.model]?.model];
-    }
-    if(!model[0] || !model[1]) throw new Error(`Unknown LLM provider or model: ${model[0]} / ${model[1]}`);
-    return this.providers[model[0]].ask(message, {...options, model: model[1]});
+    const m = options.model || this.defaultModel;
+    if(!this.models[m]) throw new Error(`Model does not exist: ${m}`);
+    return this.models[m].ask(message, options);
   }

   /**
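
With providers now keyed by model name, callers pass the registry key directly instead of a [provider, model] tuple. A sketch (the model key is a placeholder matching whatever llm.models registered):

const req = ai.llm.ask('Summarize this repo', {model: 'claude-sonnet'}); // omit model to use the first registered entry
const timer = setTimeout(() => req.abort(), 60_000); // AbortablePromise lets callers cancel
const history = await req;                           // full LLMMessage[] chat history
clearTimeout(timer);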

src/ollama.ts (deleted, 123 lines)
@@ -1,123 +0,0 @@
-import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
-import {AbortablePromise, Ai} from './ai.ts';
-import {LLMMessage, LLMRequest} from './llm.ts';
-import {LLMProvider} from './provider.ts';
-import {Ollama as ollama} from 'ollama';
-
-export class Ollama extends LLMProvider {
-  client!: ollama;
-
-  constructor(public readonly ai: Ai, public host: string, public model: string) {
-    super();
-    this.client = new ollama({host});
-  }
-
-  private toStandard(history: any[]): LLMMessage[] {
-    for(let i = 0; i < history.length; i++) {
-      if(history[i].role == 'assistant' && history[i].tool_calls) {
-        if(history[i].content) delete history[i].tool_calls;
-        else {
-          history.splice(i, 1);
-          i--;
-        }
-      } else if(history[i].role == 'tool') {
-        const error = history[i].content.startsWith('{"error":');
-        history[i] = {role: 'tool', name: history[i].tool_name, args: history[i].args, [error ? 'error' : 'content']: history[i].content, timestamp: history[i].timestamp};
-      }
-      if(!history[i]?.timestamp) history[i].timestamp = Date.now();
-    }
-    return history;
-  }
-
-  private fromStandard(history: LLMMessage[]): any[] {
-    return history.map((h: any) => {
-      const {timestamp, ...rest} = h;
-      if(h.role != 'tool') return rest;
-      return {role: 'tool', tool_name: h.name, content: h.error || h.content}
-    });
-  }
-
-  ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
-    const controller = new AbortController();
-    const response = new Promise<any>(async (res, rej) => {
-      let system = options.system || this.ai.options.system;
-      let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
-      if(history[0].roll == 'system') {
-        if(!system) system = history.shift();
-        else history.shift();
-      }
-      if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min);
-      if(options.system) history.unshift({role: 'system', content: system})
-
-      const tools = options.tools || this.ai.options.tools || [];
-      const requestParams: any = {
-        model: options.model || this.model,
-        messages: history,
-        stream: !!options.stream,
-        signal: controller.signal,
-        options: {
-          temperature: options.temperature || this.ai.options.temperature || 0.7,
-          num_predict: options.max_tokens || this.ai.options.max_tokens || 4096,
-        },
-        tools: tools.map(t => ({
-          type: 'function',
-          function: {
-            name: t.name,
-            description: t.description,
-            parameters: {
-              type: 'object',
-              properties: t.args ? objectMap(t.args, (key, value) => ({...value, required: undefined})) : {},
-              required: t.args ? Object.entries(t.args).filter(t => t[1].required).map(t => t[0]) : []
-            }
-          }
-        }))
-      }
-
-      let resp: any, isFirstMessage = true;
-      do {
-        resp = await this.client.chat(requestParams).catch(err => {
-          err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
-          throw err;
-        });
-
-        if(options.stream) {
-          if(!isFirstMessage) options.stream({text: '\n\n'});
-          else isFirstMessage = false;
-          resp.message = {role: 'assistant', content: '', tool_calls: []};
-          for await (const chunk of resp) {
-            if(controller.signal.aborted) break;
-            if(chunk.message?.content) {
-              resp.message.content += chunk.message.content;
-              options.stream({text: chunk.message.content});
-            }
-            if(chunk.message?.tool_calls) resp.message.tool_calls = chunk.message.tool_calls;
-            if(chunk.done) break;
-          }
-        }
-
-        if(resp.message?.tool_calls?.length && !controller.signal.aborted) {
-          history.push(resp.message);
-          const results = await Promise.all(resp.message.tool_calls.map(async (toolCall: any) => {
-            const tool = tools.find(findByProp('name', toolCall.function.name));
-            if(options.stream) options.stream({tool: toolCall.function.name});
-            if(!tool) return {role: 'tool', tool_name: toolCall.function.name, content: '{"error": "Tool not found"}'};
-            const args = typeof toolCall.function.arguments === 'string' ? JSONAttemptParse(toolCall.function.arguments, {}) : toolCall.function.arguments;
-            try {
-              const result = await tool.fn(args, this.ai);
-              return {role: 'tool', tool_name: toolCall.function.name, args, content: JSONSanitize(result)};
-            } catch (err: any) {
-              return {role: 'tool', tool_name: toolCall.function.name, args, content: JSONSanitize({error: err?.message || err?.toString() || 'Unknown'})};
-            }
-          }));
-          history.push(...results);
-          requestParams.messages = history;
-        }
-      } while (!controller.signal.aborted && resp.message?.tool_calls?.length);
-
-      if(options.stream) options.stream({done: true});
-      res(this.toStandard([...history, {role: 'assistant', content: resp.message?.content}]));
-    });
-
-    return Object.assign(response, {abort: () => controller.abort()});
-  }
-}
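
The dedicated Ollama provider is gone; an OllamaConfig is now handled by the OpenAi provider with a dummy token ('not-needed'), pointed at Ollama's OpenAI-compatible endpoint. An equivalent registration, sketched (the host value and /v1 suffix are assumptions, since the host string is passed straight through as the client's baseURL):

const ai = new Ai({
  llm: {
    models: {
      // Served by the OpenAi provider under the hood
      'llama3': {proto: 'ollama', host: 'http://localhost:11434/v1'}
    }
  }
});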

src/open-ai.ts
@@ -1,5 +1,5 @@
 import {OpenAI as openAI} from 'openai';
-import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
+import {findByProp, objectMap, JSONSanitize, JSONAttemptParse, clean} from '@ztimson/utils';
 import {AbortablePromise, Ai} from './ai.ts';
 import {LLMMessage, LLMRequest} from './llm.ts';
 import {LLMProvider} from './provider.ts';
@@ -7,9 +7,12 @@ import {LLMProvider} from './provider.ts';
 export class OpenAi extends LLMProvider {
   client!: openAI;

-  constructor(public readonly ai: Ai, public readonly apiToken: string, public model: string) {
+  constructor(public readonly ai: Ai, public readonly host: string | null, public readonly token: string, public model: string) {
     super();
-    this.client = new openAI({apiKey: apiToken});
+    this.client = new openAI(clean({
+      baseURL: host,
+      apiKey: token
+    }));
   }

   private toStandard(history: any[]): LLMMessage[] {
@@ -64,16 +67,17 @@ export class OpenAi extends LLMProvider {
   ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
     const controller = new AbortController();
     const response = new Promise<any>(async (res, rej) => {
-      let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
+      let history = [...options.history || [], {role: 'user', content: message, timestamp: Date.now()}];
       if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
+      history = this.fromStandard(<any>history);

-      const tools = options.tools || this.ai.options.tools || [];
+      const tools = options.tools || this.ai.options.llm?.tools || [];
       const requestParams: any = {
         model: options.model || this.model,
         messages: history,
         stream: !!options.stream,
-        max_tokens: options.max_tokens || this.ai.options.max_tokens || 4096,
-        temperature: options.temperature || this.ai.options.temperature || 0.7,
+        max_tokens: options.max_tokens || this.ai.options.llm?.max_tokens || 4096,
+        temperature: options.temperature || this.ai.options.llm?.temperature || 0.7,
         tools: tools.map(t => ({
           type: 'function',
           function: {
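
The constructor now wraps the client options in clean(), assumed here to strip null/undefined keys so a null host falls back to the client's default baseURL. Two sketched instantiations (tokens and hosts are placeholders):

new OpenAi(ai, null, process.env.OPENAI_KEY!, 'gpt-4o');             // hosted OpenAI: baseURL omitted
new OpenAi(ai, 'http://localhost:11434/v1', 'not-needed', 'llama3'); // any OpenAI-compatible server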

src/provider.ts
@@ -1,5 +1,5 @@
 import {AbortablePromise} from './ai.ts';
-import {LLMMessage, LLMOptions, LLMRequest} from './llm.ts';
+import {LLMMessage, LLMRequest} from './llm.ts';

 export abstract class LLMProvider {
   abstract ask(message: string, options: LLMRequest): AbortablePromise<LLMMessage[]>;

src/tools.ts (41 lines changed)
@@ -1,3 +1,4 @@
+import * as cheerio from 'cheerio';
 import {$, $Sync} from '@ztimson/node-utils';
 import {ASet, consoleInterceptor, Http, fn as Fn} from '@ztimson/utils';
 import {Ai} from './ai.ts';
@@ -111,9 +112,43 @@ export const PythonTool: AiTool = {
   fn: async (args: {code: string}) => ({result: $Sync`python -c "${args.code}"`})
 }

-export const SearchTool: AiTool = {
-  name: 'search',
-  description: 'Use a search engine to find relevant URLs, should be chained with fetch to scrape sources',
+export const ReadWebpageTool: AiTool = {
+  name: 'read_webpage',
+  description: 'Extract clean, structured content from a webpage. Use after web_search to read specific URLs',
+  args: {
+    url: {type: 'string', description: 'URL to extract content from', required: true},
+    focus: {type: 'string', description: 'Optional: What aspect to focus on (e.g., "pricing", "features", "contact info")'}
+  },
+  fn: async (args: {url: string; focus?: string}) => {
+    const html = await fetch(args.url, {headers: {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}})
+      .then(r => r.text()).catch(err => {throw new Error(`Failed to fetch: ${err.message}`)});
+
+    const $ = cheerio.load(html);
+    $('script, style, nav, footer, header, aside, iframe, noscript, [role="navigation"], [role="banner"], .ad, .ads, .cookie, .popup').remove();
+    const metadata = {
+      title: $('meta[property="og:title"]').attr('content') || $('title').text() || '',
+      description: $('meta[name="description"]').attr('content') || $('meta[property="og:description"]').attr('content') || '',
+    };
+
+    let content = '';
+    const contentSelectors = ['article', 'main', '[role="main"]', '.content', '.post', '.entry', 'body'];
+    for (const selector of contentSelectors) {
+      const el = $(selector).first();
+      if (el.length && el.text().trim().length > 200) {
+        content = el.text();
+        break;
+      }
+    }
+    if (!content) content = $('body').text();
+    content = content.replace(/\s+/g, ' ').trim().slice(0, 8000);
+
+    return {url: args.url, title: metadata.title.trim(), description: metadata.description.trim(), content, focus: args.focus};
+  }
+}
+
+export const WebSearchTool: AiTool = {
+  name: 'web_search',
+  description: 'Use duckduckgo (anonymous) to find relevant online resources. Returns a list of URLs that works great with the `read_webpage` tool',
   args: {
     query: {type: 'string', description: 'Search string', required: true},
     length: {type: 'string', description: 'Number of results to return', default: 5},
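
The two tools are designed to chain: web_search finds URLs, read_webpage extracts their content. A direct invocation sketch (the URL is a placeholder; tools are normally called by the model, not by hand):

const page = await ReadWebpageTool.fn({url: 'https://example.com/pricing', focus: 'pricing'});
console.log(page.title, page.content.length); // cleaned page text, capped at 8000 chars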