4 Commits

SHA1        Message                                                               Date
7b57a0ded1  Updated LLM config and added read_webpage                             2026-02-01 13:16:08 -05:00
            All checks successful: Publish Library / Build NPM Project (push) 46s, Tag Version (push) 6s
28904cddbe  TTS                                                                   2026-01-30 15:39:29 -05:00
            All checks successful: Publish Library / Build NPM Project (push) 49s, Tag Version (push) 16s
d5bf1ec47e  Pulled chunking out into its own exported function for easy access   2026-01-30 10:38:51 -05:00
            All checks successful: Publish Library / Build NPM Project (push) 41s, Tag Version (push) 7s
cb60a0b0c5  Moved embeddings to worker to prevent blocking                        2026-01-28 22:17:39 -05:00
            All checks successful: Publish Library / Build NPM Project (push) 41s, Tag Version (push) 7s
14 changed files with 973 additions and 740 deletions

package-lock.json (generated, 1240 lines changed)
File diff suppressed because it is too large.

package.json

@@ -1,6 +1,6 @@
{
"name": "@ztimson/ai-utils",
"version": "0.2.5",
"version": "0.4.0",
"description": "AI Utility library",
"author": "Zak Timson",
"license": "MIT",
@@ -30,7 +30,7 @@
"@xenova/transformers": "^2.17.2",
"@ztimson/node-utils": "^1.0.4",
"@ztimson/utils": "^0.27.9",
"ollama": "^0.6.0",
"cheerio": "^1.2.0",
"openai": "^6.6.0",
"tesseract.js": "^6.0.1"
},

src/ai.ts

@@ -1,11 +1,22 @@
import * as os from 'node:os';
import {LLM, LLMOptions} from './llm';
import {LLM, AnthropicConfig, OllamaConfig, OpenAiConfig, LLMRequest} from './llm';
import { Audio } from './audio.ts';
import {Vision} from './vision.ts';
export type AiOptions = LLMOptions & {
export type AbortablePromise<T> = Promise<T> & {abort: () => any};
export type AiOptions = {
/** Path to models */
path?: string;
/** Large language models, first is default */
llm?: Omit<LLMRequest, 'model'> & {
models: {[model: string]: AnthropicConfig | OllamaConfig | OpenAiConfig};
}
/** Tesseract OCR configuration */
tesseract?: {
/** Model: eng, eng_best, eng_fast */
model?: string;
}
/** Whisper ASR configuration */
whisper?: {
/** Whisper binary location */
@@ -13,11 +24,6 @@ export type AiOptions = LLMOptions & {
/** Model: `ggml-base.en.bin` */
model: string;
}
/** Tesseract OCR configuration */
tesseract?: {
/** Model: eng, eng_best, eng_fast */
model?: string;
}
}
export class Ai {
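
For illustration, a minimal sketch of the new options shape. The Ai constructor call, model names, and env-var tokens are assumptions; the field names come from the AiOptions type above.

import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({
    path: './models',
    llm: {
        system: 'You are a helpful assistant',
        temperature: 0.7,
        models: {
            // First entry becomes the default model
            'claude-sonnet-4': {proto: 'anthropic', token: process.env.ANTHROPIC_KEY!},
            'llama3.1': {proto: 'ollama', host: 'http://localhost:11434/v1'},
            'gpt-4o': {proto: 'openai', token: process.env.OPENAI_KEY!}
        }
    },
    whisper: {binary: '/usr/bin/whisper-cli', model: 'ggml-base.en.bin'},
    tesseract: {model: 'eng'}
});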

src/antrhopic.ts

@@ -1,8 +1,8 @@
import {Anthropic as anthropic} from '@anthropic-ai/sdk';
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse, deepCopy} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
import {AbortablePromise, Ai} from './ai.ts';
import {LLMMessage, LLMRequest} from './llm.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
import {LLMProvider} from './provider.ts';
export class Anthropic extends LLMProvider {
client!: anthropic;
@@ -51,16 +51,16 @@ export class Anthropic extends LLMProvider {
ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
const controller = new AbortController();
const response = new Promise<any>(async (res, rej) => {
let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
const original = deepCopy(history);
let history = [...options.history || [], {role: 'user', content: message, timestamp: Date.now()}];
if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
history = this.fromStandard(<any>history);
const tools = options.tools || this.ai.options.tools || [];
const tools = options.tools || this.ai.options.llm?.tools || [];
const requestParams: any = {
model: options.model || this.model,
max_tokens: options.max_tokens || this.ai.options.max_tokens || 4096,
system: options.system || this.ai.options.system || '',
temperature: options.temperature || this.ai.options.temperature || 0.7,
max_tokens: options.max_tokens || this.ai.options.llm?.max_tokens || 4096,
system: options.system || this.ai.options.llm?.system || '',
temperature: options.temperature || this.ai.options.llm?.temperature || 0.7,
tools: tools.map(t => ({
name: t.name,
description: t.description,
@@ -117,9 +117,9 @@ export class Anthropic extends LLMProvider {
const toolCalls = resp.content.filter((c: any) => c.type === 'tool_use');
if(toolCalls.length && !controller.signal.aborted) {
history.push({role: 'assistant', content: resp.content});
original.push({role: 'assistant', content: resp.content});
const results = await Promise.all(toolCalls.map(async (toolCall: any) => {
const tool = tools.find(findByProp('name', toolCall.name));
if(options.stream) options.stream({tool: toolCall.name});
if(!tool) return {tool_use_id: toolCall.id, is_error: true, content: 'Tool not found'};
try {
const result = await tool.fn(toolCall.input, this.ai);
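
The stream callback now also reports tool invocations via {tool}. A rough usage sketch (the prompt and logging are illustrative; ai.language is the LLM instance referenced throughout the code):

const reply = ai.language.ask('Summarize the latest release notes', {
    stream: chunk => {
        if(chunk.tool) console.log(`\n[running tool: ${chunk.tool}]`);
        if(chunk.text) process.stdout.write(chunk.text);
        if(chunk.done) console.log('\n[done]');
    }
});
// reply.abort();                 // cancel mid-response if needed
const history = await reply;      // resolves to the full LLMMessage[] history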

src/audio.ts

@@ -1,7 +1,7 @@
import {spawn} from 'node:child_process';
import fs from 'node:fs/promises';
import Path from 'node:path';
import {Ai} from './ai.ts';
import {AbortablePromise, Ai} from './ai.ts';
export class Audio {
private downloads: {[key: string]: Promise<string>} = {};
@@ -14,37 +14,24 @@ export class Audio {
}
}
/**
* Convert audio to text using Automatic Speech Recognition
* @param {string} path Path to audio
* @param model Whisper model
* @returns {Promise<any>} Extracted text
*/
asr(path: string, model: string = this.whisperModel): {abort: () => void, response: Promise<string | null>} {
asr(path: string, model: string = this.whisperModel): AbortablePromise<string | null> {
if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
let abort: any = () => {};
const response = new Promise<string | null>((resolve, reject) => {
this.downloadAsrModel(model).then(m => {
let output = '';
const proc = spawn(<string>this.ai.options.whisper?.binary, ['-nt', '-np', '-m', m, '-f', path], {stdio: ['ignore', 'pipe', 'ignore']});
abort = () => proc.kill('SIGTERM');
proc.on('error', (err: Error) => reject(err));
proc.stdout.on('data', (data: Buffer) => output += data.toString());
proc.on('close', (code: number) => {
if(code === 0) resolve(output.trim() || null);
else reject(new Error(`Exit code ${code}`));
});
const p = new Promise<string | null>(async (resolve, reject) => {
const m = await this.downloadAsrModel(model);
let output = '';
const proc = spawn(<string>this.ai.options.whisper?.binary, ['-nt', '-np', '-m', m, '-f', path], {stdio: ['ignore', 'pipe', 'ignore']});
abort = () => proc.kill('SIGTERM');
proc.on('error', (err: Error) => reject(err));
proc.stdout.on('data', (data: Buffer) => output += data.toString());
proc.on('close', (code: number) => {
if(code === 0) resolve(output.trim() || null);
else reject(new Error(`Exit code ${code}`));
});
});
return {response, abort};
return Object.assign(p, {abort});
}
/**
* Downloads the specified Whisper model if it is not already present locally.
*
* @param {string} model Whisper model that will be downloaded
* @return {Promise<string>} Absolute path to model file, resolves once downloaded
*/
async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
if(!model.endsWith('.bin')) model += '.bin';
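
asr() now returns an AbortablePromise instead of an {abort, response} pair, so callers await it directly. A small sketch, assuming the Audio instance is exposed on the Ai class as ai.audio:

const transcript = ai.audio.asr('./meeting.wav');               // AbortablePromise<string | null>
const timer = setTimeout(() => transcript.abort(), 60_000);     // kill whisper if it runs too long
const text = await transcript.finally(() => clearTimeout(timer));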

src/embedder.ts (new file, 11 lines)

@@ -0,0 +1,11 @@
import { pipeline } from '@xenova/transformers';
import { parentPort } from 'worker_threads';
let model: any;
parentPort?.on('message', async ({ id, text }) => {
if(!model) model = await pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
const output = await model(text, { pooling: 'mean', normalize: true });
const embedding = Array.from(output.data);
parentPort?.postMessage({ id, embedding });
});
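
The worker speaks a tiny id-based protocol: the parent posts {id, text} and receives {id, embedding} back. Normally LLM.embedding() drives it (see llm.ts below), but a standalone sketch looks roughly like this (the dist path is an assumption):

import {Worker} from 'worker_threads';

const worker = new Worker('./dist/embedder.js');
const embed = (text: string) => new Promise<number[]>(resolve => {
    const id = Math.random();                                    // any unique id works
    const handler = (msg: {id: number, embedding: number[]}) => {
        if(msg.id !== id) return;
        worker.off('message', handler);
        resolve(msg.embedding);
    };
    worker.on('message', handler);
    worker.postMessage({id, text});
});

console.log((await embed('hello world')).length);                // 384-dim MiniLM vector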

src/index.ts

@@ -1,4 +1,9 @@
export * from './ai';
export * from './antrhopic';
export * from './audio';
export * from './embedder'
export * from './llm';
export * from './open-ai';
export * from './provider';
export * from './tools';
export * from './vision';

src/llm.ts

@@ -1,12 +1,16 @@
import {pipeline} from '@xenova/transformers';
import {JSONAttemptParse} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {AbortablePromise, Ai} from './ai.ts';
import {Anthropic} from './antrhopic.ts';
import {Ollama} from './ollama.ts';
import {OpenAi} from './open-ai.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
import {LLMProvider} from './provider.ts';
import {AiTool} from './tools.ts';
import * as tf from '@tensorflow/tfjs';
import {Worker} from 'worker_threads';
import {fileURLToPath} from 'url';
import {dirname, join} from 'path';
export type AnthropicConfig = {proto: 'anthropic', token: string};
export type OllamaConfig = {proto: 'ollama', host: string};
export type OpenAiConfig = {proto: 'openai', host?: string, token: string};
export type LLMMessage = {
/** Message originator */
@@ -32,32 +36,6 @@ export type LLMMessage = {
timestamp?: number;
}
export type LLMOptions = {
/** Anthropic settings */
anthropic?: {
/** API Token */
token: string;
/** Default model */
model: string;
},
/** Ollama settings */
ollama?: {
/** connection URL */
host: string;
/** Default model */
model: string;
},
/** Open AI settings */
openAi?: {
/** API Token */
token: string;
/** Default model */
model: string;
},
/** Default provider & model */
model: string | [string, string];
} & Omit<LLMRequest, 'model'>;
export type LLMRequest = {
/** System prompt */
system?: string;
@@ -70,9 +48,9 @@ export type LLMRequest = {
/** Available tools */
tools?: AiTool[];
/** LLM model */
model?: string | [string, string];
model?: string;
/** Stream response */
stream?: (chunk: {text?: string, done?: true}) => any;
stream?: (chunk: {text?: string, tool?: string, done?: true}) => any;
/** Compress old messages in the chat to free up context */
compress?: {
/** Trigger chat compression once context exceeds the token count */
@@ -83,14 +61,29 @@ export type LLMRequest = {
}
export class LLM {
private embedModel: any;
private providers: {[key: string]: LLMProvider} = {};
private embedWorker: Worker | null = null;
private embedQueue = new Map<number, { resolve: (value: number[]) => void; reject: (error: any) => void }>();
private embedId = 0;
private models: {[model: string]: LLMProvider} = {};
private defaultModel!: string;
constructor(public readonly ai: Ai) {
this.embedModel = pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
if(ai.options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, ai.options.anthropic.token, ai.options.anthropic.model);
if(ai.options.ollama?.host) this.providers.ollama = new Ollama(this.ai, ai.options.ollama.host, ai.options.ollama.model);
if(ai.options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, ai.options.openAi.token, ai.options.openAi.model);
this.embedWorker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
this.embedWorker.on('message', ({ id, embedding }) => {
const pending = this.embedQueue.get(id);
if (pending) {
pending.resolve(embedding);
this.embedQueue.delete(id);
}
});
if(!ai.options.llm?.models) return;
Object.entries(ai.options.llm.models).forEach(([model, config]) => {
if(!this.defaultModel) this.defaultModel = model;
if(config.proto == 'anthropic') this.models[model] = new Anthropic(this.ai, config.token, model);
else if(config.proto == 'ollama') this.models[model] = new OpenAi(this.ai, config.host, 'not-needed', model);
else if(config.proto == 'openai') this.models[model] = new OpenAi(this.ai, config.host || null, config.token, model);
});
}
/**
@@ -100,17 +93,9 @@ export class LLM {
* @returns {{abort: () => void, response: Promise<LLMMessage[]>}} Function to abort response and chat history
*/
ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
let model: any = [null, null];
if(options.model) {
if(typeof options.model == 'object') model = options.model;
else model = [options.model, (<any>this.ai.options)[options.model]?.model];
}
if(!options.model || model[1] == null) {
if(typeof this.ai.options.model == 'object') model = this.ai.options.model;
else model = [this.ai.options.model, (<any>this.ai.options)[this.ai.options.model]?.model];
}
if(!model[0] || !model[1]) throw new Error(`Unknown LLM provider or model: ${model[0]} / ${model[1]}`);
return this.providers[model[0]].ask(message, {...options, model: model[1]});
const m = options.model || this.defaultModel;
if(!this.models[m]) throw new Error(`Model does not exist: ${m}`);
return this.models[m].ask(message, options);
}
/**
@@ -148,49 +133,44 @@ export class LLM {
return denominator === 0 ? 0 : dotProduct / denominator;
}
embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
chunk(target: object | string, maxTokens = 500, overlapTokens = 50): string[] {
const objString = (obj: any, path = ''): string[] => {
if(obj === null || obj === undefined) return [];
if(!obj) return [];
return Object.entries(obj).flatMap(([key, value]) => {
const p = path ? `${path}${isNaN(+key) ? `.${key}` : `[${key}]`}` : key;
if(typeof value === 'object' && value !== null && !Array.isArray(value)) return objString(value, p);
const valueStr = Array.isArray(value) ? value.join(', ') : String(value);
return `${p}: ${valueStr}`;
if(typeof value === 'object' && !Array.isArray(value)) return objString(value, p);
return `${p}: ${Array.isArray(value) ? value.join(', ') : value}`;
});
};
const embed = async (text: string): Promise<number[]> => {
const model = await this.embedModel;
const output = await model(text, {pooling: 'mean', normalize: true});
return Array.from(output.data);
const lines = typeof target === 'object' ? objString(target) : target.split('\n');
const tokens = lines.flatMap(l => [...l.split(/\s+/).filter(Boolean), '\n']);
const chunks: string[] = [];
for(let i = 0; i < tokens.length;) {
let text = '', j = i;
while(j < tokens.length) {
const next = text + (text ? ' ' : '') + tokens[j];
if(this.estimateTokens(next.replace(/\s*\n\s*/g, '\n')) > maxTokens && text) break;
text = next;
j++;
}
const clean = text.replace(/\s*\n\s*/g, '\n').trim();
if(clean) chunks.push(clean);
i = Math.max(j - overlapTokens, j === i ? i + 1 : j);
}
return chunks;
}
embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
const embed = (text: string): Promise<number[]> => {
return new Promise((resolve, reject) => {
const id = this.embedId++;
this.embedQueue.set(id, { resolve, reject });
this.embedWorker?.postMessage({ id, text });
});
};
// Tokenize
const lines = typeof target === 'object' ? objString(target) : target.split('\n');
const tokens = lines.flatMap(line => [...line.split(/\s+/).filter(w => w.trim()), '\n']);
// Chunk
const chunks: string[] = [];
let start = 0;
while (start < tokens.length) {
let end = start;
let text = '';
// Build chunk
while (end < tokens.length) {
const nextToken = tokens[end];
const testText = text + (text ? ' ' : '') + nextToken;
const testTokens = this.estimateTokens(testText.replace(/\s*\n\s*/g, '\n'));
if (testTokens > maxTokens && text) break;
text = testText;
end++;
}
// Save chunk
const cleanText = text.replace(/\s*\n\s*/g, '\n').trim();
if(cleanText) chunks.push(cleanText);
start = end - overlapTokens;
if (start <= end - tokens.length + end) start = end; // Safety: prevent infinite loop
}
const chunks = this.chunk(target, maxTokens, overlapTokens);
return Promise.all(chunks.map(async (text, index) => ({
index,
embedding: await embed(text),
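
chunk() is now exported separately from embedding(), so the token-window splitting can be reused without computing vectors. Roughly (longDocument is a placeholder):

// Split into ~500-token chunks with 50 tokens of overlap
const chunks: string[] = ai.language.chunk(longDocument, 500, 50);

// Or chunk and embed in one call; vectors are computed in the worker, off the main thread
const embedded = await ai.language.embedding(longDocument, 500, 50);
// => [{index: 0, embedding: number[], ...}, ...]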

src/ollama.ts (deleted)

@@ -1,122 +0,0 @@
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {LLMMessage, LLMRequest} from './llm.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
import {Ollama as ollama} from 'ollama';
export class Ollama extends LLMProvider {
client!: ollama;
constructor(public readonly ai: Ai, public host: string, public model: string) {
super();
this.client = new ollama({host});
}
private toStandard(history: any[]): LLMMessage[] {
for(let i = 0; i < history.length; i++) {
if(history[i].role == 'assistant' && history[i].tool_calls) {
if(history[i].content) delete history[i].tool_calls;
else {
history.splice(i, 1);
i--;
}
} else if(history[i].role == 'tool') {
const error = history[i].content.startsWith('{"error":');
history[i] = {role: 'tool', name: history[i].tool_name, args: history[i].args, [error ? 'error' : 'content']: history[i].content, timestamp: history[i].timestamp};
}
if(!history[i]?.timestamp) history[i].timestamp = Date.now();
}
return history;
}
private fromStandard(history: LLMMessage[]): any[] {
return history.map((h: any) => {
const {timestamp, ...rest} = h;
if(h.role != 'tool') return rest;
return {role: 'tool', tool_name: h.name, content: h.error || h.content}
});
}
ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
const controller = new AbortController();
const response = new Promise<any>(async (res, rej) => {
let system = options.system || this.ai.options.system;
let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
if(history[0].roll == 'system') {
if(!system) system = history.shift();
else history.shift();
}
if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min);
if(options.system) history.unshift({role: 'system', content: system})
const tools = options.tools || this.ai.options.tools || [];
const requestParams: any = {
model: options.model || this.model,
messages: history,
stream: !!options.stream,
signal: controller.signal,
options: {
temperature: options.temperature || this.ai.options.temperature || 0.7,
num_predict: options.max_tokens || this.ai.options.max_tokens || 4096,
},
tools: tools.map(t => ({
type: 'function',
function: {
name: t.name,
description: t.description,
parameters: {
type: 'object',
properties: t.args ? objectMap(t.args, (key, value) => ({...value, required: undefined})) : {},
required: t.args ? Object.entries(t.args).filter(t => t[1].required).map(t => t[0]) : []
}
}
}))
}
let resp: any, isFirstMessage = true;
do {
resp = await this.client.chat(requestParams).catch(err => {
err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
throw err;
});
if(options.stream) {
if(!isFirstMessage) options.stream({text: '\n\n'});
else isFirstMessage = false;
resp.message = {role: 'assistant', content: '', tool_calls: []};
for await (const chunk of resp) {
if(controller.signal.aborted) break;
if(chunk.message?.content) {
resp.message.content += chunk.message.content;
options.stream({text: chunk.message.content});
}
if(chunk.message?.tool_calls) resp.message.tool_calls = chunk.message.tool_calls;
if(chunk.done) break;
}
}
if(resp.message?.tool_calls?.length && !controller.signal.aborted) {
history.push(resp.message);
const results = await Promise.all(resp.message.tool_calls.map(async (toolCall: any) => {
const tool = tools.find(findByProp('name', toolCall.function.name));
if(!tool) return {role: 'tool', tool_name: toolCall.function.name, content: '{"error": "Tool not found"}'};
const args = typeof toolCall.function.arguments === 'string' ? JSONAttemptParse(toolCall.function.arguments, {}) : toolCall.function.arguments;
try {
const result = await tool.fn(args, this.ai);
return {role: 'tool', tool_name: toolCall.function.name, args, content: JSONSanitize(result)};
} catch (err: any) {
return {role: 'tool', tool_name: toolCall.function.name, args, content: JSONSanitize({error: err?.message || err?.toString() || 'Unknown'})};
}
}));
history.push(...results);
requestParams.messages = history;
}
} while (!controller.signal.aborted && resp.message?.tool_calls?.length);
if(options.stream) options.stream({done: true});
res(this.toStandard([...history, {role: 'assistant', content: resp.message?.content}]));
});
return Object.assign(response, {abort: () => controller.abort()});
}
}
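
With ollama.ts deleted, Ollama models are now reached through the OpenAI-compatible provider (the LLM constructor above builds an OpenAi instance from an 'ollama' config's host). A hedged migration sketch; the host assumes Ollama's /v1 compatibility endpoint:

const ai = new Ai({
    llm: {
        models: {
            // No API key needed; the provider passes a placeholder token
            'llama3.1': {proto: 'ollama', host: 'http://localhost:11434/v1'}
        }
    }
});
const answer = await ai.language.ask('Hello!', {model: 'llama3.1'});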

src/open-ai.ts

@@ -1,15 +1,18 @@
import {OpenAI as openAI} from 'openai';
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
import {Ai} from './ai.ts';
import {findByProp, objectMap, JSONSanitize, JSONAttemptParse, clean} from '@ztimson/utils';
import {AbortablePromise, Ai} from './ai.ts';
import {LLMMessage, LLMRequest} from './llm.ts';
import {AbortablePromise, LLMProvider} from './provider.ts';
import {LLMProvider} from './provider.ts';
export class OpenAi extends LLMProvider {
client!: openAI;
constructor(public readonly ai: Ai, public readonly apiToken: string, public model: string) {
constructor(public readonly ai: Ai, public readonly host: string | null, public readonly token: string, public model: string) {
super();
this.client = new openAI({apiKey: apiToken});
this.client = new openAI(clean({
baseURL: host,
apiKey: token
}));
}
private toStandard(history: any[]): LLMMessage[] {
@@ -64,16 +67,17 @@ export class OpenAi extends LLMProvider {
ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
const controller = new AbortController();
const response = new Promise<any>(async (res, rej) => {
let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
let history = [...options.history || [], {role: 'user', content: message, timestamp: Date.now()}];
if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
history = this.fromStandard(<any>history);
const tools = options.tools || this.ai.options.tools || [];
const tools = options.tools || this.ai.options.llm?.tools || [];
const requestParams: any = {
model: options.model || this.model,
messages: history,
stream: !!options.stream,
max_tokens: options.max_tokens || this.ai.options.max_tokens || 4096,
temperature: options.temperature || this.ai.options.temperature || 0.7,
max_tokens: options.max_tokens || this.ai.options.llm?.max_tokens || 4096,
temperature: options.temperature || this.ai.options.llm?.temperature || 0.7,
tools: tools.map(t => ({
type: 'function',
function: {
@@ -116,6 +120,7 @@ export class OpenAi extends LLMProvider {
history.push(resp.choices[0].message);
const results = await Promise.all(toolCalls.map(async (toolCall: any) => {
const tool = tools?.find(findByProp('name', toolCall.function.name));
if(options.stream) options.stream({tool: toolCall.function.name});
if(!tool) return {role: 'tool', tool_call_id: toolCall.id, content: '{"error": "Tool not found"}'};
try {
const args = JSONAttemptParse(toolCall.function.arguments, {});
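
Because the constructor now takes a host and clean() drops null fields, the same OpenAi class can target api.openai.com or any OpenAI-compatible server. Illustrative only; the LLM constructor normally instantiates providers for you, and the proxy URL is hypothetical:

// host = null  -> new openAI({apiKey: token})                   // default api.openai.com
// host set     -> new openAI({baseURL: host, apiKey: token})    // self-hosted / proxy
const hosted = new OpenAi(ai, null, process.env.OPENAI_KEY!, 'gpt-4o');
const proxied = new OpenAi(ai, 'https://llm-proxy.internal/v1', 'sk-anything', 'gpt-4o');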

src/provider.ts

@@ -1,6 +1,5 @@
import {LLMMessage, LLMOptions, LLMRequest} from './llm.ts';
export type AbortablePromise<T> = Promise<T> & {abort: () => void};
import {AbortablePromise} from './ai.ts';
import {LLMMessage, LLMRequest} from './llm.ts';
export abstract class LLMProvider {
abstract ask(message: string, options: LLMRequest): AbortablePromise<LLMMessage[]>;

src/tools.ts

@@ -1,3 +1,4 @@
import * as cheerio from 'cheerio';
import {$, $Sync} from '@ztimson/node-utils';
import {ASet, consoleInterceptor, Http, fn as Fn} from '@ztimson/utils';
import {Ai} from './ai.ts';
@@ -111,9 +112,43 @@ export const PythonTool: AiTool = {
fn: async (args: {code: string}) => ({result: $Sync`python -c "${args.code}"`})
}
export const SearchTool: AiTool = {
name: 'search',
description: 'Use a search engine to find relevant URLs, should be changed with fetch to scrape sources',
export const ReadWebpageTool: AiTool = {
name: 'read_webpage',
description: 'Extract clean, structured content from a webpage. Use after web_search to read specific URLs',
args: {
url: {type: 'string', description: 'URL to extract content from', required: true},
focus: {type: 'string', description: 'Optional: What aspect to focus on (e.g., "pricing", "features", "contact info")'}
},
fn: async (args: {url: string; focus?: string}) => {
const html = await fetch(args.url, {headers: {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}})
.then(r => r.text()).catch(err => {throw new Error(`Failed to fetch: ${err.message}`)});
const $ = cheerio.load(html);
$('script, style, nav, footer, header, aside, iframe, noscript, [role="navigation"], [role="banner"], .ad, .ads, .cookie, .popup').remove();
const metadata = {
title: $('meta[property="og:title"]').attr('content') || $('title').text() || '',
description: $('meta[name="description"]').attr('content') || $('meta[property="og:description"]').attr('content') || '',
};
let content = '';
const contentSelectors = ['article', 'main', '[role="main"]', '.content', '.post', '.entry', 'body'];
for (const selector of contentSelectors) {
const el = $(selector).first();
if (el.length && el.text().trim().length > 200) {
content = el.text();
break;
}
}
if (!content) content = $('body').text();
content = content.replace(/\s+/g, ' ').trim().slice(0, 8000);
return {url: args.url, title: metadata.title.trim(), description: metadata.description.trim(), content, focus: args.focus};
}
}
export const WebSearchTool: AiTool = {
name: 'web_search',
description: 'Use duckduckgo (anonymous) to find relevant online resources. Returns a list of URLs that works great with the `read_webpage` tool',
args: {
query: {type: 'string', description: 'Search string', required: true},
length: {type: 'string', description: 'Number of results to return', default: 5},
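
web_search and read_webpage are meant to be chained: search returns URLs, read_webpage extracts their content. A usage sketch with both tools registered (the ai instance is assumed from the earlier sketches):

import {ReadWebpageTool, WebSearchTool} from '@ztimson/ai-utils';

const answer = await ai.language.ask('What is the current LTS version of Node.js?', {
    tools: [WebSearchTool, ReadWebpageTool],
    stream: chunk => { if(chunk.tool) console.log(`using ${chunk.tool}`); }
});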

src/vision.ts

@@ -1,5 +1,5 @@
import {createWorker} from 'tesseract.js';
import {Ai} from './ai.ts';
import {AbortablePromise, Ai} from './ai.ts';
export class Vision {
@@ -8,18 +8,16 @@ export class Vision {
/**
* Convert image to text using Optical Character Recognition
* @param {string} path Path to image
* @returns {{abort: Function, response: Promise<string | null>}} Abort function & Promise of extracted text
* @returns {AbortablePromise<string | null>} Promise of extracted text with abort method
*/
ocr(path: string): {abort: () => void, response: Promise<string | null>} {
ocr(path: string): AbortablePromise<string | null> {
let worker: any;
return {
abort: () => { worker?.terminate(); },
response: new Promise(async res => {
worker = await createWorker(this.ai.options.tesseract?.model || 'eng', 2, {cachePath: this.ai.options.path});
const {data} = await worker.recognize(path);
await worker.terminate();
res(data.text.trim() || null);
})
}
const p = new Promise<string | null>(async res => {
worker = await createWorker(this.ai.options.tesseract?.model || 'eng', 2, {cachePath: this.ai.options.path});
const {data} = await worker.recognize(path);
await worker.terminate();
res(data.text.trim() || null);
});
return Object.assign(p, {abort: () => worker?.terminate()});
}
}
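
Same pattern as Audio.asr(): ocr() resolves directly and carries an abort() method that terminates the Tesseract worker. Sketch, assuming the Vision instance is exposed as ai.vision:

const job = ai.vision.ocr('./receipt.png');   // AbortablePromise<string | null>
// job.abort();                               // stop OCR early if needed
console.log(await job);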

vite.config.ts

@@ -1,12 +1,19 @@
import {defineConfig} from 'vite';
import dts from 'vite-plugin-dts';
import {resolve} from 'path';
export default defineConfig({
build: {
lib: {
entry: './src/index.ts',
entry: {
index: './src/index.ts',
embedder: './src/embedder.ts',
},
name: 'utils',
fileName: (format) => (format === 'es' ? 'index.mjs' : 'index.js'),
fileName: (format, entryName) => {
if (entryName === 'embedder') return 'embedder.js';
return format === 'es' ? 'index.mjs' : 'index.js';
},
},
ssr: true,
emptyOutDir: true,