Compare commits


46 Commits
0.2.1 ... 0.8.4

SHA1 Message Date
cfde2ac4d3 Fixed OpenAI tool call streaming! 2026-02-27 13:11:41 -05:00
e4ba89d3db OpenAI tool call history fix? 2026-02-27 13:00:49 -05:00
71a7e2a904 Better RAG memory 2026-02-27 12:32:27 -05:00
abd290246c LLM ASR 2026-02-22 09:29:31 -05:00
ca66e8e304 Improved whisper + pyannote, sentence diarization 2026-02-21 14:16:20 -05:00
cec892563e Whisper ASR 2026-02-21 01:03:25 -05:00
91066e070f WIP ASR 2026-02-21 00:51:01 -05:00
a94b153c6d Fixed embedder autostart bug 2026-02-21 00:30:38 -05:00
39537a4a8f Switching to processes and whisper.cpp to avoid transformers.js memory leaks 2026-02-20 21:50:01 -05:00
790608f020 Queue OCR & ASR work 2026-02-20 19:05:19 -05:00
473424ae23 segfault fix 2026-02-20 17:31:49 -05:00
9b831f7d95 Better ASR IDing 2026-02-20 16:55:25 -05:00
498b326e45 Bump 0.7.4 2026-02-20 14:19:17 -05:00
56e4efec94 Use either python or python3 for diarization 2026-02-20 14:14:30 -05:00
a07f069ad0 One embedding at a time 2026-02-19 22:58:53 -05:00
da15d299e6 parallel embedding cap 2026-02-19 21:37:58 -05:00
7ef7c3f676 Cap speaker ID transcript length to 2000 tokens 2026-02-14 09:48:12 -05:00
4143d00de7 Working speaker detection with advanced LLM identifying. Improved LLM json function 2026-02-14 09:39:17 -05:00
0360f2493d Added hugging face token 2026-02-12 22:15:57 -05:00
0172887877 audio worker fix 2026-02-12 20:24:12 -05:00
8f89f5e3cf embedding worker fix 2026-02-12 20:18:56 -05:00
5bd41f8c6a worker fix? 2026-02-12 20:17:31 -05:00
e4399e1b7b Updates? 2026-02-12 20:14:00 -05:00
ad1ee48763 Use one-off workers to process requests without blocking 2026-02-12 19:45:17 -05:00
3ed206923f Fix ASR 2026-02-12 18:32:19 -05:00
22d5427e86 Fix ASR 2026-02-12 17:49:33 -05:00
43b53164c0 Bump 0.6.3 2026-02-12 17:24:15 -05:00
575fbac099 Fixed ASR 2026-02-12 13:31:30 -05:00
46ae0f7913 expose diarization support checking function 2026-02-12 11:55:29 -05:00
54730a2b9a Speaker diarization 2026-02-12 11:26:11 -05:00
27506d20af Fix anthropic message history 2026-02-11 22:45:30 -05:00
8c64129200 Removed log statement 2026-02-11 21:58:39 -05:00
013aa942c0 Added save directory for embedder 2026-02-11 21:45:54 -05:00
c8d5660b1a Enable quantized embedder for speed boost 2026-02-11 20:28:14 -05:00
f2c66b0cb8 Updated default embedder 2026-02-11 20:23:50 -05:00
cda7db4f45 Added memory system 2026-02-08 19:52:02 -05:00
d71a6be120 Fixed timezones with date time tool 2026-02-02 09:30:48 -05:00
7b57a0ded1 Updated LLM config and added read_webpage 2026-02-01 13:16:08 -05:00
28904cddbe TTS 2026-01-30 15:39:29 -05:00
d5bf1ec47e Pulled chunking out into its own exported function for easy access 2026-01-30 10:38:51 -05:00
cb60a0b0c5 Moved embeddings to worker to prevent blocking 2026-01-28 22:17:39 -05:00
1c59379c7d Set tesseract model 2026-01-16 20:33:51 -05:00
6dce0e8954 Fixed tool calls 2025-12-27 17:27:53 -05:00
98dd0bb323 Auto download tesseract models 2025-12-22 13:48:53 -05:00
ca5a2334bb bump 2.2.0 2025-12-22 11:02:53 -05:00
3cd7b12f5f Configure model path for all libraries 2025-12-22 11:02:24 -05:00
16 changed files with 1398 additions and 1488 deletions

README.md

@@ -3,7 +3,7 @@
 <br />
 <!-- Logo -->
-<img src="https://git.zakscode.com/repo-avatars/a90851ca730480ec37a5c0c2c4f1b4609eee5eadf806eaf16c83ac4cb7493aa9" alt="Logo" width="200" height="200">
+<img alt="Logo" width="200" height="200" src="https://git.zakscode.com/repo-avatars/a82d423674763e7a0c1c945bdbb07e249b2bb786d3c9beae76d5b196a10f5c0f">
 <!-- Title -->
 ### @ztimson/ai-utils
@@ -53,13 +53,15 @@ A TypeScript library that provides a unified interface for working with multiple
 - **Provider Abstraction**: Switch between AI providers without changing your code
 ### Built With
-[![Anthropic](https://img.shields.io/badge/Anthropic-191919?style=for-the-badge&logo=anthropic&logoColor=white)](https://anthropic.com/)
-[![OpenAI](https://img.shields.io/badge/OpenAI-412991?style=for-the-badge&logo=openai&logoColor=white)](https://openai.com/)
-[![Ollama](https://img.shields.io/badge/Ollama-000000?style=for-the-badge&logo=ollama&logoColor=white)](https://ollama.com/)
-[![TensorFlow](https://img.shields.io/badge/TensorFlow-FF6F00?style=for-the-badge&logo=tensorflow&logoColor=white)](https://tensorflow.org/)
-[![Tesseract](https://img.shields.io/badge/Tesseract-3C8FC7?style=for-the-badge&logo=tesseract&logoColor=white)](https://tesseract-ocr.github.io/)
+[![Anthropic](https://img.shields.io/badge/Anthropic-de7356?style=for-the-badge&logo=anthropic&logoColor=white)](https://anthropic.com/)
+[![llama](https://img.shields.io/badge/llama.cpp-fff?style=for-the-badge&logo=ollama&logoColor=black)](https://github.com/ggml-org/llama.cpp)
+[![OpenAI](https://img.shields.io/badge/OpenAI-000?style=for-the-badge&logo=openai-gym&logoColor=white)](https://openai.com/)
+[![Pyannote](https://img.shields.io/badge/Pyannote-458864?style=for-the-badge&logo=python&logoColor=white)](https://github.com/pyannote)
+[![TensorFlow](https://img.shields.io/badge/TensorFlow-fff?style=for-the-badge&logo=tensorflow&logoColor=ff6f00)](https://tensorflow.org/)
+[![Tesseract](https://img.shields.io/badge/Tesseract-B874B2?style=for-the-badge&logo=hack-the-box&logoColor=white)](https://tesseract-ocr.github.io/)
+[![Transformers.js](https://img.shields.io/badge/Transformers.js-000?style=for-the-badge&logo=hugging-face&logoColor=yellow)](https://huggingface.co/docs/transformers.js/en/index)
 [![TypeScript](https://img.shields.io/badge/TypeScript-3178C6?style=for-the-badge&logo=typescript&logoColor=white)](https://typescriptlang.org/)
-[![Whisper](https://img.shields.io/badge/Whisper-412991?style=for-the-badge&logo=openai&logoColor=white)](https://github.com/ggerganov/whisper.cpp)
+[![Whisper.cpp](https://img.shields.io/badge/Whisper.cpp-000?style=for-the-badge&logo=openai-gym&logoColor=white)](https://github.com/ggerganov/whisper.cpp)
 ## Setup
@@ -75,6 +77,7 @@ A TypeScript library that provides a unified interface for working with multiple
 #### Instructions
 1. Install the package: `npm i @ztimson/ai-utils`
+2. For speaker diarization: `pip install pyannote.audio`
 </details>
@@ -87,11 +90,14 @@ A TypeScript library that provides a unified interface for working with multiple
 #### Prerequisites
 - [Node.js](https://nodejs.org/en/download)
+- _[Whisper.cpp](https://github.com/ggml-org/whisper.cpp/releases/tag) (ASR)_
+- _[Pyannote](https://github.com/pyannote) (ASR Diarization):_ `pip install pyannote.audio`
 #### Instructions
 1. Install the dependencies: `npm i`
-2. Build library: `npm build`
-3. Run unit tests: `npm test`
+2. For speaker diarization: `pip install pyannote.audio`
+3. Build library: `npm build`
+4. Run unit tests: `npm test`
 </details>

1807 package-lock.json (generated): file diff suppressed because it is too large.

package.json

@@ -1,6 +1,6 @@
 {
 	"name": "@ztimson/ai-utils",
-	"version": "0.2.1",
+	"version": "0.8.4",
 	"description": "AI Utility library",
 	"author": "Zak Timson",
 	"license": "MIT",
@@ -25,14 +25,14 @@
 		"watch": "npx vite build --watch"
 	},
 	"dependencies": {
-		"@anthropic-ai/sdk": "^0.67.0",
+		"@anthropic-ai/sdk": "^0.78.0",
 		"@tensorflow/tfjs": "^4.22.0",
 		"@xenova/transformers": "^2.17.2",
-		"@ztimson/node-utils": "^1.0.4",
-		"@ztimson/utils": "^0.27.9",
-		"ollama": "^0.6.0",
-		"openai": "^6.6.0",
-		"tesseract.js": "^6.0.1"
+		"@ztimson/node-utils": "^1.0.7",
+		"@ztimson/utils": "^0.28.13",
+		"cheerio": "^1.2.0",
+		"openai": "^6.22.0",
+		"tesseract.js": "^7.0.0"
 	},
 	"devDependencies": {
 		"@types/node": "^24.8.1",

src/ai.ts

@@ -1,22 +1,32 @@
-import {LLM, LLMOptions} from './llm';
+import * as os from 'node:os';
+import LLM, {AnthropicConfig, OllamaConfig, OpenAiConfig, LLMRequest} from './llm';
 import { Audio } from './audio.ts';
 import {Vision} from './vision.ts';
-export type AiOptions = LLMOptions & {
-	whisper?: {
-		/** Whisper binary location */
-		binary: string;
-		/** Model: `ggml-base.en.bin` */
-		model: string;
-		/** Path to models */
-		path: string;
-	}
-}
+export type AbortablePromise<T> = Promise<T> & {
+	abort: () => any
+};
+export type AiOptions = {
+	/** Token to pull models from hugging face */
+	hfToken?: string;
+	/** Path to models */
+	path?: string;
+	/** Whisper ASR model: ggml-tiny.en.bin, ggml-base.en.bin */
+	asr?: string;
+	/** Embedding model: all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5 */
+	embedder?: string;
+	/** Large language models, first is default */
+	llm?: Omit<LLMRequest, 'model'> & {
+		models: {[model: string]: AnthropicConfig | OllamaConfig | OpenAiConfig};
+	}
+	/** OCR model: eng, eng_best, eng_fast */
+	ocr?: string;
+	/** Whisper binary */
+	whisper?: string;
+}
 export class Ai {
-	private downloads: {[key: string]: Promise<string>} = {};
-	private whisperModel!: string;
 	/** Audio processing AI */
 	audio!: Audio;
 	/** Language processing AI */
@@ -25,6 +35,8 @@ export class Ai {
 	vision!: Vision;
 	constructor(public readonly options: AiOptions) {
+		if(!options.path) options.path = os.tmpdir();
+		process.env.TRANSFORMERS_CACHE = options.path;
 		this.audio = new Audio(this);
 		this.language = new LLM(this);
 		this.vision = new Vision(this);
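For reference, a minimal sketch of constructing the library against the new `AiOptions` shape above. Only the option keys come from the diff; every path, token, and model name here is a placeholder:

```ts
import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({
	path: '/var/cache/ai-models',          // model cache; defaults to os.tmpdir()
	hfToken: 'hf_xxx',                     // Hugging Face token, used to pull pyannote
	whisper: '/usr/local/bin/whisper-cli', // whisper.cpp binary (assumed install path)
	asr: 'ggml-base.en.bin',
	embedder: 'bge-small-en-v1.5',
	ocr: 'eng_fast',
	llm: {
		models: {
			// First entry becomes the default model (see the llm.ts constructor below)
			'claude-sonnet': {proto: 'anthropic', token: 'sk-ant-xxx'},
			'gpt-4o-mini': {proto: 'openai', token: 'sk-xxx'},
			'llama3': {proto: 'ollama', host: 'http://localhost:11434'}
		}
	}
});
```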

src/antrhopic.ts

@@ -1,8 +1,8 @@
 import {Anthropic as anthropic} from '@anthropic-ai/sdk';
-import {findByProp, objectMap, JSONSanitize, JSONAttemptParse, deepCopy} from '@ztimson/utils';
-import {Ai} from './ai.ts';
+import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
+import {AbortablePromise, Ai} from './ai.ts';
 import {LLMMessage, LLMRequest} from './llm.ts';
-import {AbortablePromise, LLMProvider} from './provider.ts';
+import {LLMProvider} from './provider.ts';
 export class Anthropic extends LLMProvider {
 	client!: anthropic;
@@ -13,25 +13,25 @@ export class Anthropic extends LLMProvider {
 	}
 	private toStandard(history: any[]): LLMMessage[] {
-		for(let i = 0; i < history.length; i++) {
-			const orgI = i;
-			if(typeof history[orgI].content != 'string') {
-				if(history[orgI].role == 'assistant') {
-					history[orgI].content.filter((c: any) => c.type =='tool_use').forEach((c: any) => {
-						i++;
-						history.splice(i, 0, {role: 'tool', id: c.id, name: c.name, args: c.input, timestamp: Date.now()});
-					});
-				} else if(history[orgI].role == 'user') {
-					history[orgI].content.filter((c: any) => c.type =='tool_result').forEach((c: any) => {
-						const h = history.find((h: any) => h.id == c.tool_use_id);
-						h[c.is_error ? 'error' : 'content'] = c.content;
-					});
-				}
-				history[orgI].content = history[orgI].content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
-			}
-			if(!history[orgI].timestamp) history[orgI].timestamp = Date.now();
-		}
-		return history.filter(h => !!h.content);
+		const timestamp = Date.now();
+		const messages: LLMMessage[] = [];
+		for(let h of history) {
+			if(typeof h.content == 'string') {
+				messages.push(<any>{timestamp, ...h});
+			} else {
+				const textContent = h.content?.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
+				if(textContent) messages.push({timestamp, role: h.role, content: textContent});
+				h.content.forEach((c: any) => {
+					if(c.type == 'tool_use') {
+						messages.push({timestamp, role: 'tool', id: c.id, name: c.name, args: c.input, content: undefined});
+					} else if(c.type == 'tool_result') {
+						const m: any = messages.findLast(m => (<any>m).id == c.tool_use_id);
+						if(m) m[c.is_error ? 'error' : 'content'] = c.content;
+					}
+				});
+			}
+		}
+		return messages;
 	}
 	private fromStandard(history: LLMMessage[]): any[] {
@@ -48,18 +48,17 @@ export class Anthropic extends LLMProvider {
 		return history.map(({timestamp, ...h}) => h);
 	}
-	ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
+	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
 		const controller = new AbortController();
-		const response = new Promise<any>(async (res, rej) => {
+		return Object.assign(new Promise<any>(async (res) => {
 			let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
-			const original = deepCopy(history);
-			if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
+			const tools = options.tools || this.ai.options.llm?.tools || [];
 			const requestParams: any = {
 				model: options.model || this.model,
-				max_tokens: options.max_tokens || this.ai.options.max_tokens || 4096,
-				system: options.system || this.ai.options.system || '',
-				temperature: options.temperature || this.ai.options.temperature || 0.7,
-				tools: (options.tools || this.ai.options.tools || []).map(t => ({
+				max_tokens: options.max_tokens || this.ai.options.llm?.max_tokens || 4096,
+				system: options.system || this.ai.options.llm?.system || '',
+				temperature: options.temperature || this.ai.options.llm?.temperature || 0.7,
+				tools: tools.map(t => ({
 					name: t.name,
 					description: t.description,
 					input_schema: {
@@ -74,9 +73,11 @@ export class Anthropic extends LLMProvider {
 			};
 			let resp: any, isFirstMessage = true;
+			const assistantMessages: string[] = [];
 			do {
-				resp = await this.client.messages.create(requestParams);
+				resp = await this.client.messages.create(requestParams).catch(err => {
+					err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
+					throw err;
+				});
 				// Streaming mode
 				if(options.stream) {
@@ -112,12 +113,12 @@ export class Anthropic extends LLMProvider {
 				const toolCalls = resp.content.filter((c: any) => c.type === 'tool_use');
 				if(toolCalls.length && !controller.signal.aborted) {
 					history.push({role: 'assistant', content: resp.content});
-					original.push({role: 'assistant', content: resp.content});
 					const results = await Promise.all(toolCalls.map(async (toolCall: any) => {
-						const tool = options.tools?.find(findByProp('name', toolCall.name));
+						const tool = tools.find(findByProp('name', toolCall.name));
+						if(options.stream) options.stream({tool: toolCall.name});
 						if(!tool) return {tool_use_id: toolCall.id, is_error: true, content: 'Tool not found'};
 						try {
-							const result = await tool.fn(toolCall.input, this.ai);
+							const result = await tool.fn(toolCall.input, options?.stream, this.ai);
 							return {type: 'tool_result', tool_use_id: toolCall.id, content: JSONSanitize(result)};
 						} catch (err: any) {
 							return {type: 'tool_result', tool_use_id: toolCall.id, is_error: true, content: err?.message || err?.toString() || 'Unknown'};
@@ -127,11 +128,12 @@ export class Anthropic extends LLMProvider {
 					requestParams.messages = history;
 				}
 			} while (!controller.signal.aborted && resp.content.some((c: any) => c.type === 'tool_use'));
+			history.push({role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')});
+			history = this.toStandard(history);
 			if(options.stream) options.stream({done: true});
-			res(this.toStandard([...history, {role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')}]));
-		});
-		return Object.assign(response, {abort: () => controller.abort()});
+			if(options.history) options.history.splice(0, options.history.length, ...history);
+			res(history.at(-1)?.content);
+		}), {abort: () => controller.abort()});
 	}
 }
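Per this hunk, `ask()` now resolves to the assistant's final text instead of a message array, and it mutates `options.history` in place. A rough usage sketch under those assumptions, reusing the `ai` instance from the earlier sketch (prompt and output are illustrative):

```ts
import {LLMMessage} from '@ztimson/ai-utils';

const history: LLMMessage[] = [];
const req = ai.language.ask('Summarize our roadmap', {
	history,                       // updated in place with user/assistant/tool messages
	stream: chunk => {
		if(chunk.text) process.stdout.write(chunk.text);
		if(chunk.tool) console.error(`\n[calling tool: ${chunk.tool}]`);
	}
});
// req.abort(); // cancels mid-generation via the AbortController
const answer: string = await req; // final assistant text only
```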

src/audio.ts

@@ -1,54 +1,261 @@
-import {spawn} from 'node:child_process';
+import {execSync, spawn} from 'node:child_process';
+import {mkdtempSync} from 'node:fs';
 import fs from 'node:fs/promises';
-import Path from 'node:path';
-import {Ai} from './ai.ts';
+import {tmpdir} from 'node:os';
+import * as path from 'node:path';
+import Path, {join} from 'node:path';
+import {AbortablePromise, Ai} from './ai.ts';
 export class Audio {
 	private downloads: {[key: string]: Promise<string>} = {};
+	private pyannote!: string;
 	private whisperModel!: string;
 	constructor(private ai: Ai) {
-		if(ai.options.whisper?.binary) {
-			this.whisperModel = ai.options.whisper?.model.endsWith('.bin') ? ai.options.whisper?.model : ai.options.whisper?.model + '.bin';
+		if(ai.options.whisper) {
+			this.whisperModel = ai.options.asr || 'ggml-base.en.bin';
 			this.downloadAsrModel();
 		}
+		this.pyannote = `
+import sys
+import json
+import os
+from pyannote.audio import Pipeline
+os.environ['TORCH_HOME'] = r"${ai.options.path}"
+pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1", token="${ai.options.hfToken}")
+output = pipeline(sys.argv[1])
+segments = []
+for turn, speaker in output.speaker_diarization:
+    segments.append({"start": turn.start, "end": turn.end, "speaker": speaker})
+print(json.dumps(segments))
+`;
 	}
-	/**
-	 * Convert audio to text using Auditory Speech Recognition
-	 * @param {string} path Path to audio
-	 * @param model Whisper model
-	 * @returns {Promise<any>} Extracted text
-	 */
-	asr(path: string, model: string = this.whisperModel): {abort: () => void, response: Promise<string | null>} {
-		if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
-		let abort: any = () => {};
-		const response = new Promise<string | null>((resolve, reject) => {
-			this.downloadAsrModel(model).then(m => {
-				let output = '';
-				const proc = spawn(<string>this.ai.options.whisper?.binary, ['-nt', '-np', '-m', m, '-f', path], {stdio: ['ignore', 'pipe', 'ignore']});
-				abort = () => proc.kill('SIGTERM');
-				proc.on('error', (err: Error) => reject(err));
-				proc.stdout.on('data', (data: Buffer) => output += data.toString());
-				proc.on('close', (code: number) => {
-					if(code === 0) resolve(output.trim() || null);
-					else reject(new Error(`Exit code ${code}`));
-				});
-			});
-		});
-		return {response, abort};
-	}
+	private async addPunctuation(timestampData: any, llm?: boolean, cadence = 150): Promise<string> {
+		const countSyllables = (word: string): number => {
+			word = word.toLowerCase().replace(/[^a-z]/g, '');
+			if(word.length <= 3) return 1;
+			const matches = word.match(/[aeiouy]+/g);
+			let count = matches ? matches.length : 1;
+			if(word.endsWith('e')) count--;
+			return Math.max(1, count);
+		};
+		let result = '';
+		timestampData.transcription.filter((word, i) => {
+			let skip = false;
+			const prevWord = timestampData.transcription[i - 1];
+			const nextWord = timestampData.transcription[i + 1];
+			if(!word.text && nextWord) {
+				nextWord.offsets.from = word.offsets.from;
+				nextWord.timestamps.from = word.offsets.from;
+			} else if(word.text && word.text[0] != ' ' && prevWord) {
+				prevWord.offsets.to = word.offsets.to;
+				prevWord.timestamps.to = word.timestamps.to;
+				prevWord.text += word.text;
+				skip = true;
+			}
+			return !!word.text && !skip;
+		}).forEach((word: any) => {
+			const capital = /^[A-Z]/.test(word.text.trim());
+			const length = word.offsets.to - word.offsets.from;
+			const syllables = countSyllables(word.text.trim());
+			const expected = syllables * cadence;
+			if(capital && length > expected * 2 && word.text[0] == ' ') result += '.';
+			result += word.text;
+		});
+		if(!llm) return result.trim();
+		return this.ai.language.ask(result, {
+			system: 'Remove any misplaced punctuation from the following ASR transcript using the replace tool. Avoid modifying words unless there is an obvious typo',
+			temperature: 0.1,
+			tools: [{
+				name: 'replace',
+				description: 'Use find and replace to fix errors',
+				args: {
+					find: {type: 'string', description: 'Text to find', required: true},
+					replace: {type: 'string', description: 'Text to replace', required: true}
+				},
+				fn: (args) => result = result.replace(args.find, args.replace)
+			}]
+		}).then(() => result);
+	}
+	private async diarizeTranscript(timestampData: any, speakers: any[], llm: boolean): Promise<string> {
+		const speakerMap = new Map();
+		let speakerCount = 0;
+		speakers.forEach((seg: any) => {
+			if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
+		});
+		const punctuatedText = await this.addPunctuation(timestampData, llm);
+		const sentences = punctuatedText.match(/[^.!?]+[.!?]+/g) || [punctuatedText];
+		const words = timestampData.transcription.filter((w: any) => w.text.trim());
+		// Assign speaker to each sentence
+		const sentencesWithSpeakers = sentences.map(sentence => {
+			sentence = sentence.trim();
+			if(!sentence) return null;
+			const sentenceWords = sentence.toLowerCase().replace(/[^\w\s]/g, '').split(/\s+/);
+			const speakerWordCount = new Map<number, number>();
+			sentenceWords.forEach(sw => {
+				const word = words.find((w: any) => sw === w.text.trim().toLowerCase().replace(/[^\w]/g, ''));
+				if(!word) return;
+				const wordTime = word.offsets.from / 1000;
+				const speaker = speakers.find((seg: any) => wordTime >= seg.start && wordTime <= seg.end);
+				if(speaker) {
+					const spkNum = speakerMap.get(speaker.speaker);
+					speakerWordCount.set(spkNum, (speakerWordCount.get(spkNum) || 0) + 1);
+				}
+			});
+			let bestSpeaker = 1;
+			let maxWords = 0;
+			speakerWordCount.forEach((count, speaker) => {
+				if(count > maxWords) {
+					maxWords = count;
+					bestSpeaker = speaker;
+				}
+			});
+			return {speaker: bestSpeaker, text: sentence};
+		}).filter(s => s !== null);
+		// Merge adjacent sentences from same speaker
+		const merged: Array<{speaker: number, text: string}> = [];
+		sentencesWithSpeakers.forEach(item => {
+			const last = merged[merged.length - 1];
+			if(last && last.speaker === item.speaker) {
+				last.text += ' ' + item.text;
+			} else {
+				merged.push({...item});
+			}
+		});
+		let transcript = merged.map(item => `[Speaker ${item.speaker}]: ${item.text}`).join('\n').trim();
+		if(!llm) return transcript;
+		let chunks = this.ai.language.chunk(transcript, 500, 0);
+		if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
+		const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name", 2: "Second Name"}', {
+			system: 'Use the following transcript to identify speakers. Only identify speakers you are positive about, dont mention speakers you are unsure about in your response',
+			temperature: 0.1,
+		});
+		Object.entries(names).forEach(([speaker, name]) => transcript = transcript.replaceAll(`[Speaker ${speaker}]`, `[${name}]`));
+		return transcript;
+	}
+	private runAsr(file: string, opts: {model?: string, diarization?: boolean} = {}): AbortablePromise<any> {
+		let proc: any;
+		const p = new Promise<any>((resolve, reject) => {
+			this.downloadAsrModel(opts.model).then(m => {
+				if(opts.diarization) {
+					let output = path.join(path.dirname(file), 'transcript');
+					proc = spawn(<string>this.ai.options.whisper,
+						['-m', m, '-f', file, '-np', '-ml', '1', '-oj', '-of', output],
+						{stdio: ['ignore', 'ignore', 'pipe']}
+					);
+					proc.on('error', (err: Error) => reject(err));
+					proc.on('close', async (code: number) => {
+						if(code === 0) {
+							output = await fs.readFile(output + '.json', 'utf-8');
+							fs.rm(output + '.json').catch(() => { });
+							try { resolve(JSON.parse(output)); }
+							catch(e) { reject(new Error('Failed to parse whisper JSON')); }
+						} else {
+							reject(new Error(`Exit code ${code}`));
+						}
+					});
+				} else {
+					let output = '';
+					proc = spawn(<string>this.ai.options.whisper, ['-m', m, '-f', file, '-np', '-nt']);
+					proc.on('error', (err: Error) => reject(err));
+					proc.stdout.on('data', (data: Buffer) => output += data.toString());
+					proc.on('close', async (code: number) => {
+						if(code === 0) {
+							resolve(output.trim() || null);
+						} else {
+							reject(new Error(`Exit code ${code}`));
+						}
+					});
+				}
+			});
+		});
+		return <any>Object.assign(p, {abort: () => proc?.kill('SIGTERM')});
+	}
+	private runDiarization(file: string): AbortablePromise<any> {
+		let aborted = false, abort = () => { aborted = true; };
+		const checkPython = (cmd: string) => {
+			return new Promise<boolean>((resolve) => {
+				const proc = spawn(cmd, ['-W', 'ignore', '-c', 'import pyannote.audio']);
+				proc.on('close', (code: number) => resolve(code === 0));
+				proc.on('error', () => resolve(false));
+			});
+		};
+		const p = Promise.all<any>([
+			checkPython('python'),
+			checkPython('python3'),
+		]).then(<any>(async ([p, p3]: [boolean, boolean]) => {
+			if(aborted) return;
+			if(!p && !p3) throw new Error('Pyannote is not installed: pip install pyannote.audio');
+			const binary = p3 ? 'python3' : 'python';
+			return new Promise((resolve, reject) => {
+				if(aborted) return;
+				let output = '';
+				const proc = spawn(binary, ['-W', 'ignore', '-c', this.pyannote, file]);
+				proc.stdout.on('data', (data: Buffer) => output += data.toString());
+				proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
+				proc.on('close', (code: number) => {
+					if(code === 0) {
+						try { resolve(JSON.parse(output)); }
+						catch (err) { reject(new Error('Failed to parse diarization output')); }
+					} else {
+						reject(new Error(`Python process exited with code ${code}`));
+					}
+				});
+				proc.on('error', reject);
+				abort = () => proc.kill('SIGTERM');
+			});
+		}));
+		return <any>Object.assign(p, {abort});
+	}
+	asr(file: string, options: { model?: string; diarization?: boolean | 'llm' } = {}): AbortablePromise<string | null> {
+		if(!this.ai.options.whisper) throw new Error('Whisper not configured');
+		const tmp = join(mkdtempSync(join(tmpdir(), 'audio-')), 'converted.wav');
+		execSync(`ffmpeg -i "${file}" -ar 16000 -ac 1 -f wav "${tmp}"`, { stdio: 'ignore' });
+		const clean = () => fs.rm(Path.dirname(tmp), {recursive: true, force: true}).catch(() => {});
+		if(!options.diarization) return this.runAsr(tmp, {model: options.model});
+		const timestamps = this.runAsr(tmp, {model: options.model, diarization: true});
+		const diarization = this.runDiarization(tmp);
+		let aborted = false, abort = () => {
+			aborted = true;
+			timestamps.abort();
+			diarization.abort();
+			clean();
+		};
+		const response = Promise.allSettled([timestamps, diarization]).then(async ([ts, d]) => {
+			if(ts.status == 'rejected') throw new Error('Whisper.cpp timestamps:\n' + ts.reason);
+			if(d.status == 'rejected') throw new Error('Pyannote:\n' + d.reason);
+			if(aborted || !options.diarization) return ts.value;
+			return this.diarizeTranscript(ts.value, d.value, options.diarization == 'llm');
+		}).finally(() => clean());
+		return <any>Object.assign(response, {abort});
+	}
+	/**
+	 * Downloads the specified Whisper model if it is not already present locally.
+	 *
+	 * @param {string} model Whisper model that will be downloaded
+	 * @return {Promise<string>} Absolute path to model file, resolves once downloaded
+	 */
 	async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
-		if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
+		if(!this.ai.options.whisper) throw new Error('Whisper not configured');
 		if(!model.endsWith('.bin')) model += '.bin';
-		const p = Path.join(this.ai.options.whisper.path, model);
+		const p = Path.join(<string>this.ai.options.path, model);
 		if(await fs.stat(p).then(() => true).catch(() => false)) return p;
 		if(!!this.downloads[model]) return this.downloads[model];
 		this.downloads[model] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${model}`)
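Putting the new pipeline together, a hedged sketch of the ASR entry point: it shells out to `ffmpeg` (which must be on `PATH`), runs whisper.cpp and pyannote in parallel when diarization is requested, and resolves to a speaker-labelled transcript. The file name and output below are invented:

```ts
// Reuses the `ai` instance from the earlier sketch
const job = ai.audio.asr('./standup.mp3', {diarization: 'llm'});
// job.abort(); // SIGTERMs whisper.cpp + the pyannote process and removes the temp WAV

const transcript = await job;
console.log(transcript);
// Hypothetical output once the LLM has matched names to speakers:
// [Zak]: Morning everyone, let's start with the build.
// [Speaker 2]: CI is green as of last night.
```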

src/embedder.ts (new file, 13 additions)

@@ -0,0 +1,13 @@
+import { pipeline } from '@xenova/transformers';
+
+const [modelDir, model] = process.argv.slice(2);
+
+let text = '';
+process.stdin.on('data', chunk => text += chunk);
+process.stdin.on('end', async () => {
+	const embedder = await pipeline('feature-extraction', 'Xenova/' + model, {quantized: true, cache_dir: modelDir});
+	const output = await embedder(text, { pooling: 'mean', normalize: true });
+	const embedding = Array.from(output.data);
+	console.log(JSON.stringify({embedding}));
+	process.exit();
+});
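The worker is driven over stdio: model directory and model name as argv, raw text on stdin, one JSON object on stdout. A small sketch of calling it by hand (the `dist/embedder.js` path is an assumption about the build output):

```ts
import {spawn} from 'node:child_process';

const proc = spawn('node', ['dist/embedder.js', '/var/cache/ai-models', 'bge-small-en-v1.5']);
proc.stdin.end('The quick brown fox');       // text to embed goes in on stdin
let out = '';
proc.stdout.on('data', d => out += d);
proc.on('close', () => {
	const {embedding} = JSON.parse(out);     // {embedding: number[]}
	console.log(embedding.length);           // vector dimension, e.g. 384 for bge-small
});
```

Running the model in a throwaway process (rather than in-process transformers.js) matches the "Switching to processes ... to avoid transformers.js memory leaks" commit above: the OS reclaims everything on exit.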

src/index.ts

@@ -1,4 +1,8 @@
 export * from './ai';
 export * from './antrhopic';
+export * from './audio';
 export * from './llm';
+export * from './open-ai';
+export * from './provider';
 export * from './tools';
+export * from './vision';

src/llm.ts

@@ -1,12 +1,16 @@
-import {pipeline} from '@xenova/transformers';
 import {JSONAttemptParse} from '@ztimson/utils';
-import {Ai} from './ai.ts';
+import {AbortablePromise, Ai} from './ai.ts';
 import {Anthropic} from './antrhopic.ts';
-import {Ollama} from './ollama.ts';
 import {OpenAi} from './open-ai.ts';
-import {AbortablePromise, LLMProvider} from './provider.ts';
+import {LLMProvider} from './provider.ts';
 import {AiTool} from './tools.ts';
-import * as tf from '@tensorflow/tfjs';
+import {fileURLToPath} from 'url';
+import {dirname, join} from 'path';
+import { spawn } from 'node:child_process';
+export type AnthropicConfig = {proto: 'anthropic', token: string};
+export type OllamaConfig = {proto: 'ollama', host: string};
+export type OpenAiConfig = {proto: 'openai', host?: string, token: string};
 export type LLMMessage = {
 	/** Message originator */
@@ -27,36 +31,20 @@ export type LLMMessage = {
 	/** Tool result */
 	content: undefined | string;
 	/** Tool error */
-	error: undefined | string;
+	error?: undefined | string;
 	/** Timestamp */
 	timestamp?: number;
 }
-export type LLMOptions = {
-	/** Anthropic settings */
-	anthropic?: {
-		/** API Token */
-		token: string;
-		/** Default model */
-		model: string;
-	},
-	/** Ollama settings */
-	ollama?: {
-		/** connection URL */
-		host: string;
-		/** Default model */
-		model: string;
-	},
-	/** Open AI settings */
-	openAi?: {
-		/** API Token */
-		token: string;
-		/** Default model */
-		model: string;
-	},
-	/** Default provider & model */
-	model: string | [string, string];
-} & Omit<LLMRequest, 'model'>;
+/** Background information the AI will be fed */
+export type LLMMemory = {
+	/** What entity is this fact about */
+	owner: string;
+	/** The information that will be remembered */
+	fact: string;
+	/** Owner and fact embedding vector */
+	embeddings: [number[], number[]];
+}
 export type LLMRequest = {
 	/** System prompt */
@@ -70,54 +58,138 @@ export type LLMRequest = {
 	/** Available tools */
 	tools?: AiTool[];
 	/** LLM model */
-	model?: string | [string, string];
+	model?: string;
 	/** Stream response */
-	stream?: (chunk: {text?: string, done?: true}) => any;
+	stream?: (chunk: {text?: string, tool?: string, done?: true}) => any;
 	/** Compress old messages in the chat to free up context */
 	compress?: {
 		/** Trigger chat compression once context exceeds the token count */
 		max: number;
 		/** Compress chat until context size smaller than */
 		min: number
-	}
+	},
+	/** Background information the AI will be fed */
+	memory?: LLMMemory[],
 }
-export class LLM {
-	private embedModel: any;
-	private providers: {[key: string]: LLMProvider} = {};
+class LLM {
+	defaultModel!: string;
+	models: {[model: string]: LLMProvider} = {};
 	constructor(public readonly ai: Ai) {
-		this.embedModel = pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
-		if(ai.options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, ai.options.anthropic.token, ai.options.anthropic.model);
-		if(ai.options.ollama?.host) this.providers.ollama = new Ollama(this.ai, ai.options.ollama.host, ai.options.ollama.model);
-		if(ai.options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, ai.options.openAi.token, ai.options.openAi.model);
+		if(!ai.options.llm?.models) return;
+		Object.entries(ai.options.llm.models).forEach(([model, config]) => {
+			if(!this.defaultModel) this.defaultModel = model;
+			if(config.proto == 'anthropic') this.models[model] = new Anthropic(this.ai, config.token, model);
+			else if(config.proto == 'ollama') this.models[model] = new OpenAi(this.ai, config.host, 'not-needed', model);
+			else if(config.proto == 'openai') this.models[model] = new OpenAi(this.ai, config.host || null, config.token, model);
+		});
 	}
 	/**
 	 * Chat with LLM
 	 * @param {string} message Question
 	 * @param {LLMRequest} options Configuration options and chat history
-	 * @returns {{abort: () => void, response: Promise<LLMMessage[]>}} Function to abort response and chat history
+	 * @returns {{abort: () => void, response: Promise<string>}} Function to abort response and chat history
 	 */
-	ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
-		let model: any = [null, null];
-		if(options.model) {
-			if(typeof options.model == 'object') model = options.model;
-			else model = [options.model, (<any>this.ai.options)[options.model]?.model];
-		}
-		if(!options.model || model[1] == null) {
-			if(typeof this.ai.options.model == 'object') model = this.ai.options.model;
-			else model = [this.ai.options.model, (<any>this.ai.options)[this.ai.options.model]?.model];
-		}
-		if(!model[0] || !model[1]) throw new Error(`Unknown LLM provider or model: ${model[0]} / ${model[1]}`);
-		return this.providers[model[0]].ask(message, {...options, model: model[1]});
+	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
+		options = <any>{
+			system: '',
+			temperature: 0.8,
+			...this.ai.options.llm,
+			models: undefined,
+			history: [],
+			...options,
+		}
+		const m = options.model || this.defaultModel;
+		if(!this.models[m]) throw new Error(`Model does not exist: ${m}`);
+		let abort = () => {};
+		return Object.assign(new Promise<string>(async res => {
+			if(!options.history) options.history = [];
+			// If memories were passed, find any relevant ones and add a tool for ADHOC lookups
+			if(options.memory) {
+				const search = async (query?: string | null, subject?: string | null, limit = 10) => {
+					const [o, q] = await Promise.all([
+						subject ? this.embedding(subject) : Promise.resolve(null),
+						query ? this.embedding(query) : Promise.resolve(null),
+					]);
+					return (options.memory || []).map(m => {
+						const score = (o ? this.cosineSimilarity(m.embeddings[0], o[0].embedding) : 0)
+							+ (q ? this.cosineSimilarity(m.embeddings[1], q[0].embedding) : 0);
+						return {...m, score};
+					}).toSorted((a: any, b: any) => a.score - b.score).slice(0, limit);
+				}
+				options.system += '\nYou have RAG memory and will be given the top_k closest memories regarding the users query. Save anything new you have learned worth remembering from the user message using the remember tool and feel free to recall memories manually.\n';
+				const relevant = await search(message);
+				if(relevant.length) options.history.push({role: 'tool', name: 'recall', id: 'auto_recall_' + Math.random().toString(), args: {}, content: 'Things I remembered:\n' + relevant.map(m => `${m.owner}: ${m.fact}`).join('\n')});
+				options.tools = [{
+					name: 'recall',
+					description: 'Recall the closest memories you have regarding a query using RAG',
+					args: {
+						subject: {type: 'string', description: 'Find information by a subject topic, can be used with or without query argument'},
+						query: {type: 'string', description: 'Search memory based on a query, can be used with or without subject argument'},
+						topK: {type: 'number', description: 'Result limit, default 5'},
+					},
+					fn: (args) => {
+						if(!args.subject && !args.query) throw new Error('Either a subject or query argument is required');
+						return search(args.query, args.subject, args.topK);
+					}
+				}, {
+					name: 'remember',
+					description: 'Store important facts user shares for future recall',
+					args: {
+						owner: {type: 'string', description: 'Subject/person this fact is about'},
+						fact: {type: 'string', description: 'The information to remember'}
+					},
+					fn: async (args) => {
+						if(!options.memory) return;
+						const e = await Promise.all([
+							this.embedding(args.owner),
+							this.embedding(`${args.owner}: ${args.fact}`)
+						]);
+						const newMem = {owner: args.owner, fact: args.fact, embeddings: <any>[e[0][0].embedding, e[1][0].embedding]};
+						options.memory.splice(0, options.memory.length, ...[
+							...options.memory.filter(m => {
+								return this.cosineSimilarity(newMem.embeddings[0], m.embeddings[0]) < 0.9 && this.cosineSimilarity(newMem.embeddings[1], m.embeddings[1]) < 0.8;
+							}),
+							newMem
+						]);
+						return 'Remembered!';
+					}
+				}, ...options.tools || []];
+			}
+			// Ask
+			const resp = await this.models[m].ask(message, options);
+			// Remove any memory calls from history
+			if(options.memory) options.history.splice(0, options.history.length, ...options.history.filter(h => h.role != 'tool' || (h.name != 'recall' && h.name != 'remember')));
+			// Compress message history
+			if(options.compress) {
+				const compressed = await this.ai.language.compressHistory(options.history, options.compress.max, options.compress.min, options);
+				options.history.splice(0, options.history.length, ...compressed);
+			}
+			return res(resp);
+		}), {abort});
+	}
+	async code(message: string, options?: LLMRequest): Promise<any> {
+		const resp = await this.ask(message, {...options, system: [
+			options?.system,
+			'Return your response in a code block'
+		].filter(t => !!t).join(('\n'))});
+		const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
+		return codeBlock ? codeBlock[1].trim() : null;
 	}
 	/**
 	 * Compress chat history to reduce context size
 	 * @param {LLMMessage[]} history Chatlog that will be compressed
 	 * @param max Trigger compression once context is larger than max
-	 * @param min Summarize until context size is less than min
+	 * @param min Leave messages less than the token minimum, summarize the rest
 	 * @param {LLMRequest} options LLM options
 	 * @returns {Promise<LLMMessage[]>} New chat history will summary at index 0
 	 */
@@ -130,12 +202,23 @@ export class LLM {
 			else break;
 		}
 		if(history.length <= keep) return history;
-		const recent = keep == 0 ? [] : history.slice(-keep),
+		const system = history[0].role == 'system' ? history[0] : null,
+			recent = keep == 0 ? [] : history.slice(-keep),
 			process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
-		const summary = await this.summarize(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), 250, options);
-		return [{role: 'assistant', content: `Conversation Summary: ${summary}`, timestamp: Date.now()}, ...recent];
+		const summary: any = await this.summarize(process.map(m => `[${m.role}]: ${m.content}`).join('\n\n'), 500, options);
+		const d = Date.now();
+		const h = [{role: <any>'tool', name: 'summary', id: `summary_` + d, args: {}, content: `Conversation Summary: ${summary?.summary}`, timestamp: d}, ...recent];
+		if(system) h.splice(0, 0, system);
+		return h;
 	}
+	/**
+	 * Compare the difference between embeddings (calculates the angle between two vectors)
+	 * @param {number[]} v1 First embedding / vector comparison
+	 * @param {number[]} v2 Second embedding / vector for comparison
+	 * @returns {number} Similarity values 0-1: 0 = unique, 1 = identical
+	 */
 	cosineSimilarity(v1: number[], v2: number[]): number {
 		if (v1.length !== v2.length) throw new Error('Vectors must be same length');
 		let dotProduct = 0, normA = 0, normB = 0;
@@ -148,55 +231,94 @@
 		return denominator === 0 ? 0 : dotProduct / denominator;
 	}
-	embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
-		const objString = (obj: any, path = ''): string[] => {
-			if(obj === null || obj === undefined) return [];
-			return Object.entries(obj).flatMap(([key, value]) => {
-				const p = path ? `${path}${isNaN(+key) ? `.${key}` : `[${key}]`}` : key;
-				if(typeof value === 'object' && value !== null && !Array.isArray(value)) return objString(value, p);
-				const valueStr = Array.isArray(value) ? value.join(', ') : String(value);
-				return `${p}: ${valueStr}`;
-			});
-		};
-		const embed = async (text: string): Promise<number[]> => {
-			const model = await this.embedModel;
-			const output = await model(text, {pooling: 'mean', normalize: true});
-			return Array.from(output.data);
-		};
-		// Tokenize
-		const lines = typeof target === 'object' ? objString(target) : target.split('\n');
-		const tokens = lines.flatMap(line => [...line.split(/\s+/).filter(w => w.trim()), '\n']);
-		// Chunk
-		const chunks: string[] = [];
-		let start = 0;
-		while (start < tokens.length) {
-			let end = start;
-			let text = '';
-			// Build chunk
-			while (end < tokens.length) {
-				const nextToken = tokens[end];
-				const testText = text + (text ? ' ' : '') + nextToken;
-				const testTokens = this.estimateTokens(testText.replace(/\s*\n\s*/g, '\n'));
-				if (testTokens > maxTokens && text) break;
-				text = testText;
-				end++;
-			}
-			// Save chunk
-			const cleanText = text.replace(/\s*\n\s*/g, '\n').trim();
-			if(cleanText) chunks.push(cleanText);
-			start = end - overlapTokens;
-			if (start <= end - tokens.length + end) start = end; // Safety: prevent infinite loop
-		}
-		return Promise.all(chunks.map(async (text, index) => ({
-			index,
-			embedding: await embed(text),
-			text,
-			tokens: this.estimateTokens(text),
-		})));
+	/**
+	 * Chunk text into parts for AI digestion
+	 * @param {object | string} target Item that will be chunked (objects get converted)
+	 * @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
+	 * @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
+	 * @returns {string[]} Chunked strings
+	 */
+	chunk(target: object | string, maxTokens = 500, overlapTokens = 50): string[] {
+		const objString = (obj: any, path = ''): string[] => {
+			if(!obj) return [];
+			return Object.entries(obj).flatMap(([key, value]) => {
+				const p = path ? `${path}${isNaN(+key) ? `.${key}` : `[${key}]`}` : key;
+				if(typeof value === 'object' && !Array.isArray(value)) return objString(value, p);
+				return `${p}: ${Array.isArray(value) ? value.join(', ') : value}`;
+			});
+		};
+		const lines = typeof target === 'object' ? objString(target) : target.toString().split('\n');
+		const tokens = lines.flatMap(l => [...l.split(/\s+/).filter(Boolean), '\n']);
+		const chunks: string[] = [];
+		for(let i = 0; i < tokens.length;) {
+			let text = '', j = i;
+			while(j < tokens.length) {
+				const next = text + (text ? ' ' : '') + tokens[j];
+				if(this.estimateTokens(next.replace(/\s*\n\s*/g, '\n')) > maxTokens && text) break;
+				text = next;
+				j++;
+			}
+			const clean = text.replace(/\s*\n\s*/g, '\n').trim();
+			if(clean) chunks.push(clean);
+			i = Math.max(j - overlapTokens, j === i ? i + 1 : j);
+		}
+		return chunks;
+	}
+	/**
+	 * Create a vector representation of a string
+	 * @param {object | string} target Item that will be embedded (objects get converted)
+	 * @param {maxTokens?: number, overlapTokens?: number} opts Options for embedding such as chunk sizes
+	 * @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
+	 */
+	embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number} = {}): AbortablePromise<any[]> {
+		let {maxTokens = 500, overlapTokens = 50} = opts;
+		let aborted = false;
+		const abort = () => { aborted = true; };
+		const embed = (text: string): Promise<number[]> => {
+			return new Promise((resolve, reject) => {
+				if(aborted) return reject(new Error('Aborted'));
+				const args: string[] = [
+					join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'),
+					<string>this.ai.options.path,
+					this.ai.options?.embedder || 'bge-small-en-v1.5'
+				];
+				const proc = spawn('node', args, {stdio: ['pipe', 'pipe', 'ignore']});
+				proc.stdin.write(text);
+				proc.stdin.end();
+				let output = '';
+				proc.stdout.on('data', (data: Buffer) => output += data.toString());
+				proc.on('close', (code: number) => {
+					if(aborted) return reject(new Error('Aborted'));
+					if(code === 0) {
+						try {
+							const result = JSON.parse(output);
+							resolve(result.embedding);
+						} catch(err) {
+							reject(new Error('Failed to parse embedding output'));
+						}
+					} else {
+						reject(new Error(`Embedder process exited with code ${code}`));
+					}
+				});
+				proc.on('error', reject);
+			});
+		};
+		const p = (async () => {
+			const chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
+			for(let i = 0; i < chunks.length; i++) {
+				if(aborted) break;
+				const text = chunks[i];
+				const embedding = await embed(text);
+				results.push({index: i, embedding, text, tokens: this.estimateTokens(text)});
+			}
+			return results;
+		})();
+		return Object.assign(p, { abort });
 	}
@@ -211,7 +333,7 @@
 	/**
 	 * Compare the difference between two strings using tensor math
-	 * @param target Text that will checked
+	 * @param target Text that will be checked
 	 * @param {string} searchTerms Multiple search terms to check against target
 	 * @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
 	 */
@@ -228,17 +350,17 @@
 	/**
 	 * Ask a question with JSON response
-	 * @param {string} message Question
+	 * @param {string} text Text to process
+	 * @param {string} schema JSON schema the AI should match
 	 * @param {LLMRequest} options Configuration options and chat history
 	 * @returns {Promise<{} | {} | RegExpExecArray | null>}
 	 */
-	async json(message: string, options?: LLMRequest) {
-		let resp = await this.ask(message, {
-			system: 'Respond using a JSON blob',
-			...options
-		});
-		if(!resp?.[0]?.content) return {};
-		return JSONAttemptParse(new RegExp('\{[\s\S]*\}').exec(resp[0].content), {});
+	async json(text: string, schema: string, options?: LLMRequest): Promise<any> {
+		const code = await this.code(text, {...options, system: [
+			options?.system,
+			`Only respond using JSON matching this schema:\n\`\`\`json\n${schema}\n\`\`\``
+		].filter(t => !!t).join('\n')});
+		return code ? JSONAttemptParse(code, {}) : null;
 	}
 	/**
@@ -248,8 +370,9 @@
 	 * @param options LLM request options
 	 * @returns {Promise<string>} Summary
 	 */
-	summarize(text: string, tokens: number, options?: LLMRequest): Promise<string | null> {
-		return this.ask(text, {system: `Generate a brief summary <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options})
-			.then(history => <string>history.pop()?.content || null);
+	summarize(text: string, tokens: number = 500, options?: LLMRequest): Promise<string | null> {
+		return this.ask(text, {system: `Generate the shortest summary possible <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options});
 	}
 }
+export default LLM;
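To tie the memory types together, a sketch of the RAG flow under the assumptions in this diff: the caller owns the `LLMMemory[]` store, `ask()` injects `recall`/`remember` tools around it, and `json()` now takes an example schema string. Prompts and facts below are invented:

```ts
import {LLMMemory} from '@ztimson/ai-utils';

const memory: LLMMemory[] = []; // persist this yourself between sessions

// The model may call `remember`, which embeds owner + fact and appends to the array
await ai.language.ask('My name is Zak and I deploy on Fridays', {memory});

// Relevant memories are embedded, ranked by cosine similarity and fed back in
const answer = await ai.language.ask('When do I usually deploy?', {memory});

// Structured extraction via the new json(text, schema) signature
const person = await ai.language.json(
	'Jane is 34 and lives in Toronto',
	'{"name": "string", "age": 0, "city": "string"}'
);
```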

src/ollama.ts (deleted)

@@ -1,117 +0,0 @@
-import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
-import {Ai} from './ai.ts';
-import {LLMMessage, LLMRequest} from './llm.ts';
-import {AbortablePromise, LLMProvider} from './provider.ts';
-import {Ollama as ollama} from 'ollama';
-export class Ollama extends LLMProvider {
-	client!: ollama;
-	constructor(public readonly ai: Ai, public host: string, public model: string) {
-		super();
-		this.client = new ollama({host});
-	}
-	private toStandard(history: any[]): LLMMessage[] {
-		for(let i = 0; i < history.length; i++) {
-			if(history[i].role == 'assistant' && history[i].tool_calls) {
-				if(history[i].content) delete history[i].tool_calls;
-				else {
-					history.splice(i, 1);
-					i--;
-				}
-			} else if(history[i].role == 'tool') {
-				const error = history[i].content.startsWith('{"error":');
-				history[i] = {role: 'tool', name: history[i].tool_name, args: history[i].args, [error ? 'error' : 'content']: history[i].content, timestamp: history[i].timestamp};
-			}
-			if(!history[i]?.timestamp) history[i].timestamp = Date.now();
-		}
-		return history;
-	}
-	private fromStandard(history: LLMMessage[]): any[] {
-		return history.map((h: any) => {
-			const {timestamp, ...rest} = h;
-			if(h.role != 'tool') return rest;
-			return {role: 'tool', tool_name: h.name, content: h.error || h.content}
-		});
-	}
-	ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
-		const controller = new AbortController();
-		const response = new Promise<any>(async (res, rej) => {
-			let system = options.system || this.ai.options.system;
-			let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
-			if(history[0].roll == 'system') {
-				if(!system) system = history.shift();
-				else history.shift();
-			}
-			if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min);
-			if(options.system) history.unshift({role: 'system', content: system})
-			const requestParams: any = {
-				model: options.model || this.model,
-				messages: history,
-				stream: !!options.stream,
-				signal: controller.signal,
-				options: {
-					temperature: options.temperature || this.ai.options.temperature || 0.7,
-					num_predict: options.max_tokens || this.ai.options.max_tokens || 4096,
-				},
-				tools: (options.tools || this.ai.options.tools || []).map(t => ({
-					type: 'function',
-					function: {
-						name: t.name,
-						description: t.description,
-						parameters: {
-							type: 'object',
-							properties: t.args ? objectMap(t.args, (key, value) => ({...value, required: undefined})) : {},
-							required: t.args ? Object.entries(t.args).filter(t => t[1].required).map(t => t[0]) : []
-						}
-					}
-				}))
-			}
-			let resp: any, isFirstMessage = true;
-			do {
-				resp = await this.client.chat(requestParams);
-				if(options.stream) {
-					if(!isFirstMessage) options.stream({text: '\n\n'});
-					else isFirstMessage = false;
-					resp.message = {role: 'assistant', content: '', tool_calls: []};
-					for await (const chunk of resp) {
-						if(controller.signal.aborted) break;
-						if(chunk.message?.content) {
-							resp.message.content += chunk.message.content;
-							options.stream({text: chunk.message.content});
-						}
-						if(chunk.message?.tool_calls) resp.message.tool_calls = chunk.message.tool_calls;
-						if(chunk.done) break;
-					}
-				}
-				if(resp.message?.tool_calls?.length && !controller.signal.aborted) {
-					history.push(resp.message);
-					const results = await Promise.all(resp.message.tool_calls.map(async (toolCall: any) => {
-						const tool = (options.tools || this.ai.options.tools)?.find(findByProp('name', toolCall.function.name));
-						if(!tool) return {role: 'tool', tool_name: toolCall.function.name, content: '{"error": "Tool not found"}'};
-						const args = typeof toolCall.function.arguments === 'string' ? JSONAttemptParse(toolCall.function.arguments, {}) : toolCall.function.arguments;
-						try {
-							const result = await tool.fn(args, this.ai);
-							return {role: 'tool', tool_name: toolCall.function.name, args, content: JSONSanitize(result)};
-						} catch (err: any) {
-							return {role: 'tool', tool_name: toolCall.function.name, args, content: JSONSanitize({error: err?.message || err?.toString() || 'Unknown'})};
-						}
-					}));
-					history.push(...results);
-					requestParams.messages = history;
-				}
-			} while (!controller.signal.aborted && resp.message?.tool_calls?.length);
-			if(options.stream) options.stream({done: true});
-			res(this.toStandard([...history, {role: 'assistant', content: resp.message?.content}]));
-		});
-		return Object.assign(response, {abort: () => controller.abort()});
-	}
-}


@@ -1,15 +1,18 @@
 import {OpenAI as openAI} from 'openai';
-import {findByProp, objectMap, JSONSanitize, JSONAttemptParse} from '@ztimson/utils';
-import {Ai} from './ai.ts';
+import {findByProp, objectMap, JSONSanitize, JSONAttemptParse, clean} from '@ztimson/utils';
+import {AbortablePromise, Ai} from './ai.ts';
 import {LLMMessage, LLMRequest} from './llm.ts';
-import {AbortablePromise, LLMProvider} from './provider.ts';
+import {LLMProvider} from './provider.ts';
 
 export class OpenAi extends LLMProvider {
 	client!: openAI;
 
-	constructor(public readonly ai: Ai, public readonly apiToken: string, public model: string) {
+	constructor(public readonly ai: Ai, public readonly host: string | null, public readonly token: string, public model: string) {
 		super();
-		this.client = new openAI({apiKey: apiToken});
+		this.client = new openAI(clean({
+			baseURL: host,
+			apiKey: token || host ? 'ignored' : undefined
+		}));
 	}
 
 	private toStandard(history: any[]): LLMMessage[] {
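
Because clean() strips undefined keys and the apiKey expression evaluates as token || (host ? 'ignored' : undefined), a real key wins when supplied, a dummy key is sent to self-hosted OpenAI-compatible servers, and nothing is sent otherwise. Hypothetical constructions (hosts, keys, and model names are illustrative, not from this diff):

	const hosted = new OpenAi(ai, null, 'sk-...', 'gpt-4o');                 // api.openai.com with a real key
	const local = new OpenAi(ai, 'http://localhost:11434/v1', '', 'llama3'); // e.g. an Ollama OpenAI-compatible endpoint
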
@@ -61,19 +64,22 @@ export class OpenAi extends LLMProvider {
 		}, [] as any[]);
 	}
 
-	ask(message: string, options: LLMRequest = {}): AbortablePromise<LLMMessage[]> {
+	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
 		const controller = new AbortController();
-		const response = new Promise<any>(async (res, rej) => {
+		return Object.assign(new Promise<any>(async (res, rej) => {
+			if(options.system) {
+				if(options.history?.[0]?.role != 'system') options.history?.splice(0, 0, {role: 'system', content: options.system, timestamp: Date.now()});
+				else options.history[0].content = options.system;
+			}
 			let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
-			if(options.compress) history = await this.ai.language.compressHistory(<any>history, options.compress.max, options.compress.min, options);
+			const tools = options.tools || this.ai.options.llm?.tools || [];
 			const requestParams: any = {
 				model: options.model || this.model,
 				messages: history,
 				stream: !!options.stream,
-				max_tokens: options.max_tokens || this.ai.options.max_tokens || 4096,
-				temperature: options.temperature || this.ai.options.temperature || 0.7,
-				tools: (options.tools || this.ai.options.tools || []).map(t => ({
+				max_tokens: options.max_tokens || this.ai.options.llm?.max_tokens || 4096,
+				temperature: options.temperature || this.ai.options.llm?.temperature || 0.7,
+				tools: tools.map(t => ({
 					type: 'function',
 					function: {
 						name: t.name,
@@ -89,19 +95,45 @@ export class OpenAi extends LLMProvider {
 			let resp: any, isFirstMessage = true;
 			do {
-				resp = await this.client.chat.completions.create(requestParams);
+				resp = await this.client.chat.completions.create(requestParams).catch(err => {
+					err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
+					throw err;
+				});
 				if(options.stream) {
 					if(!isFirstMessage) options.stream({text: '\n\n'});
 					else isFirstMessage = false;
-					resp.choices = [{message: {content: '', tool_calls: []}}];
+					resp.choices = [{message: {role: 'assistant', content: '', tool_calls: []}}];
 					for await (const chunk of resp) {
 						if(controller.signal.aborted) break;
 						if(chunk.choices[0].delta.content) {
 							resp.choices[0].message.content += chunk.choices[0].delta.content;
 							options.stream({text: chunk.choices[0].delta.content});
 						}
 						if(chunk.choices[0].delta.tool_calls) {
-							resp.choices[0].message.tool_calls = chunk.choices[0].delta.tool_calls;
+							for(const deltaTC of chunk.choices[0].delta.tool_calls) {
+								const existing = resp.choices[0].message.tool_calls.find(tc => tc.index === deltaTC.index);
+								if(existing) {
+									if(deltaTC.id) existing.id = deltaTC.id;
+									if(deltaTC.type) existing.type = deltaTC.type;
+									if(deltaTC.function) {
+										if(!existing.function) existing.function = {};
+										if(deltaTC.function.name) existing.function.name = deltaTC.function.name;
+										if(deltaTC.function.arguments) existing.function.arguments = (existing.function.arguments || '') + deltaTC.function.arguments;
+									}
+								} else {
+									resp.choices[0].message.tool_calls.push({
+										index: deltaTC.index,
+										id: deltaTC.id || '',
+										type: deltaTC.type || 'function',
+										function: {
+											name: deltaTC.function?.name || '',
+											arguments: deltaTC.function?.arguments || ''
+										}
+									});
+								}
+							}
 						}
 					}
 				}
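
For context on this streaming fix: the completions API emits each tool call in fragments, where typically only the first chunk carries the id and name and later chunks append argument text, so the old wholesale assignment kept only the final fragment. An illustrative delta sequence (values invented):

	// {index: 0, id: 'call_1', type: 'function', function: {name: 'web_search', arguments: ''}}
	// {index: 0, function: {arguments: '{"query": "zti'}}
	// {index: 0, function: {arguments: 'mson"}'}}
	// merged by index => {id: 'call_1', function: {name: 'web_search', arguments: '{"query": "ztimson"}'}}
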
@@ -110,11 +142,12 @@ export class OpenAi extends LLMProvider {
 				if(toolCalls.length && !controller.signal.aborted) {
 					history.push(resp.choices[0].message);
 					const results = await Promise.all(toolCalls.map(async (toolCall: any) => {
-						const tool = options.tools?.find(findByProp('name', toolCall.function.name));
+						const tool = tools?.find(findByProp('name', toolCall.function.name));
+						if(options.stream) options.stream({tool: toolCall.function.name});
 						if(!tool) return {role: 'tool', tool_call_id: toolCall.id, content: '{"error": "Tool not found"}'};
 						try {
 							const args = JSONAttemptParse(toolCall.function.arguments, {});
-							const result = await tool.fn(args, this.ai);
+							const result = await tool.fn(args, options.stream, this.ai);
 							return {role: 'tool', tool_call_id: toolCall.id, content: JSONSanitize(result)};
 						} catch (err: any) {
 							return {role: 'tool', tool_call_id: toolCall.id, content: JSONSanitize({error: err?.message || err?.toString() || 'Unknown'})};
@@ -124,10 +157,12 @@ export class OpenAi extends LLMProvider {
 					requestParams.messages = history;
 				}
 			} while (!controller.signal.aborted && resp.choices?.[0]?.message?.tool_calls?.length);
+			history.push({role: 'assistant', content: resp.choices[0].message.content || ''});
+			history = this.toStandard(history);
 			if(options.stream) options.stream({done: true});
-			res(this.toStandard([...history, {role: 'assistant', content: resp.choices[0].message.content || ''}]));
-		});
-		return Object.assign(response, {abort: () => controller.abort()});
+			if(options.history) options.history.splice(0, options.history.length, ...history);
+			res(history.at(-1)?.content);
+		}), {abort: () => controller.abort()});
 	}
 }
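
The contract change in one sketch: ask() now resolves to the assistant's final text, and a caller-supplied history array is rewritten in place so it can be threaded through follow-up calls (names illustrative):

	const history: LLMMessage[] = [];
	const answer = await provider.ask('What is 2 + 2?', {history}); // answer is a string
	await provider.ask('Now double it', {history});                 // history already holds the prior turns
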


@@ -1,7 +1,6 @@
-import {LLMMessage, LLMOptions, LLMRequest} from './llm.ts';
+import {AbortablePromise} from './ai.ts';
+import {LLMMessage, LLMRequest} from './llm.ts';
 
-export type AbortablePromise<T> = Promise<T> & {abort: () => void};
-
 export abstract class LLMProvider {
-	abstract ask(message: string, options: LLMRequest): AbortablePromise<LLMMessage[]>;
+	abstract ask(message: string, options: LLMRequest): AbortablePromise<string>;
 }
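
AbortablePromise now lives in ai.ts, but the consumer-side shape is unchanged: a plain Promise with abort() bolted on. A sketch, where provider is any LLMProvider and the stream chunk fields ({text}, {tool}, {done}) mirror what the OpenAi implementation above emits:

	const req = provider.ask('Write a haiku', {stream: chunk => { if(chunk.text) process.stdout.write(chunk.text); }});
	const timer = setTimeout(() => req.abort(), 5000); // give up after 5 seconds
	const text = await req;                            // resolves with whatever was generated
	clearTimeout(timer);
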


@@ -1,6 +1,8 @@
+import * as cheerio from 'cheerio';
 import {$, $Sync} from '@ztimson/node-utils';
 import {ASet, consoleInterceptor, Http, fn as Fn} from '@ztimson/utils';
 import {Ai} from './ai.ts';
+import {LLMRequest} from './llm.ts';
 
 export type AiToolArg = {[key: string]: {
 	/** Argument type */
@@ -31,7 +33,7 @@ export type AiTool = {
 	/** Tool arguments */
 	args?: AiToolArg,
 	/** Callback function */
-	fn: (args: any, ai: Ai) => any | Promise<any>,
+	fn: (args: any, stream: LLMRequest['stream'], ai: Ai) => any | Promise<any>,
 };
 
 export const CliTool: AiTool = {
@@ -43,9 +45,9 @@ export const CliTool: AiTool = {
 export const DateTimeTool: AiTool = {
 	name: 'get_datetime',
-	description: 'Get current date and time',
+	description: 'Get current UTC date / time',
 	args: {},
-	fn: async () => new Date().toISOString()
+	fn: async () => new Date().toUTCString()
 }
 
 export const ExecTool: AiTool = {
@@ -55,15 +57,15 @@ export const ExecTool: AiTool = {
 		language: {type: 'string', description: 'Execution language', enum: ['cli', 'node', 'python'], required: true},
 		code: {type: 'string', description: 'Code to execute', required: true}
 	},
-	fn: async (args, ai) => {
+	fn: async (args, stream, ai) => {
 		try {
 			switch(args.type) {
 				case 'bash':
-					return await CliTool.fn({command: args.code}, ai);
+					return await CliTool.fn({command: args.code}, stream, ai);
 				case 'node':
-					return await JSTool.fn({code: args.code}, ai);
+					return await JSTool.fn({code: args.code}, stream, ai);
 				case 'python': {
-					return await PythonTool.fn({code: args.code}, ai);
+					return await PythonTool.fn({code: args.code}, stream, ai);
 				}
 			}
 		} catch(err: any) {
@@ -111,9 +113,43 @@ export const PythonTool: AiTool = {
 	fn: async (args: {code: string}) => ({result: $Sync`python -c "${args.code}"`})
 }
 
-export const SearchTool: AiTool = {
-	name: 'search',
-	description: 'Use a search engine to find relevant URLs, should be changed with fetch to scrape sources',
+export const ReadWebpageTool: AiTool = {
+	name: 'read_webpage',
+	description: 'Extract clean, structured content from a webpage. Use after web_search to read specific URLs',
+	args: {
+		url: {type: 'string', description: 'URL to extract content from', required: true},
+		focus: {type: 'string', description: 'Optional: What aspect to focus on (e.g., "pricing", "features", "contact info")'}
+	},
+	fn: async (args: {url: string; focus?: string}) => {
+		const html = await fetch(args.url, {headers: {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}})
+			.then(r => r.text()).catch(err => {throw new Error(`Failed to fetch: ${err.message}`)});
+		const $ = cheerio.load(html);
+		$('script, style, nav, footer, header, aside, iframe, noscript, [role="navigation"], [role="banner"], .ad, .ads, .cookie, .popup').remove();
+		const metadata = {
+			title: $('meta[property="og:title"]').attr('content') || $('title').text() || '',
+			description: $('meta[name="description"]').attr('content') || $('meta[property="og:description"]').attr('content') || '',
+		};
+		let content = '';
+		const contentSelectors = ['article', 'main', '[role="main"]', '.content', '.post', '.entry', 'body'];
+		for (const selector of contentSelectors) {
+			const el = $(selector).first();
+			if (el.length && el.text().trim().length > 200) {
+				content = el.text();
+				break;
+			}
+		}
+		if (!content) content = $('body').text();
+		content = content.replace(/\s+/g, ' ').trim().slice(0, 8000);
+		return {url: args.url, title: metadata.title.trim(), description: metadata.description.trim(), content, focus: args.focus};
+	}
+}
+
+export const WebSearchTool: AiTool = {
+	name: 'web_search',
+	description: 'Use duckduckgo (anonymous) to find relevant online resources. Returns a list of URLs that works great with the `read_webpage` tool',
 	args: {
 		query: {type: 'string', description: 'Search string', required: true},
 		length: {type: 'string', description: 'Number of results to return', default: 5},
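
Since fn now receives the caller's stream callback between args and ai, custom tools can surface progress mid-call. A hypothetical tool under the new signature (not part of this diff):

	export const DiceTool: AiTool = {
		name: 'roll_dice',
		description: 'Roll an N-sided die',
		args: {sides: {type: 'number', description: 'Number of sides', required: true}},
		fn: async (args, stream, ai) => {
			stream?.({tool: 'roll_dice'}); // optional progress event, same shape OpenAi.ask emits
			return {result: 1 + Math.floor(Math.random() * args.sides)};
		}
	};
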


@@ -1,25 +1,23 @@
 import {createWorker} from 'tesseract.js';
-import {Ai} from './ai.ts';
+import {AbortablePromise, Ai} from './ai.ts';
 
 export class Vision {
-	constructor(private ai: Ai) { }
+	constructor(private ai: Ai) {}
 
 	/**
 	 * Convert image to text using Optical Character Recognition
 	 * @param {string} path Path to image
-	 * @returns {{abort: Function, response: Promise<string | null>}} Abort function & Promise of extracted text
+	 * @returns {AbortablePromise<string | null>} Promise of extracted text with abort method
 	 */
-	ocr(path: string): {abort: () => void, response: Promise<string | null>} {
+	ocr(path: string): AbortablePromise<string | null> {
 		let worker: any;
-		return {
-			abort: () => { worker?.terminate(); },
-			response: new Promise(async res => {
-				worker = await createWorker('eng');
+		const p = new Promise<string | null>(async res => {
+			worker = await createWorker(this.ai.options.ocr || 'eng', 2, {cachePath: this.ai.options.path});
 			const {data} = await worker.recognize(path);
 			await worker.terminate();
 			res(data.text.trim() || null);
-			})
-		}
+		});
+		return Object.assign(p, {abort: () => worker?.terminate()});
 	}
 }
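
Callers that previously destructured {abort, response} now hold a single awaitable. Assuming the Vision helper is exposed on the Ai instance as ai.vision (the path is illustrative):

	const job = ai.vision.ocr('./receipt.png');
	// job.abort() tears down the tesseract worker early if needed
	const text = await job;
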


@@ -15,6 +15,7 @@
 		"noEmit": true,
 
 		/* Linting */
-		"strict": true
+		"strict": true,
+		"noImplicitAny": false
 	}
 }


@@ -4,9 +4,15 @@ import dts from 'vite-plugin-dts';
 export default defineConfig({
 	build: {
 		lib: {
-			entry: './src/index.ts',
+			entry: {
+				index: './src/index.ts',
+				embedder: './src/embedder.ts',
+			},
 			name: 'utils',
-			fileName: (format) => (format === 'es' ? 'index.mjs' : 'index.js'),
+			fileName: (format, entryName) => {
+				if (entryName === 'embedder') return 'embedder.js';
+				return format === 'es' ? 'index.mjs' : 'index.js';
+			},
 		},
 		ssr: true,
 		emptyOutDir: true,
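
The second entry point yields a standalone embedder.js bundle, which lines up with the commit history's move to run embedding in a separate process to dodge transformers.js memory leaks. A hypothetical consumer (the package name is assumed; it is not shown in this diff):

	import {fork} from 'node:child_process';
	import {createRequire} from 'node:module';

	const require = createRequire(import.meta.url);
	// run the embedder in its own process so its memory is reclaimed when it exits
	const child = fork(require.resolve('@ztimson/ai/embedder.js'));
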