Compare commits

24 Commits

| SHA1 |
|---|
| 473424ae23 |
| 9b831f7d95 |
| 498b326e45 |
| 56e4efec94 |
| a07f069ad0 |
| da15d299e6 |
| 7ef7c3f676 |
| 4143d00de7 |
| 0360f2493d |
| 0172887877 |
| 8f89f5e3cf |
| 5bd41f8c6a |
| e4399e1b7b |
| ad1ee48763 |
| 3ed206923f |
| 22d5427e86 |
| 43b53164c0 |
| 575fbac099 |
| 46ae0f7913 |
| 54730a2b9a |
| 27506d20af |
| 8c64129200 |
| 013aa942c0 |
| c8d5660b1a |
@@ -75,6 +75,7 @@ A TypeScript library that provides a unified interface for working with multiple

#### Instructions

1. Install the package: `npm i @ztimson/ai-utils`
2. For speaker diarization: `pip install pyannote.audio`

</details>
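For orientation, a minimal quick-start sketch that follows these install steps. The option names mirror the `AiOptions` type in the `src/ai.ts` hunk below; the `audio` accessor name and the paths/values are assumptions, not confirmed by this diff:

```typescript
import {Ai} from '@ztimson/ai-utils';

// Option names follow the updated AiOptions type (src/ai.ts); values are placeholders
const ai = new Ai({
	path: './models',              // assumption: local cache for downloaded models
	hfToken: process.env.HF_TOKEN, // needed for pyannote speaker diarization
	asr: 'whisper-base',
});

// asr() signature from src/audio.ts; `ai.audio` is an assumed accessor
const transcript = await ai.audio.asr('./meeting.mp3', {speaker: true});
console.log(transcript);
```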
@@ -90,8 +91,9 @@ A TypeScript library that provides a unified interface for working with multiple

#### Instructions

1. Install the dependencies: `npm i`
2. Build library: `npm build`
3. Run unit tests: `npm test`
2. For speaker diarization: `pip install pyannote.audio`
3. Build library: `npm build`
4. Run unit tests: `npm test`

</details>
1177 package-lock.json (generated)
File diff suppressed because it is too large

13 package.json
@@ -1,6 +1,6 @@
{
  "name": "@ztimson/ai-utils",
  "version": "0.5.2",
  "version": "0.7.6",
  "description": "AI Utility library",
  "author": "Zak Timson",
  "license": "MIT",
@@ -25,14 +25,15 @@
    "watch": "npx vite build --watch"
  },
  "dependencies": {
    "@anthropic-ai/sdk": "^0.67.0",
    "@anthropic-ai/sdk": "^0.78.0",
    "@tensorflow/tfjs": "^4.22.0",
    "@xenova/transformers": "^2.17.2",
    "@ztimson/node-utils": "^1.0.4",
    "@ztimson/utils": "^0.27.9",
    "@ztimson/node-utils": "^1.0.7",
    "@ztimson/utils": "^0.28.13",
    "cheerio": "^1.2.0",
    "openai": "^6.6.0",
    "tesseract.js": "^6.0.1"
    "openai": "^6.22.0",
    "tesseract.js": "^7.0.0",
    "wavefile": "^11.0.0"
  },
  "devDependencies": {
    "@types/node": "^24.8.1",
22 src/ai.ts
@@ -8,26 +8,20 @@ export type AbortablePromise<T> = Promise<T> & {
};

export type AiOptions = {
	/** Token to pull models from hugging face */
	hfToken?: string;
	/** Path to models */
	path?: string;
	/** Embedding model */
	embedder?: string; // all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5
	/** ASR model: whisper-tiny, whisper-base */
	asr?: string;
	/** Embedding model: all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5 */
	embedder?: string;
	/** Large language models, first is default */
	llm?: Omit<LLMRequest, 'model'> & {
		models: {[model: string]: AnthropicConfig | OllamaConfig | OpenAiConfig};
	}
	/** Tesseract OCR configuration */
	tesseract?: {
		/** Model: eng, eng_best, eng_fast */
		model?: string;
	}
	/** Whisper ASR configuration */
	whisper?: {
		/** Whisper binary location */
		binary: string;
		/** Model: `ggml-base.en.bin` */
		model: string;
	}
	/** OCR model: eng, eng_best, eng_fast */
	ocr?: string;
}

export class Ai {
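To make the options change concrete: the nested `whisper` and `tesseract` blocks are replaced by flat `asr` and `ocr` model names. A hedged migration sketch; only the top-level option names come from this hunk, while the provider config fields (e.g. `apiKey`) and model name are assumptions:

```typescript
import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({
	path: './models',
	hfToken: process.env.HF_TOKEN,
	asr: 'whisper-base',           // was options.whisper.model
	embedder: 'bge-small-en-v1.5',
	ocr: 'eng',                    // was options.tesseract.model
	llm: {
		models: {
			// Field name `apiKey` is assumed; the union type above only names
			// AnthropicConfig | OllamaConfig | OpenAiConfig
			'claude-sonnet-4': {apiKey: process.env.ANTHROPIC_API_KEY},
		},
	},
});
```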
@@ -13,25 +13,25 @@ export class Anthropic extends LLMProvider {
	}

	private toStandard(history: any[]): LLMMessage[] {
		for(let i = 0; i < history.length; i++) {
			const orgI = i;
			if(typeof history[orgI].content != 'string') {
				if(history[orgI].role == 'assistant') {
					history[orgI].content.filter((c: any) => c.type =='tool_use').forEach((c: any) => {
						history.splice(i + 1, 0, {role: 'tool', id: c.id, name: c.name, args: c.input, timestamp: Date.now()});
					});
				} else if(history[orgI].role == 'user') {
					history[orgI].content.filter((c: any) => c.type =='tool_result').forEach((c: any) => {
						const h = history.find((h: any) => h.id == c.tool_use_id);
						h[c.is_error ? 'error' : 'content'] = c.content;
		const timestamp = Date.now();
		const messages: LLMMessage[] = [];
		for(let h of history) {
			if(typeof h.content == 'string') {
				messages.push(<any>{timestamp, ...h});
			} else {
				const textContent = h.content?.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
				if(textContent) messages.push({timestamp, role: h.role, content: textContent});
				h.content.forEach((c: any) => {
					if(c.type == 'tool_use') {
						messages.push({timestamp, role: 'tool', id: c.id, name: c.name, args: c.input, content: undefined});
					} else if(c.type == 'tool_result') {
						const m: any = messages.findLast(m => (<any>m).id == c.tool_use_id);
						if(m) m[c.is_error ? 'error' : 'content'] = c.content;
					}
				});
			}
			history[orgI].content = history[orgI].content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
			if(!history[orgI].content) history.splice(orgI, 1);
		}
		if(!history[orgI].timestamp) history[orgI].timestamp = Date.now();
	}
		return history.filter(h => !!h.content);
		return messages;
	}

	private fromStandard(history: LLMMessage[]): any[] {
@@ -50,8 +50,8 @@ export class Anthropic extends LLMProvider {

	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
		const controller = new AbortController();
		return Object.assign(new Promise<any>(async (res, rej) => {
			const history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
		return Object.assign(new Promise<any>(async (res) => {
			let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
			const tools = options.tools || this.ai.options.llm?.tools || [];
			const requestParams: any = {
				model: options.model || this.model,
@@ -73,7 +73,6 @@ export class Anthropic extends LLMProvider {
			};

			let resp: any, isFirstMessage = true;
			const assistantMessages: string[] = [];
			do {
				resp = await this.client.messages.create(requestParams).catch(err => {
					err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
@@ -119,7 +118,6 @@ export class Anthropic extends LLMProvider {
					if(options.stream) options.stream({tool: toolCall.name});
					if(!tool) return {tool_use_id: toolCall.id, is_error: true, content: 'Tool not found'};
					try {
						console.log(typeof tool.fn);
						const result = await tool.fn(toolCall.input, options?.stream, this.ai);
						return {type: 'tool_result', tool_use_id: toolCall.id, content: JSONSanitize(result)};
					} catch (err: any) {
@@ -131,7 +129,7 @@ export class Anthropic extends LLMProvider {
				}
			} while (!controller.signal.aborted && resp.content.some((c: any) => c.type === 'tool_use'));
			history.push({role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')});
			this.toStandard(history);
			history = this.toStandard(history);

			if(options.stream) options.stream({done: true});
			if(options.history) options.history.splice(0, options.history.length, ...history);
134 src/asr.ts (new file)
@@ -0,0 +1,134 @@
import { pipeline } from '@xenova/transformers';
import { parentPort } from 'worker_threads';
import { spawn } from 'node:child_process';
import { execSync } from 'node:child_process';
import { mkdtempSync, rmSync, readFileSync } from 'node:fs';
import { join } from 'node:path';
import { tmpdir } from 'node:os';
import wavefile from 'wavefile';

export async function canDiarization(): Promise<string | null> {
	const checkPython = (cmd: string) => {
		return new Promise<boolean>((resolve) => {
			const proc = spawn(cmd, ['-c', 'import pyannote.audio']);
			proc.on('close', (code: number) => resolve(code === 0));
			proc.on('error', () => resolve(false));
		});
	};
	if(await checkPython('python3')) return 'python3';
	if(await checkPython('python')) return 'python';
	return null;
}

async function runDiarization(binary: string, audioPath: string, dir: string, token: string): Promise<any[]> {
	const script = `
import sys
import json
import os
from pyannote.audio import Pipeline

os.environ['TORCH_HOME'] = r"${dir}"
pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1", token="${token}")
output = pipeline(sys.argv[1])

segments = []
for turn, speaker in output.speaker_diarization:
    segments.append({"start": turn.start, "end": turn.end, "speaker": speaker})

print(json.dumps(segments))
`;

	return new Promise((resolve, reject) => {
		let output = '';
		const proc = spawn(binary, ['-c', script, audioPath]);
		proc.stdout.on('data', (data: Buffer) => output += data.toString());
		proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
		proc.on('close', (code: number) => {
			if(code === 0) {
				try {
					resolve(JSON.parse(output));
				} catch (err) {
					reject(new Error('Failed to parse diarization output'));
				}
			} else {
				reject(new Error(`Python process exited with code ${code}`));
			}
		});
		proc.on('error', reject);
	});
}

function combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
	const speakerMap = new Map();
	let speakerCount = 0;
	speakers.forEach((seg: any) => {
		if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
	});

	const lines: string[] = [];
	let currentSpeaker = -1;
	let currentText = '';
	chunks.forEach((chunk: any) => {
		const time = chunk.timestamp[0];
		const speaker = speakers.find((s: any) => time >= s.start && time <= s.end);
		const speakerNum = speaker ? speakerMap.get(speaker.speaker) : 1;
		if (speakerNum !== currentSpeaker) {
			if(currentText) lines.push(`[Speaker ${currentSpeaker}]: ${currentText.trim()}`);
			currentSpeaker = speakerNum;
			currentText = chunk.text;
		} else {
			currentText += chunk.text;
		}
	});
	if(currentText) lines.push(`[Speaker ${currentSpeaker}]: ${currentText.trim()}`);
	return lines.join('\n');
}

function prepareAudioBuffer(file: string): [string, Float32Array] {
	let wav: any, tmp;
	try {
		wav = new wavefile.WaveFile(readFileSync(file));
	} catch(err) {
		tmp = join(mkdtempSync(join(tmpdir(), 'audio-')), 'converted.wav');
		execSync(`ffmpeg -i "${file}" -ar 16000 -ac 1 -f wav "${tmp}"`, { stdio: 'ignore' });
		wav = new wavefile.WaveFile(readFileSync(tmp));
	} finally {
		wav.toBitDepth('32f');
		wav.toSampleRate(16000);
		const samples = wav.getSamples();
		if(Array.isArray(samples)) {
			const left = samples[0];
			const right = samples[1];
			const buffer = new Float32Array(left.length);
			for (let i = 0; i < left.length; i++) buffer[i] = (left[i] + right[i]) / 2;
			return [tmp || file, buffer];
		}
		return [tmp || file, samples];
	}
}

parentPort?.on('message', async ({ file, speaker, model, modelDir, token }) => {
	let tempFile = null;
	try {
		const asr: any = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
		const [f, buffer] = prepareAudioBuffer(file);
		tempFile = f !== file ? f : null;
		const hasDiarization = await canDiarization();
		const [transcript, speakers] = await Promise.all([
			asr(buffer, {return_timestamps: speaker ? 'word' : false}),
			(!speaker || !token || !hasDiarization) ? Promise.resolve(): runDiarization(hasDiarization, f, modelDir, token),
		]);

		const text = transcript.text?.trim() || null;
		if(!speaker) return parentPort?.postMessage({ text });
		if(!token) return parentPort?.postMessage({ text, error: 'HuggingFace token required' });
		if(!hasDiarization) return parentPort?.postMessage({ text, error: 'Speaker diarization unavailable' });

		const combined = combineSpeakerTranscript(transcript.chunks || [], speakers || []);
		parentPort?.postMessage({ text: combined });
	} catch (err: any) {
		parentPort?.postMessage({ error: err.stack || err.message });
	} finally {
		if(tempFile) rmSync(tempFile, { recursive: true, force: true });
	}
});
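Since diarization silently degrades when the Python side is missing, the exported `canDiarization` check above (re-exported from `src/index.ts`) can be surfaced to users. A short sketch:

```typescript
import {canDiarization} from '@ztimson/ai-utils';

// Resolves to the usable interpreter name ('python3' or 'python'),
// or null when `import pyannote.audio` fails in both
const python = await canDiarization();
if(!python) console.warn('Speaker diarization unavailable: pip install pyannote.audio');
```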
90 src/audio.ts
@@ -1,50 +1,60 @@
import {spawn} from 'node:child_process';
import fs from 'node:fs/promises';
import Path from 'node:path';
import {fileURLToPath} from 'url';
import {Worker} from 'worker_threads';
import {AbortablePromise, Ai} from './ai.ts';
import {canDiarization} from './asr.ts';
import {dirname, join} from 'path';

export class Audio {
	private downloads: {[key: string]: Promise<string>} = {};
	private whisperModel!: string;
	constructor(private ai: Ai) {}

	constructor(private ai: Ai) {
		if(ai.options.whisper?.binary) {
			this.whisperModel = ai.options.whisper?.model.endsWith('.bin') ? ai.options.whisper?.model : ai.options.whisper?.model + '.bin';
			this.downloadAsrModel();
	asr(file: string, options: { model?: string; speaker?: boolean | 'id' } = {}): AbortablePromise<string | null> {
		const { model = this.ai.options.asr || 'whisper-base', speaker = false } = options;
		let aborted = false;
		const abort = () => { aborted = true; };

		let p = new Promise<string | null>((resolve, reject) => {
			const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'asr.js'));
			const handleMessage = ({ text, warning, error }: any) => {
				setTimeout(() => worker.terminate(), 1000);
				if(aborted) return;
				if(error) reject(new Error(error));
				else {
					if(warning) console.warn(warning);
					resolve(text);
				}
			};
			const handleError = (err: Error) => {
				setTimeout(() => worker.terminate(), 1000);
				if(!aborted) reject(err);
			};
			worker.on('message', handleMessage);
			worker.on('error', handleError);
			worker.on('exit', (code) => {
				if(code !== 0 && !aborted) reject(new Error(`Worker exited with code ${code}`));
			});
			worker.postMessage({file, model, speaker, modelDir: this.ai.options.path, token: this.ai.options.hfToken});
		});

		// Name speakers using AI
		if(options.speaker == 'id') {
			if(!this.ai.language.defaultModel) throw new Error('Configure an LLM for advanced ASR speaker detection');
			p = p.then(async transcript => {
				if(!transcript) return transcript;
				let chunks = this.ai.language.chunk(transcript, 500, 0);
				if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
				const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name", 2: "Second Name"}', {
					system: 'Use the following transcript to identify speakers. Only identify speakers you are positive about, dont mention speakers you are unsure about in your response',
					temperature: 0.1,
				});
				Object.entries(names).forEach(([speaker, name]) => {
					transcript = (<string>transcript).replaceAll(`[Speaker ${speaker}]`, `[${name}]`);
				});
				return transcript;
			})
		}

	asr(path: string, model: string = this.whisperModel): AbortablePromise<string | null> {
		if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
		let abort: any = () => {};
		const p = new Promise<string | null>(async (resolve, reject) => {
			const m = await this.downloadAsrModel(model);
			let output = '';
			const proc = spawn(<string>this.ai.options.whisper?.binary, ['-nt', '-np', '-m', m, '-f', path], {stdio: ['ignore', 'pipe', 'ignore']});
			abort = () => proc.kill('SIGTERM');
			proc.on('error', (err: Error) => reject(err));
			proc.stdout.on('data', (data: Buffer) => output += data.toString());
			proc.on('close', (code: number) => {
				if(code === 0) resolve(output.trim() || null);
				else reject(new Error(`Exit code ${code}`));
			});
		});
		return Object.assign(p, {abort});
		return Object.assign(p, { abort });
	}

	async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
		if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
		if(!model.endsWith('.bin')) model += '.bin';
		const p = Path.join(<string>this.ai.options.path, model);
		if(await fs.stat(p).then(() => true).catch(() => false)) return p;
		if(!!this.downloads[model]) return this.downloads[model];
		this.downloads[model] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${model}`)
			.then(resp => resp.arrayBuffer())
			.then(arr => Buffer.from(arr)).then(async buffer => {
				await fs.writeFile(p, buffer);
				delete this.downloads[model];
				return p;
			});
		return this.downloads[model];
	}
	canDiarization = () => canDiarization().then(resp => !!resp);
}
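The `speaker: 'id'` path above post-processes the diarized transcript with the configured LLM, swapping `[Speaker N]` labels for detected names. A hedged call-site sketch, reusing the `ai` instance from the quick-start sketch (the `audio` accessor remains an assumption):

```typescript
// Requires an LLM in AiOptions.llm.models plus hfToken for diarization
const named = await ai.audio.asr('./interview.wav', {speaker: 'id'});
// e.g. "[Alice]: Thanks for joining..." instead of "[Speaker 1]: ..."
```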
@@ -3,9 +3,9 @@ import { parentPort } from 'worker_threads';

let embedder: any;

parentPort?.on('message', async ({ id, text, model }) => {
	if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model);
parentPort?.on('message', async ({text, model, modelDir }) => {
	if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model, {quantized: true, cache_dir: modelDir});
	const output = await embedder(text, { pooling: 'mean', normalize: true });
	const embedding = Array.from(output.data);
	parentPort?.postMessage({ id, embedding });
	parentPort?.postMessage({embedding});
});
@@ -1,5 +1,6 @@
export * from './ai';
export * from './antrhopic';
export * from './asr';
export * from './audio';
export * from './embedder'
export * from './llm';
71 src/llm.ts
@@ -75,22 +75,10 @@ export type LLMRequest = {
}

class LLM {
	private embedWorker: Worker | null = null;
	private embedQueue = new Map<number, { resolve: (value: number[]) => void; reject: (error: any) => void }>();
	private embedId = 0;
	private models: {[model: string]: LLMProvider} = {};
	private defaultModel!: string;
	defaultModel!: string;
	models: {[model: string]: LLMProvider} = {};

	constructor(public readonly ai: Ai) {
		this.embedWorker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
		this.embedWorker.on('message', ({ id, embedding }) => {
			const pending = this.embedQueue.get(id);
			if (pending) {
				pending.resolve(embedding);
				this.embedQueue.delete(id);
			}
		});

		if(!ai.options.llm?.models) return;
		Object.entries(ai.options.llm.models).forEach(([model, config]) => {
			if(!this.defaultModel) this.defaultModel = model;
@@ -196,7 +184,12 @@ class LLM {
		const system = history[0].role == 'system' ? history[0] : null,
			recent = keep == 0 ? [] : history.slice(-keep),
			process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
		const summary: any = await this.json(`Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn't already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI. Match this format: {summary: string, facts: [[subject, fact]]}\n\n${process.map(m => `${m.role}: ${m.content}`).join('\n\n')}`, {model: options?.model, temperature: options?.temperature || 0.3});

		const summary: any = await this.json(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), '{summary: string, facts: [[subject, fact]]}', {
			system: 'Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn\'t already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI.',
			model: options?.model,
			temperature: options?.temperature || 0.3
		});
		const timestamp = new Date();
		const memory = await Promise.all((summary?.facts || [])?.map(async ([owner, fact]: [string, string]) => {
			const e = await Promise.all([this.embedding(owner), this.embedding(`${owner}: ${fact}`)]);
@@ -262,25 +255,37 @@ class LLM {
	/**
	 * Create a vector representation of a string
	 * @param {object | string} target Item that will be embedded (objects get converted)
	 * @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
	 * @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
	 * @param {maxTokens?: number, overlapTokens?: number} opts Options for embedding such as chunk sizes
	 * @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
	 */
	embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
	async embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number} = {}) {
		let {maxTokens = 500, overlapTokens = 50} = opts;
		const embed = (text: string): Promise<number[]> => {
			return new Promise((resolve, reject) => {
				const id = this.embedId++;
				this.embedQueue.set(id, { resolve, reject });
				this.embedWorker?.postMessage({ id, text, model: this.ai.options?.embedder || 'bge-small-en-v1.5' });
				const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
				const handleMessage = ({ embedding }: any) => {
					worker.terminate();
					resolve(embedding);
				};
				const handleError = (err: Error) => {
					worker.terminate();
					reject(err);
				};
				worker.on('message', handleMessage);
				worker.on('error', handleError);
				worker.on('exit', (code) => {
					if(code !== 0) reject(new Error(`Worker exited with code ${code}`));
				});
				worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', modelDir: this.ai.options.path});
			});
		};
		const chunks = this.chunk(target, maxTokens, overlapTokens);
		return Promise.all(chunks.map(async (text, index) => ({
			index,
			embedding: await embed(text),
			text,
			tokens: this.estimateTokens(text),
		})));
		const chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
		for(let i = 0; i < chunks.length; i++) {
			const text = chunks[i];
			const embedding = await embed(text);
			results.push({index: i, embedding, text, tokens: this.estimateTokens(text)});
		}
		return results;
	}
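For reference, the reworked signature is consumed roughly like this; `ai.language` is how other code in this diff (see `src/audio.ts` above) reaches the LLM helper, and the `ai` instance is reused from the quick-start sketch:

```typescript
// Chunked embeddings with the new options-object signature
const vectors = await ai.language.embedding('Some long document text...', {maxTokens: 500, overlapTokens: 50});
// => [{index, embedding, text, tokens}, ...] one entry per chunk
```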
	/**
@@ -312,12 +317,16 @@ class LLM {

	/**
	 * Ask a question with JSON response
	 * @param {string} message Question
	 * @param {string} text Text to process
	 * @param {string} schema JSON schema the AI should match
	 * @param {LLMRequest} options Configuration options and chat history
	 * @returns {Promise<{} | {} | RegExpExecArray | null>}
	 */
	async json(message: string, options?: LLMRequest): Promise<any> {
		let resp = await this.ask(message, {system: 'Respond using a JSON blob matching any provided examples', ...options});
	async json(text: string, schema: string, options?: LLMRequest): Promise<any> {
		let resp = await this.ask(text, {...options, system: (options?.system ? `${options.system}\n` : '') + `Only respond using a JSON code block matching this schema:
\`\`\`json
${schema}
\`\`\``});
		if(!resp) return {};
		const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
		const jsonStr = codeBlock ? codeBlock[1].trim() : resp;
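The call sites in this diff show the new shape (e.g. the speaker-naming code in `src/audio.ts`). A condensed sketch, again reusing the `ai` instance from the quick-start sketch:

```typescript
// The schema is passed as a string and echoed back inside a fenced json block
const result = await ai.language.json(
	'Alice: hi\nBob: hey, how are you?',
	'{summary: string, facts: [[subject, fact]]}',
	{temperature: 0.3},
);
```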
@@ -68,7 +68,7 @@ export class OpenAi extends LLMProvider {
		const controller = new AbortController();
		return Object.assign(new Promise<any>(async (res, rej) => {
			if(options.system && options.history?.[0]?.role != 'system') options.history?.splice(0, 0, {role: 'system', content: options.system, timestamp: Date.now()});
			const history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
			let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
			const tools = options.tools || this.ai.options.llm?.tools || [];
			const requestParams: any = {
				model: options.model || this.model,
@@ -133,7 +133,7 @@ export class OpenAi extends LLMProvider {
			}
		} while (!controller.signal.aborted && resp.choices?.[0]?.message?.tool_calls?.length);
		history.push({role: 'assistant', content: resp.choices[0].message.content || ''});
		this.toStandard(history);
		history = this.toStandard(history);

		if(options.stream) options.stream({done: true});
		if(options.history) options.history.splice(0, options.history.length, ...history);
@@ -13,7 +13,7 @@ export class Vision {
	ocr(path: string): AbortablePromise<string | null> {
		let worker: any;
		const p = new Promise<string | null>(async res => {
			worker = await createWorker(this.ai.options.tesseract?.model || 'eng', 2, {cachePath: this.ai.options.path});
			worker = await createWorker(this.ai.options.ocr || 'eng', 2, {cachePath: this.ai.options.path});
			const {data} = await worker.recognize(path);
			await worker.terminate();
			res(data.text.trim() || null);
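OCR now reads its model from the flattened `ocr` option rather than `tesseract.model`. A hedged usage sketch (the `vision` accessor name is an assumption):

```typescript
import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({path: './models', ocr: 'eng'});
const text = await ai.vision.ocr('./scan.png'); // resolves null when nothing is recognized
```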
@@ -1,11 +1,11 @@
import {defineConfig} from 'vite';
import dts from 'vite-plugin-dts';
import {resolve} from 'path';

export default defineConfig({
	build: {
		lib: {
			entry: {
				asr: './src/asr.ts',
				index: './src/index.ts',
				embedder: './src/embedder.ts',
			},