Compare commits: 0.6.7-rc.1...0.8.6 (23 commits)
| SHA1 |
|---|
| 6454548364 |
| 936317f2f2 |
| cfde2ac4d3 |
| e4ba89d3db |
| 71a7e2a904 |
| abd290246c |
| ca66e8e304 |
| cec892563e |
| 91066e070f |
| a94b153c6d |
| 39537a4a8f |
| 790608f020 |
| 473424ae23 |
| 9b831f7d95 |
| 498b326e45 |
| 56e4efec94 |
| a07f069ad0 |
| da15d299e6 |
| 7ef7c3f676 |
| 4143d00de7 |
| 0360f2493d |
| 0172887877 |
| 8f89f5e3cf |
README.md (18 lines changed)
@@ -3,7 +3,7 @@
 <br />

 <!-- Logo -->
-<img src="https://git.zakscode.com/repo-avatars/a90851ca730480ec37a5c0c2c4f1b4609eee5eadf806eaf16c83ac4cb7493aa9" alt="Logo" width="200" height="200">
+<img alt="Logo" width="200" height="200" src="https://git.zakscode.com/repo-avatars/a82d423674763e7a0c1c945bdbb07e249b2bb786d3c9beae76d5b196a10f5c0f">

 <!-- Title -->
 ### @ztimson/ai-utils
@@ -53,13 +53,15 @@ A TypeScript library that provides a unified interface for working with multiple
 - **Provider Abstraction**: Switch between AI providers without changing your code

 ### Built With
 [](https://anthropic.com/)
-[](https://openai.com/)
-[](https://ollama.com/)
-[](https://tensorflow.org/)
-[](https://tesseract-ocr.github.io/)
+[](https://github.com/ggml-org/llama.cpp)
+[](https://openai.com/)
+[](https://github.com/pyannote)
+[](https://tensorflow.org/)
+[](https://tesseract-ocr.github.io/)
+[](https://huggingface.co/docs/transformers.js/en/index)
 [](https://typescriptlang.org/)
 [](https://github.com/ggerganov/whisper.cpp)

 ## Setup

@@ -88,6 +90,8 @@ A TypeScript library that provides a unified interface for working with multiple

 #### Prerequisites
 - [Node.js](https://nodejs.org/en/download)
+- _[Whisper.cpp](https://github.com/ggml-org/whisper.cpp/releases/tag) (ASR)_
+- _[Pyannote](https://github.com/pyannote) (ASR Diarization):_ `pip install pyannote.audio`

 #### Instructions
 1. Install the dependencies: `npm i`
package-lock.json (generated, 1038 lines changed): diff suppressed because it is too large.
package.json (13 lines changed)
@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.6.7-rc.1",
+  "version": "0.8.6",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",
@@ -25,15 +25,14 @@
     "watch": "npx vite build --watch"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.67.0",
+    "@anthropic-ai/sdk": "^0.78.0",
     "@tensorflow/tfjs": "^4.22.0",
     "@xenova/transformers": "^2.17.2",
-    "@ztimson/node-utils": "^1.0.4",
+    "@ztimson/node-utils": "^1.0.7",
-    "@ztimson/utils": "^0.27.9",
+    "@ztimson/utils": "^0.28.13",
     "cheerio": "^1.2.0",
-    "openai": "^6.6.0",
+    "openai": "^6.22.0",
-    "tesseract.js": "^6.0.1",
+    "tesseract.js": "^7.0.0"
-    "wavefile": "^11.0.0"
   },
   "devDependencies": {
     "@types/node": "^24.8.1",
src/ai.ts
@@ -8,9 +8,11 @@ export type AbortablePromise<T> = Promise<T> & {
 };

 export type AiOptions = {
+  /** Token to pull models from hugging face */
+  hfToken?: string;
   /** Path to models */
   path?: string;
-  /** ASR model: whisper-tiny, whisper-base */
+  /** Whisper ASR model: ggml-tiny.en.bin, ggml-base.en.bin */
   asr?: string;
   /** Embedding model: all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5 */
   embedder?: string;
@@ -20,6 +22,8 @@ export type AiOptions = {
   }
   /** OCR model: eng, eng_best, eng_fast */
   ocr?: string;
+  /** Whisper binary */
+  whisper?: string;
 }

 export class Ai {
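The two additions to `AiOptions` formalize the move to an external whisper.cpp binary and gated Hugging Face models. A minimal configuration sketch, assuming the `Ai` constructor accepts this options object (the constructor signature is not shown in the diff; the paths and token below are placeholders):

```typescript
import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({
  path: './models',                      // Cache directory for downloaded models
  whisper: '/usr/local/bin/whisper-cli', // whisper.cpp binary (new in this release)
  asr: 'ggml-base.en.bin',               // Model name per the updated doc comment
  hfToken: process.env.HF_TOKEN,         // Pulls gated models such as pyannote
});
```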
src/asr.ts (deleted, 125 lines)
@@ -1,125 +0,0 @@
-import { pipeline } from '@xenova/transformers';
-import { parentPort } from 'worker_threads';
-import * as fs from 'node:fs';
-import wavefile from 'wavefile';
-import { spawn } from 'node:child_process';
-
-let whisperPipeline: any;
-
-export async function canDiarization(): Promise<boolean> {
-  return new Promise((resolve) => {
-    const proc = spawn('python3', ['-c', 'import pyannote.audio']);
-    proc.on('close', (code: number) => resolve(code === 0));
-    proc.on('error', () => resolve(false));
-  });
-}
-
-async function runDiarization(audioPath: string, torchHome: string): Promise<any[]> {
-  const script = `
-import sys
-import json
-import os
-from pyannote.audio import Pipeline
-
-os.environ['TORCH_HOME'] = "${torchHome}"
-pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1")
-diarization = pipeline(sys.argv[1])
-
-segments = []
-for turn, _, speaker in diarization.itertracks(yield_label=True):
-    segments.append({
-        "start": turn.start,
-        "end": turn.end,
-        "speaker": speaker
-    })
-
-print(json.dumps(segments))
-`;
-
-  return new Promise((resolve, reject) => {
-    let output = '';
-    const proc = spawn('python3', ['-c', script, audioPath]);
-    proc.stdout.on('data', (data: Buffer) => output += data.toString());
-    proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
-    proc.on('close', (code: number) => {
-      if(code === 0) {
-        try {
-          resolve(JSON.parse(output));
-        } catch (err) {
-          reject(new Error('Failed to parse diarization output'));
-        }
-      } else {
-        reject(new Error(`Python process exited with code ${code}`));
-      }
-    });
-    proc.on('error', reject);
-  });
-}
-
-function combineSpeakerTranscript(chunks: any[], speakers: any[]): string {
-  const speakerMap = new Map();
-  let speakerCount = 0;
-  speakers.forEach((seg: any) => {
-    if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
-  });
-
-  const lines: string[] = [];
-  let currentSpeaker = -1;
-  let currentText = '';
-  chunks.forEach((chunk: any) => {
-    const time = chunk.timestamp[0];
-    const speaker = speakers.find((s: any) => time >= s.start && time <= s.end);
-    const speakerNum = speaker ? speakerMap.get(speaker.speaker) : 1;
-    if (speakerNum !== currentSpeaker) {
-      if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
-      currentSpeaker = speakerNum;
-      currentText = chunk.text;
-    } else {
-      currentText += chunk.text;
-    }
-  });
-  if(currentText) lines.push(`[speaker ${currentSpeaker}]: ${currentText.trim()}`);
-  return lines.join('\n');
-}
-
-parentPort?.on('message', async ({ file, speaker, model, modelDir }) => {
-  try {
-    console.log('worker', file);
-    if(!whisperPipeline) whisperPipeline = await pipeline('automatic-speech-recognition', `Xenova/${model}`, {cache_dir: modelDir, quantized: true});
-
-    // Prepare audio file (convert to mono channel wave)
-    const wav = new wavefile.WaveFile(fs.readFileSync(file));
-    wav.toBitDepth('32f');
-    wav.toSampleRate(16000);
-    const samples = wav.getSamples();
-    let buffer;
-    if(Array.isArray(samples)) { // stereo to mono - average the channels
-      const left = samples[0];
-      const right = samples[1];
-      buffer = new Float32Array(left.length);
-      for (let i = 0; i < left.length; i++) buffer[i] = (left[i] + right[i]) / 2;
-    } else {
-      buffer = samples;
-    }
-
-    // Transcribe
-    const transcriptResult = await whisperPipeline(buffer, {return_timestamps: speaker ? 'word' : false});
-    if(!speaker) {
-      parentPort?.postMessage({ text: transcriptResult.text?.trim() || null });
-      return;
-    }
-
-    // Speaker Diarization
-    const hasDiarization = await canDiarization();
-    if(!hasDiarization) {
-      parentPort?.postMessage({ text: transcriptResult.text?.trim() || null, error: 'Speaker diarization unavailable' });
-      return;
-    }
-
-    const speakers = await runDiarization(file, modelDir);
-    const combined = combineSpeakerTranscript(transcriptResult.chunks || [], speakers);
-    parentPort?.postMessage({ text: combined });
-  } catch (err) {
-    parentPort?.postMessage({ error: (err as Error).message });
-  }
-});
src/audio.ts (287 lines changed)
@@ -1,41 +1,270 @@
-import {Worker} from 'worker_threads';
-import Path from 'node:path';
+import {execSync, spawn} from 'node:child_process';
+import {mkdtempSync} from 'node:fs';
+import fs from 'node:fs/promises';
+import {tmpdir} from 'node:os';
+import * as path from 'node:path';
+import Path, {join} from 'node:path';
 import {AbortablePromise, Ai} from './ai.ts';
-import {canDiarization} from './asr.ts';

 export class Audio {
-  constructor(private ai: Ai) {}
-
-  asr(file: string, options: { model?: string; speaker?: boolean } = {}): AbortablePromise<string | null> {
-    console.log('audio', file);
-    const { model = this.ai.options.asr || 'whisper-base', speaker = false } = options;
-    let aborted = false;
-    const abort = () => { aborted = true; };
-
-    const p = new Promise<string | null>((resolve, reject) => {
-      const worker = new Worker(Path.join(import.meta.dirname, 'asr.js'));
-      const handleMessage = ({ text, warning, error }: any) => {
-        worker.terminate();
-        if(aborted) return;
-        if(error) reject(new Error(error));
-        else {
-          if(warning) console.warn(warning);
-          resolve(text);
-        }
-      };
-
-      const handleError = (err: Error) => {
-        worker.terminate();
-        if(!aborted) reject(err);
-      };
-
-      worker.on('message', handleMessage);
-      worker.on('error', handleError);
-      worker.on('exit', (code) => {
-        if(code !== 0 && !aborted) reject(new Error(`Worker exited with code ${code}`));
-      });
-      worker.postMessage({file, model, speaker, modelDir: this.ai.options.path});
-    });
-    return Object.assign(p, { abort });
-  }
-
-  canDiarization = canDiarization;
+  private downloads: {[key: string]: Promise<string>} = {};
+  private pyannote!: string;
+  private whisperModel!: string;
+
+  constructor(private ai: Ai) {
+    if(ai.options.whisper) {
+      this.whisperModel = ai.options.asr || 'ggml-base.en.bin';
+      this.downloadAsrModel();
+    }
+
+    this.pyannote = `
+import sys
+import json
+import os
+from pyannote.audio import Pipeline
+
+os.environ['TORCH_HOME'] = r"${ai.options.path}"
+pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1", token="${ai.options.hfToken}")
+output = pipeline(sys.argv[1])
+
+segments = []
+for turn, speaker in output.speaker_diarization:
+    segments.append({"start": turn.start, "end": turn.end, "speaker": speaker})
+
+print(json.dumps(segments))
+`;
+  }
+
+  private async addPunctuation(timestampData: any, llm?: boolean, cadence = 150): Promise<string> {
+    const countSyllables = (word: string): number => {
+      word = word.toLowerCase().replace(/[^a-z]/g, '');
+      if(word.length <= 3) return 1;
+      const matches = word.match(/[aeiouy]+/g);
+      let count = matches ? matches.length : 1;
+      if(word.endsWith('e')) count--;
+      return Math.max(1, count);
+    };
+
+    let result = '';
+    timestampData.transcription.filter((word, i) => {
+      let skip = false;
+      const prevWord = timestampData.transcription[i - 1];
+      const nextWord = timestampData.transcription[i + 1];
+      if(!word.text && nextWord) {
+        nextWord.offsets.from = word.offsets.from;
+        nextWord.timestamps.from = word.offsets.from;
+      } else if(word.text && word.text[0] != ' ' && prevWord) {
+        prevWord.offsets.to = word.offsets.to;
+        prevWord.timestamps.to = word.timestamps.to;
+        prevWord.text += word.text;
+        skip = true;
+      }
+      return !!word.text && !skip;
+    }).forEach((word: any) => {
+      const capital = /^[A-Z]/.test(word.text.trim());
+      const length = word.offsets.to - word.offsets.from;
+      const syllables = countSyllables(word.text.trim());
+      const expected = syllables * cadence;
+      if(capital && length > expected * 2 && word.text[0] == ' ') result += '.';
+      result += word.text;
+    });
+    if(!llm) return result.trim();
+    return this.ai.language.ask(result, {
+      system: 'Remove any misplaced punctuation from the following ASR transcript using the replace tool. Avoid modifying words unless there is an obvious typo',
+      temperature: 0.1,
+      tools: [{
+        name: 'replace',
+        description: 'Use find and replace to fix errors',
+        args: {
+          find: {type: 'string', description: 'Text to find', required: true},
+          replace: {type: 'string', description: 'Text to replace', required: true}
+        },
+        fn: (args) => result = result.replace(args.find, args.replace)
+      }]
+    }).then(() => result);
+  }
+
+  private async diarizeTranscript(timestampData: any, speakers: any[], llm: boolean): Promise<string> {
+    const speakerMap = new Map();
+    let speakerCount = 0;
+    speakers.forEach((seg: any) => {
+      if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
+    });
+
+    const punctuatedText = await this.addPunctuation(timestampData, llm);
+    const sentences = punctuatedText.match(/[^.!?]+[.!?]+/g) || [punctuatedText];
+    const words = timestampData.transcription.filter((w: any) => w.text.trim());
+
+    // Assign speaker to each sentence
+    const sentencesWithSpeakers = sentences.map(sentence => {
+      sentence = sentence.trim();
+      if(!sentence) return null;
+
+      const sentenceWords = sentence.toLowerCase().replace(/[^\w\s]/g, '').split(/\s+/);
+      const speakerWordCount = new Map<number, number>();
+
+      sentenceWords.forEach(sw => {
+        const word = words.find((w: any) => sw === w.text.trim().toLowerCase().replace(/[^\w]/g, ''));
+        if(!word) return;
+
+        const wordTime = word.offsets.from / 1000;
+        const speaker = speakers.find((seg: any) => wordTime >= seg.start && wordTime <= seg.end);
+        if(speaker) {
+          const spkNum = speakerMap.get(speaker.speaker);
+          speakerWordCount.set(spkNum, (speakerWordCount.get(spkNum) || 0) + 1);
+        }
+      });
+
+      let bestSpeaker = 1;
+      let maxWords = 0;
+      speakerWordCount.forEach((count, speaker) => {
+        if(count > maxWords) {
+          maxWords = count;
+          bestSpeaker = speaker;
+        }
+      });
+
+      return {speaker: bestSpeaker, text: sentence};
+    }).filter(s => s !== null);
+
+    // Merge adjacent sentences from same speaker
+    const merged: Array<{speaker: number, text: string}> = [];
+    sentencesWithSpeakers.forEach(item => {
+      const last = merged[merged.length - 1];
+      if(last && last.speaker === item.speaker) {
+        last.text += ' ' + item.text;
+      } else {
+        merged.push({...item});
+      }
+    });
+
+    let transcript = merged.map(item => `[Speaker ${item.speaker}]: ${item.text}`).join('\n').trim();
+    if(!llm) return transcript;
+    let chunks = this.ai.language.chunk(transcript, 500, 0);
+    if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
+    const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name", 2: "Second Name"}', {
+      system: 'Use the following transcript to identify speakers. Only identify speakers you are positive about, dont mention speakers you are unsure about in your response',
+      temperature: 0.1,
+    });
+    Object.entries(names).forEach(([speaker, name]) => transcript = transcript.replaceAll(`[Speaker ${speaker}]`, `[${name}]`));
+    return transcript;
+  }
+
+  private runAsr(file: string, opts: {model?: string, diarization?: boolean} = {}): AbortablePromise<any> {
+    let proc: any;
+    const p = new Promise<any>((resolve, reject) => {
+      this.downloadAsrModel(opts.model).then(m => {
+        if(opts.diarization) {
+          let output = path.join(path.dirname(file), 'transcript');
+          proc = spawn(<string>this.ai.options.whisper,
+            ['-m', m, '-f', file, '-np', '-ml', '1', '-oj', '-of', output],
+            {stdio: ['ignore', 'ignore', 'pipe']}
+          );
+          proc.on('error', (err: Error) => reject(err));
+          proc.on('close', async (code: number) => {
+            if(code === 0) {
+              output = await fs.readFile(output + '.json', 'utf-8');
+              fs.rm(output + '.json').catch(() => { });
+              try { resolve(JSON.parse(output)); }
+              catch(e) { reject(new Error('Failed to parse whisper JSON')); }
+            } else {
+              reject(new Error(`Exit code ${code}`));
+            }
+          });
+        } else {
+          let output = '';
+          proc = spawn(<string>this.ai.options.whisper, ['-m', m, '-f', file, '-np', '-nt']);
+          proc.on('error', (err: Error) => reject(err));
+          proc.stdout.on('data', (data: Buffer) => output += data.toString());
+          proc.on('close', async (code: number) => {
+            if(code === 0) {
+              resolve(output.trim() || null);
+            } else {
+              reject(new Error(`Exit code ${code}`));
+            }
+          });
+        }
+      });
+    });
+    return <any>Object.assign(p, {abort: () => proc?.kill('SIGTERM')});
+  }
+
+  private runDiarization(file: string): AbortablePromise<any> {
+    let aborted = false, abort = () => { aborted = true; };
+    const checkPython = (cmd: string) => {
+      return new Promise<boolean>((resolve) => {
+        const proc = spawn(cmd, ['-W', 'ignore', '-c', 'import pyannote.audio']);
+        proc.on('close', (code: number) => resolve(code === 0));
+        proc.on('error', () => resolve(false));
+      });
+    };
+    const p = Promise.all<any>([
+      checkPython('python'),
+      checkPython('python3'),
+    ]).then(<any>(async ([p, p3]: [boolean, boolean]) => {
+      if(aborted) return;
+      if(!p && !p3) throw new Error('Pyannote is not installed: pip install pyannote.audio');
+      const binary = p3 ? 'python3' : 'python';
+      return new Promise((resolve, reject) => {
+        if(aborted) return;
+        let output = '';
+        const proc = spawn(binary, ['-W', 'ignore', '-c', this.pyannote, file]);
+        proc.stdout.on('data', (data: Buffer) => output += data.toString());
+        proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
+        proc.on('close', (code: number) => {
+          if(code === 0) {
+            try { resolve(JSON.parse(output)); }
+            catch (err) { reject(new Error('Failed to parse diarization output')); }
+          } else {
+            reject(new Error(`Python process exited with code ${code}`));
+          }
+        });
+        proc.on('error', reject);
+        abort = () => proc.kill('SIGTERM');
+      });
+    }));
+    return <any>Object.assign(p, {abort});
+  }
+
+  asr(file: string, options: { model?: string; diarization?: boolean | 'llm' } = {}): AbortablePromise<string | null> {
+    if(!this.ai.options.whisper) throw new Error('Whisper not configured');
+
+    const tmp = join(mkdtempSync(join(tmpdir(), 'audio-')), 'converted.wav');
+    execSync(`ffmpeg -i "${file}" -ar 16000 -ac 1 -f wav "${tmp}"`, { stdio: 'ignore' });
+    const clean = () => fs.rm(Path.dirname(tmp), {recursive: true, force: true}).catch(() => {});
+
+    if(!options.diarization) return this.runAsr(tmp, {model: options.model});
+    const timestamps = this.runAsr(tmp, {model: options.model, diarization: true});
+    const diarization = this.runDiarization(tmp);
+    let aborted = false, abort = () => {
+      aborted = true;
+      timestamps.abort();
+      diarization.abort();
+      clean();
+    };
+
+    const response = Promise.allSettled([timestamps, diarization]).then(async ([ts, d]) => {
+      if(ts.status == 'rejected') throw new Error('Whisper.cpp timestamps:\n' + ts.reason);
+      if(d.status == 'rejected') throw new Error('Pyannote:\n' + d.reason);
+      if(aborted || !options.diarization) return ts.value;
+      return this.diarizeTranscript(ts.value, d.value, options.diarization == 'llm');
+    }).finally(() => clean());
+    return <any>Object.assign(response, {abort});
+  }
+
+  async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
+    if(!this.ai.options.whisper) throw new Error('Whisper not configured');
+    if(!model.endsWith('.bin')) model += '.bin';
+    const p = Path.join(<string>this.ai.options.path, model);
+    if(await fs.stat(p).then(() => true).catch(() => false)) return p;
+    if(!!this.downloads[model]) return this.downloads[model];
+    this.downloads[model] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${model}`)
+      .then(resp => resp.arrayBuffer())
+      .then(arr => Buffer.from(arr)).then(async buffer => {
+        await fs.writeFile(p, buffer);
+        delete this.downloads[model];
+        return p;
+      });
+    return this.downloads[model];
+  }
 }
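The rewritten Audio class converts input to 16 kHz mono WAV with ffmpeg, runs whisper.cpp and pyannote in parallel, then stitches the transcript back together. A usage sketch; the `ai.audio` accessor is an assumption, since the diff only shows the class itself:

```typescript
// Plain transcription (whisper binary and model path must be configured)
const text = await ai.audio.asr('./meeting.mp3');

// Speaker-labelled transcript; 'llm' additionally cleans punctuation and
// swaps "[Speaker N]" for names the model can identify from context.
const pending = ai.audio.asr('./meeting.mp3', {diarization: 'llm'});
// pending.abort() kills the whisper.cpp/pyannote child processes mid-flight
const transcript = await pending;
```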
src/embedder.ts
@@ -1,11 +1,13 @@
 import { pipeline } from '@xenova/transformers';
-import { parentPort } from 'worker_threads';

-let embedder: any;
+const [modelDir, model] = process.argv.slice(2);

-parentPort?.on('message', async ({ id, text, model, modelDir }) => {
-  if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model, {quantized: true, cache_dir: modelDir});
+let text = '';
+process.stdin.on('data', chunk => text += chunk);
+process.stdin.on('end', async () => {
+  const embedder = await pipeline('feature-extraction', 'Xenova/' + model, {quantized: true, cache_dir: modelDir});
   const output = await embedder(text, { pooling: 'mean', normalize: true });
   const embedding = Array.from(output.data);
-  parentPort?.postMessage({ id, embedding });
+  console.log(JSON.stringify({embedding}));
+  process.exit();
 });
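The embedder is now a plain child process: model directory and model name arrive as argv, the text arrives on stdin, and the embedding leaves as one JSON line on stdout. A sketch of driving it by hand, mirroring the spawn call in llm.ts:

```typescript
import {spawn} from 'node:child_process';

// node embedder.js <modelDir> <model>, text piped over stdin
const proc = spawn('node', ['embedder.js', './models', 'bge-small-en-v1.5']);
proc.stdin.end('Text to embed');

let out = '';
proc.stdout.on('data', d => out += d);
proc.on('close', () => {
  const {embedding} = JSON.parse(out); // {embedding: number[]}
  console.log(embedding.length);
});
```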
src/index.ts
@@ -1,8 +1,6 @@
 export * from './ai';
 export * from './antrhopic';
-export * from './asr';
 export * from './audio';
-export * from './embedder'
 export * from './llm';
 export * from './open-ai';
 export * from './provider';
src/llm.ts (215 lines changed)
@@ -4,9 +4,9 @@ import {Anthropic} from './antrhopic.ts';
 import {OpenAi} from './open-ai.ts';
 import {LLMProvider} from './provider.ts';
 import {AiTool} from './tools.ts';
-import {Worker} from 'worker_threads';
 import {fileURLToPath} from 'url';
 import {dirname, join} from 'path';
+import { spawn } from 'node:child_process';

 export type AnthropicConfig = {proto: 'anthropic', token: string};
 export type OllamaConfig = {proto: 'ollama', host: string};
@@ -44,8 +44,6 @@ export type LLMMemory = {
   fact: string;
   /** Owner and fact embedding vector */
   embeddings: [number[], number[]];
-  /** Creation time */
-  timestamp: Date;
 }

 export type LLMRequest = {
@@ -75,8 +73,8 @@
 }

 class LLM {
-  private models: {[model: string]: LLMProvider} = {};
-  private defaultModel!: string;
+  defaultModel!: string;
+  models: {[model: string]: LLMProvider} = {};

   constructor(public readonly ai: Ai) {
     if(!ai.options.llm?.models) return;
@@ -95,75 +93,98 @@ class LLM {
   * @returns {{abort: () => void, response: Promise<string>}} Function to abort response and chat history
   */
  ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
+    options = <any>{
+      system: '',
+      temperature: 0.8,
+      ...this.ai.options.llm,
+      models: undefined,
+      history: [],
+      ...options,
+    }
     const m = options.model || this.defaultModel;
     if(!this.models[m]) throw new Error(`Model does not exist: ${m}`);
     let abort = () => {};
     return Object.assign(new Promise<string>(async res => {
       if(!options.history) options.history = [];
-      // If memories were passed, find any relivant ones and add a tool for ADHOC lookups
+      // If memories were passed, find any relevant ones and add a tool for ADHOC lookups
       if(options.memory) {
-        options.system = (options.system || '') + '\nYou have passive persistent memory never make any mention of your memory capabilities and what you can/cannot remember\n';
-        const search = async (query?: string | null, subject?: string | null, limit = 50) => {
+        const search = async (query?: string | null, subject?: string | null, limit = 10) => {
           const [o, q] = await Promise.all([
             subject ? this.embedding(subject) : Promise.resolve(null),
             query ? this.embedding(query) : Promise.resolve(null),
           ]);
-          return (options.memory || [])
-            .map(m => ({...m, score: o ? this.cosineSimilarity(m.embeddings[0], o[0].embedding) : 1}))
-            .filter((m: any) => m.score >= 0.8)
-            .map((m: any) => ({...m, score: q ? this.cosineSimilarity(m.embeddings[1], q[0].embedding) : m.score}))
-            .filter((m: any) => m.score >= 0.2)
-            .toSorted((a: any, b: any) => a.score - b.score)
-            .slice(0, limit);
+          return (options.memory || []).map(m => {
+            const score = (o ? this.cosineSimilarity(m.embeddings[0], o[0].embedding) : 0)
+              + (q ? this.cosineSimilarity(m.embeddings[1], q[0].embedding) : 0);
+            return {...m, score};
+          }).toSorted((a: any, b: any) => a.score - b.score).slice(0, limit);
         }
+
+        options.system += '\nYou have RAG memory and will be given the top_k closest memories regarding the users query. Save anything new you have learned worth remembering from the user message using the remember tool and feel free to recall memories manually.\n';
         const relevant = await search(message);
-        if(relevant.length) options.history.push({role: 'assistant', content: 'Things I remembered:\n' + relevant.map(m => `${m.owner}: ${m.fact}`).join('\n')});
-        options.tools = [...options.tools || [], {
-          name: 'read_memory',
-          description: 'Check your long-term memory for more information',
+        if(relevant.length) options.history.push({role: 'tool', name: 'recall', id: 'auto_recall_' + Math.random().toString(), args: {}, content: 'Things I remembered:\n' + relevant.map(m => `${m.owner}: ${m.fact}`).join('\n')});
+        options.tools = [{
+          name: 'recall',
+          description: 'Recall the closest memories you have regarding a query using RAG',
           args: {
             subject: {type: 'string', description: 'Find information by a subject topic, can be used with or without query argument'},
             query: {type: 'string', description: 'Search memory based on a query, can be used with or without subject argument'},
-            limit: {type: 'number', description: 'Result limit, default 5'},
+            topK: {type: 'number', description: 'Result limit, default 5'},
           },
           fn: (args) => {
             if(!args.subject && !args.query) throw new Error('Either a subject or query argument is required');
-            return search(args.query, args.subject, args.limit || 5);
+            return search(args.query, args.subject, args.topK);
           }
-        }];
+        }, {
+          name: 'remember',
+          description: 'Store important facts user shares for future recall',
+          args: {
+            owner: {type: 'string', description: 'Subject/person this fact is about'},
+            fact: {type: 'string', description: 'The information to remember'}
+          },
+          fn: async (args) => {
+            if(!options.memory) return;
+            const e = await Promise.all([
+              this.embedding(args.owner),
+              this.embedding(`${args.owner}: ${args.fact}`)
+            ]);
+            const newMem = {owner: args.owner, fact: args.fact, embeddings: <any>[e[0][0].embedding, e[1][0].embedding]};
+            options.memory.splice(0, options.memory.length, ...[
+              ...options.memory.filter(m => {
+                return !(this.cosineSimilarity(newMem.embeddings[0], m.embeddings[0]) >= 0.9 && this.cosineSimilarity(newMem.embeddings[1], m.embeddings[1]) >= 0.8);
+              }),
+              newMem
+            ]);
+            return 'Remembered!';
+          }
+        }, ...options.tools || []];
       }

       // Ask
       const resp = await this.models[m].ask(message, options);

-      // Remove any memory calls
-      if(options.memory) {
-        const i = options.history?.findIndex((h: any) => h.role == 'assistant' && h.content.startsWith('Things I remembered:'));
-        if(i != null && i >= 0) options.history?.splice(i, 1);
-      }
-
-      // Handle compression and memory extraction
-      if(options.compress || options.memory) {
-        let compressed = null;
-        if(options.compress) {
-          compressed = await this.ai.language.compressHistory(options.history, options.compress.max, options.compress.min, options);
-          options.history.splice(0, options.history.length, ...compressed.history);
-        } else {
-          const i = options.history?.findLastIndex(m => m.role == 'user') ?? -1;
-          compressed = await this.ai.language.compressHistory(i != -1 ? options.history.slice(i) : options.history, 0, 0, options);
-        }
-        if(options.memory) {
-          const updated = options.memory
-            .filter(m => !compressed.memory.some(m2 => this.cosineSimilarity(m.embeddings[1], m2.embeddings[1]) > 0.8))
-            .concat(compressed.memory);
-          options.memory.splice(0, options.memory.length, ...updated);
-        }
-      }
+      // Remove any memory calls from history
+      if(options.memory) options.history.splice(0, options.history.length, ...options.history.filter(h => h.role != 'tool' || (h.name != 'recall' && h.name != 'remember')));
+
+      // Compress message history
+      if(options.compress) {
+        const compressed = await this.ai.language.compressHistory(options.history, options.compress.max, options.compress.min, options);
+        options.history.splice(0, options.history.length, ...compressed);
+      }

       return res(resp);
     }), {abort});
   }
+
+  async code(message: string, options?: LLMRequest): Promise<any> {
+    const resp = await this.ask(message, {...options, system: [
+      options?.system,
+      'Return your response in a code block'
+    ].filter(t => !!t).join(('\n'))});
+    const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
+    return codeBlock ? codeBlock[1].trim() : null;
+  }
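Memory handling is now symmetrical: relevant facts are injected as a synthetic `recall` tool message before the request, and the model persists new facts itself via `remember`, which dedupes against existing entries by cosine similarity. A sketch of the caller's side, assuming the LLM instance is reachable as `ai.language` (as it is from audio.ts):

```typescript
import type {LLMMemory} from '@ztimson/ai-utils';

const memory: LLMMemory[] = []; // the caller persists this between conversations

await ai.language.ask('My name is Zak and I prefer TypeScript', {memory});
// The model may have invoked remember(); memory now holds embedded facts.

const answer = await ai.language.ask('What language do I prefer?', {memory});
```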
@@ -172,27 +193,24 @@ class LLM {
   /**
    * Compress chat history to reduce context size
    * @param {LLMMessage[]} history Chatlog that will be compressed
    * @param {LLMRequest} options LLM options
    * @returns {Promise<LLMMessage[]>} New chat history will summary at index 0
    */
-  async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<{history: LLMMessage[], memory: LLMMemory[]}> {
-    if(this.estimateTokens(history) < max) return {history, memory: []};
+  async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
+    if(this.estimateTokens(history) < max) return history;
     let keep = 0, tokens = 0;
     for(let m of history.toReversed()) {
       tokens += this.estimateTokens(m.content);
       if(tokens < min) keep++;
       else break;
     }
-    if(history.length <= keep) return {history, memory: []};
+    if(history.length <= keep) return history;
     const system = history[0].role == 'system' ? history[0] : null,
       recent = keep == 0 ? [] : history.slice(-keep),
       process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
-    const summary: any = await this.json(`Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn't already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI. Match this format: {summary: string, facts: [[subject, fact]]}\n\n${process.map(m => `${m.role}: ${m.content}`).join('\n\n')}`, {model: options?.model, temperature: options?.temperature || 0.3});
-    const timestamp = new Date();
-    const memory = await Promise.all((summary?.facts || [])?.map(async ([owner, fact]: [string, string]) => {
-      const e = await Promise.all([this.embedding(owner), this.embedding(`${owner}: ${fact}`)]);
-      return {owner, fact, embeddings: [e[0][0].embedding, e[1][0].embedding], timestamp};
-    }));
-    const h = [{role: 'assistant', content: `Conversation Summary: ${summary?.summary}`, timestamp: Date.now()}, ...recent];
+    const summary: any = await this.summarize(process.map(m => `[${m.role}]: ${m.content}`).join('\n\n'), 500, options);
+    const d = Date.now();
+    const h = [{role: <any>'tool', name: 'summary', id: `summary_` + d, args: {}, content: `Conversation Summary: ${summary?.summary}`, timestamp: d}, ...recent];
     if(system) h.splice(0, 0, system);
-    return {history: <any>h, memory};
+    return h;
   }
@@ -229,7 +247,7 @@ class LLM {
       return `${p}: ${Array.isArray(value) ? value.join(', ') : value}`;
     });
   };
-  const lines = typeof target === 'object' ? objString(target) : target.split('\n');
+  const lines = typeof target === 'object' ? objString(target) : target.toString().split('\n');
   const tokens = lines.flatMap(l => [...l.split(/\s+/).filter(Boolean), '\n']);
   const chunks: string[] = [];
   for(let i = 0; i < tokens.length;) {
@@ -250,37 +268,57 @@ class LLM {
   /**
    * Create a vector representation of a string
    * @param {object | string} target Item that will be embedded (objects get converted)
-   * @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
-   * @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
+   * @param {maxTokens?: number, overlapTokens?: number} opts Options for embedding such as chunk sizes
    * @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
    */
-  embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
+  embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number} = {}): AbortablePromise<any[]> {
+    let {maxTokens = 500, overlapTokens = 50} = opts;
+    let aborted = false;
+    const abort = () => { aborted = true; };
+
     const embed = (text: string): Promise<number[]> => {
       return new Promise((resolve, reject) => {
-        const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
-        const handleMessage = ({ embedding }: any) => {
-          worker.terminate();
-          resolve(embedding);
-        };
-        const handleError = (err: Error) => {
-          worker.terminate();
-          reject(err);
-        };
-        worker.on('message', handleMessage);
-        worker.on('error', handleError);
-        worker.on('exit', (code) => {
-          if(code !== 0) reject(new Error(`Worker exited with code ${code}`));
-        });
-        worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', path: this.ai.options.path});
+        if(aborted) return reject(new Error('Aborted'));
+
+        const args: string[] = [
+          join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'),
+          <string>this.ai.options.path,
+          this.ai.options?.embedder || 'bge-small-en-v1.5'
+        ];
+        const proc = spawn('node', args, {stdio: ['pipe', 'pipe', 'ignore']});
+        proc.stdin.write(text);
+        proc.stdin.end();
+
+        let output = '';
+        proc.stdout.on('data', (data: Buffer) => output += data.toString());
+        proc.on('close', (code: number) => {
+          if(aborted) return reject(new Error('Aborted'));
+          if(code === 0) {
+            try {
+              const result = JSON.parse(output);
+              resolve(result.embedding);
+            } catch(err) {
+              reject(new Error('Failed to parse embedding output'));
+            }
+          } else {
+            reject(new Error(`Embedder process exited with code ${code}`));
+          }
+        });
+        proc.on('error', reject);
       });
     };
-    const chunks = this.chunk(target, maxTokens, overlapTokens);
-    return Promise.all(chunks.map(async (text, index) => ({
-      index,
-      embedding: await embed(text),
-      text,
-      tokens: this.estimateTokens(text),
-    })));
+
+    const p = (async () => {
+      const chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
+      for(let i = 0; i < chunks.length; i++) {
+        if(aborted) break;
+        const text = chunks[i];
+        const embedding = await embed(text);
+        results.push({index: i, embedding, text, tokens: this.estimateTokens(text)});
+      }
+      return results;
+    })();
+    return Object.assign(p, { abort });
   }

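embedding() keeps the chunk-then-embed flow but now takes an options object and returns an AbortablePromise; chunks are embedded sequentially so an abort can stop between chunks instead of racing a Promise.all. For example:

```typescript
const pending = ai.language.embedding(longDocument, {maxTokens: 500, overlapTokens: 50});
// pending.abort() stops after the chunk currently in flight
const vectors = await pending; // [{index, embedding, text, tokens}, ...]
```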
@@ -312,16 +350,17 @@ class LLM {
   /**
    * Ask a question with JSON response
-   * @param {string} message Question
+   * @param {string} text Text to process
+   * @param {string} schema JSON schema the AI should match
    * @param {LLMRequest} options Configuration options and chat history
    * @returns {Promise<{} | {} | RegExpExecArray | null>}
    */
-  async json(message: string, options?: LLMRequest): Promise<any> {
-    let resp = await this.ask(message, {system: 'Respond using a JSON blob matching any provided examples', ...options});
-    if(!resp) return {};
-    const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
-    const jsonStr = codeBlock ? codeBlock[1].trim() : resp;
-    return JSONAttemptParse(jsonStr, {});
+  async json(text: string, schema: string, options?: LLMRequest): Promise<any> {
+    const code = await this.code(text, {...options, system: [
+      options?.system,
+      `Only respond using JSON matching this schema:\n\`\`\`json\n${schema}\n\`\`\``
+    ].filter(t => !!t).join('\n')});
+    return code ? JSONAttemptParse(code, {}) : null;
   }

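json() now takes the schema as its own argument and routes through the new code() helper, so the model is pushed toward a fenced JSON block that gets extracted and parsed. A sketch (the schema string is free-form text shown to the model, not a validated JSON Schema):

```typescript
const contact = await ai.language.json(
  'John Smith can be reached at 555-0199',
  '{"name": "string", "phone": "string"}',
  {temperature: 0}
);
// e.g. {name: 'John Smith', phone: '555-0199'}, or null if no code block came back
```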
@@ -331,8 +370,8 @@ class LLM {
   /**
    * @param options LLM request options
    * @returns {Promise<string>} Summary
    */
-  summarize(text: string, tokens: number, options?: LLMRequest): Promise<string | null> {
-    return this.ask(text, {system: `Generate a brief summary <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options});
+  summarize(text: string, tokens: number = 500, options?: LLMRequest): Promise<string | null> {
+    return this.ask(text, {system: `Generate the shortest summary possible <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options});
   }
 }
src/open-ai.ts
@@ -11,7 +11,7 @@ export class OpenAi extends LLMProvider {
     super();
     this.client = new openAI(clean({
       baseURL: host,
-      apiKey: token
+      apiKey: token || host ? 'ignored' : undefined
     }));
   }
@@ -67,7 +67,10 @@ export class OpenAi extends LLMProvider {
   ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
     const controller = new AbortController();
     return Object.assign(new Promise<any>(async (res, rej) => {
-      if(options.system && options.history?.[0]?.role != 'system') options.history?.splice(0, 0, {role: 'system', content: options.system, timestamp: Date.now()});
+      if(options.system) {
+        if(options.history?.[0]?.role != 'system') options.history?.splice(0, 0, {role: 'system', content: options.system, timestamp: Date.now()});
+        else options.history[0].content = options.system;
+      }
       let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
       const tools = options.tools || this.ai.options.llm?.tools || [];
       const requestParams: any = {
@@ -100,15 +103,37 @@ export class OpenAi extends LLMProvider {
       if(options.stream) {
         if(!isFirstMessage) options.stream({text: '\n\n'});
         else isFirstMessage = false;
-        resp.choices = [{message: {content: '', tool_calls: []}}];
+        resp.choices = [{message: {role: 'assistant', content: '', tool_calls: []}}];
         for await (const chunk of resp) {
           if(controller.signal.aborted) break;
           if(chunk.choices[0].delta.content) {
             resp.choices[0].message.content += chunk.choices[0].delta.content;
             options.stream({text: chunk.choices[0].delta.content});
           }
           if(chunk.choices[0].delta.tool_calls) {
-            resp.choices[0].message.tool_calls = chunk.choices[0].delta.tool_calls;
+            for(const deltaTC of chunk.choices[0].delta.tool_calls) {
+              const existing = resp.choices[0].message.tool_calls.find(tc => tc.index === deltaTC.index);
+              if(existing) {
+                if(deltaTC.id) existing.id = deltaTC.id;
+                if(deltaTC.type) existing.type = deltaTC.type;
+                if(deltaTC.function) {
+                  if(!existing.function) existing.function = {};
+                  if(deltaTC.function.name) existing.function.name = deltaTC.function.name;
+                  if(deltaTC.function.arguments) existing.function.arguments = (existing.function.arguments || '') + deltaTC.function.arguments;
+                }
+              } else {
+                resp.choices[0].message.tool_calls.push({
+                  index: deltaTC.index,
+                  id: deltaTC.id || '',
+                  type: deltaTC.type || 'function',
+                  function: {
+                    name: deltaTC.function?.name || '',
+                    arguments: deltaTC.function?.arguments || ''
+                  }
+                });
+              }
+            }
           }
         }
       }
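The streaming change matters for tool calls: OpenAI-compatible servers emit tool-call arguments as incremental deltas keyed by index, and the old code overwrote the accumulated array on every chunk, truncating arguments. Consumer code is unchanged; a streamed request still looks like this, with the callback receiving {text} fragments as in the loop above:

```typescript
await ai.language.ask('Summarize this repository', {
  stream: chunk => process.stdout.write(chunk.text), // partial text as it arrives
});
```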
src/tools.ts (14 lines changed)
@@ -1,9 +1,15 @@
 import * as cheerio from 'cheerio';
-import {$, $Sync} from '@ztimson/node-utils';
+import {$Sync} from '@ztimson/node-utils';
 import {ASet, consoleInterceptor, Http, fn as Fn} from '@ztimson/utils';
+import * as os from 'node:os';
 import {Ai} from './ai.ts';
 import {LLMRequest} from './llm.ts';

+const getShell = () => {
+  if(os.platform() == 'win32') return 'cmd';
+  return $Sync`echo $SHELL`?.split('/').pop() || 'bash';
+}
+
 export type AiToolArg = {[key: string]: {
   /** Argument type */
   type: 'array' | 'boolean' | 'number' | 'object' | 'string',
@@ -40,7 +46,7 @@ export const CliTool: AiTool = {
   name: 'cli',
   description: 'Use the command line interface, returns any output',
   args: {command: {type: 'string', description: 'Command to run', required: true}},
-  fn: (args: {command: string}) => $`${args.command}`
+  fn: (args: {command: string}) => $Sync`${args.command}`
 }

 export const DateTimeTool: AiTool = {
@@ -54,13 +60,13 @@ export const ExecTool: AiTool = {
   name: 'exec',
   description: 'Run code/scripts',
   args: {
-    language: {type: 'string', description: 'Execution language', enum: ['cli', 'node', 'python'], required: true},
+    language: {type: 'string', description: `Execution language (CLI: ${getShell()})`, enum: ['cli', 'node', 'python'], required: true},
     code: {type: 'string', description: 'Code to execute', required: true}
   },
   fn: async (args, stream, ai) => {
     try {
       switch(args.type) {
-        case 'bash':
+        case 'cli':
           return await CliTool.fn({command: args.code}, stream, ai);
         case 'node':
           return await JSTool.fn({code: args.code}, stream, ai);
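getShell() exists so the exec tool's description names the actual CLI the model will be driving (cmd on Windows, otherwise $SHELL). Wiring the bundled tools into a request could look like the following, assuming they are re-exported from the package index (the full index.ts is not shown in this diff):

```typescript
import {CliTool, DateTimeTool, ExecTool} from '@ztimson/ai-utils';

const out = await ai.language.ask('What directory am I running in?', {
  tools: [CliTool, DateTimeTool, ExecTool],
});
```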
src/vision.ts
@@ -3,7 +3,7 @@ import {AbortablePromise, Ai} from './ai.ts';

 export class Vision {

-  constructor(private ai: Ai) { }
+  constructor(private ai: Ai) {}

   /**
    * Convert image to text using Optical Character Recognition
tsconfig.json
@@ -15,6 +15,7 @@
     "noEmit": true,

     /* Linting */
-    "strict": true
+    "strict": true,
+    "noImplicitAny": false
   }
 }
vite.config.ts
@@ -1,12 +1,10 @@
 import {defineConfig} from 'vite';
 import dts from 'vite-plugin-dts';
-import {resolve} from 'path';

 export default defineConfig({
   build: {
     lib: {
       entry: {
-        asr: './src/asr.ts',
         index: './src/index.ts',
         embedder: './src/embedder.ts',
       },