Compare commits
35 Commits

| SHA1 |
|---|
| 936317f2f2 |
| cfde2ac4d3 |
| e4ba89d3db |
| 71a7e2a904 |
| abd290246c |
| ca66e8e304 |
| cec892563e |
| 91066e070f |
| a94b153c6d |
| 39537a4a8f |
| 790608f020 |
| 473424ae23 |
| 9b831f7d95 |
| 498b326e45 |
| 56e4efec94 |
| a07f069ad0 |
| da15d299e6 |
| 7ef7c3f676 |
| 4143d00de7 |
| 0360f2493d |
| 0172887877 |
| 8f89f5e3cf |
| 5bd41f8c6a |
| e4399e1b7b |
| ad1ee48763 |
| 3ed206923f |
| 22d5427e86 |
| 43b53164c0 |
| 575fbac099 |
| 46ae0f7913 |
| 54730a2b9a |
| 27506d20af |
| 8c64129200 |
| 013aa942c0 |
| c8d5660b1a |
README.md (24 lines changed)
@@ -3,7 +3,7 @@
 <br />

 <!-- Logo -->
-<img src="https://git.zakscode.com/repo-avatars/a90851ca730480ec37a5c0c2c4f1b4609eee5eadf806eaf16c83ac4cb7493aa9" alt="Logo" width="200" height="200">
+<img alt="Logo" width="200" height="200" src="https://git.zakscode.com/repo-avatars/a82d423674763e7a0c1c945bdbb07e249b2bb786d3c9beae76d5b196a10f5c0f">

 <!-- Title -->
 ### @ztimson/ai-utils
@@ -53,13 +53,15 @@ A TypeScript library that provides a unified interface for working with multiple
 - **Provider Abstraction**: Switch between AI providers without changing your code

 ### Built With
-[](https://anthropic.com/)
-[](https://openai.com/)
-[](https://ollama.com/)
-[](https://tensorflow.org/)
-[](https://tesseract-ocr.github.io/)
+[](https://anthropic.com/)
+[](https://github.com/ggml-org/llama.cpp)
+[](https://openai.com/)
+[](https://github.com/pyannote)
+[](https://tensorflow.org/)
+[](https://tesseract-ocr.github.io/)
+[](https://huggingface.co/docs/transformers.js/en/index)
+[](https://typescriptlang.org/)
+[](https://github.com/ggerganov/whisper.cpp)

 ## Setup
@@ -75,6 +77,7 @@ A TypeScript library that provides a unified interface for working with multiple

 #### Instructions
 1. Install the package: `npm i @ztimson/ai-utils`
+2. For speaker diarization: `pip install pyannote.audio`

 </details>
@@ -87,11 +90,14 @@ A TypeScript library that provides a unified interface for working with multiple

 #### Prerequisites
 - [Node.js](https://nodejs.org/en/download)
+- _[Whisper.cpp](https://github.com/ggml-org/whisper.cpp/releases/tag) (ASR)_
+- _[Pyannote](https://github.com/pyannote) (ASR Diarization):_ `pip install pyannote.audio`

 #### Instructions
 1. Install the dependencies: `npm i`
-2. Build library: `npm build`
-3. Run unit tests: `npm test`
+2. For speaker diarization: `pip install pyannote.audio`
+3. Build library: `npm build`
+4. Run unit tests: `npm test`

 </details>
package-lock.json (1177 lines changed, generated)
File diff suppressed because it is too large
package.json (12 lines changed)
@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.5.2",
+  "version": "0.8.5",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",
@@ -25,14 +25,14 @@
     "watch": "npx vite build --watch"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.67.0",
+    "@anthropic-ai/sdk": "^0.78.0",
     "@tensorflow/tfjs": "^4.22.0",
     "@xenova/transformers": "^2.17.2",
-    "@ztimson/node-utils": "^1.0.4",
-    "@ztimson/utils": "^0.27.9",
+    "@ztimson/node-utils": "^1.0.7",
+    "@ztimson/utils": "^0.28.13",
     "cheerio": "^1.2.0",
-    "openai": "^6.6.0",
-    "tesseract.js": "^6.0.1"
+    "openai": "^6.22.0",
+    "tesseract.js": "^7.0.0"
   },
   "devDependencies": {
     "@types/node": "^24.8.1",
src/ai.ts (24 lines changed)
@@ -8,26 +8,22 @@ export type AbortablePromise<T> = Promise<T> & {
 };

 export type AiOptions = {
 	/** Token to pull models from hugging face */
 	hfToken?: string;
 	/** Path to models */
 	path?: string;
-	/** Embedding model */
-	embedder?: string; // all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5
+	/** Whisper ASR model: ggml-tiny.en.bin, ggml-base.en.bin */
+	asr?: string;
+	/** Embedding model: all-MiniLM-L6-v2, bge-small-en-v1.5, bge-large-en-v1.5 */
+	embedder?: string;
 	/** Large language models, first is default */
 	llm?: Omit<LLMRequest, 'model'> & {
 		models: {[model: string]: AnthropicConfig | OllamaConfig | OpenAiConfig};
 	}
-	/** Tesseract OCR configuration */
-	tesseract?: {
-		/** Model: eng, eng_best, eng_fast */
-		model?: string;
-	}
-	/** Whisper ASR configuration */
-	whisper?: {
-		/** Whisper binary location */
-		binary: string;
-		/** Model: `ggml-base.en.bin` */
-		model: string;
-	}
+	/** OCR model: eng, eng_best, eng_fast */
+	ocr?: string;
+	/** Whisper binary */
+	whisper?: string;
 }

 export class Ai {
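To make the options migration concrete, here is a hedged sketch of a config written against the new flattened `AiOptions`. The values are placeholders, and the `Ai` constructor accepting this object is an assumption based on the `ai.options` usage visible elsewhere in this compare; only `AnthropicConfig`'s `{proto, token}` shape is confirmed by the diff.

```ts
// Sketch only: paths and model names are placeholders; Ai(options) is assumed.
import {Ai} from '@ztimson/ai-utils';

const ai = new Ai({
	path: './models',            // cache directory for downloaded models
	asr: 'ggml-base.en.bin',     // was whisper.model; now a top-level string
	whisper: '/usr/bin/whisper', // was whisper.binary; now the binary path itself
	ocr: 'eng',                  // was tesseract.model
	embedder: 'bge-small-en-v1.5',
	llm: {
		// AnthropicConfig shape confirmed by the diff; the model key is illustrative
		models: {'claude-sonnet': {proto: 'anthropic', token: process.env.ANTHROPIC_KEY!}},
	},
});
```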
src/antrhopic.ts
@@ -13,25 +13,25 @@ export class Anthropic extends LLMProvider {
 	}

 	private toStandard(history: any[]): LLMMessage[] {
-		for(let i = 0; i < history.length; i++) {
-			const orgI = i;
-			if(typeof history[orgI].content != 'string') {
-				if(history[orgI].role == 'assistant') {
-					history[orgI].content.filter((c: any) => c.type =='tool_use').forEach((c: any) => {
-						history.splice(i + 1, 0, {role: 'tool', id: c.id, name: c.name, args: c.input, timestamp: Date.now()});
-					});
-				} else if(history[orgI].role == 'user') {
-					history[orgI].content.filter((c: any) => c.type =='tool_result').forEach((c: any) => {
-						const h = history.find((h: any) => h.id == c.tool_use_id);
-						h[c.is_error ? 'error' : 'content'] = c.content;
-					});
-				}
-				history[orgI].content = history[orgI].content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
-				if(!history[orgI].content) history.splice(orgI, 1);
-			}
-			if(!history[orgI].timestamp) history[orgI].timestamp = Date.now();
-		}
-		return history.filter(h => !!h.content);
+		const timestamp = Date.now();
+		const messages: LLMMessage[] = [];
+		for(let h of history) {
+			if(typeof h.content == 'string') {
+				messages.push(<any>{timestamp, ...h});
+			} else {
+				const textContent = h.content?.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n');
+				if(textContent) messages.push({timestamp, role: h.role, content: textContent});
+				h.content.forEach((c: any) => {
+					if(c.type == 'tool_use') {
+						messages.push({timestamp, role: 'tool', id: c.id, name: c.name, args: c.input, content: undefined});
+					} else if(c.type == 'tool_result') {
+						const m: any = messages.findLast(m => (<any>m).id == c.tool_use_id);
+						if(m) m[c.is_error ? 'error' : 'content'] = c.content;
+					}
+				});
+			}
+		}
+		return messages;
 	}

 	private fromStandard(history: LLMMessage[]): any[] {
@@ -50,8 +50,8 @@ export class Anthropic extends LLMProvider {

 	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
 		const controller = new AbortController();
-		return Object.assign(new Promise<any>(async (res, rej) => {
-			const history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
+		return Object.assign(new Promise<any>(async (res) => {
+			let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
 			const tools = options.tools || this.ai.options.llm?.tools || [];
 			const requestParams: any = {
 				model: options.model || this.model,
@@ -73,7 +73,6 @@ export class Anthropic extends LLMProvider {
 			};

 			let resp: any, isFirstMessage = true;
-			const assistantMessages: string[] = [];
 			do {
 				resp = await this.client.messages.create(requestParams).catch(err => {
 					err.message += `\n\nMessages:\n${JSON.stringify(history, null, 2)}`;
@@ -119,7 +118,6 @@ export class Anthropic extends LLMProvider {
 					if(options.stream) options.stream({tool: toolCall.name});
 					if(!tool) return {tool_use_id: toolCall.id, is_error: true, content: 'Tool not found'};
 					try {
-						console.log(typeof tool.fn);
 						const result = await tool.fn(toolCall.input, options?.stream, this.ai);
 						return {type: 'tool_result', tool_use_id: toolCall.id, content: JSONSanitize(result)};
 					} catch (err: any) {
@@ -131,7 +129,7 @@ export class Anthropic extends LLMProvider {
 				}
 			} while (!controller.signal.aborted && resp.content.some((c: any) => c.type === 'tool_use'));
 			history.push({role: 'assistant', content: resp.content.filter((c: any) => c.type == 'text').map((c: any) => c.text).join('\n\n')});
-			this.toStandard(history);
+			history = this.toStandard(history);

 			if(options.stream) options.stream({done: true});
 			if(options.history) options.history.splice(0, options.history.length, ...history);
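For orientation, the rewritten `toStandard` flattens Anthropic's mixed content blocks into the library's provider-agnostic `LLMMessage` list instead of mutating the array in place. A hedged before/after sketch of that mapping, with illustrative values:

```ts
// Illustrative input for the new toStandard (message values are made up):
const anthropicHistory = [
	{role: 'user', content: 'What is 2+2?'},
	{role: 'assistant', content: [
		{type: 'text', text: 'Let me check.'},
		{type: 'tool_use', id: 'tu_1', name: 'calc', input: {expr: '2+2'}},
	]},
	{role: 'user', content: [
		{type: 'tool_result', tool_use_id: 'tu_1', content: '4'},
	]},
];

// Roughly becomes (per the diff: text blocks joined, tool_use emitted as a
// 'tool' message, and the matching tool_result folded into it by id):
// [
//   {role: 'user', content: 'What is 2+2?', timestamp},
//   {role: 'assistant', content: 'Let me check.', timestamp},
//   {role: 'tool', id: 'tu_1', name: 'calc', args: {expr: '2+2'}, content: '4', timestamp},
// ]
```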
src/audio.ts (250 lines changed)
@@ -1,39 +1,259 @@
-import {spawn} from 'node:child_process';
+import {execSync, spawn} from 'node:child_process';
+import {mkdtempSync} from 'node:fs';
 import fs from 'node:fs/promises';
-import Path from 'node:path';
+import {tmpdir} from 'node:os';
+import * as path from 'node:path';
+import Path, {join} from 'node:path';
 import {AbortablePromise, Ai} from './ai.ts';

 export class Audio {
 	private downloads: {[key: string]: Promise<string>} = {};
+	private pyannote!: string;
 	private whisperModel!: string;

 	constructor(private ai: Ai) {
-		if(ai.options.whisper?.binary) {
-			this.whisperModel = ai.options.whisper?.model.endsWith('.bin') ? ai.options.whisper?.model : ai.options.whisper?.model + '.bin';
+		if(ai.options.whisper) {
+			this.whisperModel = ai.options.asr || 'ggml-base.en.bin';
 			this.downloadAsrModel();
 		}
+
+		this.pyannote = `
+import sys
+import json
+import os
+from pyannote.audio import Pipeline
+
+os.environ['TORCH_HOME'] = r"${ai.options.path}"
+pipeline = Pipeline.from_pretrained("pyannote/speaker-diarization-3.1", token="${ai.options.hfToken}")
+output = pipeline(sys.argv[1])
+
+segments = []
+for turn, speaker in output.speaker_diarization:
+	segments.append({"start": turn.start, "end": turn.end, "speaker": speaker})
+
+print(json.dumps(segments))
+`;
 	}

-	asr(path: string, model: string = this.whisperModel): AbortablePromise<string | null> {
-		if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
-		let abort: any = () => {};
-		const p = new Promise<string | null>(async (resolve, reject) => {
-			const m = await this.downloadAsrModel(model);
+	private async addPunctuation(timestampData: any, llm?: boolean, cadence = 150): Promise<string> {
+		const countSyllables = (word: string): number => {
+			word = word.toLowerCase().replace(/[^a-z]/g, '');
+			if(word.length <= 3) return 1;
+			const matches = word.match(/[aeiouy]+/g);
+			let count = matches ? matches.length : 1;
+			if(word.endsWith('e')) count--;
+			return Math.max(1, count);
+		};
+
+		let result = '';
+		timestampData.transcription.filter((word, i) => {
+			let skip = false;
+			const prevWord = timestampData.transcription[i - 1];
+			const nextWord = timestampData.transcription[i + 1];
+			if(!word.text && nextWord) {
+				nextWord.offsets.from = word.offsets.from;
+				nextWord.timestamps.from = word.offsets.from;
+			} else if(word.text && word.text[0] != ' ' && prevWord) {
+				prevWord.offsets.to = word.offsets.to;
+				prevWord.timestamps.to = word.timestamps.to;
+				prevWord.text += word.text;
+				skip = true;
+			}
+			return !!word.text && !skip;
+		}).forEach((word: any) => {
+			const capital = /^[A-Z]/.test(word.text.trim());
+			const length = word.offsets.to - word.offsets.from;
+			const syllables = countSyllables(word.text.trim());
+			const expected = syllables * cadence;
+			if(capital && length > expected * 2 && word.text[0] == ' ') result += '.';
+			result += word.text;
+		});
+		if(!llm) return result.trim();
+		return this.ai.language.ask(result, {
+			system: 'Remove any misplaced punctuation from the following ASR transcript using the replace tool. Avoid modifying words unless there is an obvious typo',
+			temperature: 0.1,
+			tools: [{
+				name: 'replace',
+				description: 'Use find and replace to fix errors',
+				args: {
+					find: {type: 'string', description: 'Text to find', required: true},
+					replace: {type: 'string', description: 'Text to replace', required: true}
+				},
+				fn: (args) => result = result.replace(args.find, args.replace)
+			}]
+		}).then(() => result);
+	}

+	private async diarizeTranscript(timestampData: any, speakers: any[], llm: boolean): Promise<string> {
+		const speakerMap = new Map();
+		let speakerCount = 0;
+		speakers.forEach((seg: any) => {
+			if(!speakerMap.has(seg.speaker)) speakerMap.set(seg.speaker, ++speakerCount);
+		});
+
+		const punctuatedText = await this.addPunctuation(timestampData, llm);
+		const sentences = punctuatedText.match(/[^.!?]+[.!?]+/g) || [punctuatedText];
+		const words = timestampData.transcription.filter((w: any) => w.text.trim());
+
+		// Assign speaker to each sentence
+		const sentencesWithSpeakers = sentences.map(sentence => {
+			sentence = sentence.trim();
+			if(!sentence) return null;
+
+			const sentenceWords = sentence.toLowerCase().replace(/[^\w\s]/g, '').split(/\s+/);
+			const speakerWordCount = new Map<number, number>();
+
+			sentenceWords.forEach(sw => {
+				const word = words.find((w: any) => sw === w.text.trim().toLowerCase().replace(/[^\w]/g, ''));
+				if(!word) return;
+
+				const wordTime = word.offsets.from / 1000;
+				const speaker = speakers.find((seg: any) => wordTime >= seg.start && wordTime <= seg.end);
+				if(speaker) {
+					const spkNum = speakerMap.get(speaker.speaker);
+					speakerWordCount.set(spkNum, (speakerWordCount.get(spkNum) || 0) + 1);
+				}
+			});
+
+			let bestSpeaker = 1;
+			let maxWords = 0;
+			speakerWordCount.forEach((count, speaker) => {
+				if(count > maxWords) {
+					maxWords = count;
+					bestSpeaker = speaker;
+				}
+			});
+
+			return {speaker: bestSpeaker, text: sentence};
+		}).filter(s => s !== null);
+
+		// Merge adjacent sentences from same speaker
+		const merged: Array<{speaker: number, text: string}> = [];
+		sentencesWithSpeakers.forEach(item => {
+			const last = merged[merged.length - 1];
+			if(last && last.speaker === item.speaker) {
+				last.text += ' ' + item.text;
+			} else {
+				merged.push({...item});
+			}
+		});
+
+		let transcript = merged.map(item => `[Speaker ${item.speaker}]: ${item.text}`).join('\n').trim();
+		if(!llm) return transcript;
+		let chunks = this.ai.language.chunk(transcript, 500, 0);
+		if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
+		const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name", 2: "Second Name"}', {
+			system: 'Use the following transcript to identify speakers. Only identify speakers you are positive about, dont mention speakers you are unsure about in your response',
+			temperature: 0.1,
+		});
+		Object.entries(names).forEach(([speaker, name]) => transcript = transcript.replaceAll(`[Speaker ${speaker}]`, `[${name}]`));
+		return transcript;
+	}

+	private runAsr(file: string, opts: {model?: string, diarization?: boolean} = {}): AbortablePromise<any> {
+		let proc: any;
+		const p = new Promise<any>((resolve, reject) => {
+			this.downloadAsrModel(opts.model).then(m => {
+				if(opts.diarization) {
+					let output = path.join(path.dirname(file), 'transcript');
+					proc = spawn(<string>this.ai.options.whisper,
+						['-m', m, '-f', file, '-np', '-ml', '1', '-oj', '-of', output],
+						{stdio: ['ignore', 'ignore', 'pipe']}
+					);
+					proc.on('error', (err: Error) => reject(err));
+					proc.on('close', async (code: number) => {
+						if(code === 0) {
+							output = await fs.readFile(output + '.json', 'utf-8');
+							fs.rm(output + '.json').catch(() => { });
+							try { resolve(JSON.parse(output)); }
+							catch(e) { reject(new Error('Failed to parse whisper JSON')); }
+						} else {
+							reject(new Error(`Exit code ${code}`));
+						}
+					});
+				} else {
+					let output = '';
-					const proc = spawn(<string>this.ai.options.whisper?.binary, ['-nt', '-np', '-m', m, '-f', path], {stdio: ['ignore', 'pipe', 'ignore']});
-					abort = () => proc.kill('SIGTERM');
+					proc = spawn(<string>this.ai.options.whisper, ['-m', m, '-f', file, '-np', '-nt']);
+					proc.on('error', (err: Error) => reject(err));
+					proc.stdout.on('data', (data: Buffer) => output += data.toString());
+					proc.on('close', async (code: number) => {
+						if(code === 0) {
+							resolve(output.trim() || null);
+						} else {
+							reject(new Error(`Exit code ${code}`));
+						}
+					});
+				}
+			});
+		});
+		return <any>Object.assign(p, {abort: () => proc?.kill('SIGTERM')});
+	}

+	private runDiarization(file: string): AbortablePromise<any> {
+		let aborted = false, abort = () => { aborted = true; };
+		const checkPython = (cmd: string) => {
+			return new Promise<boolean>((resolve) => {
+				const proc = spawn(cmd, ['-W', 'ignore', '-c', 'import pyannote.audio']);
+				proc.on('close', (code: number) => resolve(code === 0));
+				proc.on('error', () => resolve(false));
+			});
+		};
+		const p = Promise.all<any>([
+			checkPython('python'),
+			checkPython('python3'),
+		]).then(<any>(async ([p, p3]: [boolean, boolean]) => {
+			if(aborted) return;
+			if(!p && !p3) throw new Error('Pyannote is not installed: pip install pyannote.audio');
+			const binary = p3 ? 'python3' : 'python';
+			return new Promise((resolve, reject) => {
+				if(aborted) return;
+				let output = '';
+				const proc = spawn(binary, ['-W', 'ignore', '-c', this.pyannote, file]);
+				proc.stdout.on('data', (data: Buffer) => output += data.toString());
+				proc.stderr.on('data', (data: Buffer) => console.error(data.toString()));
+				proc.on('close', (code: number) => {
-					if(code === 0) resolve(output.trim() || null);
-					else reject(new Error(`Exit code ${code}`));
+					if(code === 0) {
+						try { resolve(JSON.parse(output)); }
+						catch (err) { reject(new Error('Failed to parse diarization output')); }
+					} else {
+						reject(new Error(`Python process exited with code ${code}`));
+					}
+				});
+				proc.on('error', reject);
+				abort = () => proc.kill('SIGTERM');
+			});
-		return Object.assign(p, {abort});
+		}));
+		return <any>Object.assign(p, {abort});
+	}

+	asr(file: string, options: { model?: string; diarization?: boolean | 'llm' } = {}): AbortablePromise<string | null> {
+		if(!this.ai.options.whisper) throw new Error('Whisper not configured');
+
+		const tmp = join(mkdtempSync(join(tmpdir(), 'audio-')), 'converted.wav');
+		execSync(`ffmpeg -i "${file}" -ar 16000 -ac 1 -f wav "${tmp}"`, { stdio: 'ignore' });
+		const clean = () => fs.rm(Path.dirname(tmp), {recursive: true, force: true}).catch(() => {});
+
+		if(!options.diarization) return this.runAsr(tmp, {model: options.model});
+		const timestamps = this.runAsr(tmp, {model: options.model, diarization: true});
+		const diarization = this.runDiarization(tmp);
+		let aborted = false, abort = () => {
+			aborted = true;
+			timestamps.abort();
+			diarization.abort();
+			clean();
+		};
+
+		const response = Promise.allSettled([timestamps, diarization]).then(async ([ts, d]) => {
+			if(ts.status == 'rejected') throw new Error('Whisper.cpp timestamps:\n' + ts.reason);
+			if(d.status == 'rejected') throw new Error('Pyannote:\n' + d.reason);
+			if(aborted || !options.diarization) return ts.value;
+			return this.diarizeTranscript(ts.value, d.value, options.diarization == 'llm');
+		}).finally(() => clean());
+		return <any>Object.assign(response, {abort});
+	}

 	async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
-		if(!this.ai.options.whisper?.binary) throw new Error('Whisper not configured');
+		if(!this.ai.options.whisper) throw new Error('Whisper not configured');
 		if(!model.endsWith('.bin')) model += '.bin';
 		const p = Path.join(<string>this.ai.options.path, model);
 		if(await fs.stat(p).then(() => true).catch(() => false)) return p;
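A hedged usage sketch of the new `asr()` flow; `ai.audio` as the accessor for the `Audio` class is an assumption. Per the code above, `ffmpeg` must be on PATH, and diarization additionally needs `pip install pyannote.audio` plus a configured `hfToken`.

```ts
// 'llm' mode runs pyannote for speaker turns, then uses the configured LLM
// to punctuate and replace "[Speaker N]" labels with detected names.
const req = ai.audio.asr('./meeting.mp3', {diarization: 'llm'});
// req.abort() would kill the whisper and pyannote processes and clean the temp dir.
const transcript = await req; // e.g. "[Alice]: Hi all...\n[Bob]: Morning..."
```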
src/embedder.ts
@@ -1,11 +1,13 @@
 import { pipeline } from '@xenova/transformers';
-import { parentPort } from 'worker_threads';

-let embedder: any;
+const [modelDir, model] = process.argv.slice(2);

-parentPort?.on('message', async ({ id, text, model }) => {
-	if(!embedder) embedder = await pipeline('feature-extraction', 'Xenova/' + model);
+let text = '';
+process.stdin.on('data', chunk => text += chunk);
+process.stdin.on('end', async () => {
+	const embedder = await pipeline('feature-extraction', 'Xenova/' + model, {quantized: true, cache_dir: modelDir});
 	const output = await embedder(text, { pooling: 'mean', normalize: true });
 	const embedding = Array.from(output.data);
-	parentPort?.postMessage({ id, embedding });
+	console.log(JSON.stringify({embedding}));
+	process.exit();
 });
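The embedder thus changes from a long-lived worker thread to a one-shot child process: model directory and model name arrive as argv, text on stdin, and the vector comes back as JSON on stdout. This mirrors the `spawn` call in the src/llm.ts diff below; the concrete paths here are placeholders.

```ts
// Driving the refactored embedder script directly (paths are placeholders):
import {spawn} from 'node:child_process';

const proc = spawn('node', ['embedder.js', './models', 'bge-small-en-v1.5'], {stdio: ['pipe', 'pipe', 'ignore']});
proc.stdin.write('Some text to embed');
proc.stdin.end();

let out = '';
proc.stdout.on('data', d => out += d);
proc.on('close', () => {
	const {embedding} = JSON.parse(out);
	console.log(embedding.length); // e.g. 384 dimensions for bge-small-en-v1.5
});
```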
src/index.ts
@@ -1,7 +1,6 @@
 export * from './ai';
 export * from './antrhopic';
 export * from './audio';
-export * from './embedder'
 export * from './llm';
 export * from './open-ai';
 export * from './provider';
src/llm.ts (217 lines changed)
@@ -4,9 +4,9 @@ import {Anthropic} from './antrhopic.ts';
 import {OpenAi} from './open-ai.ts';
 import {LLMProvider} from './provider.ts';
 import {AiTool} from './tools.ts';
-import {Worker} from 'worker_threads';
 import {fileURLToPath} from 'url';
 import {dirname, join} from 'path';
+import { spawn } from 'node:child_process';

 export type AnthropicConfig = {proto: 'anthropic', token: string};
 export type OllamaConfig = {proto: 'ollama', host: string};
@@ -44,8 +44,6 @@ export type LLMMemory = {
 	fact: string;
 	/** Owner and fact embedding vector */
 	embeddings: [number[], number[]];
-	/** Creation time */
-	timestamp: Date;
 }

 export type LLMRequest = {
@@ -75,22 +73,10 @@
 }

 class LLM {
-	private embedWorker: Worker | null = null;
-	private embedQueue = new Map<number, { resolve: (value: number[]) => void; reject: (error: any) => void }>();
-	private embedId = 0;
-	private models: {[model: string]: LLMProvider} = {};
-	private defaultModel!: string;
+	defaultModel!: string;
+	models: {[model: string]: LLMProvider} = {};

 	constructor(public readonly ai: Ai) {
-		this.embedWorker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
-		this.embedWorker.on('message', ({ id, embedding }) => {
-			const pending = this.embedQueue.get(id);
-			if (pending) {
-				pending.resolve(embedding);
-				this.embedQueue.delete(id);
-			}
-		});
-
 		if(!ai.options.llm?.models) return;
 		Object.entries(ai.options.llm.models).forEach(([model, config]) => {
 			if(!this.defaultModel) this.defaultModel = model;
@@ -107,75 +93,98 @@
 	 * @returns {{abort: () => void, response: Promise<string>}} Function to abort response and chat history
 	 */
 	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
 		options = <any>{
 			system: '',
 			temperature: 0.8,
 			...this.ai.options.llm,
 			models: undefined,
 			history: [],
 			...options,
 		}
 		const m = options.model || this.defaultModel;
 		if(!this.models[m]) throw new Error(`Model does not exist: ${m}`);
 		let abort = () => {};
 		return Object.assign(new Promise<string>(async res => {
 			if(!options.history) options.history = [];
-			// If memories were passed, find any relivant ones and add a tool for ADHOC lookups
+			// If memories were passed, find any relevant ones and add a tool for ADHOC lookups
 			if(options.memory) {
-				options.system = (options.system || '') + '\nYou have passive persistent memory never make any mention of your memory capabilities and what you can/cannot remember\n';
-				const search = async (query?: string | null, subject?: string | null, limit = 50) => {
+				const search = async (query?: string | null, subject?: string | null, limit = 10) => {
 					const [o, q] = await Promise.all([
 						subject ? this.embedding(subject) : Promise.resolve(null),
 						query ? this.embedding(query) : Promise.resolve(null),
 					]);
-					return (options.memory || [])
-						.map(m => ({...m, score: o ? this.cosineSimilarity(m.embeddings[0], o[0].embedding) : 1}))
-						.filter((m: any) => m.score >= 0.8)
-						.map((m: any) => ({...m, score: q ? this.cosineSimilarity(m.embeddings[1], q[0].embedding) : m.score}))
-						.filter((m: any) => m.score >= 0.2)
-						.toSorted((a: any, b: any) => a.score - b.score)
-						.slice(0, limit);
+					return (options.memory || []).map(m => {
+						const score = (o ? this.cosineSimilarity(m.embeddings[0], o[0].embedding) : 0)
+							+ (q ? this.cosineSimilarity(m.embeddings[1], q[0].embedding) : 0);
+						return {...m, score};
+					}).toSorted((a: any, b: any) => a.score - b.score).slice(0, limit);
 				}

+				options.system += '\nYou have RAG memory and will be given the top_k closest memories regarding the users query. Save anything new you have learned worth remembering from the user message using the remember tool and feel free to recall memories manually.\n';
 				const relevant = await search(message);
-				if(relevant.length) options.history.push({role: 'assistant', content: 'Things I remembered:\n' + relevant.map(m => `${m.owner}: ${m.fact}`).join('\n')});
-				options.tools = [...options.tools || [], {
-					name: 'read_memory',
-					description: 'Check your long-term memory for more information',
+				if(relevant.length) options.history.push({role: 'tool', name: 'recall', id: 'auto_recall_' + Math.random().toString(), args: {}, content: 'Things I remembered:\n' + relevant.map(m => `${m.owner}: ${m.fact}`).join('\n')});
+				options.tools = [{
+					name: 'recall',
+					description: 'Recall the closest memories you have regarding a query using RAG',
 					args: {
 						subject: {type: 'string', description: 'Find information by a subject topic, can be used with or without query argument'},
 						query: {type: 'string', description: 'Search memory based on a query, can be used with or without subject argument'},
-						limit: {type: 'number', description: 'Result limit, default 5'},
+						topK: {type: 'number', description: 'Result limit, default 5'},
 					},
 					fn: (args) => {
 						if(!args.subject && !args.query) throw new Error('Either a subject or query argument is required');
-						return search(args.query, args.subject, args.limit || 5);
+						return search(args.query, args.subject, args.topK);
 					}
-				}];
+				}, {
+					name: 'remember',
+					description: 'Store important facts user shares for future recall',
+					args: {
+						owner: {type: 'string', description: 'Subject/person this fact is about'},
+						fact: {type: 'string', description: 'The information to remember'}
+					},
+					fn: async (args) => {
+						if(!options.memory) return;
+						const e = await Promise.all([
+							this.embedding(args.owner),
+							this.embedding(`${args.owner}: ${args.fact}`)
+						]);
+						const newMem = {owner: args.owner, fact: args.fact, embeddings: <any>[e[0][0].embedding, e[1][0].embedding]};
+						options.memory.splice(0, options.memory.length, ...[
+							...options.memory.filter(m => {
+								return !(this.cosineSimilarity(newMem.embeddings[0], m.embeddings[0]) >= 0.9 && this.cosineSimilarity(newMem.embeddings[1], m.embeddings[1]) >= 0.8);
+							}),
+							newMem
+						]);
+						return 'Remembered!';
+					}
+				}, ...options.tools || []];
 			}

 			// Ask
 			const resp = await this.models[m].ask(message, options);

-			// Remove any memory calls
-			if(options.memory) {
-				const i = options.history?.findIndex((h: any) => h.role == 'assistant' && h.content.startsWith('Things I remembered:'));
-				if(i != null && i >= 0) options.history?.splice(i, 1);
-			}
+			// Remove any memory calls from history
+			if(options.memory) options.history.splice(0, options.history.length, ...options.history.filter(h => h.role != 'tool' || (h.name != 'recall' && h.name != 'remember')));

-			// Handle compression and memory extraction
-			if(options.compress || options.memory) {
-				let compressed = null;
-				if(options.compress) {
-					compressed = await this.ai.language.compressHistory(options.history, options.compress.max, options.compress.min, options);
-					options.history.splice(0, options.history.length, ...compressed.history);
-				} else {
-					const i = options.history?.findLastIndex(m => m.role == 'user') ?? -1;
-					compressed = await this.ai.language.compressHistory(i != -1 ? options.history.slice(i) : options.history, 0, 0, options);
-				}
-				if(options.memory) {
-					const updated = options.memory
-						.filter(m => !compressed.memory.some(m2 => this.cosineSimilarity(m.embeddings[1], m2.embeddings[1]) > 0.8))
-						.concat(compressed.memory);
-					options.memory.splice(0, options.memory.length, ...updated);
-				}
-			}
+			// Compress message history
+			if(options.compress) {
+				const compressed = await this.ai.language.compressHistory(options.history, options.compress.max, options.compress.min, options);
+				options.history.splice(0, options.history.length, ...compressed);
+			}
 			return res(resp);
 		}), {abort});
 	}

 	async code(message: string, options?: LLMRequest): Promise<any> {
 		const resp = await this.ask(message, {...options, system: [
 			options?.system,
 			'Return your response in a code block'
 		].filter(t => !!t).join(('\n'))});
 		const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
 		return codeBlock ? codeBlock[1].trim() : null;
 	}

 	/**
 	 * Compress chat history to reduce context size
 	 * @param {LLMMessage[]} history Chatlog that will be compressed
@@ -184,27 +193,24 @@
 	 * @param {LLMRequest} options LLM options
 	 * @returns {Promise<LLMMessage[]>} New chat history will summary at index 0
 	 */
-	async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<{history: LLMMessage[], memory: LLMMemory[]}> {
-		if(this.estimateTokens(history) < max) return {history, memory: []};
+	async compressHistory(history: LLMMessage[], max: number, min: number, options?: LLMRequest): Promise<LLMMessage[]> {
+		if(this.estimateTokens(history) < max) return history;
 		let keep = 0, tokens = 0;
 		for(let m of history.toReversed()) {
 			tokens += this.estimateTokens(m.content);
 			if(tokens < min) keep++;
 			else break;
 		}
-		if(history.length <= keep) return {history, memory: []};
+		if(history.length <= keep) return history;
 		const system = history[0].role == 'system' ? history[0] : null,
 			recent = keep == 0 ? [] : history.slice(-keep),
 			process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
-		const summary: any = await this.json(`Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn't already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI. Match this format: {summary: string, facts: [[subject, fact]]}\n\n${process.map(m => `${m.role}: ${m.content}`).join('\n\n')}`, {model: options?.model, temperature: options?.temperature || 0.3});
-		const timestamp = new Date();
-		const memory = await Promise.all((summary?.facts || [])?.map(async ([owner, fact]: [string, string]) => {
-			const e = await Promise.all([this.embedding(owner), this.embedding(`${owner}: ${fact}`)]);
-			return {owner, fact, embeddings: [e[0][0].embedding, e[1][0].embedding], timestamp};
-		}));
-		const h = [{role: 'assistant', content: `Conversation Summary: ${summary?.summary}`, timestamp: Date.now()}, ...recent];
+		const summary: any = await this.summarize(process.map(m => `[${m.role}]: ${m.content}`).join('\n\n'), 500, options);
+		const d = Date.now();
+		const h = [{role: <any>'tool', name: 'summary', id: `summary_` + d, args: {}, content: `Conversation Summary: ${summary?.summary}`, timestamp: d}, ...recent];
 		if(system) h.splice(0, 0, system);
-		return {history: <any>h, memory};
+		return h;
 	}

 /**
@@ -241,7 +247,7 @@
 			return `${p}: ${Array.isArray(value) ? value.join(', ') : value}`;
 		});
 	};
-	const lines = typeof target === 'object' ? objString(target) : target.split('\n');
+	const lines = typeof target === 'object' ? objString(target) : target.toString().split('\n');
 	const tokens = lines.flatMap(l => [...l.split(/\s+/).filter(Boolean), '\n']);
 	const chunks: string[] = [];
 	for(let i = 0; i < tokens.length;) {
@@ -262,25 +268,57 @@
 	/**
 	 * Create a vector representation of a string
 	 * @param {object | string} target Item that will be embedded (objects get converted)
-	 * @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
-	 * @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
+	 * @param {maxTokens?: number, overlapTokens?: number} opts Options for embedding such as chunk sizes
 	 * @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
 	 */
-	embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
+	embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number} = {}): AbortablePromise<any[]> {
+		let {maxTokens = 500, overlapTokens = 50} = opts;
+		let aborted = false;
+		const abort = () => { aborted = true; };

 		const embed = (text: string): Promise<number[]> => {
 			return new Promise((resolve, reject) => {
-				const id = this.embedId++;
-				this.embedQueue.set(id, { resolve, reject });
-				this.embedWorker?.postMessage({ id, text, model: this.ai.options?.embedder || 'bge-small-en-v1.5' });
+				if(aborted) return reject(new Error('Aborted'));

+				const args: string[] = [
+					join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'),
+					<string>this.ai.options.path,
+					this.ai.options?.embedder || 'bge-small-en-v1.5'
+				];
+				const proc = spawn('node', args, {stdio: ['pipe', 'pipe', 'ignore']});
+				proc.stdin.write(text);
+				proc.stdin.end();

+				let output = '';
+				proc.stdout.on('data', (data: Buffer) => output += data.toString());
+				proc.on('close', (code: number) => {
+					if(aborted) return reject(new Error('Aborted'));
+					if(code === 0) {
+						try {
+							const result = JSON.parse(output);
+							resolve(result.embedding);
+						} catch(err) {
+							reject(new Error('Failed to parse embedding output'));
+						}
+					} else {
+						reject(new Error(`Embedder process exited with code ${code}`));
+					}
+				});
+				proc.on('error', reject);
 			});
 		};
-		const chunks = this.chunk(target, maxTokens, overlapTokens);
-		return Promise.all(chunks.map(async (text, index) => ({
-			index,
-			embedding: await embed(text),
-			text,
-			tokens: this.estimateTokens(text),
-		})));
+
+		const p = (async () => {
+			const chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
+			for(let i = 0; i < chunks.length; i++) {
+				if(aborted) break;
+				const text = chunks[i];
+				const embedding = await embed(text);
+				results.push({index: i, embedding, text, tokens: this.estimateTokens(text)});
+			}
+			return results;
+		})();
+		return Object.assign(p, { abort });
 	}
@@ -312,16 +350,17 @@

 	/**
 	 * Ask a question with JSON response
-	 * @param {string} message Question
+	 * @param {string} text Text to process
+	 * @param {string} schema JSON schema the AI should match
 	 * @param {LLMRequest} options Configuration options and chat history
 	 * @returns {Promise<{} | {} | RegExpExecArray | null>}
 	 */
-	async json(message: string, options?: LLMRequest): Promise<any> {
-		let resp = await this.ask(message, {system: 'Respond using a JSON blob matching any provided examples', ...options});
-		if(!resp) return {};
-		const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
-		const jsonStr = codeBlock ? codeBlock[1].trim() : resp;
-		return JSONAttemptParse(jsonStr, {});
+	async json(text: string, schema: string, options?: LLMRequest): Promise<any> {
+		const code = await this.code(text, {...options, system: [
+			options?.system,
+			`Only respond using JSON matching this schema:\n\`\`\`json\n${schema}\n\`\`\``
+		].filter(t => !!t).join('\n')});
+		return code ? JSONAttemptParse(code, {}) : null;
 	}
@@ -331,8 +370,8 @@
 	 * @param options LLM request options
 	 * @returns {Promise<string>} Summary
 	 */
-	summarize(text: string, tokens: number, options?: LLMRequest): Promise<string | null> {
-		return this.ask(text, {system: `Generate a brief summary <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options});
+	summarize(text: string, tokens: number = 500, options?: LLMRequest): Promise<string | null> {
+		return this.ask(text, {system: `Generate the shortest summary possible <= ${tokens} tokens. Output nothing else`, temperature: 0.3, ...options});
 	}
 }
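A hedged sketch of how the reworked memory flow looks from the caller's side: per the splice calls above, the memory array is shared state that `ask()` reads for RAG recall and that the new `remember` tool mutates in place. `ai.language` is the accessor used elsewhere in this compare; exporting `LLMMemory` from the package root is an assumption.

```ts
// Sketch: `memory` and `history` are mutated in place across calls.
import {LLMMemory} from '@ztimson/ai-utils';

const memory: LLMMemory[] = [];
const history: any[] = [];

await ai.language.ask('My name is Zak and I prefer dark mode', {memory, history});
// The model may call remember({owner: 'Zak', fact: 'prefers dark mode'});
// recall/remember tool messages are filtered back out of `history` afterwards.

await ai.language.ask('What theme do I like?', {memory, history});
// Top-scoring memories are injected as an auto_recall tool message before the ask.
```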
src/open-ai.ts
@@ -11,7 +11,7 @@ export class OpenAi extends LLMProvider {
 		super();
 		this.client = new openAI(clean({
 			baseURL: host,
-			apiKey: token
+			apiKey: token || host ? 'ignored' : undefined
 		}));
 	}
@@ -67,8 +67,11 @@ export class OpenAi extends LLMProvider {
 	ask(message: string, options: LLMRequest = {}): AbortablePromise<string> {
 		const controller = new AbortController();
 		return Object.assign(new Promise<any>(async (res, rej) => {
-			if(options.system && options.history?.[0]?.role != 'system') options.history?.splice(0, 0, {role: 'system', content: options.system, timestamp: Date.now()});
-			const history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
+			if(options.system) {
+				if(options.history?.[0]?.role != 'system') options.history?.splice(0, 0, {role: 'system', content: options.system, timestamp: Date.now()});
+				else options.history[0].content = options.system;
+			}
+			let history = this.fromStandard([...options.history || [], {role: 'user', content: message, timestamp: Date.now()}]);
 			const tools = options.tools || this.ai.options.llm?.tools || [];
 			const requestParams: any = {
 				model: options.model || this.model,
@@ -100,15 +103,37 @@ export class OpenAi extends LLMProvider {
 			if(options.stream) {
 				if(!isFirstMessage) options.stream({text: '\n\n'});
 				else isFirstMessage = false;
-				resp.choices = [{message: {content: '', tool_calls: []}}];
+				resp.choices = [{message: {role: 'assistant', content: '', tool_calls: []}}];
 				for await (const chunk of resp) {
 					if(controller.signal.aborted) break;
 					if(chunk.choices[0].delta.content) {
 						resp.choices[0].message.content += chunk.choices[0].delta.content;
 						options.stream({text: chunk.choices[0].delta.content});
 					}

 					if(chunk.choices[0].delta.tool_calls) {
-						resp.choices[0].message.tool_calls = chunk.choices[0].delta.tool_calls;
+						for(const deltaTC of chunk.choices[0].delta.tool_calls) {
+							const existing = resp.choices[0].message.tool_calls.find(tc => tc.index === deltaTC.index);
+							if(existing) {
+								if(deltaTC.id) existing.id = deltaTC.id;
+								if(deltaTC.type) existing.type = deltaTC.type;
+								if(deltaTC.function) {
+									if(!existing.function) existing.function = {};
+									if(deltaTC.function.name) existing.function.name = deltaTC.function.name;
+									if(deltaTC.function.arguments) existing.function.arguments = (existing.function.arguments || '') + deltaTC.function.arguments;
+								}
+							} else {
+								resp.choices[0].message.tool_calls.push({
+									index: deltaTC.index,
+									id: deltaTC.id || '',
+									type: deltaTC.type || 'function',
+									function: {
+										name: deltaTC.function?.name || '',
+										arguments: deltaTC.function?.arguments || ''
+									}
+								});
+							}
+						}
 					}
 				}
 			}
@@ -133,7 +158,7 @@ export class OpenAi extends LLMProvider {
 			}
 		} while (!controller.signal.aborted && resp.choices?.[0]?.message?.tool_calls?.length);
 		history.push({role: 'assistant', content: resp.choices[0].message.content || ''});
-		this.toStandard(history);
+		history = this.toStandard(history);

 		if(options.stream) options.stream({done: true});
 		if(options.history) options.history.splice(0, options.history.length, ...history);
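Both providers now emit the same stream callback events, visible verbatim in the diffs above: `{text}` for content deltas, `{tool}` when a tool call starts, and `{done: true}` at the end. A minimal consumer sketch:

```ts
// The chunk shapes ({text}, {tool}, {done}) come straight from the diffs above.
await ai.language.ask('Summarize this repo', {
	stream: (chunk: any) => {
		if(chunk.text) process.stdout.write(chunk.text);
		if(chunk.tool) console.error(`\n[tool: ${chunk.tool}]`);
		if(chunk.done) console.error('\n[done]');
	},
});
```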
src/vision.ts
@@ -13,7 +13,7 @@ export class Vision {
 	ocr(path: string): AbortablePromise<string | null> {
 		let worker: any;
 		const p = new Promise<string | null>(async res => {
-			worker = await createWorker(this.ai.options.tesseract?.model || 'eng', 2, {cachePath: this.ai.options.path});
+			worker = await createWorker(this.ai.options.ocr || 'eng', 2, {cachePath: this.ai.options.path});
 			const {data} = await worker.recognize(path);
 			await worker.terminate();
 			res(data.text.trim() || null);
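OCR accordingly reads its model from the flattened `ai.options.ocr`. A one-line usage sketch, with `ai.vision` as an assumed accessor name for the `Vision` class:

```ts
// `ai.vision` is an assumption; the return type (string | null) is from the diff.
const text = await ai.vision.ocr('./receipt.png');
```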
tsconfig.json
@@ -15,6 +15,7 @@
 		"noEmit": true,

 		/* Linting */
-		"strict": true
+		"strict": true,
+		"noImplicitAny": false
 	}
 }
vite.config.ts
@@ -1,6 +1,5 @@
 import {defineConfig} from 'vite';
-import dts from 'vite-plugin-dts';
 import {resolve} from 'path';

 export default defineConfig({
 	build: {