Re-organized functions and added semantic embeddings

src/ai.ts
@@ -1,9 +1,6 @@
import {createWorker} from 'tesseract.js';
import {LLM, LLMOptions} from './llm';
import fs from 'node:fs/promises';
import Path from 'node:path';
import * as tf from '@tensorflow/tfjs';
import {spawn} from 'node:child_process';
import {Audio} from './audio.ts';
import {Vision} from './vision.ts';

export type AiOptions = LLMOptions & {
    whisper?: {
@@ -20,108 +17,16 @@ export class Ai {
    private downloads: {[key: string]: Promise<string>} = {};
    private whisperModel!: string;

    /** Large Language Models */
    llm!: LLM;
    /** Audio processing AI */
    audio!: Audio;
    /** Language processing AI */
    language!: LLM;
    /** Vision processing AI */
    vision!: Vision;

    constructor(public readonly options: AiOptions) {
        this.llm = new LLM(this, options);
        if(this.options.whisper?.binary) {
            this.whisperModel = this.options.whisper?.model.endsWith('.bin') ? this.options.whisper?.model : this.options.whisper?.model + '.bin';
            this.downloadAsrModel();
        }
    }

    /**
     * Convert audio to text using Automatic Speech Recognition
     * @param {string} path Path to audio
     * @param {string} model Whisper model
     * @returns {{abort: () => void, response: Promise<string | null>}} Abort function & Promise of extracted text
     */
    asr(path: string, model: string = this.whisperModel): {abort: () => void, response: Promise<string | null>} {
        if(!this.options.whisper?.binary) throw new Error('Whisper not configured');
        let abort: any = () => {};
        const response = new Promise<string | null>((resolve, reject) => {
            this.downloadAsrModel(model).then(m => {
                let output = '';
                const proc = spawn(<string>this.options.whisper?.binary, ['-nt', '-np', '-m', m, '-f', path], {stdio: ['ignore', 'pipe', 'ignore']});
                abort = () => proc.kill('SIGTERM');
                proc.on('error', (err: Error) => reject(err));
                proc.stdout.on('data', (data: Buffer) => output += data.toString());
                proc.on('close', (code: number) => {
                    if(code === 0) resolve(output.trim() || null);
                    else reject(new Error(`Exit code ${code}`));
                });
            });
        });
        return {response, abort};
    }
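
    // Usage sketch (illustrative only, not part of this commit): the whisper binary, model name and
    // file paths below are hypothetical placeholders, and any required LLM options are omitted for
    // brevity; `asr` hands back an abort hook alongside the promise so a long-running whisper.cpp
    // transcription can be cancelled.
    //   const ai = new Ai({whisper: {binary: '/usr/local/bin/whisper-cli', path: './models', model: 'ggml-base.en'}});
    //   const {abort, response} = ai.asr('./meeting.wav');
    //   const timer = setTimeout(abort, 60_000);   // kill the child process if it runs too long
    //   const transcript = await response;         // trimmed text, or null when nothing was recognised
    //   clearTimeout(timer);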

    /**
     * Downloads the specified Whisper model if it is not already present locally.
     *
     * @param {string} model Whisper model that will be downloaded
     * @return {Promise<string>} Absolute path to model file, resolves once downloaded
     */
    async downloadAsrModel(model: string = this.whisperModel): Promise<string> {
        if(!this.options.whisper?.binary) throw new Error('Whisper not configured');
        if(!model.endsWith('.bin')) model += '.bin';
        const p = Path.join(this.options.whisper.path, model);
        if(await fs.stat(p).then(() => true).catch(() => false)) return p;
        if(!!this.downloads[model]) return this.downloads[model];
        this.downloads[model] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${model}`)
            .then(resp => resp.arrayBuffer())
            .then(arr => Buffer.from(arr)).then(async buffer => {
                await fs.writeFile(p, buffer);
                delete this.downloads[model];
                return p;
            });
        return this.downloads[model];
    }
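
    // Note (added for illustration): `downloads` caches the in-flight fetch, so concurrent requests
    // for the same model typically resolve to a single download, e.g.
    //   const [a, b] = await Promise.all([ai.downloadAsrModel('ggml-base.en'), ai.downloadAsrModel('ggml-base.en')]);
    //   // a === b -> one fetch from huggingface.co, one file written under options.whisper.path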

    /**
     * Convert image to text using Optical Character Recognition
     * @param {string} path Path to image
     * @returns {{abort: Function, response: Promise<string | null>}} Abort function & Promise of extracted text
     */
    ocr(path: string): {abort: () => void, response: Promise<string | null>} {
        let worker: any;
        return {
            abort: () => { worker?.terminate(); },
            response: new Promise(async res => {
                worker = await createWorker('eng');
                const {data} = await worker.recognize(path);
                await worker.terminate();
                res(data.text.trim() || null);
            })
        }
    }
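
    // Usage sketch (illustrative only): OCR spins up a throwaway English tesseract.js worker per call;
    // the file path below is a hypothetical example.
    //   const {abort, response} = ai.ocr('./receipt.png');
    //   const text = await response;               // extracted text, or null when the image has none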

    /**
     * Compare the similarity between a target string and multiple search terms using tensor math
     * @param {string} target Text that will be checked
     * @param {...string} searchTerms Multiple search terms to check against target
     * @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
     */
    semanticSimilarity(target: string, ...searchTerms: string[]) {
        if(searchTerms.length < 2) throw new Error('Requires at least 2 search terms to compare against');

        const vector = (text: string, dimensions: number = 10): number[] => {
            // Hash each character into [0, 1); pad with zeros so every vector has exactly `dimensions` entries
            const values = text.toLowerCase().split('').map((char, index) =>
                (char.charCodeAt(0) * (index + 1)) % dimensions / dimensions).slice(0, dimensions);
            while(values.length < dimensions) values.push(0);
            return values;
        }

        const cosineSimilarity = (v1: number[], v2: number[]): number => {
            if(v1.length !== v2.length) throw new Error('Vectors must be same length');
            const tensor1 = tf.tensor1d(v1), tensor2 = tf.tensor1d(v2);
            const dotProduct = tf.dot(tensor1, tensor2);
            const magnitude1 = tf.norm(tensor1);
            const magnitude2 = tf.norm(tensor2);
            if(magnitude1.dataSync()[0] === 0 || magnitude2.dataSync()[0] === 0) return 0;
            return dotProduct.dataSync()[0] / (magnitude1.dataSync()[0] * magnitude2.dataSync()[0]);
        }

        const v = vector(target);
        const similarities = searchTerms.map(t => vector(t)).map(refVector => cosineSimilarity(v, refVector));
        return {avg: similarities.reduce((acc, s) => acc + s, 0) / similarities.length, max: Math.max(...similarities), similarities};
        this.audio = new Audio(this);
        this.language = new LLM(this);
        this.vision = new Vision(this);
    }
}
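
A minimal usage sketch for the new semanticSimilarity helper, assuming `ai` is an already-constructed Ai instance; the strings are made up for illustration, and the "embedding" here is the character-hash vector defined above rather than a learned model:

const {avg, max, similarities} = ai.semanticSimilarity(
    'reset my password',                      // target
    'how do I change my password?',           // search terms to compare against
    'update my billing address'
);
// similarities[i] is the cosine similarity (0-1) between the target and searchTerms[i];
// max picks out the closest term, avg gives an overall relevance score.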