import {$} from '@ztimson/node-utils';
import {createWorker} from 'tesseract.js';
import {LLM, LLMOptions} from './llm';
import fs from 'node:fs/promises';
import Path from 'node:path';
import * as tf from '@tensorflow/tfjs';

export type AiOptions = LLMOptions & {
	whisper?: {
		/** Whisper binary location */
		binary: string;
		/** Model */
		model: WhisperModel;
		/** Path to models */
		path: string;
		/** Path to storage location for temporary files */
		temp?: string;
	}
}

export type WhisperModel = 'tiny' | 'base' | 'small' | 'medium' | 'large';

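// A minimal configuration sketch (the binary location, model directory, and temp directory
// below are placeholder values, not defaults shipped with this module):
//
//   const ai = new Ai({
//       whisper: {
//           binary: '/usr/local/bin/whisper',   // placeholder: wherever the whisper.cpp binary lives
//           model: 'base',                      // any WhisperModel size
//           path: '/opt/models/whisper',        // placeholder: directory the *.bin models are stored in
//           temp: '/tmp'                        // optional scratch space for transcripts
//       }
//       // ...plus whatever LLMOptions from './llm' the deployment requires
//   });
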
export class Ai {
	private downloads: {[key: string]: Promise<string>} = {};
	private whisperModel!: string;

	/** Large Language Models */
	llm!: LLM;

	constructor(public readonly options: AiOptions) {
		this.llm = new LLM(this, options);
		if(this.options.whisper?.binary) {
			const model = this.options.whisper.model;
			// Resolve the default model to its on-disk file name (append .bin unless already present)
			this.whisperModel = Path.join(this.options.whisper.path, model.endsWith('.bin') ? model : model + '.bin');
			this.downloadAsrModel();
		}
	}

	/**
	 * Convert audio to text using Automatic Speech Recognition
	 * @param {string} path Path to audio
	 * @param {WhisperModel} model Whisper model to use instead of the configured default
	 * @returns {Promise<string | null>} Extracted text, or null if nothing was transcribed
	 */
	async asr(path: string, model?: WhisperModel): Promise<string | null> {
		if(!this.options.whisper?.binary) throw new Error('Whisper not configured');
		await this.downloadAsrModel(model);
		// Write the transcript to a uniquely named file inside the temp directory
		const name = Math.random().toString(36).substring(2, 10) + '-' + path.split('/').pop() + '.txt';
		const output = Path.join(this.options.whisper.temp || '/tmp', name);
		// An explicit model overrides the default resolved in the constructor (.bin appended the same way)
		const modelPath = model ? Path.join(this.options.whisper.path, model.endsWith('.bin') ? model : model + '.bin') : this.whisperModel;
		console.log(`rm -f ${output} && ${this.options.whisper.binary} -nt -np -m ${modelPath} -f ${path} -otxt -of ${output}`);
		await $`rm -f ${output} && ${this.options.whisper.binary} -nt -np -m ${modelPath} -f ${path} -otxt -of ${output}`;
		// Read the transcript, then remove the temporary file regardless of outcome
		return fs.readFile(output, 'utf-8').then(text => text?.trim() || null)
			.finally(() => fs.rm(output, {force: true}).catch(() => {}));
	}

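	// Usage sketch (the audio path is a placeholder):
	//
	//   const transcript = await ai.asr('/recordings/meeting.wav');           // default model from the constructor
	//   const detailed = await ai.asr('/recordings/meeting.wav', 'medium');   // one-off model override, downloaded on demand
	//
	// Both calls resolve to the trimmed transcript, or null when Whisper produced no text.
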
	/**
	 * Downloads the specified Whisper model if it is not already present locally.
	 *
	 * @param {string} model Whisper model that will be downloaded
	 * @returns {Promise<string>} Path to the model file, resolves once downloaded
	 */
	async downloadAsrModel(model?: string): Promise<string> {
		if(!this.options.whisper?.binary) throw new Error('Whisper not configured');
		// Normalize to the on-disk file name, falling back to the model configured at construction
		let m: string;
		if(model) m = model.endsWith('.bin') ? model : model + '.bin';
		else m = <string>this.whisperModel.split('/').at(-1);
		const p = Path.join(this.options.whisper.path, m);
		// Already on disk: nothing to do
		if(await fs.stat(p).then(() => true).catch(() => false)) return p;
		// Already downloading: share the in-flight promise instead of fetching twice
		if(this.downloads[m]) return this.downloads[m];
		this.downloads[m] = fetch(`https://huggingface.co/ggerganov/whisper.cpp/resolve/main/${m}`)
			.then(resp => resp.arrayBuffer())
			.then(arr => Buffer.from(arr))
			.then(async buffer => {
				await fs.writeFile(p, buffer);
				delete this.downloads[m];
				return p;
			});
		return this.downloads[m];
	}

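	// Models can also be fetched ahead of time so the first asr() call does not block on a
	// download (illustrative; the model name is just one of the WhisperModel sizes):
	//
	//   await ai.downloadAsrModel('small');   // resolves to <options.whisper.path>/small.bin
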
	/**
	 * Convert image to text using Optical Character Recognition
	 * @param {string} path Path to image
	 * @returns {{abort: Function, response: Promise<string | null>}} Abort function & Promise of extracted text
	 */
	ocr(path: string): {abort: () => void, response: Promise<string | null>} {
		let worker: any;
		return {
			// Terminate the Tesseract worker early to cancel an in-progress recognition
			abort: () => { worker?.terminate(); },
			response: new Promise(async res => {
				worker = await createWorker('eng');
				const {data} = await worker.recognize(path);
				await worker.terminate();
				res(data.text.trim() || null);
			})
		}
	}

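	// Usage sketch (the image path is a placeholder):
	//
	//   const {abort, response} = ai.ocr('/scans/invoice.png');
	//   // call abort() to tear the Tesseract worker down early if the result is no longer needed
	//   const text = await response;   // extracted text, or null when nothing was recognized
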
	/**
	 * Compare the similarity between a target string and one or more search terms using tensor math
	 * @param {string} target Text that will be checked
	 * @param {string[]} searchTerms Search terms to check against the target
	 * @returns {{avg: number, max: number, similarities: number[]}} Similarity values 0-1: 0 = unique, 1 = identical
	 */
	semanticSimilarity(target: string, ...searchTerms: string[]) {
		if(!searchTerms.length) throw new Error('Requires at least 2 strings to compare');

		// Hash each string into a fixed-length numeric vector (zero-padded so lengths always match)
		const vector = (text: string, dimensions: number = 10): number[] => {
			const chars = text.toLowerCase().split('').slice(0, dimensions);
			return Array.from({length: dimensions}, (_, index) =>
				index < chars.length ? (chars[index].charCodeAt(0) * (index + 1)) % dimensions / dimensions : 0);
		}

		// Cosine similarity: dot(v1, v2) / (|v1| * |v2|)
		const cosineSimilarity = (v1: number[], v2: number[]): number => {
			if(v1.length !== v2.length) throw new Error('Vectors must be same length');
			const tensor1 = tf.tensor1d(v1), tensor2 = tf.tensor1d(v2);
			const dotProduct = tf.dot(tensor1, tensor2);
			const magnitude1 = tf.norm(tensor1);
			const magnitude2 = tf.norm(tensor2);
			if(magnitude1.dataSync()[0] === 0 || magnitude2.dataSync()[0] === 0) return 0;
			return dotProduct.dataSync()[0] / (magnitude1.dataSync()[0] * magnitude2.dataSync()[0]);
		}

		const v = vector(target);
		const similarities = searchTerms.map(t => vector(t)).map(refVector => cosineSimilarity(v, refVector));
		return {avg: similarities.reduce((acc, s) => acc + s, 0) / similarities.length, max: Math.max(...similarities), similarities};
	}
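
	// Usage sketch (example strings only):
	//
	//   const {avg, max, similarities} = ai.semanticSimilarity('invoice march 2024', 'invoice', 'receipt');
	//   // similarities holds one 0-1 score per search term, max is the best match, avg the mean score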
}