Compare commits

..

2 Commits
0.7.0 ... 0.7.2

Author SHA1 Message Date
da15d299e6 parallel embedding cap
All checks were successful
Publish Library / Build NPM Project (push) Successful in 31s
Publish Library / Tag Version (push) Successful in 5s
2026-02-19 21:37:58 -05:00
7ef7c3f676 Cap speaker ID transcript length to 2000 tokens
All checks were successful
Publish Library / Build NPM Project (push) Successful in 34s
Publish Library / Tag Version (push) Successful in 6s
2026-02-14 09:48:12 -05:00
3 changed files with 22 additions and 13 deletions

View File

@@ -1,6 +1,6 @@
{ {
"name": "@ztimson/ai-utils", "name": "@ztimson/ai-utils",
"version": "0.7.0", "version": "0.7.2",
"description": "AI Utility library", "description": "AI Utility library",
"author": "Zak Timson", "author": "Zak Timson",
"license": "MIT", "license": "MIT",

View File

@@ -40,9 +40,11 @@ export class Audio {
if(!this.ai.language.defaultModel) throw new Error('Configure an LLM for advanced ASR speaker detection'); if(!this.ai.language.defaultModel) throw new Error('Configure an LLM for advanced ASR speaker detection');
p = p.then(async transcript => { p = p.then(async transcript => {
if(!transcript) return transcript; if(!transcript) return transcript;
const names = await this.ai.language.json(transcript, '{1: "Detected Name"}', { let chunks = this.ai.language.chunk(transcript, 500, 0);
if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name"}', {
system: 'Use this following transcript to identify speakers. Only identify speakers you are sure about', system: 'Use this following transcript to identify speakers. Only identify speakers you are sure about',
temperature: 0.2, temperature: 0.1,
}); });
Object.entries(names).forEach(([speaker, name]) => { Object.entries(names).forEach(([speaker, name]) => {
transcript = (<string>transcript).replaceAll(`[Speaker ${speaker}]`, `[${name}]`); transcript = (<string>transcript).replaceAll(`[Speaker ${speaker}]`, `[${name}]`);

View File

@@ -255,11 +255,12 @@ class LLM {
/** /**
* Create a vector representation of a string * Create a vector representation of a string
* @param {object | string} target Item that will be embedded (objects get converted) * @param {object | string} target Item that will be embedded (objects get converted)
* @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines) * @param {maxTokens?: number, overlapTokens?: number, parallel?: number} opts Options for embedding such as chunk sizes and parallel processing
* @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
* @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings * @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
*/ */
embedding(target: object | string, maxTokens = 500, overlapTokens = 50) { async embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number, parallel?: number} = {}) {
let {maxTokens = 500, overlapTokens = 50, parallel = 1} = opts;
const embed = (text: string): Promise<number[]> => { const embed = (text: string): Promise<number[]> => {
return new Promise((resolve, reject) => { return new Promise((resolve, reject) => {
const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js')); const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
@@ -279,13 +280,19 @@ class LLM {
worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', modelDir: this.ai.options.path}); worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', modelDir: this.ai.options.path});
}); });
}; };
const chunks = this.chunk(target, maxTokens, overlapTokens); let i = 0, chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
return Promise.all(chunks.map(async (text, index) => ({ const next: Function = () => {
index, const index = i++;
embedding: await embed(text), if(index >= chunks.length) return;
text, const text = chunks[index];
tokens: this.estimateTokens(text), return embed(text).then(embedding => {
}))); results.push({index, embedding, text, tokens: this.estimateTokens(text)});
return next();
})
}
await Promise.all(Array(parallel).fill(null).map(() => next()));
return results.toSorted((a, b) => a.index - b.index);
} }
/** /**