Compare commits


3 Commits
0.7.0 ... 0.7.3

SHA1        Message                                           Date
a07f069ad0  One embedding at a time                           2026-02-19 22:58:53 -05:00
da15d299e6  parallel embedding cap                            2026-02-19 21:37:58 -05:00
7ef7c3f676  Cap speaker ID transcript length to 2000 tokens   2026-02-14 09:48:12 -05:00

All three commits passed the Publish Library workflow (Build NPM Project and Tag Version).
3 changed files with 15 additions and 13 deletions

package.json

@@ -1,6 +1,6 @@
 {
   "name": "@ztimson/ai-utils",
-  "version": "0.7.0",
+  "version": "0.7.3",
   "description": "AI Utility library",
   "author": "Zak Timson",
   "license": "MIT",

@@ -40,9 +40,11 @@ export class Audio {
 		if(!this.ai.language.defaultModel) throw new Error('Configure an LLM for advanced ASR speaker detection');
 		p = p.then(async transcript => {
 			if(!transcript) return transcript;
-			const names = await this.ai.language.json(transcript, '{1: "Detected Name"}', {
+			let chunks = this.ai.language.chunk(transcript, 500, 0);
+			if(chunks.length > 4) chunks = [...chunks.slice(0, 3), <string>chunks.at(-1)];
+			const names = await this.ai.language.json(chunks.join('\n'), '{1: "Detected Name"}', {
 				system: 'Use this following transcript to identify speakers. Only identify speakers you are sure about',
-				temperature: 0.2,
+				temperature: 0.1,
 			});
 			Object.entries(names).forEach(([speaker, name]) => {
 				transcript = (<string>transcript).replaceAll(`[Speaker ${speaker}]`, `[${name}]`);
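The commit "Cap speaker ID transcript length to 2000 tokens" bounds the prompt sent to the LLM: the transcript is split into 500-token chunks with no overlap, and at most four chunks are kept, the first three plus the last, roughly 2000 tokens total. A minimal sketch of that capping logic in isolation, assuming a chunk() function with the same signature as this.ai.language.chunk (the helper name capTranscript is hypothetical):

// Hypothetical helper; chunk() stands in for this.ai.language.chunk
function capTranscript(transcript: string, chunk: (text: string, maxTokens: number, overlap: number) => string[]): string {
	// 500-token chunks with no overlap; four chunks approximate the 2000-token cap
	let chunks = chunk(transcript, 500, 0);
	// Keep the opening three chunks and the final chunk, where speakers
	// are most likely to introduce themselves or sign off by name
	if(chunks.length > 4) chunks = [...chunks.slice(0, 3), chunks.at(-1)!];
	return chunks.join('\n');
}

Names tend to surface at the start and end of a conversation, so dropping the middle keeps the prompt small while losing little signal for speaker identification.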

@@ -255,11 +255,11 @@ class LLM {
 	/**
 	 * Create a vector representation of a string
 	 * @param {object | string} target Item that will be embedded (objects get converted)
-	 * @param {number} maxTokens Chunking size. More = better context, less = more specific (Search by paragraphs or lines)
-	 * @param {number} overlapTokens Includes previous X tokens to provide continuity to AI (In addition to max tokens)
+	 * @param {maxTokens?: number, overlapTokens?: number} opts Options for embedding such as chunk sizes
 	 * @returns {Promise<Awaited<{index: number, embedding: number[], text: string, tokens: number}>[]>} Chunked embeddings
 	 */
-	embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
+	async embedding(target: object | string, opts: {maxTokens?: number, overlapTokens?: number} = {}) {
+		let {maxTokens = 500, overlapTokens = 50} = opts;
 		const embed = (text: string): Promise<number[]> => {
 			return new Promise((resolve, reject) => {
 				const worker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
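This hunk changes embedding() from positional chunk-size arguments to an options object, which is breaking for callers that passed sizes positionally; the defaults (500/50) are unchanged, so callers relying on them are unaffected. A sketch of a call site before and after (llm and doc are hypothetical names):

// Hypothetical call site; llm is an instance of the LLM class above
// Before (0.7.0), positional arguments:
//   const vectors = await llm.embedding(doc, 500, 50);
// After (0.7.3), an options object with the same defaults:
const vectors = await llm.embedding(doc, {maxTokens: 500, overlapTokens: 50});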
@@ -279,13 +279,13 @@
 				worker.postMessage({text, model: this.ai.options?.embedder || 'bge-small-en-v1.5', modelDir: this.ai.options.path});
 			});
 		};
-		const chunks = this.chunk(target, maxTokens, overlapTokens);
-		return Promise.all(chunks.map(async (text, index) => ({
-			index,
-			embedding: await embed(text),
-			text,
-			tokens: this.estimateTokens(text),
-		})));
+		const chunks = this.chunk(target, maxTokens, overlapTokens), results: any[] = [];
+		for(let i = 0; i < chunks.length; i++) {
+			const text = chunks[i];
+			const embedding = await embed(text);
+			results.push({index: i, embedding, text, tokens: this.estimateTokens(text)});
+		}
+		return results;
 	}
 	/**
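The "One embedding at a time" commit replaces the Promise.all over every chunk, which spawned one Worker per chunk simultaneously, with a sequential loop, so only a single embedding worker is alive at any moment. Per the commit messages, da15d299e6 first capped the number of parallel embeddings and a07f069ad0 then made them fully serial; the compare view shows only the net result. The pattern in isolation, as a sketch (embedAll and the injected embed callback are hypothetical stand-ins for the code above):

// Hypothetical stand-alone version of the change; embed() stands in for
// the worker-backed embedding call in the diff
async function embedAll(chunks: string[], embed: (text: string) => Promise<number[]>) {
	// Before: every chunk at once, one worker per chunk, all alive together:
	//   return Promise.all(chunks.map(async (text, index) => ({index, embedding: await embed(text), text})));
	// After: one embedding at a time, bounding the footprint to a single worker
	const results: {index: number, embedding: number[], text: string}[] = [];
	for(let i = 0; i < chunks.length; i++) {
		results.push({index: i, embedding: await embed(chunks[i]), text: chunks[i]});
	}
	return results;
}

Sequential execution trades throughput for a bounded memory and process footprint, which matters here because each embed() call spins up its own worker.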