@@ -1,4 +1,3 @@
-import {pipeline} from '@xenova/transformers';
 import {JSONAttemptParse} from '@ztimson/utils';
 import {Ai} from './ai.ts';
 import {Anthropic} from './antrhopic.ts';
@@ -6,7 +5,9 @@ import {Ollama} from './ollama.ts';
 import {OpenAi} from './open-ai.ts';
 import {AbortablePromise, LLMProvider} from './provider.ts';
 import {AiTool} from './tools.ts';
-import * as tf from '@tensorflow/tfjs';
+import {Worker} from 'worker_threads';
+import {fileURLToPath} from 'url';
+import {dirname, join} from 'path';
 
 export type LLMMessage = {
 	/** Message originator */
@@ -83,11 +84,22 @@ export type LLMRequest = {
 }
 
 export class LLM {
-	private embedModel: any;
+	private embedWorker: Worker | null = null;
+	private embedQueue = new Map<number, { resolve: (value: number[]) => void; reject: (error: any) => void }>();
+	private embedId = 0;
 	private providers: {[key: string]: LLMProvider} = {};
 
 	constructor(public readonly ai: Ai) {
-		this.embedModel = pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');
+		this.embedWorker = new Worker(join(dirname(fileURLToPath(import.meta.url)), 'embedder.js'));
+		this.embedWorker.on('message', ({ id, embedding }) => {
+			const pending = this.embedQueue.get(id);
+			if (pending) {
+				pending.resolve(embedding);
+				this.embedQueue.delete(id);
+			}
+		});
 		if(ai.options.anthropic?.token) this.providers.anthropic = new Anthropic(this.ai, ai.options.anthropic.token, ai.options.anthropic.model);
 		if(ai.options.ollama?.host) this.providers.ollama = new Ollama(this.ai, ai.options.ollama.host, ai.options.ollama.model);
 		if(ai.options.openAi?.token) this.providers.openAi = new OpenAi(this.ai, ai.options.openAi.token, ai.options.openAi.model);
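
Worth flagging in the handler above: nothing ever calls `reject`, so a crashed worker leaves every in-flight embedding promise pending forever. A possible guard, sketched under the assumption that no error handling exists elsewhere (not part of this diff):

```ts
// Sketch: fail any queued embeddings if the worker dies.
this.embedWorker.on('error', err => {
	for(const [id, pending] of this.embedQueue) {
		pending.reject(err);
		this.embedQueue.delete(id);
	}
});
```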
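The constructor also now depends on a sibling `embedder.js` worker script that never appears in this diff. A minimal sketch of what it presumably contains, assuming it reuses the same Xenova pipeline the removed in-process code loaded; the `{id, text}` in / `{id, embedding}` out message shape comes from the handler above, everything else is an assumption:

```ts
// embedder.js (hypothetical sketch) — runs the embedding model off the main thread.
import {parentPort} from 'worker_threads';
import {pipeline} from '@xenova/transformers';

// Load the model once; every message reuses the same pipeline instance.
const model = pipeline('feature-extraction', 'Xenova/all-MiniLM-L6-v2');

parentPort?.on('message', async ({id, text}) => {
	const extractor = await model;
	const output = await extractor(text, {pooling: 'mean', normalize: true});
	// Echo the id back so the main thread resolves the matching promise.
	parentPort?.postMessage({id, embedding: Array.from(output.data)});
});
```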
@@ -148,49 +160,44 @@ export class LLM {
 		return denominator === 0 ? 0 : dotProduct / denominator;
 	}
 
-	embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
+	chunk(target: object | string, maxTokens = 500, overlapTokens = 50): string[] {
 		const objString = (obj: any, path = ''): string[] => {
-			if(obj === null || obj === undefined) return [];
+			if(!obj) return [];
 			return Object.entries(obj).flatMap(([key, value]) => {
 				const p = path ? `${path}${isNaN(+key) ? `.${key}` : `[${key}]`}` : key;
-				if(typeof value === 'object' && value !== null && !Array.isArray(value)) return objString(value, p);
-				return `${p}: ${Array.isArray(value) ? value.join(', ') : value}`;
+				if(typeof value === 'object' && !Array.isArray(value)) return objString(value, p);
+				const valueStr = Array.isArray(value) ? value.join(', ') : String(value);
+				return `${p}: ${valueStr}`;
 			});
 		};
 
-		const embed = async (text: string): Promise<number[]> => {
-			const model = await this.embedModel;
-			const output = await model(text, {pooling: 'mean', normalize: true});
-			return Array.from(output.data);
-		};
-
-		// Tokenize
-		const lines = typeof target === 'object' ? objString(target) : target.split('\n');
-		const tokens = lines.flatMap(line => [...line.split(/\s+/).filter(w => w.trim()), '\n']);
-
-		// Chunk
-		const chunks: string[] = [];
-		let start = 0;
-		while (start < tokens.length) {
-			let end = start;
-			let text = '';
-			// Build chunk
-			while (end < tokens.length) {
-				const nextToken = tokens[end];
-				const testText = text + (text ? ' ' : '') + nextToken;
-				const testTokens = this.estimateTokens(testText.replace(/\s*\n\s*/g, '\n'));
-				if (testTokens > maxTokens && text) break;
-				text = testText;
-				end++;
-			}
-			// Save chunk
-			const cleanText = text.replace(/\s*\n\s*/g, '\n').trim();
-			if(cleanText) chunks.push(cleanText);
-			start = end - overlapTokens;
-			if (start <= end - tokens.length + end) start = end; // Safety: prevent infinite loop
-		}
+		const lines = typeof target === 'object' ? objString(target) : target.split('\n');
+		const tokens = lines.flatMap(l => [...l.split(/\s+/).filter(Boolean), '\n']);
+		const chunks: string[] = [];
+		for(let i = 0; i < tokens.length;) {
+			let text = '', j = i;
+			while(j < tokens.length) {
+				const next = text + (text ? ' ' : '') + tokens[j];
+				if(this.estimateTokens(next.replace(/\s*\n\s*/g, '\n')) > maxTokens && text) break;
+				text = next;
+				j++;
+			}
+			const clean = text.replace(/\s*\n\s*/g, '\n').trim();
+			if(clean) chunks.push(clean);
+			i = j >= tokens.length ? j : Math.max(j - overlapTokens, i + 1); // Overlap chunks while always advancing
+		}
+		return chunks;
+	}
+
+	embedding(target: object | string, maxTokens = 500, overlapTokens = 50) {
+		const embed = (text: string): Promise<number[]> => {
+			return new Promise((resolve, reject) => {
+				const id = this.embedId++;
+				this.embedQueue.set(id, { resolve, reject });
+				this.embedWorker?.postMessage({ id, text });
+			});
+		};
+
+		const chunks = this.chunk(target, maxTokens, overlapTokens);
 
 		return Promise.all(chunks.map(async (text, index) => ({
 			index,
 			embedding: await embed(text),
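
For reference, `objString` flattens a nested object into one `path: value` line per leaf, so object targets get tokenized and chunked exactly like plain text. A small worked example (input invented):

```ts
const target = {user: {name: 'Ada', tags: ['math', 'code']}, active: true};
// objString(target) yields one line per leaf:
//   user.name: Ada
//   user.tags: math, code
//   active: true
// chunk(target) then packs those lines into strings of at most maxTokens
// estimated tokens, overlapping consecutive chunks by overlapTokens words.
```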
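Downstream, the per-chunk embeddings pair naturally with the cosine-similarity method whose closing line appears as context at the top of the last hunk. A hedged usage sketch; `ai` and `longText` are placeholders, and the local `cos` helper stands in for that method since its name isn't visible here:

```ts
const llm = new LLM(ai);

// Embed a document and a query, then rank the document's chunks by similarity.
const chunks = await llm.embedding({title: 'Workers', body: longText});
const [query] = await llm.embedding('how are embeddings computed?');

// Cosine similarity over two equal-length vectors.
const cos = (a: number[], b: number[]) => {
	const dot = a.reduce((sum, v, i) => sum + v * b[i], 0);
	const mag = Math.hypot(...a) * Math.hypot(...b);
	return mag === 0 ? 0 : dot / mag;
};

const ranked = chunks
	.map(c => ({...c, score: cos(query.embedding, c.embedding)}))
	.sort((a, b) => b.score - a.score);
```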