Working speaker detection with advanced LLM-based speaker identification. Improved the LLM JSON function.
All checks were successful
Publish Library / Build NPM Project (push) Successful in 39s
Publish Library / Tag Version (push) Successful in 5s

This commit is contained in:
2026-02-14 09:39:17 -05:00
parent 0360f2493d
commit 4143d00de7
4 changed files with 90 additions and 56 deletions

View File

@@ -75,8 +75,8 @@ export type LLMRequest = {
}
class LLM {
private models: {[model: string]: LLMProvider} = {};
private defaultModel!: string;
defaultModel!: string;
models: {[model: string]: LLMProvider} = {};
constructor(public readonly ai: Ai) {
if(!ai.options.llm?.models) return;
@@ -184,7 +184,12 @@ class LLM {
const system = history[0].role == 'system' ? history[0] : null,
recent = keep == 0 ? [] : history.slice(-keep),
process = (keep == 0 ? history : history.slice(0, -keep)).filter(h => h.role === 'assistant' || h.role === 'user');
const summary: any = await this.json(`Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn't already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI. Match this format: {summary: string, facts: [[subject, fact]]}\n\n${process.map(m => `${m.role}: ${m.content}`).join('\n\n')}`, {model: options?.model, temperature: options?.temperature || 0.3});
const summary: any = await this.json(process.map(m => `${m.role}: ${m.content}`).join('\n\n'), '{summary: string, facts: [[subject, fact]]}', {
system: 'Create the smallest summary possible, no more than 500 tokens. Create a list of NEW facts (split by subject [pro]noun and fact) about what you learned from this conversation that you didn\'t already know or get from a tool call or system prompt. Focus only on new information about people, topics, or facts. Avoid generating facts about the AI.',
model: options?.model,
temperature: options?.temperature || 0.3
});
const timestamp = new Date();
const memory = await Promise.all((summary?.facts || [])?.map(async ([owner, fact]: [string, string]) => {
const e = await Promise.all([this.embedding(owner), this.embedding(`${owner}: ${fact}`)]);
@@ -312,12 +317,16 @@ class LLM {
/**
* Ask a question with JSON response
* @param {string} message Question
* @param {string} text Text to process
* @param {string} schema JSON schema the AI should match
* @param {LLMRequest} options Configuration options and chat history
* @returns {Promise<{} | RegExpExecArray | null>}
*/
async json(message: string, options?: LLMRequest): Promise<any> {
let resp = await this.ask(message, {system: 'Respond using a JSON blob matching any provided examples', ...options});
async json(text: string, schema: string, options?: LLMRequest): Promise<any> {
let resp = await this.ask(text, {...options, system: (options?.system ? `${options.system}\n` : '') + `Only respond using a JSON code block matching this schema:
\`\`\`json
${schema}
\`\`\``});
if(!resp) return {};
const codeBlock = /```(?:.+)?\s*([\s\S]*?)```/.exec(resp);
const jsonStr = codeBlock ? codeBlock[1].trim() : resp;