pleroma-ollama-bot/src/memory.ts

/**
 * ADAPTIVE MEMORY SYSTEM FOR FEDIVERSE CHATBOT
 *
 * This system maintains persistent, evolving user profiles to enable personalized
 * interactions across chat sessions. It uses LLM-based analysis to extract and
 * categorize user traits, then builds context for future conversations.
 *
 * ARCHITECTURE:
 * - UserMemory: Core profile (personality, gags, relationships, interests, backstory)
 * - InteractionLog: Historical conversation snapshots with sentiment analysis
 * - JSON string arrays in SQLite for flexible data storage
 *
 * WORKFLOW (see the hypothetical exampleHandleMention sketch near the end of this
 * file for one way these steps can be wired together):
 * 1. Each user message + bot response gets analyzed by Ollama
 * 2. Extract personality traits, running gags, relationship dynamics, etc.
 * 3. Merge new insights with the existing profile (with deduplication)
 * 4. Generate a memory context string for the next conversation's system prompt
 * 5. Log the interaction with sentiment and notable quotes
 *
 * MEMORY CATEGORIES:
 * - personalityTraits: User characteristics (sarcastic, protective, etc.)
 * - runningGags: Recurring jokes, memes, and fake claims between user and bot
 * - relationships: How the user treats the bot (mean, protective, flirty)
 * - interests: Hobbies and topics the user cares about
 * - backstory: Biographical info, "lore" (real or fabricated)
 *
 * CURRENT LIMITATIONS:
 * - No memory aging/decay - old info persists indefinitely
 * - Simple deduplication - similar but not identical entries accumulate
 * - No relevance scoring - stale assumptions carry the same weight as recent ones
 * - Fixed array limits may truncate important long-term patterns
 *
 * RECOMMENDED IMPROVEMENTS (a hypothetical sketch of the first item appears at the
 * end of this file):
 * - Add timestamp-based relevance weighting
 * - Implement semantic similarity checks for better deduplication
 * - Add contradiction detection to update outdated assumptions
 * - Consider LRU-style eviction instead of simple truncation
 */
// Updated memory.ts with JSON string handling for SQLite
import { prisma } from "./main.js";
import { envConfig } from "./main.js";
import { InteractionAnalysis, MemoryUpdateRequest, OllamaChatRequest, OllamaChatResponse } from "../types.js";
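
// The Prisma models themselves live in schema.prisma (not shown in this file). For
// reference, the rows this module reads and writes are assumed to look roughly like
// the shapes below; field names are inferred from how they are used further down,
// so treat them as illustrative rather than authoritative.
type UserMemoryRowSketch = {
  userFqn: string;
  personalityTraits: string;      // JSON-encoded string[]
  runningGags: string;            // JSON-encoded string[]
  relationships: string;          // JSON-encoded string[]
  interests: string;              // JSON-encoded string[]
  backstory: string;              // JSON-encoded string[]
  lastInteractionSummary: string | null;
  interactionCount: number;
};

type InteractionLogRowSketch = {
  userFqn: string;
  conversationSnapshot: string;
  sentiment: string;
  extractedTopics: string;        // JSON-encoded string[]
  memorableQuotes: string;        // JSON-encoded string[]
  botEmotionalState: string;
};
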
// Helper functions for JSON string array handling
const parseJsonArray = (jsonString: string): string[] => {
  try {
    const parsed = JSON.parse(jsonString);
    return Array.isArray(parsed) ? parsed : [];
  } catch {
    return [];
  }
};

const stringifyJsonArray = (array: string[]): string => {
  return JSON.stringify(array);
};
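
// Usage sketch (illustrative values, not part of the module's control flow):
//   stringifyJsonArray(["sarcastic", "protective"])  -> '["sarcastic","protective"]'
//   parseJsonArray('["sarcastic","protective"]')     -> ["sarcastic", "protective"]
//   parseJsonArray("not valid json")                 -> []  (corrupt rows degrade to an empty list)
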
/**
 * Analyze a conversation to extract user personality, gags, and relationship dynamics
 */
const analyzeInteraction = async (
  userMessage: string,
  botResponse: string,
  userFqn: string
): Promise<InteractionAnalysis> => {
  const { ollamaUrl, ollamaModel } = envConfig;

  const analysisPrompt = `Analyze this conversation between a user and a cute female AI chatbot named Lexi. Extract personality traits, running gags, relationship dynamics, and interesting facts.
User (${userFqn}): ${userMessage}
Bot (Lexi): ${botResponse}
Please analyze and respond with a JSON object containing:
{
"sentiment": "positive|negative|neutral|teasing|flirty|aggressive",
"topics": ["topic1", "topic2"],
"personalityObservations": ["trait1", "trait2"],
"runningGagUpdates": ["gag1", "gag2"],
"relationshipUpdates": ["relationship_change1"],
"interestMentions": ["interest1", "interest2"],
"backstoryElements": ["fact1", "fact2"],
"memorableQuotes": ["quote1", "quote2"]
}
Focus on:
- Personality traits (sarcastic, teasing, protective, joker, etc.)
- Running gags and memes (fake claims, recurring jokes, etc.)
- How they treat the bot (mean, nice, flirty, protective)
- Interests and hobbies mentioned
- Any biographical info (real or fake "lore")
- Memorable or funny quotes
Keep entries brief and specific. Empty arrays are fine if nothing notable.`;

  try {
    const analysisRequest: OllamaChatRequest = {
      model: ollamaModel,
      messages: [
        {
          role: "system",
          content: "You are an expert at analyzing social interactions and extracting personality insights. Always respond with valid JSON only."
        },
        { role: "user", content: analysisPrompt }
      ],
      stream: false,
      options: {
        temperature: 0.3, // Lower temperature for more consistent analysis
        num_predict: 800,
      }
    };

    const response = await fetch(`${ollamaUrl}/api/chat`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(analysisRequest),
    });

    if (!response.ok) {
      throw new Error(`Analysis request failed: ${response.statusText}`);
    }

    const analysisResponse: OllamaChatResponse = await response.json();

    try {
      // Parse the JSON response
      const analysis: InteractionAnalysis = JSON.parse(analysisResponse.message.content.trim());
      return analysis;
    } catch (parseError) {
      console.error("Failed to parse analysis JSON:", analysisResponse.message.content);
      // Return default analysis if parsing fails
      return {
        sentiment: 'neutral',
        topics: [],
        personalityObservations: [],
        runningGagUpdates: [],
        relationshipUpdates: [],
        interestMentions: [],
        backstoryElements: [],
        memorableQuotes: []
      };
    }
  } catch (error: any) {
    console.error(`Error analyzing interaction: ${error.message}`);
    return {
      sentiment: 'neutral',
      topics: [],
      personalityObservations: [],
      runningGagUpdates: [],
      relationshipUpdates: [],
      interestMentions: [],
      backstoryElements: [],
      memorableQuotes: []
    };
  }
};
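
/**
 * Hypothetical hardening for the JSON.parse call above (not used by this module as
 * written): chat models sometimes wrap their JSON in markdown code fences or add
 * surrounding prose, so pulling out the outermost {...} before parsing can rescue
 * replies that would otherwise fall through to the default analysis.
 */
const extractJsonObject = (raw: string): string => {
  const start = raw.indexOf("{");
  const end = raw.lastIndexOf("}");
  return start !== -1 && end > start ? raw.slice(start, end + 1) : raw;
};
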
/**
 * Get or create user memory profile
 */
const getUserMemory = async (userFqn: string) => {
  try {
    let memory = await prisma.userMemory.findUnique({
      where: { userFqn: userFqn }
    });

    if (!memory) {
      memory = await prisma.userMemory.create({
        data: {
          userFqn: userFqn,
          personalityTraits: stringifyJsonArray([]),
          runningGags: stringifyJsonArray([]),
          relationships: stringifyJsonArray([]),
          interests: stringifyJsonArray([]),
          backstory: stringifyJsonArray([]),
          lastInteractionSummary: null,
          interactionCount: 0,
        }
      });
    }

    return memory;
  } catch (error: any) {
    console.error(`Error getting user memory: ${error.message}`);
    return null;
  }
};
/**
 * Update user memory with new interaction insights
 */
const updateUserMemory = async (request: MemoryUpdateRequest): Promise<void> => {
  try {
    const { userFqn, conversationContent, botResponse, analysis } = request;

    // Get existing memory
    const existingMemory = await getUserMemory(userFqn);
    if (!existingMemory) return;

    // Parse existing JSON arrays
    const existingPersonality = parseJsonArray(existingMemory.personalityTraits);
    const existingGags = parseJsonArray(existingMemory.runningGags);
    const existingRelationships = parseJsonArray(existingMemory.relationships);
    const existingInterests = parseJsonArray(existingMemory.interests);
    const existingBackstory = parseJsonArray(existingMemory.backstory);

    // Merge new observations with existing ones (avoiding duplicates)
    const mergeArrays = (existing: string[], newItems: string[]): string[] => {
      const combined = [...existing, ...newItems];
      return [...new Set(combined)]; // Remove duplicates
    };

    // Limit array sizes to prevent memory bloat
    const limitArray = (arr: string[], maxSize: number = 20): string[] => {
      return arr.slice(-maxSize); // Keep most recent items
    };

    const updatedMemory = {
      personalityTraits: stringifyJsonArray(limitArray(mergeArrays(existingPersonality, analysis.personalityObservations))),
      runningGags: stringifyJsonArray(limitArray(mergeArrays(existingGags, analysis.runningGagUpdates))),
      relationships: stringifyJsonArray(limitArray(mergeArrays(existingRelationships, analysis.relationshipUpdates))),
      interests: stringifyJsonArray(limitArray(mergeArrays(existingInterests, analysis.interestMentions))),
      backstory: stringifyJsonArray(limitArray(mergeArrays(existingBackstory, analysis.backstoryElements))),
      lastInteractionSummary: `${analysis.sentiment} conversation about ${analysis.topics.join(', ') || 'general chat'}`,
      interactionCount: existingMemory.interactionCount + 1,
    };

    // Update database
    await prisma.userMemory.update({
      where: { userFqn: userFqn },
      data: updatedMemory
    });

    // Log the interaction for historical reference
    await prisma.interactionLog.create({
      data: {
        userFqn: userFqn,
        conversationSnapshot: `${userFqn}: ${conversationContent.slice(0, 200)}... | Lexi: ${botResponse.slice(0, 200)}...`,
        sentiment: analysis.sentiment,
        extractedTopics: stringifyJsonArray(analysis.topics),
        memorableQuotes: stringifyJsonArray(analysis.memorableQuotes),
        botEmotionalState: generateEmotionalState(analysis),
      }
    });

    console.log(`Updated memory for ${userFqn}: ${analysis.personalityObservations.join(', ')}`);
  } catch (error: any) {
    console.error(`Error updating user memory: ${error.message}`);
  }
};
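
// Note on the merge above (illustrative): the Set-based dedup only removes exact string
// duplicates, e.g. merging ["loves cats"] with ["Loves cats", "loves cats"] keeps both
// "loves cats" and "Loves cats" -- the "simple deduplication" limitation called out in
// the file header.
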
/**
 * Generate how the bot should "feel" about this interaction
 */
const generateEmotionalState = (analysis: InteractionAnalysis): string => {
  const { sentiment, relationshipUpdates } = analysis;

  if (sentiment === 'teasing') return 'playfully_hurt';
  if (sentiment === 'flirty') return 'flustered';
  if (sentiment === 'aggressive') return 'sad';
  if (relationshipUpdates.some(rel => rel.includes('hurt') || rel.includes('mean'))) return 'hurt_feelings';
  if (relationshipUpdates.some(rel => rel.includes('cute') || rel.includes('sweet'))) return 'happy';

  return 'neutral';
};
/**
 * Generate memory context for system prompt
 */
const generateMemoryContext = async (userFqn: string): Promise<string> => {
  try {
    const memory = await getUserMemory(userFqn);
    if (!memory || memory.interactionCount === 0) {
      return "";
    }

    let context = `\n\n--- User Memory for ${userFqn} ---\n`;

    const personalityTraits = parseJsonArray(memory.personalityTraits);
    const runningGags = parseJsonArray(memory.runningGags);
    const relationships = parseJsonArray(memory.relationships);
    const interests = parseJsonArray(memory.interests);
    const backstory = parseJsonArray(memory.backstory);

    if (personalityTraits.length > 0) {
      context += `Personality: ${personalityTraits.join(', ')}\n`;
    }
    if (runningGags.length > 0) {
      context += `Running gags: ${runningGags.join(', ')}\n`;
    }
    if (relationships.length > 0) {
      context += `Our relationship: ${relationships.join(', ')}\n`;
    }
    if (interests.length > 0) {
      context += `Interests: ${interests.join(', ')}\n`;
    }
    if (backstory.length > 0) {
      context += `Background: ${backstory.join(', ')}\n`;
    }
    if (memory.lastInteractionSummary) {
      context += `Last time we talked: ${memory.lastInteractionSummary}\n`;
    }

    context += `Total conversations: ${memory.interactionCount}`;

    return context;
  } catch (error: any) {
    console.error(`Error generating memory context: ${error.message}`);
    return "";
  }
};
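
/**
 * Hypothetical end-to-end wiring of the workflow described in the file header
 * (steps 1-5). This is an illustration only and is not exported: `getBotReply`
 * stands in for whatever function builds the full system prompt and calls Ollama
 * for Lexi's actual reply, and MemoryUpdateRequest is assumed to carry exactly the
 * four fields destructured in updateUserMemory above.
 */
const exampleHandleMention = async (
  userFqn: string,
  userMessage: string,
  getBotReply: (memoryContext: string) => Promise<string>
): Promise<string> => {
  const memoryContext = await generateMemoryContext(userFqn);                    // step 4 (built from prior sessions)
  const botResponse = await getBotReply(memoryContext);                          // reply generated with memory in the system prompt
  const analysis = await analyzeInteraction(userMessage, botResponse, userFqn);  // steps 1-2
  await updateUserMemory({ userFqn, conversationContent: userMessage, botResponse, analysis }); // steps 3 and 5
  return botResponse;
};
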
export {
  analyzeInteraction,
  updateUserMemory,
  getUserMemory,
  generateMemoryContext,
  parseJsonArray,
  stringifyJsonArray,
};
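
/**
 * Hypothetical sketch of the "timestamp-based relevance weighting" improvement listed
 * in the file header. Nothing below is wired into the module: it assumes memory entries
 * would be stored as { text, lastSeen } objects instead of bare strings, and shows one
 * way stale entries could be down-weighted and evicted instead of simply truncated.
 */
type WeightedEntry = {
  text: string;
  lastSeen: number; // epoch millis of the last interaction that mentioned this entry
};

// Exponential decay: weight 1.0 when fresh, ~0.5 after `halfLifeDays` days.
const relevanceWeight = (entry: WeightedEntry, now: number, halfLifeDays = 30): number => {
  const ageDays = (now - entry.lastSeen) / (1000 * 60 * 60 * 24);
  return Math.pow(0.5, ageDays / halfLifeDays);
};

// Keep the `maxSize` most relevant entries rather than the newest 20.
const pruneByRelevance = (entries: WeightedEntry[], maxSize = 20, now = Date.now()): WeightedEntry[] => {
  return [...entries]
    .sort((a, b) => relevanceWeight(b, now) - relevanceWeight(a, now))
    .slice(0, maxSize);
};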