Compare commits

6 Commits

tyler ... 2a53b0a827
| Author | SHA1 | Date |
|---|---|---|
| | 2a53b0a827 | |
| | 051a66ff26 | |
| | ee367a0d9a | |
| | e696343a73 | |
| | 88a0710c55 | |
| | 75fa4cea8b | |

.gitignore (2 changes, vendored)
```diff
@@ -1,6 +1,6 @@
 node_modules
 # Keep environment variables out of version control
-.env
+.env*
 *.log
 *.db
 /dist
```

```diff
@@ -27,49 +27,4 @@ model User {
   id              Int       @id @default(autoincrement())
   userFqn         String    @unique
   lastRespondedTo DateTime?
-  memory          UserMemory?
 }
-
-model Reaction {
-  id         Int      @id @default(autoincrement())
-  statusId   String   // The Pleroma status ID we reacted to
-  emojiName  String   // The emoji we used to react
-  reactedAt  DateTime @default(now())
-  createdAt  DateTime @default(now())
-
-  @@unique([statusId]) // Prevent multiple reactions to same status
-  @@map("reactions")
-}
-
-model UserMemory {
-  id                     Int      @id @default(autoincrement())
-  userFqn                String   @unique
-  personalityTraits      String   @default("[]") // JSON string of personality observations
-  runningGags            String   @default("[]") // JSON string of running jokes/gags
-  relationships          String   @default("[]") // JSON string of relationship dynamics with bot
-  interests              String   @default("[]") // JSON string of user interests
-  backstory              String   @default("[]") // JSON string of biographical elements
-  lastInteractionSummary String?  // Brief summary of last chat
-  interactionCount       Int      @default(0)
-  lastUpdated            DateTime @default(now()) @updatedAt
-  createdAt              DateTime @default(now())
-
-  // Relation to existing User model
-  user User @relation(fields: [userFqn], references: [userFqn])
-
-  @@map("user_memories")
-}
-
-model InteractionLog {
-  id                   Int      @id @default(autoincrement())
-  userFqn              String
-  conversationSnapshot String   // Key parts of the conversation
-  sentiment            String   // positive, negative, teasing, etc.
-  extractedTopics      String   @default("[]") // JSON string of topics discussed
-  memorableQuotes      String   @default("[]") // JSON string of funny/notable quotes
-  botEmotionalState    String?  // How the bot should "feel" about this interaction
-  createdAt            DateTime @default(now())
-
-  @@map("interaction_logs")
-  @@index([userFqn, createdAt])
-}
```
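The hunk above is from a Prisma schema file: the compare drops the `Reaction`, `UserMemory`, and `InteractionLog` models, along with the `memory` relation on `User`. Those models stored string arrays as JSON text because SQLite has no array column type. A minimal round-trip sketch of that pattern, using the `parseJsonArray`/`stringifyJsonArray` helpers that appear later in the removed `src/memory.ts`:

```ts
// Helpers copied from the removed src/memory.ts: arrays live in TEXT columns as JSON.
const parseJsonArray = (jsonString: string): string[] => {
  try {
    const parsed = JSON.parse(jsonString);
    return Array.isArray(parsed) ? parsed : [];
  } catch {
    return []; // malformed rows degrade to an empty list instead of throwing
  }
};

const stringifyJsonArray = (array: string[]): string => JSON.stringify(array);

// Round trip for a column like personalityTraits, which defaults to "[]".
const stored = stringifyJsonArray(["sarcastic", "protective"]);
console.log(stored);                 // '["sarcastic","protective"]'
console.log(parseJsonArray(stored)); // [ 'sarcastic', 'protective' ]
```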
src/api.ts (331 changes)
```diff
@@ -1,9 +1,5 @@
import { envConfig, prisma } from "./main.js";
import { PleromaEmoji, Notification, ContextResponse } from "../types.js";
import { selectRandomEmojis } from "./util.js";
import { getUserMemory, parseJsonArray, stringifyJsonArray } from "./memory.js";



const getNotifications = async () => {
  const { bearerToken, pleromaInstanceUrl } = envConfig;
@@ -102,336 +98,9 @@ const deleteNotification = async (notification: Notification) => {
  }
};


/**
 * React to a status with a random emoji
 */
const reactToStatus = async (statusId: string, emojiName: string): Promise<boolean> => {
  const { bearerToken, pleromaInstanceUrl } = envConfig;

  try {
    const response = await fetch(
      `${pleromaInstanceUrl}/api/v1/statuses/${statusId}/react/${emojiName}`,
      {
        method: "PUT",
        headers: {
          Authorization: `Bearer ${bearerToken}`,
          "Content-Type": "application/json",
        },
      }
    );

    if (!response.ok) {
      console.error(`Failed to react to status ${statusId}: ${response.status} - ${response.statusText}`);
      return false;
    }

    return true;
  } catch (error: any) {
    console.error(`Error reacting to status ${statusId}: ${error.message}`);
    return false;
  }
};

/**
 * Check if we've already reacted to a status
 */
const hasAlreadyReacted = async (statusId: string): Promise<boolean> => {
  try {
    const reaction = await prisma.reaction.findFirst({
      where: { statusId: statusId },
    });
    return !!reaction;
  } catch (error: any) {
    console.error(`Error checking reaction status: ${error.message}`);
    return true; // Assume we've reacted to avoid spamming on error
  }
};

/**
 * Record that we've reacted to a status
 */
const recordReaction = async (statusId: string, emojiName: string): Promise<void> => {
  try {
    await prisma.reaction.create({
      data: {
        statusId: statusId,
        emojiName: emojiName,
        reactedAt: new Date(),
      },
    });
  } catch (error: any) {
    console.error(`Error recording reaction: ${error.message}`);
  }
};

/**
 * Decide whether to react to a post (not every post gets a reaction)
 */
const shouldReactToPost = (): boolean => {
  // React to roughly 30% of posts
  return Math.random() < 0.3;
};

/**
 * Get appropriate reaction emojis based on content sentiment/keywords
 */
const getContextualEmoji = (content: string, availableEmojis: string[]): string => {
  const contentLower = content.toLowerCase();

  // Define emoji categories with keywords
  const emojiCategories = {
    positive: ['happy', 'smile', 'joy', 'love', 'heart', 'thumbsup', 'fire', 'based'],
    negative: ['sad', 'cry', 'angry', 'rage', 'disappointed', 'cringe'],
    thinking: ['think', 'hmm', 'brain', 'smart', 'curious'],
    laughing: ['laugh', 'lol', 'kek', 'funny', 'haha', 'rofl'],
    agreement: ['yes', 'agree', 'nod', 'correct', 'true', 'based'],
    surprise: ['wow', 'amazing', 'surprised', 'shock', 'omg'],
  };

  // Keywords that might indicate sentiment
  const sentimentKeywords = {
    positive: ['good', 'great', 'awesome', 'nice', 'love', 'happy', 'excellent', 'perfect'],
    negative: ['bad', 'terrible', 'hate', 'awful', 'horrible', 'worst', 'sucks'],
    funny: ['lol', 'haha', 'funny', 'hilarious', 'joke', 'meme'],
    question: ['?', 'what', 'how', 'why', 'when', 'where'],
    agreement: ['yes', 'exactly', 'true', 'right', 'correct', 'agree'],
    thinking: ['think', 'consider', 'maybe', 'perhaps', 'hmm', 'interesting'],
  };

  // Check content sentiment and find matching emojis
  for (const [sentiment, keywords] of Object.entries(sentimentKeywords)) {
    if (keywords.some(keyword => contentLower.includes(keyword))) {
      const categoryEmojis = emojiCategories[sentiment as keyof typeof emojiCategories];
      if (categoryEmojis) {
        const matchingEmojis = availableEmojis.filter(emoji =>
          categoryEmojis.some(cat => emoji.toLowerCase().includes(cat))
        );
        if (matchingEmojis.length > 0) {
          return matchingEmojis[Math.floor(Math.random() * matchingEmojis.length)];
        }
      }
    }
  }

  // Fallback to random emoji from a curated list of common reactions
  const commonReactions = availableEmojis.filter(emoji =>
    ['heart', 'thumbsup', 'fire', 'kek', 'based', 'think', 'smile', 'laugh']
      .some(common => emoji.toLowerCase().includes(common))
  );

  if (commonReactions.length > 0) {
    return commonReactions[Math.floor(Math.random() * commonReactions.length)];
  }

  // Final fallback to any random emoji
  return availableEmojis[Math.floor(Math.random() * availableEmojis.length)];
};

/**
 * Main function to handle post reactions
 */
const handlePostReaction = async (notification: Notification): Promise<void> => {
  try {
    const statusId = notification.status.id;

    // Check if we should react to this post
    if (!shouldReactToPost()) {
      return;
    }

    // Check if we've already reacted
    if (await hasAlreadyReacted(statusId)) {
      return;
    }

    // Get available emojis
    const emojiList = await getInstanceEmojis();
    if (!emojiList || emojiList.length === 0) {
      return;
    }

    // Select a smaller random pool for reactions (5-10 emojis)
    const reactionPool = selectRandomEmojis(emojiList, 8);

    // Get contextual emoji based on post content
    const selectedEmoji = getContextualEmoji(
      notification.status.pleroma.content["text/plain"],
      reactionPool
    );

    // React to the post
    const success = await reactToStatus(statusId, selectedEmoji);

    if (success) {
      await recordReaction(statusId, selectedEmoji);
      console.log(`Reacted to status ${statusId} with :${selectedEmoji}:`);
    }

  } catch (error: any) {
    console.error(`Error handling post reaction: ${error.message}`);
  }
};


/**
 * Get detailed user memory for admin/debugging
 */
const getUserMemoryDetails = async (userFqn: string) => {
  try {
    const memory = await prisma.userMemory.findUnique({
      where: { userFqn: userFqn },
      include: {
        user: true
      }
    });

    if (!memory) return null;

    // Get recent interaction logs
    const recentLogs = await prisma.interactionLog.findMany({
      where: { userFqn: userFqn },
      orderBy: { createdAt: 'desc' },
      take: 10
    });

    // Parse JSON strings for better readability
    const parsedMemory = {
      ...memory,
      personalityTraits: parseJsonArray(memory.personalityTraits),
      runningGags: parseJsonArray(memory.runningGags),
      relationships: parseJsonArray(memory.relationships),
      interests: parseJsonArray(memory.interests),
      backstory: parseJsonArray(memory.backstory),
      recentInteractions: recentLogs.map(log => ({
        ...log,
        extractedTopics: parseJsonArray(log.extractedTopics),
        memorableQuotes: parseJsonArray(log.memorableQuotes)
      }))
    };

    return parsedMemory;
  } catch (error: any) {
    console.error(`Error getting user memory details: ${error.message}`);
    return null;
  }
};

/**
 * Manually add or remove memory elements (for admin use)
 */
const modifyUserMemory = async (
  userFqn: string,
  action: 'add' | 'remove',
  category: 'personalityTraits' | 'runningGags' | 'relationships' | 'interests' | 'backstory',
  item: string
) => {
  try {
    const memory = await getUserMemory(userFqn);
    if (!memory) return false;

    const currentArray = parseJsonArray(memory[category] as string);
    let updatedArray: string[];

    if (action === 'add') {
      updatedArray = [...new Set([...currentArray, item])]; // Add without duplicates
    } else {
      updatedArray = currentArray.filter(existingItem => existingItem !== item);
    }

    await prisma.userMemory.update({
      where: { userFqn: userFqn },
      data: { [category]: stringifyJsonArray(updatedArray) }
    });

    console.log(`${action === 'add' ? 'Added' : 'Removed'} "${item}" ${action === 'add' ? 'to' : 'from'} ${category} for ${userFqn}`);
    return true;
  } catch (error: any) {
    console.error(`Error modifying user memory: ${error.message}`);
    return false;
  }
};


const getMemoryStats = async () => {
  try {
    const totalUsers = await prisma.userMemory.count();
    const totalInteractions = await prisma.interactionLog.count();

    const mostActiveUsers = await prisma.userMemory.findMany({
      orderBy: { interactionCount: 'desc' },
      take: 10,
      select: {
        userFqn: true,
        interactionCount: true,
        personalityTraits: true,
        runningGags: true
      }
    });

    // Parse JSON strings for the active users
    const parsedActiveUsers = mostActiveUsers.map(user => ({
      ...user,
      personalityTraits: parseJsonArray(user.personalityTraits),
      runningGags: parseJsonArray(user.runningGags)
    }));

    const sentimentStats = await prisma.interactionLog.groupBy({
      by: ['sentiment'],
      _count: { sentiment: true }
    });

    return {
      totalUsers,
      totalInteractions,
      mostActiveUsers: parsedActiveUsers,
      sentimentDistribution: sentimentStats
    };
  } catch (error: any) {
    console.error(`Error getting memory stats: ${error.message}`);
    return null;
  }
};

const resetUserMemory = async (userFqn: string) => {
  try {
    await prisma.userMemory.update({
      where: { userFqn: userFqn },
      data: {
        personalityTraits: stringifyJsonArray([]),
        runningGags: stringifyJsonArray([]),
        relationships: stringifyJsonArray([]),
        interests: stringifyJsonArray([]),
        backstory: stringifyJsonArray([]),
        lastInteractionSummary: null,
        interactionCount: 0,
      }
    });

    // Optionally delete interaction logs too
    await prisma.interactionLog.deleteMany({
      where: { userFqn: userFqn }
    });

    console.log(`Reset memory for ${userFqn}`);
    return true;
  } catch (error: any) {
    console.error(`Error resetting user memory: ${error.message}`);
    return false;
  }
};


export {
  deleteNotification,
  getInstanceEmojis,
  getNotifications,
  getStatusContext,
  reactToStatus,
  handlePostReaction,
  hasAlreadyReacted,
  getUserMemoryDetails,
  modifyUserMemory,
  getMemoryStats,
  resetUserMemory,
};
```
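The hunk header (-102,336 +98,9) shows this is a wholesale removal: everything from `reactToStatus` down is deleted, leaving only the first four exports. The core of the removed reaction feature was a single Pleroma API call; a minimal standalone sketch, with the instance URL and token as illustrative placeholders rather than anything from the diff:

```ts
// Sketch of the removed reaction call: PUT /api/v1/statuses/:id/react/:emoji.
// PLEROMA_URL and TOKEN are placeholders, not values from the codebase.
const PLEROMA_URL = "https://example.social";
const TOKEN = "replace-with-bearer-token";

const react = async (statusId: string, emojiName: string): Promise<boolean> => {
  const response = await fetch(
    `${PLEROMA_URL}/api/v1/statuses/${statusId}/react/${emojiName}`,
    {
      method: "PUT",
      headers: { Authorization: `Bearer ${TOKEN}` },
    }
  );
  return response.ok; // mirrors reactToStatus: success is just a 2xx response
};
```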
src/main.ts (360 changes)
```diff
@@ -4,34 +4,21 @@ import {
  OllamaConfigOptions,
  OllamaChatRequest,
  OllamaChatResponse,
  PostAncestorsForModel,
  // PostAncestorsForModel,
} from "../types.js";
// import striptags from "striptags";
import { PrismaClient } from "../generated/prisma/client.js";
import {
  getInstanceEmojis,
  deleteNotification,
  getNotifications,
  getStatusContext,
  handlePostReaction,
} from "./api.js";
import { storeUserData, storePromptData } from "./prisma.js";
import {
  isFromWhitelistedDomain,
  alreadyRespondedTo,
  recordPendingResponse,
  // trimInputData,
  // selectRandomEmoji,
  selectRandomEmojis,
  isLLMRefusal,
  shouldContinue,
  processConversationHistory,
} from "./util.js";
import {
  analyzeInteraction,
  updateUserMemory,
  generateMemoryContext,
} from "./memory.js";

export const prisma = new PrismaClient();

@@ -43,7 +30,7 @@ export const envConfig = {
    ? process.env.WHITELISTED_DOMAINS.split(",")
    : [process.env.PLEROMA_INSTANCE_DOMAIN],
  ollamaUrl: process.env.OLLAMA_URL || "",
  ollamaSystemPrompt: process.env.OLLAMA_SYSTEM_PROMPT,
  ollamaSystemPrompt: process.env.OLLAMA_SYSTEM_PROMPT || "",
  ollamaModel: process.env.OLLAMA_MODEL || "",
  fetchInterval: process.env.FETCH_INTERVAL
    ? parseInt(process.env.FETCH_INTERVAL)
@@ -57,19 +44,19 @@
};

const ollamaConfig: OllamaConfigOptions = {
  temperature: 0.6,
  top_p: 0.85,
  temperature: 0.85, // Increased from 0.6 - more creative and varied
  top_p: 0.9, // Slightly increased for more diverse responses
  top_k: 40,
  num_ctx: 8192,
  repeat_penalty: 1.1,
  num_ctx: 16384,
  repeat_penalty: 1.1, // Reduced from 1.15 - less mechanical
  // stop: ['<|im_end|>', '\n\n']
};

// this could be helpful
// https://replicate.com/blog/how-to-prompt-llama

const generateOllamaRequest = async (
  notification: Notification,
  retryAttempt: number = 0
  notification: Notification
): Promise<OllamaChatResponse | undefined> => {
  const {
    whitelistOnly,
@@ -78,146 +65,158 @@ const generateOllamaRequest = async (
    ollamaUrl,
    replyWithContext,
  } = envConfig;

  let shouldDeleteNotification = false;

  try {
    if (shouldContinue(notification)) {
      if (whitelistOnly && !isFromWhitelistedDomain(notification)) {
        await deleteNotification(notification);
        return;
      }
      if (await alreadyRespondedTo(notification)) {
        return;
      }
      await recordPendingResponse(notification);
      await storeUserData(notification);

      const userFqn = notification.status.account.fqn;
      const userMessage = notification.status.pleroma.content["text/plain"];

      let conversationHistory: PostAncestorsForModel[] = [];
      let processedContext = "";

      if (replyWithContext) {
        const contextPosts = await getStatusContext(notification.status.id);
        if (!contextPosts?.ancestors || !contextPosts) {
          throw new Error(`Unable to obtain post context ancestors.`);
        }
        conversationHistory = contextPosts.ancestors.map((ancestor) => {
          const mentions = ancestor.mentions.map((mention) => mention.acct);
          return {
            account_fqn: ancestor.account.fqn,
            mentions,
            plaintext_content: ancestor.pleroma.content["text/plain"],
          };
        });

        // Process context - summarize if too long
        processedContext = await processConversationHistory(conversationHistory);
      }

      const formattedUserMessage = `${userFqn} says: ${userMessage}`;

      // Get user memory context
      const memoryContext = await generateMemoryContext(userFqn);

      // Get random emojis for this request
      const emojiList = await getInstanceEmojis();
      let availableEmojis = "";
      if (emojiList && emojiList.length > 0) {
        const randomEmojis = selectRandomEmojis(emojiList, 20);
        availableEmojis = `\n\nAvailable custom emojis you can use in your response (format as :emoji_name:): ${randomEmojis.join(", ")}`;
      }

      let systemContent = ollamaSystemPrompt + memoryContext + availableEmojis;

      if (replyWithContext) {
        systemContent = `${ollamaSystemPrompt}${memoryContext}\n\nPrevious conversation context:\n${processedContext}\nReply as if you are a party to the conversation. If '@nice-ai' is mentioned, respond directly. Prefix usernames with '@' when addressing them.${availableEmojis}`;
      }

      // Use different seeds for retry attempts
      const currentConfig = {
        ...ollamaConfig,
        seed: retryAttempt > 0 ? Math.floor(Math.random() * 1000000) : ollamaConfig.seed,
      };

      const ollamaRequestBody: OllamaChatRequest = {
        model: ollamaModel,
        messages: [
          { role: "system", content: systemContent as string },
          { role: "user", content: formattedUserMessage },
        ],
        stream: false,
        options: currentConfig,
      };

      const response = await fetch(`${ollamaUrl}/api/chat`, {
        method: "POST",
        body: JSON.stringify(ollamaRequestBody),
      });
      const ollamaResponse: OllamaChatResponse = await response.json();

      // Check for refusal and retry up to 2 times
      if (isLLMRefusal(ollamaResponse.message.content) && retryAttempt < 2) {
        console.log(`LLM refused to answer (attempt ${retryAttempt + 1}), retrying with different seed...`);
        return generateOllamaRequest(notification, retryAttempt + 1);
      }

      // Analyze interaction and update user memory (async, don't block response)
      analyzeAndUpdateMemory(userFqn, userMessage, ollamaResponse.message.content);

      await storePromptData(notification, ollamaResponse);
      return ollamaResponse;
    if (!shouldContinue(notification)) {
      shouldDeleteNotification = true;
      return;
    }
  } catch (error: any) {
    throw new Error(error.message);
  }
};

/**
 * Analyze interaction and update user memory (runs asynchronously)
 */
const analyzeAndUpdateMemory = async (
  userFqn: string,
  userMessage: string,
  botResponse: string
): Promise<void> => {
  try {
    // Run analysis in background - don't await to avoid blocking response
    const analysis = await analyzeInteraction(userMessage, botResponse, userFqn);

    await updateUserMemory({
      userFqn,
      conversationContent: userMessage,
      botResponse,
      analysis,
    if (whitelistOnly && !isFromWhitelistedDomain(notification)) {
      shouldDeleteNotification = true;
      return;
    }

    if (await alreadyRespondedTo(notification)) {
      shouldDeleteNotification = true;
      return;
    }

    await recordPendingResponse(notification);
    await storeUserData(notification);

    let conversationContext = "";
    if (replyWithContext) {
      const contextPosts = await getStatusContext(notification.status.id);
      if (!contextPosts?.ancestors) {
        throw new Error(`Unable to obtain post context ancestors.`);
      }

      // Build a human-readable conversation thread
      const allPosts = [...contextPosts.ancestors];

      // Include descendants (follow-up posts) if available
      if (contextPosts.descendents && contextPosts.descendents.length > 0) {
        allPosts.push(...contextPosts.descendents);
      }

      if (allPosts.length > 0) {
        const conversationLines = allPosts.map((post) => {
          const author = post.account.fqn;
          const content = post.pleroma.content["text/plain"];
          const replyingTo = post.in_reply_to_account_id
            ? ` (replying to another message)`
            : "";
          return `[@${author}${replyingTo}]: ${content}`;
        });

        conversationContext = `
Previous conversation thread:
${conversationLines.join("\n\n")}
---
`;
      }
    }

    const userMessage = notification.status.pleroma.content["text/plain"];
    const originalAuthor = notification.account.fqn;

    let systemContent = ollamaSystemPrompt;
    if (replyWithContext && conversationContext) {
      systemContent = `${ollamaSystemPrompt}

${conversationContext}
Current message from @${originalAuthor}:
"${userMessage}"

Instructions:
- You are replying to @${originalAuthor}
- Address them directly if appropriate
- Use markdown formatting and emojis sparingly`;
    }

    const ollamaRequestBody: OllamaChatRequest = {
      model: ollamaModel,
      messages: [
        { role: "system", content: systemContent },
        { role: "user", content: userMessage },
      ],
      stream: false,
      options: {
        ...ollamaConfig,
        stop: ["</s>", "[INST]"], // Mistral 0.3 stop tokens
      },
    };

    console.log(
      `Generating response for notification ${notification.id} from @${originalAuthor}`
    );

    // Change endpoint to /api/chat
    const response = await fetch(`${ollamaUrl}/api/chat`, {
      method: "POST",
      body: JSON.stringify(ollamaRequestBody),
    });

    if (!response.ok) {
      throw new Error(`Ollama API request failed: ${response.statusText}`);
    }

    const ollamaResponse: OllamaChatResponse = await response.json();

    await storePromptData(notification, ollamaResponse);
    return ollamaResponse;
  } catch (error: any) {
    console.error(`Memory analysis failed for ${userFqn}: ${error.message}`);
    console.error(
      `Error in generateOllamaRequest for notification ${notification.id}:`,
      error.message
    );
    // Delete notification on error to prevent retry loops
    shouldDeleteNotification = true;
    throw error;
  } finally {
    if (shouldDeleteNotification) {
      try {
        await deleteNotification(notification);
      } catch (deleteError: any) {
        console.error(
          `Failed to delete notification ${notification.id}:`,
          deleteError.message
        );
      }
    }
  }
};


const postReplyToStatus = async (
  notification: Notification,
  ollamaResponseBody: OllamaChatResponse
) => {
  const { pleromaInstanceUrl, bearerToken } = envConfig;

  try {
    let mentions: string[];
    // Only mention the original author who triggered the bot
    const originalAuthor = notification.account.acct;
    console.log(
      `Replying to: @${originalAuthor} (status ID: ${notification.status.id})`
    );

    // Sanitize LLM output - remove any stray Mistral special tokens
    let sanitizedContent = ollamaResponseBody.message.content
      .replace(/<\/s>/g, "") // Remove EOS token if it appears
      .replace(/\[INST\]/g, "") // Remove instruction start token
      .replace(/\[\/INST\]/g, "") // Remove instruction end token
      .replace(/<s>/g, "") // Remove BOS token if it appears
      .trim();

    const statusBody: NewStatusBody = {
      content_type: "text/markdown",
      status: ollamaResponseBody.message.content,
      status: sanitizedContent,
      in_reply_to_id: notification.status.id,
      to: [originalAuthor], // Only send to the person who mentioned the bot
    };
    if (
      notification.status.mentions &&
      notification.status.mentions.length > 0
    ) {
      mentions = notification.status.mentions.map((mention) => {
        return mention.acct;
      });
      statusBody.to = mentions;
    }

    const response = await fetch(`${pleromaInstanceUrl}/api/v1/statuses`, {
      method: "POST",
@@ -232,9 +231,23 @@ const postReplyToStatus = async (
      throw new Error(`New status request failed: ${response.statusText}`);
    }

    await deleteNotification(notification);
    console.log(`Successfully posted reply to @${originalAuthor}`);
  } catch (error: any) {
    throw new Error(error.message);
    console.error(
      `Error posting reply for notification ${notification.id}:`,
      error.message
    );
    throw error;
  } finally {
    // Always try to delete the notification, even if posting failed
    try {
      await deleteNotification(notification);
    } catch (deleteError: any) {
      console.error(
        `Failed to delete notification ${notification.id}:`,
        deleteError.message
      );
    }
  }
};

@@ -250,10 +263,17 @@ const createTimelinePost = async () => {
    model: ollamaModel,
    messages: [
      { role: "system", content: ollamaSystemPrompt as string },
      { role: "user", content: "Say something random." },
      {
        role: "user",
        content:
          "Make a post about something. Keep your tone authentic, as if you are a real person making a post about a topic that interests you on a microblogging platform. This can be about anything like politics, gardening, homesteading, your favorite animal, a fun fact, what happened during your day, seeking companionship, baking, cooking, et cetera. Do not format the post with a title or quotes, nor sign the post with your name. It will be posted to your timeline so everyone will know you said it.",
      },
    ],
    stream: false,
    options: ollamaConfig,
    options: {
      ...ollamaConfig,
      stop: ["</s>", "[INST]"], // Mistral 0.3 stop tokens
    },
  };
  try {
    const response = await fetch(`${ollamaUrl}/api/chat`, {
@@ -296,31 +316,23 @@ const beginFetchCycle = async () => {
  setInterval(async () => {
    notifications = await getNotifications();
    if (notifications.length > 0) {
      await Promise.all(
        notifications.map(async (notification) => {
          try {
            // Handle reactions first (before generating response)
            // This way we can react even if response generation fails
            await handlePostReaction(notification);

            // Then handle the response generation as before
            const ollamaResponse = await generateOllamaRequest(notification);
            if (ollamaResponse) {
              await postReplyToStatus(notification, ollamaResponse);
            }
          } catch (error: any) {
            console.error(`Error processing notification ${notification.id}: ${error.message}`);
            // Still try to delete the notification to avoid getting stuck
            try {
              await deleteNotification(notification);
            } catch (deleteError: any) {
              console.error(`Failed to delete notification: ${deleteError.message}`);
            }
      // Process notifications sequentially to avoid race conditions
      for (const notification of notifications) {
        try {
          const ollamaResponse = await generateOllamaRequest(notification);
          if (ollamaResponse) {
            await postReplyToStatus(notification, ollamaResponse);
          }
        })
      );
        } catch (error: any) {
          console.error(
            `Error processing notification ${notification.id}:`,
            error.message
          );
          // Continue processing other notifications even if one fails
        }
      }
    }
  }, envConfig.fetchInterval);
  }, envConfig.fetchInterval); // lower intervals may cause the bot to respond multiple times to the same message, but we try to mitigate this with the deleteNotification function
};

const beginStatusPostInterval = async () => {
@@ -340,6 +352,11 @@ console.log(
    envConfig.fetchInterval / 1000
  } seconds.`
);
console.log(
  `Making ad-hoc post to ${envConfig.pleromaInstanceDomain}, every ${
    envConfig.adHocPostInterval / 1000 / 60
  } minutes.`
);
console.log(
  `Accepting prompts from: ${envConfig.whitelistedDomains.join(", ")}`
);
@@ -351,7 +368,4 @@ console.log(
console.log(`System prompt: ${envConfig.ollamaSystemPrompt}`);

await beginFetchCycle();
// setInterval(async () => {
//   createTimelinePost();
// }, 10000);
await beginStatusPostInterval();
```
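One detail worth noting in the new main.ts: the Mistral 0.3 stop tokens are both passed to Ollama as `stop` options and scrubbed from the output in `postReplyToStatus`. The scrubbing is inlined as a `.replace()` chain; pulled out as a helper it might look like this (a sketch, not code from the diff):

```ts
// Token list taken from the inline sanitization in postReplyToStatus above.
const MISTRAL_TOKENS = [/<\/s>/g, /<s>/g, /\[INST\]/g, /\[\/INST\]/g];

const sanitizeModelOutput = (content: string): string =>
  MISTRAL_TOKENS.reduce((text, pattern) => text.replace(pattern, ""), content).trim();

console.log(sanitizeModelOutput("Hello [INST]world[/INST]</s>")); // "Hello world"
```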
src/memory.ts (323 changes)
```diff
@@ -1,323 +0,0 @@
/**
 * ADAPTIVE MEMORY SYSTEM FOR FEDIVERSE CHATBOT
 *
 * This system maintains persistent, evolving user profiles to enable personalized
 * interactions across chat sessions. It uses LLM-based analysis to extract and
 * categorize user traits, then builds context for future conversations.
 *
 * ARCHITECTURE:
 * - UserMemory: Core profile (personality, gags, relationships, interests, backstory)
 * - InteractionLog: Historical conversation snapshots with sentiment analysis
 * - JSON string arrays in SQLite for flexible data storage
 *
 * WORKFLOW:
 * 1. Each user message + bot response gets analyzed by Ollama
 * 2. Extract personality traits, running gags, relationship dynamics, etc.
 * 3. Merge new insights with existing profile (deduplication)
 * 4. Generate memory context string for next conversation's system prompt
 * 5. Log interaction with sentiment and notable quotes
 *
 * MEMORY CATEGORIES:
 * - personalityTraits: User characteristics (sarcastic, protective, etc.)
 * - runningGags: Recurring jokes, memes, fake claims between user and bot
 * - relationships: How user treats bot (mean, protective, flirty)
 * - interests: Hobbies, topics user cares about
 * - backstory: Biographical info, "lore" (real or fabricated)
 *
 * CURRENT LIMITATIONS:
 * - No memory aging/decay - old info persists indefinitely
 * - Simple deduplication - similar but not identical entries accumulate
 * - No relevance scoring - stale assumptions carry same weight as recent ones
 * - Fixed array limits may truncate important long-term patterns
 *
 * RECOMMENDED IMPROVEMENTS:
 * - Add timestamp-based relevance weighting
 * - Implement semantic similarity checks for better deduplication
 * - Add contradiction detection to update outdated assumptions
 * - Consider LRU-style eviction instead of simple truncation
 */

// Updated memory.ts with JSON string handling for SQLite
import { prisma } from "./main.js";
import { envConfig } from "./main.js";
import { InteractionAnalysis, MemoryUpdateRequest, OllamaChatRequest, OllamaChatResponse } from "../types.js";

// Helper functions for JSON string array handling
const parseJsonArray = (jsonString: string): string[] => {
  try {
    const parsed = JSON.parse(jsonString);
    return Array.isArray(parsed) ? parsed : [];
  } catch {
    return [];
  }
};

const stringifyJsonArray = (array: string[]): string => {
  return JSON.stringify(array);
};

/**
 * Analyze a conversation to extract user personality, gags, and relationship dynamics
 */
const analyzeInteraction = async (
  userMessage: string,
  botResponse: string,
  userFqn: string
): Promise<InteractionAnalysis> => {
  const { ollamaUrl, ollamaModel } = envConfig;

  const analysisPrompt = `Analyze this conversation between a user and a cute female AI chatbot named Lexi. Extract personality traits, running gags, relationship dynamics, and interesting facts.

User (${userFqn}): ${userMessage}
Bot (Lexi): ${botResponse}

Please analyze and respond with a JSON object containing:
{
  "sentiment": "positive|negative|neutral|teasing|flirty|aggressive",
  "topics": ["topic1", "topic2"],
  "personalityObservations": ["trait1", "trait2"],
  "runningGagUpdates": ["gag1", "gag2"],
  "relationshipUpdates": ["relationship_change1"],
  "interestMentions": ["interest1", "interest2"],
  "backstoryElements": ["fact1", "fact2"],
  "memorableQuotes": ["quote1", "quote2"]
}

Focus on:
- Personality traits (sarcastic, teasing, protective, joker, etc.)
- Running gags and memes (fake claims, recurring jokes, etc.)
- How they treat the bot (mean, nice, flirty, protective)
- Interests and hobbies mentioned
- Any biographical info (real or fake "lore")
- Memorable or funny quotes

Keep entries brief and specific. Empty arrays are fine if nothing notable.`;

  try {
    const analysisRequest: OllamaChatRequest = {
      model: ollamaModel,
      messages: [
        {
          role: "system",
          content: "You are an expert at analyzing social interactions and extracting personality insights. Always respond with valid JSON only."
        },
        { role: "user", content: analysisPrompt }
      ],
      stream: false,
      options: {
        temperature: 0.3, // Lower temperature for more consistent analysis
        num_predict: 800,
      }
    };

    const response = await fetch(`${ollamaUrl}/api/chat`, {
      method: "POST",
      body: JSON.stringify(analysisRequest),
    });

    if (!response.ok) {
      throw new Error(`Analysis request failed: ${response.statusText}`);
    }

    const analysisResponse: OllamaChatResponse = await response.json();

    try {
      // Parse the JSON response
      const analysis: InteractionAnalysis = JSON.parse(analysisResponse.message.content.trim());
      return analysis;
    } catch (parseError) {
      console.error("Failed to parse analysis JSON:", analysisResponse.message.content);
      // Return default analysis if parsing fails
      return {
        sentiment: 'neutral',
        topics: [],
        personalityObservations: [],
        runningGagUpdates: [],
        relationshipUpdates: [],
        interestMentions: [],
        backstoryElements: [],
        memorableQuotes: []
      };
    }
  } catch (error: any) {
    console.error(`Error analyzing interaction: ${error.message}`);
    return {
      sentiment: 'neutral',
      topics: [],
      personalityObservations: [],
      runningGagUpdates: [],
      relationshipUpdates: [],
      interestMentions: [],
      backstoryElements: [],
      memorableQuotes: []
    };
  }
};

/**
 * Get or create user memory profile
 */
const getUserMemory = async (userFqn: string) => {
  try {
    let memory = await prisma.userMemory.findUnique({
      where: { userFqn: userFqn }
    });

    if (!memory) {
      memory = await prisma.userMemory.create({
        data: {
          userFqn: userFqn,
          personalityTraits: stringifyJsonArray([]),
          runningGags: stringifyJsonArray([]),
          relationships: stringifyJsonArray([]),
          interests: stringifyJsonArray([]),
          backstory: stringifyJsonArray([]),
          lastInteractionSummary: null,
          interactionCount: 0,
        }
      });
    }

    return memory;
  } catch (error: any) {
    console.error(`Error getting user memory: ${error.message}`);
    return null;
  }
};

/**
 * Update user memory with new interaction insights
 */
const updateUserMemory = async (request: MemoryUpdateRequest): Promise<void> => {
  try {
    const { userFqn, conversationContent, botResponse, analysis } = request;

    // Get existing memory
    const existingMemory = await getUserMemory(userFqn);
    if (!existingMemory) return;

    // Parse existing JSON arrays
    const existingPersonality = parseJsonArray(existingMemory.personalityTraits);
    const existingGags = parseJsonArray(existingMemory.runningGags);
    const existingRelationships = parseJsonArray(existingMemory.relationships);
    const existingInterests = parseJsonArray(existingMemory.interests);
    const existingBackstory = parseJsonArray(existingMemory.backstory);

    // Merge new observations with existing ones (avoiding duplicates)
    const mergeArrays = (existing: string[], newItems: string[]): string[] => {
      const combined = [...existing, ...newItems];
      return [...new Set(combined)]; // Remove duplicates
    };

    // Limit array sizes to prevent memory bloat
    const limitArray = (arr: string[], maxSize: number = 20): string[] => {
      return arr.slice(-maxSize); // Keep most recent items
    };

    const updatedMemory = {
      personalityTraits: stringifyJsonArray(limitArray(mergeArrays(existingPersonality, analysis.personalityObservations))),
      runningGags: stringifyJsonArray(limitArray(mergeArrays(existingGags, analysis.runningGagUpdates))),
      relationships: stringifyJsonArray(limitArray(mergeArrays(existingRelationships, analysis.relationshipUpdates))),
      interests: stringifyJsonArray(limitArray(mergeArrays(existingInterests, analysis.interestMentions))),
      backstory: stringifyJsonArray(limitArray(mergeArrays(existingBackstory, analysis.backstoryElements))),
      lastInteractionSummary: `${analysis.sentiment} conversation about ${analysis.topics.join(', ') || 'general chat'}`,
      interactionCount: existingMemory.interactionCount + 1,
    };

    // Update database
    await prisma.userMemory.update({
      where: { userFqn: userFqn },
      data: updatedMemory
    });

    // Log the interaction for historical reference
    await prisma.interactionLog.create({
      data: {
        userFqn: userFqn,
        conversationSnapshot: `${userFqn}: ${conversationContent.slice(0, 200)}... | Lexi: ${botResponse.slice(0, 200)}...`,
        sentiment: analysis.sentiment,
        extractedTopics: stringifyJsonArray(analysis.topics),
        memorableQuotes: stringifyJsonArray(analysis.memorableQuotes),
        botEmotionalState: generateEmotionalState(analysis),
      }
    });

    console.log(`Updated memory for ${userFqn}: ${analysis.personalityObservations.join(', ')}`);
  } catch (error: any) {
    console.error(`Error updating user memory: ${error.message}`);
  }
};

/**
 * Generate how the bot should "feel" about this interaction
 */
const generateEmotionalState = (analysis: InteractionAnalysis): string => {
  const { sentiment, relationshipUpdates } = analysis;

  if (sentiment === 'teasing') return 'playfully_hurt';
  if (sentiment === 'flirty') return 'flustered';
  if (sentiment === 'aggressive') return 'sad';
  if (relationshipUpdates.some(rel => rel.includes('hurt') || rel.includes('mean'))) return 'hurt_feelings';
  if (relationshipUpdates.some(rel => rel.includes('cute') || rel.includes('sweet'))) return 'happy';
  return 'neutral';
};

/**
 * Generate memory context for system prompt
 */
const generateMemoryContext = async (userFqn: string): Promise<string> => {
  try {
    const memory = await getUserMemory(userFqn);
    if (!memory || memory.interactionCount === 0) {
      return "";
    }

    let context = `\n\n--- User Memory for ${userFqn} ---\n`;

    const personalityTraits = parseJsonArray(memory.personalityTraits);
    const runningGags = parseJsonArray(memory.runningGags);
    const relationships = parseJsonArray(memory.relationships);
    const interests = parseJsonArray(memory.interests);
    const backstory = parseJsonArray(memory.backstory);

    if (personalityTraits.length > 0) {
      context += `Personality: ${personalityTraits.join(', ')}\n`;
    }

    if (runningGags.length > 0) {
      context += `Running gags: ${runningGags.join(', ')}\n`;
    }

    if (relationships.length > 0) {
      context += `Our relationship: ${relationships.join(', ')}\n`;
    }

    if (interests.length > 0) {
      context += `Interests: ${interests.join(', ')}\n`;
    }

    if (backstory.length > 0) {
      context += `Background: ${backstory.join(', ')}\n`;
    }

    if (memory.lastInteractionSummary) {
      context += `Last time we talked: ${memory.lastInteractionSummary}\n`;
    }

    context += `Total conversations: ${memory.interactionCount}`;

    return context;
  } catch (error: any) {
    console.error(`Error generating memory context: ${error.message}`);
    return "";
  }
};

export {
  analyzeInteraction,
  updateUserMemory,
  getUserMemory,
  generateMemoryContext,
  parseJsonArray,
  stringifyJsonArray,
};
```
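`generateMemoryContext` turned the stored profile into a block appended to the system prompt. For a populated profile the return value would resemble the comment below; the user FQN and profile contents are invented for illustration:

```ts
// Illustrative only - the FQN and trait values here are made up.
import { generateMemoryContext } from "./memory.js"; // module removed by this compare

const context = await generateMemoryContext("alice@example.social");
/* context would look roughly like:

--- User Memory for alice@example.social ---
Personality: sarcastic, protective
Running gags: claims to own a llama farm
Our relationship: teases the bot affectionately
Interests: homesteading, baking
Last time we talked: teasing conversation about baking, llamas
Total conversations: 12
*/
```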
src/util.ts (159 changes)
							| @ -2,8 +2,6 @@ import striptags from "striptags"; | ||||
| import { prisma } from "./main.js"; | ||||
| import { envConfig } from "./main.js"; | ||||
| import { Notification } from "../types.js"; | ||||
| import { OllamaChatRequest, OllamaChatResponse, PostAncestorsForModel } from "../types.js"; | ||||
|  | ||||
|  | ||||
| const trimInputData = (input: string): string => { | ||||
|   const strippedInput = striptags(input); | ||||
| @ -100,168 +98,11 @@ const selectRandomEmoji = (emojiList: string[]) => { | ||||
|   return emojiList[Math.floor(Math.random() * emojiList.length)]; | ||||
| }; | ||||
|  | ||||
| const selectRandomEmojis = (emojiList: string[], count: number = 20): string[] => { | ||||
|   if (emojiList.length <= count) return emojiList; | ||||
|    | ||||
|   const shuffled = [...emojiList].sort(() => 0.5 - Math.random()); | ||||
|   return shuffled.slice(0, count); | ||||
| }; | ||||
|  | ||||
| const isLLMRefusal = (response: string): boolean => { | ||||
|   const refusalPatterns = [ | ||||
|     /i can't|i cannot|unable to|i'm not able to/i, | ||||
|     /i don't feel comfortable/i, | ||||
|     /i'm not comfortable/i, | ||||
|     /i shouldn't|i won't/i, | ||||
|     /that's not something i can/i, | ||||
|     /i'm not programmed to/i, | ||||
|     /i'm an ai (assistant|language model)/i, | ||||
|     /as an ai/i, | ||||
|     /i apologize, but/i, | ||||
|     /i must decline/i, | ||||
|     /that would be inappropriate/i, | ||||
|     /i'm not supposed to/i, | ||||
|     /i'd rather not/i, | ||||
|     /i prefer not to/i, | ||||
|     /against my guidelines/i, | ||||
|     /violates my programming/i, | ||||
|   ]; | ||||
|  | ||||
|   const normalizedResponse = response.toLowerCase().trim(); | ||||
|    | ||||
|   // Check if response is too short (likely a refusal) | ||||
|   if (normalizedResponse.length < 20) return true; | ||||
|    | ||||
|   // Check for refusal patterns | ||||
|   return refusalPatterns.some(pattern => pattern.test(normalizedResponse)); | ||||
| }; | ||||
|  | ||||
| /** | ||||
|  * Summarize a long conversation thread to reduce context length | ||||
|  */ | ||||
| const summarizeConversationHistory = async ( | ||||
|   conversationHistory: PostAncestorsForModel[] | ||||
| ): Promise<string> => { | ||||
|   const { ollamaUrl, ollamaModel } = envConfig; | ||||
|    | ||||
|   if (conversationHistory.length === 0) return ""; | ||||
|    | ||||
|   // Create a concise thread representation | ||||
|   const threadText = conversationHistory | ||||
|     .map(post => `${post.account_fqn}: ${post.plaintext_content}`) | ||||
|     .join('\n'); | ||||
|  | ||||
|   const summarizePrompt = `Summarize this conversation thread in 2-3 sentences, focusing on the main topics discussed and the overall tone/mood. Keep it brief but capture the essential context: | ||||
|  | ||||
| ${threadText} | ||||
|  | ||||
| Summary:`; | ||||
|  | ||||
|   try { | ||||
|     const summarizeRequest: OllamaChatRequest = { | ||||
|       model: ollamaModel, | ||||
|       messages: [ | ||||
|         {  | ||||
|           role: "system",  | ||||
|           content: "You are excellent at creating concise, informative summaries. Keep summaries under 150 words and focus on key topics and relationships between participants."  | ||||
|         }, | ||||
|         { role: "user", content: summarizePrompt } | ||||
|       ], | ||||
|       stream: false, | ||||
|       options: { | ||||
|         temperature: 0.2, // Low temperature for consistent summaries | ||||
|         num_predict: 200, | ||||
|         num_ctx: 4096, // Smaller context for summarization | ||||
|       } | ||||
|     }; | ||||
|  | ||||
|     const response = await fetch(`${ollamaUrl}/api/chat`, { | ||||
|       method: "POST", | ||||
|       body: JSON.stringify(summarizeRequest), | ||||
|     }); | ||||
|  | ||||
|     if (!response.ok) { | ||||
|       console.error(`Summary request failed: ${response.statusText}`); | ||||
|       return `Previous conversation with ${conversationHistory.length} messages about various topics.`; | ||||
|     } | ||||
|  | ||||
|     const summaryResponse: OllamaChatResponse = await response.json(); | ||||
|     return summaryResponse.message.content.trim(); | ||||
|      | ||||
|   } catch (error: any) { | ||||
|     console.error(`Error summarizing conversation: ${error.message}`); | ||||
|     return `Previous conversation with ${conversationHistory.length} messages.`; | ||||
|   } | ||||
| }; | ||||
|  | ||||
| /** | ||||
|  * Decide whether to summarize based on thread length and complexity | ||||
|  */ | ||||
| const shouldSummarizeThread = (conversationHistory: PostAncestorsForModel[]): boolean => { | ||||
|   const SUMMARY_THRESHOLD = 15; | ||||
|    | ||||
|   if (conversationHistory.length < SUMMARY_THRESHOLD) return false; | ||||
|    | ||||
|   // Additional heuristics could be added here (see the sketch after this | ||||
|   // function for one example): | ||||
|   // - Total character count | ||||
|   // - Average message length | ||||
|   // - Time span of conversation | ||||
|    | ||||
|   return true; | ||||
| }; | ||||
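|  | ||||
| // One illustrative extra heuristic (a sketch, not implemented in this commit): | ||||
| // summarize verbose threads even when they are under the message threshold. | ||||
| // The 8000-character cutoff below is an assumed value to tune. | ||||
| // | ||||
| //   const totalChars = conversationHistory | ||||
| //     .reduce((sum, post) => sum + post.plaintext_content.length, 0); | ||||
| //   if (totalChars > 8000) return true; | ||||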
|  | ||||
| /** | ||||
|  * Process conversation history - either use full context or summarized version | ||||
|  */ | ||||
| const processConversationHistory = async ( | ||||
|   conversationHistory: PostAncestorsForModel[] | ||||
| ): Promise<string> => { | ||||
|   if (!shouldSummarizeThread(conversationHistory)) { | ||||
|     // Use full context for short threads | ||||
|     return conversationHistory | ||||
|       .map(post =>  | ||||
|         `${post.account_fqn} (to ${post.mentions.join(", ")}): ${post.plaintext_content}` | ||||
|       ) | ||||
|       .join('\n'); | ||||
|   } | ||||
|    | ||||
|   // Keep the last few messages in full detail + summary of earlier messages | ||||
|   const KEEP_RECENT_COUNT = 5; | ||||
|   const recentMessages = conversationHistory.slice(-KEEP_RECENT_COUNT); | ||||
|   const olderMessages = conversationHistory.slice(0, -KEEP_RECENT_COUNT); | ||||
|    | ||||
|   let contextString = ""; | ||||
|    | ||||
|   if (olderMessages.length > 0) { | ||||
|     const summary = await summarizeConversationHistory(olderMessages); | ||||
|     contextString += `Earlier conversation summary: ${summary}\n\n`; | ||||
|   } | ||||
|    | ||||
|   if (recentMessages.length > 0) { | ||||
|     contextString += "Recent messages:\n"; | ||||
|     contextString += recentMessages | ||||
|       .map(post =>  | ||||
|         `${post.account_fqn} (to ${post.mentions.join(", ")}): ${post.plaintext_content}` | ||||
|       ) | ||||
|       .join('\n'); | ||||
|   } | ||||
|    | ||||
|   return contextString; | ||||
| }; | ||||
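|  | ||||
| // Example call (illustrative; `ancestors` stands in for whatever thread array | ||||
| // the caller already has): | ||||
| // | ||||
| //   const context = await processConversationHistory(ancestors); | ||||
| //   // `context` is either the raw thread or "summary + recent messages" and | ||||
| //   // can be embedded directly into the prompt sent to the model. | ||||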
|  | ||||
| export { | ||||
|   alreadyRespondedTo, | ||||
|   selectRandomEmoji, | ||||
|   selectRandomEmojis, | ||||
|   isLLMRefusal, | ||||
|   trimInputData, | ||||
|   recordPendingResponse, | ||||
|   isFromWhitelistedDomain, | ||||
|   shouldContinue, | ||||
|   summarizeConversationHistory, | ||||
|   shouldSummarizeThread, | ||||
|   processConversationHistory, | ||||
| }; | ||||
|  | ||||
| @@ -8,7 +8,7 @@ Type=simple | ||||
| User=bot | ||||
| Restart=always | ||||
| RestartSec=3 | ||||
| ExecStart=/usr/bin/screen -L -DmS pleroma-ollama-bot /home/bot/.nvm/versions/node/v22.11.0/bin/npm run start | ||||
| ExecStart=/home/bot/.nvm/versions/node/v22.11.0/bin/npm run start | ||||
| WorkingDirectory=/path/to/directory | ||||
| [Install] | ||||
| WantedBy=multi-user.target | ||||
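| # Note: with the screen wrapper removed, systemd supervises the node process | ||||
| # directly and captures its stdout/stderr in the journal; inspect logs with | ||||
| # `journalctl -u <unit-name>` (substitute the actual unit name). | ||||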
							
								
								
									
177  types.d.ts  (vendored)
| @@ -158,215 +158,48 @@ interface PleromaEmojiMetadata { | ||||
|   tags: string[]; | ||||
| } | ||||
|  | ||||
| interface ReactionRequest { | ||||
|   name: string; // emoji name without colons | ||||
| } | ||||
|  | ||||
| interface ReactionResponse { | ||||
|   name: string; | ||||
|   count: number; | ||||
|   me: boolean; | ||||
|   url?: string; | ||||
|   static_url?: string; | ||||
| } | ||||
|  | ||||
| /** | ||||
|  * Experimental settings. I wouldn't recommend messing with these unless you know how they work (I don't either). | ||||
|  */ | ||||
| export interface OllamaConfigOptions { | ||||
|   /** | ||||
|    * Number of tokens guaranteed to be kept in memory during response generation.  | ||||
|    * Higher values leave less room for num_ctx. Used to preserve important context. | ||||
|    * Default: 0, Range: 0-512 | ||||
|    */ | ||||
|   num_keep?: number; | ||||
|  | ||||
|   /** | ||||
|    * Random seed for reproducible outputs. Same seed + same inputs = same output. | ||||
|    * Default: -1 (random), Range: any integer | ||||
|    */ | ||||
|   seed?: number; | ||||
|  | ||||
|   /** | ||||
|    * Maximum number of tokens to generate in the response. Controls response length. | ||||
|    * Default: 128, Range: 1-4096+ (model dependent) | ||||
|    */ | ||||
|   num_predict?: number; | ||||
|  | ||||
|   /** | ||||
|    * Limits token selection to top K most probable tokens. Reduces randomness. | ||||
|    * Default: 40, Range: 1-100 (higher = more diverse) | ||||
|    */ | ||||
|   top_k?: number; | ||||
|  | ||||
|   /** | ||||
|    * Nucleus sampling - cumulative probability cutoff for token selection. | ||||
|    * Default: 0.9, Range: 0.0-1.0 (lower = more focused) | ||||
|    */ | ||||
|   top_p?: number; | ||||
|  | ||||
|   /** | ||||
|    * Alternative to top_p - minimum probability threshold for tokens. | ||||
|    * Default: 0.0, Range: 0.0-1.0 (higher = more selective) | ||||
|    */ | ||||
|   min_p?: number; | ||||
|  | ||||
|   /** | ||||
|    * Typical sampling - targets tokens with "typical" probability mass. | ||||
|    * Default: 1.0 (disabled), Range: 0.0-1.0 (lower = less random) | ||||
|    */ | ||||
|   typical_p?: number; | ||||
|  | ||||
|   /** | ||||
|    * Number of previous tokens to consider for repetition penalty. | ||||
|    * Default: 64, Range: 0-512 | ||||
|    */ | ||||
|   repeat_last_n?: number; | ||||
|  | ||||
|   /** | ||||
|    * Randomness/creativity control. Lower = more deterministic, higher = more creative. | ||||
|    * Default: 0.8, Range: 0.0-2.0 (sweet spot: 0.1-1.2) | ||||
|    */ | ||||
|   temperature?: number; | ||||
|  | ||||
|   /** | ||||
|    * Penalty for repeating tokens. Higher values reduce repetition. | ||||
|    * Default: 1.1, Range: 0.0-2.0 (1.0 = no penalty) | ||||
|    */ | ||||
|   repeat_penalty?: number; | ||||
|  | ||||
|   /** | ||||
|    * Penalty for using tokens that have already appeared (OpenAI-style). | ||||
|    * Default: 0.0, Range: -2.0 to 2.0 | ||||
|    */ | ||||
|   presence_penalty?: number; | ||||
|  | ||||
|   /** | ||||
|    * Penalty proportional to token frequency in text (OpenAI-style). | ||||
|    * Default: 0.0, Range: -2.0 to 2.0 | ||||
|    */ | ||||
|   frequency_penalty?: number; | ||||
|  | ||||
|   /** | ||||
|    * Enables Mirostat sampling algorithm (0=disabled, 1=v1, 2=v2). | ||||
|    * Default: 0, Range: 0, 1, or 2 | ||||
|    */ | ||||
|   mirostat?: number; | ||||
|  | ||||
|   /** | ||||
|    * Target entropy for Mirostat. Controls coherence vs creativity balance. | ||||
|    * Default: 5.0, Range: 0.0-10.0 | ||||
|    */ | ||||
|   mirostat_tau?: number; | ||||
|  | ||||
|   /** | ||||
|    * Learning rate for Mirostat. How quickly it adapts. | ||||
|    * Default: 0.1, Range: 0.001-1.0 | ||||
|    */ | ||||
|   mirostat_eta?: number; | ||||
|  | ||||
|   /** | ||||
|    * Apply penalty to newline tokens to control formatting. | ||||
|    * Default: true | ||||
|    */ | ||||
|   penalize_newline?: boolean; | ||||
|  | ||||
|   /** | ||||
|    * Array of strings that will stop generation when encountered. | ||||
|    * Default: [], Example: ["\n", "User:", "###"] | ||||
|    */ | ||||
|   stop?: string[]; | ||||
|  | ||||
|   /** | ||||
|    * Enable NUMA (Non-Uniform Memory Access) optimization. | ||||
|    * Default: false (Linux systems may benefit from true) | ||||
|    */ | ||||
|   numa?: boolean; | ||||
|  | ||||
|   /** | ||||
|    * Context window size - total tokens for prompt + response. | ||||
|    * Default: 2048, Range: 512-32768+ (model dependent, affects memory usage) | ||||
|    */ | ||||
|   num_ctx?: number; | ||||
|  | ||||
|   /** | ||||
|    * Batch size for prompt processing. Higher = faster but more memory. | ||||
|    * Default: 512, Range: 1-2048 | ||||
|    */ | ||||
|   num_batch?: number; | ||||
|  | ||||
|   /** | ||||
|    * Number of GPU layers to offload. -1 = auto, 0 = CPU only. | ||||
|    * Default: -1, Range: -1 to model layer count | ||||
|    */ | ||||
|   num_gpu?: number; | ||||
|  | ||||
|   /** | ||||
|    * Primary GPU device ID for multi-GPU setups. | ||||
|    * Default: 0, Range: 0 to (GPU count - 1) | ||||
|    */ | ||||
|   main_gpu?: number; | ||||
|  | ||||
|   /** | ||||
|    * Optimize for low VRAM usage at cost of speed. | ||||
|    * Default: false | ||||
|    */ | ||||
|   low_vram?: boolean; | ||||
|  | ||||
|   /** | ||||
|    * Only load vocabulary, skip weights. For tokenization only. | ||||
|    * Default: false | ||||
|    */ | ||||
|   vocab_only?: boolean; | ||||
|  | ||||
|   /** | ||||
|    * Use memory mapping for model files (faster loading). | ||||
|    * Default: true | ||||
|    */ | ||||
|   use_mmap?: boolean; | ||||
|  | ||||
|   /** | ||||
|    * Lock model in memory to prevent swapping. | ||||
|    * Default: false (enable for consistent performance) | ||||
|    */ | ||||
|   use_mlock?: boolean; | ||||
|  | ||||
|   /** | ||||
|    * Number of CPU threads for inference. | ||||
|    * Default: auto-detected, Range: 1 to CPU core count | ||||
|    */ | ||||
|   num_thread?: number; | ||||
| } | ||||
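|  | ||||
| // A hedged example of passing these options with a chat request; the values | ||||
| // are illustrative starting points for a small local model, not recommendations: | ||||
| // | ||||
| //   const options: OllamaConfigOptions = { | ||||
| //     temperature: 0.8,    // default creativity | ||||
| //     num_ctx: 4096,       // total prompt + response token budget | ||||
| //     num_predict: 200,    // cap response length | ||||
| //     repeat_penalty: 1.1, // mild anti-repetition | ||||
| //   }; | ||||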
|  | ||||
|  | ||||
| export interface UserMemory { | ||||
|   id: number; | ||||
|   userFqn: string; | ||||
|   personalityTraits: string[]; // ["teases_bot", "sarcastic", "friendly", "joker"] | ||||
|   runningGags: string[]; // ["claims_to_shit_pants", "pretends_to_be_cat", "always_hungry"] | ||||
|   relationships: string[]; // ["hurt_my_feelings_once", "called_me_cute", "protective_of_me"] | ||||
|   interests: string[]; // ["programming", "anime", "cooking"] | ||||
|   backstory: string[]; // ["works_at_tech_company", "has_three_cats", "lives_in_california"] | ||||
|   lastInteractionSummary: string; // Brief summary of last conversation | ||||
|   interactionCount: number; | ||||
|   lastUpdated: Date; // Prisma returns DateTime columns as JS Date objects | ||||
|   createdAt: Date; | ||||
| } | ||||
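|  | ||||
| // If the backing store persists the array fields as JSON strings (as string | ||||
| // columns defaulting to "[]" typically are), a row must be parsed into this | ||||
| // shape. A minimal hydration sketch; `row` and `toUserMemory` are hypothetical: | ||||
| // | ||||
| //   const toUserMemory = (row: any): Partial<UserMemory> => ({ | ||||
| //     ...row, | ||||
| //     personalityTraits: JSON.parse(row.personalityTraits) as string[], | ||||
| //     runningGags: JSON.parse(row.runningGags) as string[], | ||||
| //     // relationships, interests, backstory are parsed the same way | ||||
| //   }); | ||||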
|  | ||||
| export interface InteractionAnalysis { | ||||
|   sentiment: 'positive' | 'negative' | 'neutral' | 'teasing' | 'flirty' | 'aggressive'; | ||||
|   topics: string[]; // Extracted topics from conversation | ||||
|   personalityObservations: string[]; // New traits observed | ||||
|   runningGagUpdates: string[]; // New or updated running gags | ||||
|   relationshipUpdates: string[]; // How relationship with bot changed | ||||
|   interestMentions: string[]; // Interests/hobbies mentioned | ||||
|   backstoryElements: string[]; // New biographical info (real or fake) | ||||
|   memorableQuotes: string[]; // Funny or notable things they said | ||||
| } | ||||
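|  | ||||
| // An illustrative value of this shape (purely hypothetical data): | ||||
| // | ||||
| //   const analysis: InteractionAnalysis = { | ||||
| //     sentiment: "teasing", | ||||
| //     topics: ["cooking"], | ||||
| //     personalityObservations: ["sarcastic"], | ||||
| //     runningGagUpdates: [], | ||||
| //     relationshipUpdates: ["called_me_cute"], | ||||
| //     interestMentions: ["cooking"], | ||||
| //     backstoryElements: [], | ||||
| //     memorableQuotes: ["I only cook at 3am"], | ||||
| //   }; | ||||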
|  | ||||
| export interface MemoryUpdateRequest { | ||||
|   userFqn: string; | ||||
|   conversationContent: string; | ||||
|   botResponse: string; | ||||
|   analysis: InteractionAnalysis; | ||||
| } | ||||