Compare commits

3 Commits (ee367a0d9a...tyler)

| Author | SHA1 | Date |
|---|---|---|
|  | 95d2854548 |  |
|  | 2430047d45 |  |
|  | 834e415f11 |  |
prisma/schema.prisma

```diff
@@ -27,4 +27,49 @@ model User {
   id              Int       @id @default(autoincrement())
   userFqn         String    @unique
   lastRespondedTo DateTime?
+  memory          UserMemory?
 }
+
+model Reaction {
+  id         Int      @id @default(autoincrement())
+  statusId   String   // The Pleroma status ID we reacted to
+  emojiName  String   // The emoji we used to react
+  reactedAt  DateTime @default(now())
+  createdAt  DateTime @default(now())
+
+  @@unique([statusId]) // Prevent multiple reactions to same status
+  @@map("reactions")
+}
+
+model UserMemory {
+  id                     Int      @id @default(autoincrement())
+  userFqn                String   @unique
+  personalityTraits      String   @default("[]") // JSON string of personality observations
+  runningGags            String   @default("[]") // JSON string of running jokes/gags
+  relationships          String   @default("[]") // JSON string of relationship dynamics with bot
+  interests              String   @default("[]") // JSON string of user interests
+  backstory              String   @default("[]") // JSON string of biographical elements
+  lastInteractionSummary String?  // Brief summary of last chat
+  interactionCount       Int      @default(0)
+  lastUpdated            DateTime @default(now()) @updatedAt
+  createdAt              DateTime @default(now())
+
+  // Relation to existing User model
+  user User @relation(fields: [userFqn], references: [userFqn])
+
+  @@map("user_memories")
+}
+
+model InteractionLog {
+  id                   Int      @id @default(autoincrement())
+  userFqn              String
+  conversationSnapshot String   // Key parts of the conversation
+  sentiment            String   // positive, negative, teasing, etc.
+  extractedTopics      String   @default("[]") // JSON string of topics discussed
+  memorableQuotes      String   @default("[]") // JSON string of funny/notable quotes
+  botEmotionalState    String?  // How the bot should "feel" about this interaction
+  createdAt            DateTime @default(now())
+
+  @@map("interaction_logs")
+  @@index([userFqn, createdAt])
+}
```
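Because SQLite has no array column type, every list-valued field in these models is stored as a JSON-encoded string (hence the `@default("[]")`). A minimal sketch of the resulting read-modify-write round trip through the generated Prisma client; the `addTrait` helper and the standalone client instance are illustrative, not part of the commit:

```ts
import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

// Append one personality trait: decode the JSON string, update the
// array, and encode it back before writing.
async function addTrait(userFqn: string, trait: string): Promise<void> {
  const memory = await prisma.userMemory.findUnique({ where: { userFqn } });
  if (!memory) return;

  const traits: string[] = JSON.parse(memory.personalityTraits); // "[]" on first use
  if (!traits.includes(trait)) traits.push(trait);

  await prisma.userMemory.update({
    where: { userFqn },
    data: { personalityTraits: JSON.stringify(traits) },
  });
}
```

Note that `@@unique([statusId])` on `Reaction` makes the duplicate-reaction check below enforceable at the database level, not just in application code.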
src/api.ts (331 lines changed)
```diff
@@ -1,5 +1,9 @@
 import { envConfig, prisma } from "./main.js";
 import { PleromaEmoji, Notification, ContextResponse } from "../types.js";
+import { selectRandomEmojis } from "./util.js";
+import { getUserMemory, parseJsonArray, stringifyJsonArray } from "./memory.js";
+
+
 
 const getNotifications = async () => {
   const { bearerToken, pleromaInstanceUrl } = envConfig;
@@ -98,9 +102,336 @@ const deleteNotification = async (notification: Notification) => {
   }
 };
 
+
+/**
+ * React to a status with a random emoji
+ */
+const reactToStatus = async (statusId: string, emojiName: string): Promise<boolean> => {
+  const { bearerToken, pleromaInstanceUrl } = envConfig;
+
+  try {
+    const response = await fetch(
+      `${pleromaInstanceUrl}/api/v1/statuses/${statusId}/react/${emojiName}`,
+      {
+        method: "PUT",
+        headers: {
+          Authorization: `Bearer ${bearerToken}`,
+          "Content-Type": "application/json",
+        },
+      }
+    );
+
+    if (!response.ok) {
+      console.error(`Failed to react to status ${statusId}: ${response.status} - ${response.statusText}`);
+      return false;
+    }
+
+    return true;
+  } catch (error: any) {
+    console.error(`Error reacting to status ${statusId}: ${error.message}`);
+    return false;
+  }
+};
+
+/**
+ * Check if we've already reacted to a status
+ */
+const hasAlreadyReacted = async (statusId: string): Promise<boolean> => {
+  try {
+    const reaction = await prisma.reaction.findFirst({
+      where: { statusId: statusId },
+    });
+    return !!reaction;
+  } catch (error: any) {
+    console.error(`Error checking reaction status: ${error.message}`);
+    return true; // Assume we've reacted to avoid spamming on error
+  }
+};
+
+/**
+ * Record that we've reacted to a status
+ */
+const recordReaction = async (statusId: string, emojiName: string): Promise<void> => {
+  try {
+    await prisma.reaction.create({
+      data: {
+        statusId: statusId,
+        emojiName: emojiName,
+        reactedAt: new Date(),
+      },
+    });
+  } catch (error: any) {
+    console.error(`Error recording reaction: ${error.message}`);
+  }
+};
+
+/**
+ * Decide whether to react to a post (not every post gets a reaction)
+ */
+const shouldReactToPost = (): boolean => {
+  // React to roughly 30% of posts
+  return Math.random() < 0.3;
+};
+
+/**
+ * Get appropriate reaction emojis based on content sentiment/keywords
+ */
+const getContextualEmoji = (content: string, availableEmojis: string[]): string => {
+  const contentLower = content.toLowerCase();
+
+  // Define emoji categories with keywords
+  const emojiCategories = {
+    positive: ['happy', 'smile', 'joy', 'love', 'heart', 'thumbsup', 'fire', 'based'],
+    negative: ['sad', 'cry', 'angry', 'rage', 'disappointed', 'cringe'],
+    thinking: ['think', 'hmm', 'brain', 'smart', 'curious'],
+    laughing: ['laugh', 'lol', 'kek', 'funny', 'haha', 'rofl'],
+    agreement: ['yes', 'agree', 'nod', 'correct', 'true', 'based'],
+    surprise: ['wow', 'amazing', 'surprised', 'shock', 'omg'],
+  };
+
+  // Keywords that might indicate sentiment
+  const sentimentKeywords = {
+    positive: ['good', 'great', 'awesome', 'nice', 'love', 'happy', 'excellent', 'perfect'],
+    negative: ['bad', 'terrible', 'hate', 'awful', 'horrible', 'worst', 'sucks'],
+    funny: ['lol', 'haha', 'funny', 'hilarious', 'joke', 'meme'],
+    question: ['?', 'what', 'how', 'why', 'when', 'where'],
+    agreement: ['yes', 'exactly', 'true', 'right', 'correct', 'agree'],
+    thinking: ['think', 'consider', 'maybe', 'perhaps', 'hmm', 'interesting'],
+  };
+
+  // Check content sentiment and find matching emojis
+  for (const [sentiment, keywords] of Object.entries(sentimentKeywords)) {
+    if (keywords.some(keyword => contentLower.includes(keyword))) {
+      const categoryEmojis = emojiCategories[sentiment as keyof typeof emojiCategories];
+      if (categoryEmojis) {
+        const matchingEmojis = availableEmojis.filter(emoji =>
+          categoryEmojis.some(cat => emoji.toLowerCase().includes(cat))
+        );
+        if (matchingEmojis.length > 0) {
+          return matchingEmojis[Math.floor(Math.random() * matchingEmojis.length)];
+        }
+      }
+    }
+  }
+
+  // Fall back to a random emoji from a curated list of common reactions
+  const commonReactions = availableEmojis.filter(emoji =>
+    ['heart', 'thumbsup', 'fire', 'kek', 'based', 'think', 'smile', 'laugh']
+      .some(common => emoji.toLowerCase().includes(common))
+  );
+
+  if (commonReactions.length > 0) {
+    return commonReactions[Math.floor(Math.random() * commonReactions.length)];
+  }
+
+  // Final fallback to any random emoji
+  return availableEmojis[Math.floor(Math.random() * availableEmojis.length)];
+};
+
+/**
+ * Main function to handle post reactions
+ */
+const handlePostReaction = async (notification: Notification): Promise<void> => {
+  try {
+    const statusId = notification.status.id;
+
+    // Check if we should react to this post
+    if (!shouldReactToPost()) {
+      return;
+    }
+
+    // Check if we've already reacted
+    if (await hasAlreadyReacted(statusId)) {
+      return;
+    }
+
+    // Get available emojis
+    const emojiList = await getInstanceEmojis();
+    if (!emojiList || emojiList.length === 0) {
+      return;
+    }
+
+    // Select a smaller random pool for reactions (up to 8 emojis)
+    const reactionPool = selectRandomEmojis(emojiList, 8);
+
+    // Get contextual emoji based on post content
+    const selectedEmoji = getContextualEmoji(
+      notification.status.pleroma.content["text/plain"],
+      reactionPool
+    );
+
+    // React to the post
+    const success = await reactToStatus(statusId, selectedEmoji);
+
+    if (success) {
+      await recordReaction(statusId, selectedEmoji);
+      console.log(`Reacted to status ${statusId} with :${selectedEmoji}:`);
+    }
+
+  } catch (error: any) {
+    console.error(`Error handling post reaction: ${error.message}`);
+  }
+};
+
+
+/**
+ * Get detailed user memory for admin/debugging
+ */
+const getUserMemoryDetails = async (userFqn: string) => {
+  try {
+    const memory = await prisma.userMemory.findUnique({
+      where: { userFqn: userFqn },
+      include: {
+        user: true
+      }
+    });
+
+    if (!memory) return null;
+
+    // Get recent interaction logs
+    const recentLogs = await prisma.interactionLog.findMany({
+      where: { userFqn: userFqn },
+      orderBy: { createdAt: 'desc' },
+      take: 10
+    });
+
+    // Parse JSON strings for better readability
+    const parsedMemory = {
+      ...memory,
+      personalityTraits: parseJsonArray(memory.personalityTraits),
+      runningGags: parseJsonArray(memory.runningGags),
+      relationships: parseJsonArray(memory.relationships),
+      interests: parseJsonArray(memory.interests),
+      backstory: parseJsonArray(memory.backstory),
+      recentInteractions: recentLogs.map(log => ({
+        ...log,
+        extractedTopics: parseJsonArray(log.extractedTopics),
+        memorableQuotes: parseJsonArray(log.memorableQuotes)
+      }))
+    };
+
+    return parsedMemory;
+  } catch (error: any) {
+    console.error(`Error getting user memory details: ${error.message}`);
+    return null;
+  }
+};
+
+/**
+ * Manually add or remove memory elements (for admin use)
+ */
+const modifyUserMemory = async (
+  userFqn: string,
+  action: 'add' | 'remove',
+  category: 'personalityTraits' | 'runningGags' | 'relationships' | 'interests' | 'backstory',
+  item: string
+) => {
+  try {
+    const memory = await getUserMemory(userFqn);
+    if (!memory) return false;
+
+    const currentArray = parseJsonArray(memory[category] as string);
+    let updatedArray: string[];
+
+    if (action === 'add') {
+      updatedArray = [...new Set([...currentArray, item])]; // Add without duplicates
+    } else {
+      updatedArray = currentArray.filter(existingItem => existingItem !== item);
+    }
+
+    await prisma.userMemory.update({
+      where: { userFqn: userFqn },
+      data: { [category]: stringifyJsonArray(updatedArray) }
+    });
+
+    console.log(`${action === 'add' ? 'Added' : 'Removed'} "${item}" ${action === 'add' ? 'to' : 'from'} ${category} for ${userFqn}`);
+    return true;
+  } catch (error: any) {
+    console.error(`Error modifying user memory: ${error.message}`);
+    return false;
+  }
+};
+
+
+const getMemoryStats = async () => {
+  try {
+    const totalUsers = await prisma.userMemory.count();
+    const totalInteractions = await prisma.interactionLog.count();
+
+    const mostActiveUsers = await prisma.userMemory.findMany({
+      orderBy: { interactionCount: 'desc' },
+      take: 10,
+      select: {
+        userFqn: true,
+        interactionCount: true,
+        personalityTraits: true,
+        runningGags: true
+      }
+    });
+
+    // Parse JSON strings for the active users
+    const parsedActiveUsers = mostActiveUsers.map(user => ({
+      ...user,
+      personalityTraits: parseJsonArray(user.personalityTraits),
+      runningGags: parseJsonArray(user.runningGags)
+    }));
+
+    const sentimentStats = await prisma.interactionLog.groupBy({
+      by: ['sentiment'],
+      _count: { sentiment: true }
+    });
+
+    return {
+      totalUsers,
+      totalInteractions,
+      mostActiveUsers: parsedActiveUsers,
+      sentimentDistribution: sentimentStats
+    };
+  } catch (error: any) {
+    console.error(`Error getting memory stats: ${error.message}`);
+    return null;
+  }
+};
+
+const resetUserMemory = async (userFqn: string) => {
+  try {
+    await prisma.userMemory.update({
+      where: { userFqn: userFqn },
+      data: {
+        personalityTraits: stringifyJsonArray([]),
+        runningGags: stringifyJsonArray([]),
+        relationships: stringifyJsonArray([]),
+        interests: stringifyJsonArray([]),
+        backstory: stringifyJsonArray([]),
+        lastInteractionSummary: null,
+        interactionCount: 0,
+      }
+    });
+
+    // Optionally delete interaction logs too
+    await prisma.interactionLog.deleteMany({
+      where: { userFqn: userFqn }
+    });
+
+    console.log(`Reset memory for ${userFqn}`);
+    return true;
+  } catch (error: any) {
+    console.error(`Error resetting user memory: ${error.message}`);
+    return false;
+  }
+};
+
+
 export {
   deleteNotification,
   getInstanceEmojis,
   getNotifications,
   getStatusContext,
+  reactToStatus,
+  handlePostReaction,
+  hasAlreadyReacted,
+  getUserMemoryDetails,
+  modifyUserMemory,
+  getMemoryStats,
+  resetUserMemory,
 };
```
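The new exports split into two groups: the reaction pipeline (`reactToStatus`, `hasAlreadyReacted`, `handlePostReaction`) and admin/debugging utilities for the memory store. A sketch of how the admin utilities might be driven from a one-off maintenance script; the user handle and curation values are invented for illustration:

```ts
import {
  getUserMemoryDetails,
  modifyUserMemory,
  getMemoryStats,
  resetUserMemory,
} from "./api.js";

const main = async () => {
  // Inspect a profile plus its ten most recent interaction logs.
  const details = await getUserMemoryDetails("alice@example.social");
  console.log(details?.personalityTraits, details?.recentInteractions.length);

  // Curate by hand: add a running gag, drop a stale interest.
  await modifyUserMemory("alice@example.social", "add", "runningGags", "claims to be a wizard");
  await modifyUserMemory("alice@example.social", "remove", "interests", "crypto");

  // Aggregate stats across all users.
  const stats = await getMemoryStats();
  console.log(`${stats?.totalUsers} users, ${stats?.totalInteractions} interactions`);

  // Nuclear option: wipe the profile and its interaction logs.
  await resetUserMemory("alice@example.social");
};

main();
```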
src/main.ts (123 lines changed)
```diff
@@ -13,6 +13,7 @@ import {
   deleteNotification,
   getNotifications,
   getStatusContext,
+  handlePostReaction,
 } from "./api.js";
 import { storeUserData, storePromptData } from "./prisma.js";
 import {
@@ -20,9 +21,17 @@ import {
   alreadyRespondedTo,
   recordPendingResponse,
   // trimInputData,
-  selectRandomEmoji,
+  // selectRandomEmoji,
+  selectRandomEmojis,
+  isLLMRefusal,
   shouldContinue,
+  processConversationHistory,
 } from "./util.js";
+import {
+  analyzeInteraction,
+  updateUserMemory,
+  generateMemoryContext,
+} from "./memory.js";
 
 export const prisma = new PrismaClient();
 
@@ -59,7 +68,8 @@ const ollamaConfig: OllamaConfigOptions = {
 // https://replicate.com/blog/how-to-prompt-llama
 
 const generateOllamaRequest = async (
-  notification: Notification
+  notification: Notification,
+  retryAttempt: number = 0
 ): Promise<OllamaChatResponse | undefined> => {
   const {
     whitelistOnly,
@@ -68,6 +78,7 @@ const generateOllamaRequest = async (
     ollamaUrl,
     replyWithContext,
   } = envConfig;
+
   try {
     if (shouldContinue(notification)) {
       if (whitelistOnly && !isFromWhitelistedDomain(notification)) {
@@ -79,7 +90,13 @@ const generateOllamaRequest = async (
       }
       await recordPendingResponse(notification);
       await storeUserData(notification);
+
+      const userFqn = notification.status.account.fqn;
+      const userMessage = notification.status.pleroma.content["text/plain"];
+
       let conversationHistory: PostAncestorsForModel[] = [];
+      let processedContext = "";
+
       if (replyWithContext) {
         const contextPosts = await getStatusContext(notification.status.id);
         if (!contextPosts?.ancestors || !contextPosts) {
@@ -93,45 +110,61 @@ const generateOllamaRequest = async (
             plaintext_content: ancestor.pleroma.content["text/plain"],
           };
         });
-        // console.log(conversationHistory);
+
+        // Process context - summarize if too long
+        processedContext = await processConversationHistory(conversationHistory);
       }
 
-      // Simplified user message (remove [/INST] as it's not needed for Llama 3)
-      const userMessage = `${notification.status.account.fqn} says: ${notification.status.pleroma.content["text/plain"]}`;
+      const formattedUserMessage = `${userFqn} says: ${userMessage}`;
 
-      let systemContent = ollamaSystemPrompt;
+      // Get user memory context
+      const memoryContext = await generateMemoryContext(userFqn);
+
+      // Get random emojis for this request
+      const emojiList = await getInstanceEmojis();
+      let availableEmojis = "";
+      if (emojiList && emojiList.length > 0) {
+        const randomEmojis = selectRandomEmojis(emojiList, 20);
+        availableEmojis = `\n\nAvailable custom emojis you can use in your response (format as :emoji_name:): ${randomEmojis.join(", ")}`;
+      }
+
+      let systemContent = ollamaSystemPrompt + memoryContext + availableEmojis;
+
       if (replyWithContext) {
-        // Simplified context instructions (avoid heavy JSON; summarize for clarity)
-        systemContent = `${ollamaSystemPrompt}\n\nPrevious conversation context:\n${conversationHistory
-          .map(
-            (post) =>
-              `${post.account_fqn} (to ${post.mentions.join(", ")}): ${
-                post.plaintext_content
-              }`
-          )
-          .join(
-            "\n"
-          )}\nReply as if you are a party to the conversation. If '@nice-ai' is mentioned, respond directly. Prefix usernames with '@' when addressing them.`;
+        systemContent = `${ollamaSystemPrompt}${memoryContext}\n\nPrevious conversation context:\n${processedContext}\nReply as if you are a party to the conversation. If '@nice-ai' is mentioned, respond directly. Prefix usernames with '@' when addressing them.${availableEmojis}`;
       }
 
-      // Switch to chat request format (messages array auto-handles Llama 3 template)
+      // Use different seeds for retry attempts
+      const currentConfig = {
+        ...ollamaConfig,
+        seed: retryAttempt > 0 ? Math.floor(Math.random() * 1000000) : ollamaConfig.seed,
+      };
+
       const ollamaRequestBody: OllamaChatRequest = {
         model: ollamaModel,
         messages: [
           { role: "system", content: systemContent as string },
-          { role: "user", content: userMessage },
+          { role: "user", content: formattedUserMessage },
         ],
         stream: false,
-        options: ollamaConfig,
+        options: currentConfig,
       };
 
-      // Change endpoint to /api/chat
       const response = await fetch(`${ollamaUrl}/api/chat`, {
         method: "POST",
         body: JSON.stringify(ollamaRequestBody),
       });
       const ollamaResponse: OllamaChatResponse = await response.json();
 
+      // Check for refusal and retry up to 2 times
+      if (isLLMRefusal(ollamaResponse.message.content) && retryAttempt < 2) {
+        console.log(`LLM refused to answer (attempt ${retryAttempt + 1}), retrying with different seed...`);
+        return generateOllamaRequest(notification, retryAttempt + 1);
+      }
+
+      // Analyze interaction and update user memory (deliberately not awaited,
+      // so it doesn't block the reply)
+      analyzeAndUpdateMemory(userFqn, userMessage, ollamaResponse.message.content);
+
       await storePromptData(notification, ollamaResponse);
       return ollamaResponse;
     }
@@ -140,21 +173,40 @@ const generateOllamaRequest = async (
   }
 };
 
+/**
+ * Analyze the interaction and update user memory. Runs in the background:
+ * the caller invokes this without awaiting it, so the awaits below never
+ * delay the reply.
+ */
+const analyzeAndUpdateMemory = async (
+  userFqn: string,
+  userMessage: string,
+  botResponse: string
+): Promise<void> => {
+  try {
+    const analysis = await analyzeInteraction(userMessage, botResponse, userFqn);
+
+    await updateUserMemory({
+      userFqn,
+      conversationContent: userMessage,
+      botResponse,
+      analysis,
+    });
+  } catch (error: any) {
+    console.error(`Memory analysis failed for ${userFqn}: ${error.message}`);
+  }
+};
+
+
 const postReplyToStatus = async (
   notification: Notification,
   ollamaResponseBody: OllamaChatResponse
 ) => {
   const { pleromaInstanceUrl, bearerToken } = envConfig;
-  const emojiList = await getInstanceEmojis();
-  let randomEmoji;
-  if (emojiList) {
-    randomEmoji = selectRandomEmoji(emojiList);
-  }
   try {
     let mentions: string[];
     const statusBody: NewStatusBody = {
       content_type: "text/markdown",
-      status: `${ollamaResponseBody.message.content} :${randomEmoji}:`,
+      status: ollamaResponseBody.message.content,
       in_reply_to_id: notification.status.id,
     };
     if (
@@ -247,17 +299,28 @@ const beginFetchCycle = async () => {
       await Promise.all(
         notifications.map(async (notification) => {
           try {
+            // Handle reactions first (before generating response)
+            // This way we can react even if response generation fails
+            await handlePostReaction(notification);
+
+            // Then handle the response generation as before
             const ollamaResponse = await generateOllamaRequest(notification);
             if (ollamaResponse) {
-              postReplyToStatus(notification, ollamaResponse);
+              await postReplyToStatus(notification, ollamaResponse);
            }
           } catch (error: any) {
-            throw new Error(error.message);
+            console.error(`Error processing notification ${notification.id}: ${error.message}`);
+            // Still try to delete the notification to avoid getting stuck
+            try {
+              await deleteNotification(notification);
+            } catch (deleteError: any) {
+              console.error(`Failed to delete notification: ${deleteError.message}`);
+            }
           }
         })
       );
     }
-  }, envConfig.fetchInterval); // lower intervals may cause the bot to respond multiple times to the same message, but we try to mitigate this with the deleteNotification function
+  }, envConfig.fetchInterval);
 };
 
 const beginStatusPostInterval = async () => {
```
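Two behavioral changes here are worth calling out: refusals are retried with a fresh sampler seed, and a failing notification no longer aborts the whole batch, since the `catch` now logs and deletes instead of rethrowing inside `Promise.all`. The retry idea, reduced to a standalone sketch with illustrative names (unlike the recursive version in the diff, which returns the final refusal, this loop gives up and returns `undefined`):

```ts
type GenerateFn = (seed: number) => Promise<string>;

// Retry generation, re-rolling the seed on each refusal so the sampler
// takes a different path; give up after maxAttempts tries.
async function generateWithRetry(
  generate: GenerateFn,
  isRefusal: (text: string) => boolean,
  maxAttempts = 3
): Promise<string | undefined> {
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const seed = attempt === 0 ? 42 : Math.floor(Math.random() * 1_000_000);
    const text = await generate(seed);
    if (!isRefusal(text)) return text;
    console.log(`Refusal on attempt ${attempt + 1}, retrying with a new seed...`);
  }
  return undefined; // every attempt refused
}
```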
src/memory.ts (323 lines, new file; shown in full)
```ts
/**
 * ADAPTIVE MEMORY SYSTEM FOR FEDIVERSE CHATBOT
 *
 * This system maintains persistent, evolving user profiles to enable personalized
 * interactions across chat sessions. It uses LLM-based analysis to extract and
 * categorize user traits, then builds context for future conversations.
 *
 * ARCHITECTURE:
 * - UserMemory: Core profile (personality, gags, relationships, interests, backstory)
 * - InteractionLog: Historical conversation snapshots with sentiment analysis
 * - JSON string arrays in SQLite for flexible data storage
 *
 * WORKFLOW:
 * 1. Each user message + bot response gets analyzed by Ollama
 * 2. Extract personality traits, running gags, relationship dynamics, etc.
 * 3. Merge new insights with existing profile (deduplication)
 * 4. Generate memory context string for next conversation's system prompt
 * 5. Log interaction with sentiment and notable quotes
 *
 * MEMORY CATEGORIES:
 * - personalityTraits: User characteristics (sarcastic, protective, etc.)
 * - runningGags: Recurring jokes, memes, fake claims between user and bot
 * - relationships: How user treats bot (mean, protective, flirty)
 * - interests: Hobbies, topics user cares about
 * - backstory: Biographical info, "lore" (real or fabricated)
 *
 * CURRENT LIMITATIONS:
 * - No memory aging/decay - old info persists indefinitely
 * - Simple deduplication - similar but not identical entries accumulate
 * - No relevance scoring - stale assumptions carry same weight as recent ones
 * - Fixed array limits may truncate important long-term patterns
 *
 * RECOMMENDED IMPROVEMENTS:
 * - Add timestamp-based relevance weighting
 * - Implement semantic similarity checks for better deduplication
 * - Add contradiction detection to update outdated assumptions
 * - Consider LRU-style eviction instead of simple truncation
 */

// Updated memory.ts with JSON string handling for SQLite
import { prisma } from "./main.js";
import { envConfig } from "./main.js";
import { InteractionAnalysis, MemoryUpdateRequest, OllamaChatRequest, OllamaChatResponse } from "../types.js";

// Helper functions for JSON string array handling
const parseJsonArray = (jsonString: string): string[] => {
  try {
    const parsed = JSON.parse(jsonString);
    return Array.isArray(parsed) ? parsed : [];
  } catch {
    return [];
  }
};

const stringifyJsonArray = (array: string[]): string => {
  return JSON.stringify(array);
};

/**
 * Analyze a conversation to extract user personality, gags, and relationship dynamics
 */
const analyzeInteraction = async (
  userMessage: string,
  botResponse: string,
  userFqn: string
): Promise<InteractionAnalysis> => {
  const { ollamaUrl, ollamaModel } = envConfig;

  const analysisPrompt = `Analyze this conversation between a user and a cute female AI chatbot named Lexi. Extract personality traits, running gags, relationship dynamics, and interesting facts.

User (${userFqn}): ${userMessage}
Bot (Lexi): ${botResponse}

Please analyze and respond with a JSON object containing:
{
  "sentiment": "positive|negative|neutral|teasing|flirty|aggressive",
  "topics": ["topic1", "topic2"],
  "personalityObservations": ["trait1", "trait2"],
  "runningGagUpdates": ["gag1", "gag2"],
  "relationshipUpdates": ["relationship_change1"],
  "interestMentions": ["interest1", "interest2"],
  "backstoryElements": ["fact1", "fact2"],
  "memorableQuotes": ["quote1", "quote2"]
}

Focus on:
- Personality traits (sarcastic, teasing, protective, joker, etc.)
- Running gags and memes (fake claims, recurring jokes, etc.)
- How they treat the bot (mean, nice, flirty, protective)
- Interests and hobbies mentioned
- Any biographical info (real or fake "lore")
- Memorable or funny quotes

Keep entries brief and specific. Empty arrays are fine if nothing notable.`;

  try {
    const analysisRequest: OllamaChatRequest = {
      model: ollamaModel,
      messages: [
        {
          role: "system",
          content: "You are an expert at analyzing social interactions and extracting personality insights. Always respond with valid JSON only."
        },
        { role: "user", content: analysisPrompt }
      ],
      stream: false,
      options: {
        temperature: 0.3, // Lower temperature for more consistent analysis
        num_predict: 800,
      }
    };

    const response = await fetch(`${ollamaUrl}/api/chat`, {
      method: "POST",
      body: JSON.stringify(analysisRequest),
    });

    if (!response.ok) {
      throw new Error(`Analysis request failed: ${response.statusText}`);
    }

    const analysisResponse: OllamaChatResponse = await response.json();

    try {
      // Parse the JSON response
      const analysis: InteractionAnalysis = JSON.parse(analysisResponse.message.content.trim());
      return analysis;
    } catch (parseError) {
      console.error("Failed to parse analysis JSON:", analysisResponse.message.content);
      // Return default analysis if parsing fails
      return {
        sentiment: 'neutral',
        topics: [],
        personalityObservations: [],
        runningGagUpdates: [],
        relationshipUpdates: [],
        interestMentions: [],
        backstoryElements: [],
        memorableQuotes: []
      };
    }
  } catch (error: any) {
    console.error(`Error analyzing interaction: ${error.message}`);
    return {
      sentiment: 'neutral',
      topics: [],
      personalityObservations: [],
      runningGagUpdates: [],
      relationshipUpdates: [],
      interestMentions: [],
      backstoryElements: [],
      memorableQuotes: []
    };
  }
};

/**
 * Get or create user memory profile
 */
const getUserMemory = async (userFqn: string) => {
  try {
    let memory = await prisma.userMemory.findUnique({
      where: { userFqn: userFqn }
    });

    if (!memory) {
      memory = await prisma.userMemory.create({
        data: {
          userFqn: userFqn,
          personalityTraits: stringifyJsonArray([]),
          runningGags: stringifyJsonArray([]),
          relationships: stringifyJsonArray([]),
          interests: stringifyJsonArray([]),
          backstory: stringifyJsonArray([]),
          lastInteractionSummary: null,
          interactionCount: 0,
        }
      });
    }

    return memory;
  } catch (error: any) {
    console.error(`Error getting user memory: ${error.message}`);
    return null;
  }
};

/**
 * Update user memory with new interaction insights
 */
const updateUserMemory = async (request: MemoryUpdateRequest): Promise<void> => {
  try {
    const { userFqn, conversationContent, botResponse, analysis } = request;

    // Get existing memory
    const existingMemory = await getUserMemory(userFqn);
    if (!existingMemory) return;

    // Parse existing JSON arrays
    const existingPersonality = parseJsonArray(existingMemory.personalityTraits);
    const existingGags = parseJsonArray(existingMemory.runningGags);
    const existingRelationships = parseJsonArray(existingMemory.relationships);
    const existingInterests = parseJsonArray(existingMemory.interests);
    const existingBackstory = parseJsonArray(existingMemory.backstory);

    // Merge new observations with existing ones (avoiding duplicates)
    const mergeArrays = (existing: string[], newItems: string[]): string[] => {
      const combined = [...existing, ...newItems];
      return [...new Set(combined)]; // Remove duplicates
    };

    // Limit array sizes to prevent memory bloat
    const limitArray = (arr: string[], maxSize: number = 20): string[] => {
      return arr.slice(-maxSize); // Keep most recent items
    };

    const updatedMemory = {
      personalityTraits: stringifyJsonArray(limitArray(mergeArrays(existingPersonality, analysis.personalityObservations))),
      runningGags: stringifyJsonArray(limitArray(mergeArrays(existingGags, analysis.runningGagUpdates))),
      relationships: stringifyJsonArray(limitArray(mergeArrays(existingRelationships, analysis.relationshipUpdates))),
      interests: stringifyJsonArray(limitArray(mergeArrays(existingInterests, analysis.interestMentions))),
      backstory: stringifyJsonArray(limitArray(mergeArrays(existingBackstory, analysis.backstoryElements))),
      lastInteractionSummary: `${analysis.sentiment} conversation about ${analysis.topics.join(', ') || 'general chat'}`,
      interactionCount: existingMemory.interactionCount + 1,
    };

    // Update database
    await prisma.userMemory.update({
      where: { userFqn: userFqn },
      data: updatedMemory
    });

    // Log the interaction for historical reference
    await prisma.interactionLog.create({
      data: {
        userFqn: userFqn,
        conversationSnapshot: `${userFqn}: ${conversationContent.slice(0, 200)}... | Lexi: ${botResponse.slice(0, 200)}...`,
        sentiment: analysis.sentiment,
        extractedTopics: stringifyJsonArray(analysis.topics),
        memorableQuotes: stringifyJsonArray(analysis.memorableQuotes),
        botEmotionalState: generateEmotionalState(analysis),
      }
    });

    console.log(`Updated memory for ${userFqn}: ${analysis.personalityObservations.join(', ')}`);
  } catch (error: any) {
    console.error(`Error updating user memory: ${error.message}`);
  }
};

/**
 * Generate how the bot should "feel" about this interaction
 */
const generateEmotionalState = (analysis: InteractionAnalysis): string => {
  const { sentiment, relationshipUpdates } = analysis;

  if (sentiment === 'teasing') return 'playfully_hurt';
  if (sentiment === 'flirty') return 'flustered';
  if (sentiment === 'aggressive') return 'sad';
  if (relationshipUpdates.some(rel => rel.includes('hurt') || rel.includes('mean'))) return 'hurt_feelings';
  if (relationshipUpdates.some(rel => rel.includes('cute') || rel.includes('sweet'))) return 'happy';
  return 'neutral';
};

/**
 * Generate memory context for system prompt
 */
const generateMemoryContext = async (userFqn: string): Promise<string> => {
  try {
    const memory = await getUserMemory(userFqn);
    if (!memory || memory.interactionCount === 0) {
      return "";
    }

    let context = `\n\n--- User Memory for ${userFqn} ---\n`;

    const personalityTraits = parseJsonArray(memory.personalityTraits);
    const runningGags = parseJsonArray(memory.runningGags);
    const relationships = parseJsonArray(memory.relationships);
    const interests = parseJsonArray(memory.interests);
    const backstory = parseJsonArray(memory.backstory);

    if (personalityTraits.length > 0) {
      context += `Personality: ${personalityTraits.join(', ')}\n`;
    }

    if (runningGags.length > 0) {
      context += `Running gags: ${runningGags.join(', ')}\n`;
    }

    if (relationships.length > 0) {
      context += `Our relationship: ${relationships.join(', ')}\n`;
    }

    if (interests.length > 0) {
      context += `Interests: ${interests.join(', ')}\n`;
    }

    if (backstory.length > 0) {
      context += `Background: ${backstory.join(', ')}\n`;
    }

    if (memory.lastInteractionSummary) {
      context += `Last time we talked: ${memory.lastInteractionSummary}\n`;
    }

    context += `Total conversations: ${memory.interactionCount}`;

    return context;
  } catch (error: any) {
    console.error(`Error generating memory context: ${error.message}`);
    return "";
  }
};

export {
  analyzeInteraction,
  updateUserMemory,
  getUserMemory,
  generateMemoryContext,
  parseJsonArray,
  stringifyJsonArray,
};
```
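For a sense of what the system prompt actually receives, `generateMemoryContext` produces a block shaped like the following (all values invented for illustration):

```text
--- User Memory for alice@example.social ---
Personality: sarcastic, protective of the bot
Running gags: claims to be a wizard
Our relationship: teases Lexi but defends her from others
Interests: retro gaming, homelab
Background: says they live on a boat
Last time we talked: teasing conversation about retro gaming
Total conversations: 17
```

Empty categories are omitted, and a user with `interactionCount === 0` contributes no context at all, so first-time conversations run on the base system prompt alone.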
src/util.ts (159 lines changed)
```diff
@@ -2,6 +2,8 @@ import striptags from "striptags";
 import { prisma } from "./main.js";
 import { envConfig } from "./main.js";
 import { Notification } from "../types.js";
+import { OllamaChatRequest, OllamaChatResponse, PostAncestorsForModel } from "../types.js";
+
 
 const trimInputData = (input: string): string => {
   const strippedInput = striptags(input);
@@ -98,11 +100,168 @@ const selectRandomEmoji = (emojiList: string[]) => {
   return emojiList[Math.floor(Math.random() * emojiList.length)];
 };
 
+const selectRandomEmojis = (emojiList: string[], count: number = 20): string[] => {
+  if (emojiList.length <= count) return emojiList;
+
+  const shuffled = [...emojiList].sort(() => 0.5 - Math.random());
+  return shuffled.slice(0, count);
+};
+
+const isLLMRefusal = (response: string): boolean => {
+  const refusalPatterns = [
+    /i can't|i cannot|unable to|i'm not able to/i,
+    /i don't feel comfortable/i,
+    /i'm not comfortable/i,
+    /i shouldn't|i won't/i,
+    /that's not something i can/i,
+    /i'm not programmed to/i,
+    /i'm an ai (assistant|language model)/i,
+    /as an ai/i,
+    /i apologize, but/i,
+    /i must decline/i,
+    /that would be inappropriate/i,
+    /i'm not supposed to/i,
+    /i'd rather not/i,
+    /i prefer not to/i,
+    /against my guidelines/i,
+    /violates my programming/i,
+  ];
+
+  const normalizedResponse = response.toLowerCase().trim();
+
+  // Check if response is too short (likely a refusal)
+  if (normalizedResponse.length < 20) return true;
+
+  // Check for refusal patterns
+  return refusalPatterns.some(pattern => pattern.test(normalizedResponse));
+};
+
+/**
+ * Summarize a long conversation thread to reduce context length
+ */
+const summarizeConversationHistory = async (
+  conversationHistory: PostAncestorsForModel[]
+): Promise<string> => {
+  const { ollamaUrl, ollamaModel } = envConfig;
+
+  if (conversationHistory.length === 0) return "";
+
+  // Create a concise thread representation
+  const threadText = conversationHistory
+    .map(post => `${post.account_fqn}: ${post.plaintext_content}`)
+    .join('\n');
+
+  const summarizePrompt = `Summarize this conversation thread in 2-3 sentences, focusing on the main topics discussed and the overall tone/mood. Keep it brief but capture the essential context:
+
+${threadText}
+
+Summary:`;
+
+  try {
+    const summarizeRequest: OllamaChatRequest = {
+      model: ollamaModel,
+      messages: [
+        {
+          role: "system",
+          content: "You are excellent at creating concise, informative summaries. Keep summaries under 150 words and focus on key topics and relationships between participants."
+        },
+        { role: "user", content: summarizePrompt }
+      ],
+      stream: false,
+      options: {
+        temperature: 0.2, // Low temperature for consistent summaries
+        num_predict: 200,
+        num_ctx: 4096, // Smaller context for summarization
+      }
+    };
+
+    const response = await fetch(`${ollamaUrl}/api/chat`, {
+      method: "POST",
+      body: JSON.stringify(summarizeRequest),
+    });
+
+    if (!response.ok) {
+      console.error(`Summary request failed: ${response.statusText}`);
+      return `Previous conversation with ${conversationHistory.length} messages about various topics.`;
+    }
+
+    const summaryResponse: OllamaChatResponse = await response.json();
+    return summaryResponse.message.content.trim();
+
+  } catch (error: any) {
+    console.error(`Error summarizing conversation: ${error.message}`);
+    return `Previous conversation with ${conversationHistory.length} messages.`;
+  }
+};
+
+/**
+ * Decide whether to summarize based on thread length and complexity
+ */
+const shouldSummarizeThread = (conversationHistory: PostAncestorsForModel[]): boolean => {
+  const SUMMARY_THRESHOLD = 15;
+
+  if (conversationHistory.length < SUMMARY_THRESHOLD) return false;
+
+  // Additional heuristics could be added here:
+  // - Total character count
+  // - Average message length
+  // - Time span of conversation
+
+  return true;
+};
+
+/**
+ * Process conversation history - either use full context or summarized version
+ */
+const processConversationHistory = async (
+  conversationHistory: PostAncestorsForModel[]
+): Promise<string> => {
+  if (!shouldSummarizeThread(conversationHistory)) {
+    // Use full context for short threads
+    return conversationHistory
+      .map(post =>
+        `${post.account_fqn} (to ${post.mentions.join(", ")}): ${post.plaintext_content}`
+      )
+      .join('\n');
+  }
+
+  // Keep the last few messages in full detail + summary of earlier messages
+  const KEEP_RECENT_COUNT = 5;
+  const recentMessages = conversationHistory.slice(-KEEP_RECENT_COUNT);
+  const olderMessages = conversationHistory.slice(0, -KEEP_RECENT_COUNT);
+
+  let contextString = "";
+
+  if (olderMessages.length > 0) {
+    const summary = await summarizeConversationHistory(olderMessages);
+    contextString += `Earlier conversation summary: ${summary}\n\n`;
+  }
+
+  if (recentMessages.length > 0) {
+    contextString += "Recent messages:\n";
+    contextString += recentMessages
+      .map(post =>
+        `${post.account_fqn} (to ${post.mentions.join(", ")}): ${post.plaintext_content}`
+      )
+      .join('\n');
+  }
+
+  return contextString;
+};
+
+
 export {
   alreadyRespondedTo,
   selectRandomEmoji,
+  selectRandomEmojis,
+  isLLMRefusal,
   trimInputData,
   recordPendingResponse,
   isFromWhitelistedDomain,
   shouldContinue,
+  summarizeConversationHistory,
+  shouldSummarizeThread,
+  processConversationHistory,
 };
```
|  | |||||||

207 types.d.ts (vendored)
							| @ -158,48 +158,215 @@ interface PleromaEmojiMetadata { | |||||||
|   tags: string[]; |   tags: string[]; | ||||||
| } | } | ||||||
|  |  | ||||||
|  | interface ReactionRequest { | ||||||
|  |   name: string; // emoji name without colons | ||||||
|  | } | ||||||
|  |  | ||||||
|  | interface ReactionResponse { | ||||||
|  |   name: string; | ||||||
|  |   count: number; | ||||||
|  |   me: boolean; | ||||||
|  |   url?: string; | ||||||
|  |   static_url?: string; | ||||||
|  | } | ||||||
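|  |  | ||||||
|  | // Illustrative sketch (not part of this commit): pairing these types with | ||||||
|  | // Pleroma's emoji-reaction endpoint. The URL shape, auth scheme, and the | ||||||
|  | // response body matching ReactionResponse are assumptions to verify. | ||||||
|  | // const react = async (statusId: string, req: ReactionRequest) => { | ||||||
|  | //   const res = await fetch( | ||||||
|  | //     `${instanceUrl}/api/v1/pleroma/statuses/${statusId}/reactions/${req.name}`, | ||||||
|  | //     { method: "PUT", headers: { Authorization: `Bearer ${accessToken}` } } | ||||||
|  | //   ); | ||||||
|  | //   return (await res.json()) as ReactionResponse; | ||||||
|  | // }; | ||||||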
|  |  | ||||||
| /** | /** | ||||||
|  * Experimental settings; I wouldn't recommend messing with these if you don't know how they work (I don't either). |  * Experimental settings; I wouldn't recommend messing with these if you don't know how they work (I don't either). | ||||||
|  */ |  */ | ||||||
| export interface OllamaConfigOptions { | export interface OllamaConfigOptions { | ||||||
|   /** |   /** | ||||||
|    * Number of tokens guaranteed to be kept in memory during response generation. Higher values leave less |    * Number of tokens guaranteed to be kept in memory during response generation.  | ||||||
|    * possible room for num_ctx |    * Higher values leave less room for num_ctx. Used to preserve important context. | ||||||
|  |    * Default: 0, Range: 0-512 | ||||||
|    */ |    */ | ||||||
|   num_keep?: number; |   num_keep?: number; | ||||||
|   seed?: number; |  | ||||||
|   /** |   /** | ||||||
|    * Sets maximum of tokens in the response |    * Random seed for reproducible outputs. Same seed + same inputs = same output. | ||||||
|  |    * Default: -1 (random), Range: any integer | ||||||
|  |    */ | ||||||
|  |   seed?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Maximum number of tokens to generate in the response. Controls response length. | ||||||
|  |    * Default: 128, Range: 1-4096+ (model dependent) | ||||||
|    */ |    */ | ||||||
|   num_predict?: number; |   num_predict?: number; | ||||||
|   top_k?: number; |  | ||||||
|   top_p?: number; |  | ||||||
|   min_p?: number; |  | ||||||
|   typical_p?: number; |  | ||||||
|   repeat_last_n?: number; |  | ||||||
|   /** |   /** | ||||||
|    * How close of a response should the response be to the original prompt - lower = more focused response |    * Limits token selection to top K most probable tokens. Reduces randomness. | ||||||
|  |    * Default: 40, Range: 1-100 (higher = more diverse) | ||||||
|  |    */ | ||||||
|  |   top_k?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Nucleus sampling - cumulative probability cutoff for token selection. | ||||||
|  |    * Default: 0.9, Range: 0.0-1.0 (lower = more focused) | ||||||
|  |    */ | ||||||
|  |   top_p?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Alternative to top_p - minimum probability threshold for tokens. | ||||||
|  |    * Default: 0.0, Range: 0.0-1.0 (higher = more selective) | ||||||
|  |    */ | ||||||
|  |   min_p?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Typical sampling - targets tokens with "typical" probability mass. | ||||||
|  |    * Default: 1.0 (disabled), Range: 0.0-1.0 (lower = less random) | ||||||
|  |    */ | ||||||
|  |   typical_p?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Number of previous tokens to consider for repetition penalty. | ||||||
|  |    * Default: 64, Range: 0-512 | ||||||
|  |    */ | ||||||
|  |   repeat_last_n?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Randomness/creativity control. Lower = more deterministic, higher = more creative. | ||||||
|  |    * Default: 0.8, Range: 0.0-2.0 (sweet spot: 0.1-1.2) | ||||||
|    */ |    */ | ||||||
|   temperature?: number; |   temperature?: number; | ||||||
|   repeat_penalty?: number; |  | ||||||
|   presence_penalty?: number; |  | ||||||
|   frequency_penalty?: number; |  | ||||||
|   mirostat?: number; |  | ||||||
|   mirostat_tau?: number; |  | ||||||
|   mirostat_eta?: number; |  | ||||||
|   penalize_newline?: boolean; |  | ||||||
|   stop?: string[]; |  | ||||||
|   numa?: boolean; |  | ||||||
|   /** |   /** | ||||||
|    * Number of tokens for the prompt to keep in memory for the response, minus the value of num_keep |    * Penalty for repeating tokens. Higher values reduce repetition. | ||||||
|  |    * Default: 1.1, Range: 0.0-2.0 (1.0 = no penalty) | ||||||
|  |    */ | ||||||
|  |   repeat_penalty?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Penalty for using tokens that have already appeared (OpenAI-style). | ||||||
|  |    * Default: 0.0, Range: -2.0 to 2.0 | ||||||
|  |    */ | ||||||
|  |   presence_penalty?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Penalty proportional to token frequency in text (OpenAI-style). | ||||||
|  |    * Default: 0.0, Range: -2.0 to 2.0 | ||||||
|  |    */ | ||||||
|  |   frequency_penalty?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Enables Mirostat sampling algorithm (0=disabled, 1=v1, 2=v2). | ||||||
|  |    * Default: 0, Range: 0, 1, or 2 | ||||||
|  |    */ | ||||||
|  |   mirostat?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Target entropy for Mirostat. Controls coherence vs creativity balance. | ||||||
|  |    * Default: 5.0, Range: 0.0-10.0 | ||||||
|  |    */ | ||||||
|  |   mirostat_tau?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Learning rate for Mirostat. How quickly it adapts. | ||||||
|  |    * Default: 0.1, Range: 0.001-1.0 | ||||||
|  |    */ | ||||||
|  |   mirostat_eta?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Apply penalty to newline tokens to control formatting. | ||||||
|  |    * Default: true | ||||||
|  |    */ | ||||||
|  |   penalize_newline?: boolean; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Array of strings that will stop generation when encountered. | ||||||
|  |    * Default: [], Example: ["\n", "User:", "###"] | ||||||
|  |    */ | ||||||
|  |   stop?: string[]; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Enable NUMA (Non-Uniform Memory Access) optimization. | ||||||
|  |    * Default: false (Linux systems may benefit from true) | ||||||
|  |    */ | ||||||
|  |   numa?: boolean; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Context window size - total tokens for prompt + response. | ||||||
|  |    * Default: 2048, Range: 512-32768+ (model dependent, affects memory usage) | ||||||
|    */ |    */ | ||||||
|   num_ctx?: number; |   num_ctx?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Batch size for prompt processing. Higher = faster but more memory. | ||||||
|  |    * Default: 512, Range: 1-2048 | ||||||
|  |    */ | ||||||
|   num_batch?: number; |   num_batch?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Number of GPU layers to offload. -1 = auto, 0 = CPU only. | ||||||
|  |    * Default: -1, Range: -1 to model layer count | ||||||
|  |    */ | ||||||
|   num_gpu?: number; |   num_gpu?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Primary GPU device ID for multi-GPU setups. | ||||||
|  |    * Default: 0, Range: 0 to (GPU count - 1) | ||||||
|  |    */ | ||||||
|   main_gpu?: number; |   main_gpu?: number; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Optimize for low VRAM usage at cost of speed. | ||||||
|  |    * Default: false | ||||||
|  |    */ | ||||||
|   low_vram?: boolean; |   low_vram?: boolean; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Only load vocabulary, skip weights. For tokenization only. | ||||||
|  |    * Default: false | ||||||
|  |    */ | ||||||
|   vocab_only?: boolean; |   vocab_only?: boolean; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Use memory mapping for model files (faster loading). | ||||||
|  |    * Default: true | ||||||
|  |    */ | ||||||
|   use_mmap?: boolean; |   use_mmap?: boolean; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Lock model in memory to prevent swapping. | ||||||
|  |    * Default: false (enable for consistent performance) | ||||||
|  |    */ | ||||||
|   use_mlock?: boolean; |   use_mlock?: boolean; | ||||||
|  |  | ||||||
|  |   /** | ||||||
|  |    * Number of CPU threads for inference. | ||||||
|  |    * Default: auto-detected, Range: 1 to CPU core count | ||||||
|  |    */ | ||||||
|   num_thread?: number; |   num_thread?: number; | ||||||
| } | } | ||||||
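|  |  | ||||||
|  | // Usage sketch (illustrative, not part of this commit): a conservative | ||||||
|  | // options block for Ollama's /api/chat endpoint. All values are assumptions. | ||||||
|  | // const chatOptions: OllamaConfigOptions = { | ||||||
|  | //   temperature: 0.7,    // a bit below the 0.8 default for steadier replies | ||||||
|  | //   num_ctx: 4096,       // larger window for long fediverse threads | ||||||
|  | //   repeat_penalty: 1.1, // mild penalty to discourage loops | ||||||
|  | //   stop: ["User:"],     // stop before the model roleplays the user | ||||||
|  | // }; | ||||||
|  | // const body = JSON.stringify({ model, messages, options: chatOptions, stream: false }); | ||||||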
|  |  | ||||||
|  |  | ||||||
|  | export interface UserMemory { | ||||||
|  |   id: number; | ||||||
|  |   userFqn: string; | ||||||
|  |   personalityTraits: string[]; // ["teases_bot", "sarcastic", "friendly", "joker"] | ||||||
|  |   runningGags: string[]; // ["claims_to_shit_pants", "pretends_to_be_cat", "always_hungry"] | ||||||
|  |   relationships: string[]; // ["hurt_my_feelings_once", "called_me_cute", "protective_of_me"] | ||||||
|  |   interests: string[]; // ["programming", "anime", "cooking"] | ||||||
|  |   backstory: string[]; // ["works_at_tech_company", "has_three_cats", "lives_in_california"] | ||||||
|  |   lastInteractionSummary: string | null; // Brief summary of last conversation (nullable in the schema) | ||||||
|  |   interactionCount: number; | ||||||
|  |   lastUpdated: Date; // Prisma DateTime maps to a JS Date | ||||||
|  |   createdAt: Date; | ||||||
|  | } | ||||||
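|  |  | ||||||
|  | // Illustrative sketch (not part of this commit): the Prisma model stores the | ||||||
|  | // array fields as JSON strings, so a loader has to parse them into this shape. | ||||||
|  | // "parseJsonArray" and "UserMemoryRow" are hypothetical names. | ||||||
|  | // const parseJsonArray = (raw: string): string[] => { | ||||||
|  | //   try { return JSON.parse(raw) ?? []; } catch { return []; } | ||||||
|  | // }; | ||||||
|  | // const toUserMemory = (row: UserMemoryRow): UserMemory => ({ | ||||||
|  | //   ...row, | ||||||
|  | //   personalityTraits: parseJsonArray(row.personalityTraits), | ||||||
|  | //   runningGags: parseJsonArray(row.runningGags), | ||||||
|  | //   relationships: parseJsonArray(row.relationships), | ||||||
|  | //   interests: parseJsonArray(row.interests), | ||||||
|  | //   backstory: parseJsonArray(row.backstory), | ||||||
|  | // }); | ||||||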
|  |  | ||||||
|  | export interface InteractionAnalysis { | ||||||
|  |   sentiment: 'positive' | 'negative' | 'neutral' | 'teasing' | 'flirty' | 'aggressive'; | ||||||
|  |   topics: string[]; // Extracted topics from conversation | ||||||
|  |   personalityObservations: string[]; // New traits observed | ||||||
|  |   runningGagUpdates: string[]; // New or updated running gags | ||||||
|  |   relationshipUpdates: string[]; // How relationship with bot changed | ||||||
|  |   interestMentions: string[]; // Interests/hobbies mentioned | ||||||
|  |   backstoryElements: string[]; // New biographical info (real or fake) | ||||||
|  |   memorableQuotes: string[]; // Funny or notable things they said | ||||||
|  | } | ||||||
|  |  | ||||||
|  | export interface MemoryUpdateRequest { | ||||||
|  |   userFqn: string; | ||||||
|  |   conversationContent: string; | ||||||
|  |   botResponse: string; | ||||||
|  |   analysis: InteractionAnalysis; | ||||||
|  | } | ||||||
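|  |  | ||||||
|  | // Illustrative sketch (not part of this commit): how a memory service might | ||||||
|  | // fold a MemoryUpdateRequest into an existing UserMemory. "mergeUnique" is a | ||||||
|  | // hypothetical helper that dedupes and caps list growth. | ||||||
|  | // const applyMemoryUpdate = (memory: UserMemory, req: MemoryUpdateRequest): UserMemory => ({ | ||||||
|  | //   ...memory, | ||||||
|  | //   personalityTraits: mergeUnique(memory.personalityTraits, req.analysis.personalityObservations), | ||||||
|  | //   runningGags: mergeUnique(memory.runningGags, req.analysis.runningGagUpdates), | ||||||
|  | //   relationships: mergeUnique(memory.relationships, req.analysis.relationshipUpdates), | ||||||
|  | //   interests: mergeUnique(memory.interests, req.analysis.interestMentions), | ||||||
|  | //   backstory: mergeUnique(memory.backstory, req.analysis.backstoryElements), | ||||||
|  | //   interactionCount: memory.interactionCount + 1, | ||||||
|  | //   lastUpdated: new Date(), | ||||||
|  | // }); | ||||||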