diff --git a/src/main.ts b/src/main.ts index 6370c7c..f67ca1e 100644 --- a/src/main.ts +++ b/src/main.ts @@ -4,7 +4,7 @@ import { OllamaConfigOptions, OllamaChatRequest, OllamaChatResponse, - PostAncestorsForModel, + // PostAncestorsForModel, } from "../types.js"; import { PrismaClient } from "../generated/prisma/client.js"; import { @@ -65,70 +65,128 @@ const generateOllamaRequest = async ( ollamaUrl, replyWithContext, } = envConfig; + + let shouldDeleteNotification = false; + try { - if (shouldContinue(notification)) { - if (whitelistOnly && !isFromWhitelistedDomain(notification)) { - await deleteNotification(notification); - return; - } - if (await alreadyRespondedTo(notification)) { - return; - } - await recordPendingResponse(notification); - await storeUserData(notification); - let conversationHistory: PostAncestorsForModel[] = []; - if (replyWithContext) { - const contextPosts = await getStatusContext(notification.status.id); - if (!contextPosts?.ancestors) { - throw new Error(`Unable to obtain post context ancestors.`); - } - conversationHistory = contextPosts.ancestors.map((ancestor) => ({ - account_fqn: ancestor.account.fqn, - mentions: ancestor.mentions.map((mention) => mention.acct), - plaintext_content: ancestor.pleroma.content["text/plain"], - })); + if (!shouldContinue(notification)) { + shouldDeleteNotification = true; + return; + } + + if (whitelistOnly && !isFromWhitelistedDomain(notification)) { + shouldDeleteNotification = true; + return; + } + + if (await alreadyRespondedTo(notification)) { + shouldDeleteNotification = true; + return; + } + + await recordPendingResponse(notification); + await storeUserData(notification); + + let conversationContext = ""; + if (replyWithContext) { + const contextPosts = await getStatusContext(notification.status.id); + if (!contextPosts?.ancestors) { + throw new Error(`Unable to obtain post context ancestors.`); } - const userMessage = notification.status.pleroma.content["text/plain"]; + // Build a human-readable 
conversation thread + const allPosts = [...contextPosts.ancestors]; - let systemContent = ollamaSystemPrompt; - if (replyWithContext) { - systemContent = `${ollamaSystemPrompt} -Previous conversation (JSON format): -${JSON.stringify(conversationHistory, null, 2)} + // Include descendants (follow-up posts) if available + if (contextPosts.descendents && contextPosts.descendents.length > 0) { + allPosts.push(...contextPosts.descendents); + } + + if (allPosts.length > 0) { + const conversationLines = allPosts.map((post) => { + const author = post.account.fqn; + const content = post.pleroma.content["text/plain"]; + const replyingTo = post.in_reply_to_account_id + ? ` (replying to another message)` + : ""; + return `[@${author}${replyingTo}]: ${content}`; + }); + + conversationContext = ` +Previous conversation thread: +${conversationLines.join("\n\n")} +--- +`; + } + } + + const userMessage = notification.status.pleroma.content["text/plain"]; + const originalAuthor = notification.account.fqn; + + let systemContent = ollamaSystemPrompt; + if (replyWithContext && conversationContext) { + systemContent = `${ollamaSystemPrompt} + +${conversationContext} +Current message from @${originalAuthor}: +"${userMessage}" Instructions: -- Each entry shows: account_fqn (who posted), mentions (tagged users), and plaintext_content (message) -- The first mention is the direct recipient -- Address users with @ before their names +- You are replying to @${originalAuthor} +- Address them directly if appropriate - Use markdown formatting and emojis sparingly`; - } - - const ollamaRequestBody: OllamaChatRequest = { - model: ollamaModel, - messages: [ - { role: "system", content: systemContent }, - { role: "user", content: userMessage }, - ], - stream: false, - options: { - ...ollamaConfig, - stop: ["<|im_end|>", "\n\n"], - }, - }; - - // Change endpoint to /api/chat - const response = await fetch(`${ollamaUrl}/api/chat`, { - method: "POST", - body: JSON.stringify(ollamaRequestBody), - }); - 
const ollamaResponse: OllamaChatResponse = await response.json(); - - await storePromptData(notification, ollamaResponse); - return ollamaResponse; } + + const ollamaRequestBody: OllamaChatRequest = { + model: ollamaModel, + messages: [ + { role: "system", content: systemContent }, + { role: "user", content: userMessage }, + ], + stream: false, + options: { + ...ollamaConfig, + stop: ["</s>", "[INST]"], // Mistral 0.3 stop tokens + }, + }; + + console.log( + `Generating response for notification ${notification.id} from @${originalAuthor}` + ); + + // Change endpoint to /api/chat + const response = await fetch(`${ollamaUrl}/api/chat`, { + method: "POST", + body: JSON.stringify(ollamaRequestBody), + }); + + if (!response.ok) { + throw new Error(`Ollama API request failed: ${response.statusText}`); + } + + const ollamaResponse: OllamaChatResponse = await response.json(); + + await storePromptData(notification, ollamaResponse); + return ollamaResponse; } catch (error: any) { - throw new Error(error.message); + console.error( + `Error in generateOllamaRequest for notification ${notification.id}:`, + error.message + ); + // Delete notification on error to prevent retry loops + shouldDeleteNotification = true; + throw error; + } finally { + if (shouldDeleteNotification) { + try { + await deleteNotification(notification); + } catch (deleteError: any) { + console.error( + `Failed to delete notification ${notification.id}:`, + deleteError.message + ); + } + } } }; @@ -139,21 +197,26 @@ const postReplyToStatus = async ( const { pleromaInstanceUrl, bearerToken } = envConfig; try { - let mentions: string[]; + // Only mention the original author who triggered the bot + const originalAuthor = notification.account.acct; + console.log( + `Replying to: @${originalAuthor} (status ID: ${notification.status.id})` + ); + + // Sanitize LLM output - remove any stray Mistral special tokens + let sanitizedContent = ollamaResponseBody.message.content + .replace(/<\/s>/g, "") // Remove EOS token 
if it appears + .replace(/\[INST\]/g, "") // Remove instruction start token + .replace(/\[\/INST\]/g, "") // Remove instruction end token + .replace(/<s>/g, "") // Remove BOS token if it appears + .trim(); + const statusBody: NewStatusBody = { content_type: "text/markdown", - status: ollamaResponseBody.message.content, + status: sanitizedContent, in_reply_to_id: notification.status.id, + to: [originalAuthor], // Only send to the person who mentioned the bot }; - if ( - notification.status.mentions && - notification.status.mentions.length > 0 - ) { - mentions = notification.status.mentions.map((mention) => { - return mention.acct; - }); - statusBody.to = mentions; - } const response = await fetch(`${pleromaInstanceUrl}/api/v1/statuses`, { method: "POST", @@ -168,9 +231,23 @@ const postReplyToStatus = async ( throw new Error(`New status request failed: ${response.statusText}`); } - await deleteNotification(notification); + console.log(`Successfully posted reply to @${originalAuthor}`); } catch (error: any) { - throw new Error(error.message); + console.error( + `Error posting reply for notification ${notification.id}:`, + error.message + ); + throw error; + } finally { + // Always try to delete the notification, even if posting failed + try { + await deleteNotification(notification); + } catch (deleteError: any) { + console.error( + `Failed to delete notification ${notification.id}:`, + deleteError.message + ); + } } }; @@ -195,7 +272,7 @@ const createTimelinePost = async () => { stream: false, options: { + ...ollamaConfig, - stop: ["<|start_header_id|>", "<|end_header_id|>", "<|eot_id|>"], + stop: ["</s>", "[INST]"], // Mistral 0.3 stop tokens }, }; try { @@ -239,18 +316,21 @@ const beginFetchCycle = async () => { setInterval(async () => { notifications = await getNotifications(); if (notifications.length > 0) { - await Promise.all( - notifications.map(async (notification) => { - try { - const ollamaResponse = await generateOllamaRequest(notification); - if (ollamaResponse) { 
- postReplyToStatus(notification, ollamaResponse); - } - } catch (error: any) { - throw new Error(error.message); + // Process notifications sequentially to avoid race conditions + for (const notification of notifications) { + try { + const ollamaResponse = await generateOllamaRequest(notification); + if (ollamaResponse) { + await postReplyToStatus(notification, ollamaResponse); } - }) - ); + } catch (error: any) { + console.error( + `Error processing notification ${notification.id}:`, + error.message + ); + // Continue processing other notifications even if one fails + } + } } }, envConfig.fetchInterval); // lower intervals may cause the bot to respond multiple times to the same message, but we try to mitigate this with the deleteNotification function };