import {
  NewStatusBody,
  Notification,
  OllamaConfigOptions,
  OllamaChatRequest,
  OllamaChatResponse,
  PostAncestorsForModel,
} from "../types.js";
import { PrismaClient } from "../generated/prisma/client.js";
import {
  deleteNotification,
  getNotifications,
  getStatusContext,
} from "./api.js";
import { storeUserData, storePromptData } from "./prisma.js";
import {
  isFromWhitelistedDomain,
  alreadyRespondedTo,
  recordPendingResponse,
  shouldContinue,
} from "./util.js";

export const prisma = new PrismaClient();

export const envConfig = {
  pleromaInstanceUrl: process.env.PLEROMA_INSTANCE_URL || "",
  pleromaInstanceDomain: process.env.PLEROMA_INSTANCE_DOMAIN || "",
  whitelistOnly: process.env.ONLY_WHITELIST === "true",
  whitelistedDomains: process.env.WHITELISTED_DOMAINS
    ? process.env.WHITELISTED_DOMAINS.split(",")
    : [process.env.PLEROMA_INSTANCE_DOMAIN || ""],
  ollamaUrl: process.env.OLLAMA_URL || "",
  ollamaSystemPrompt: process.env.OLLAMA_SYSTEM_PROMPT || "",
  ollamaModel: process.env.OLLAMA_MODEL || "",
  fetchInterval: process.env.FETCH_INTERVAL
    ? parseInt(process.env.FETCH_INTERVAL, 10)
    : 15000,
  bearerToken: process.env.INSTANCE_BEARER_TOKEN || "",
  adHocPostInterval: process.env.RANDOM_POST_INTERVAL
    ? parseInt(process.env.RANDOM_POST_INTERVAL, 10)
    : 3600000,
  botAccountId: process.env.PLEROMA_ACCOUNT_ID,
  replyWithContext: process.env.REPLY_WITH_CONTEXT === "true",
};

const ollamaConfig: OllamaConfigOptions = {
  temperature: 0.85, // increased from 0.6 - more creative and varied
  top_p: 0.9, // slightly increased for more diverse responses
  top_k: 40,
  num_ctx: 16384,
  repeat_penalty: 1.1, // reduced from 1.15 - less mechanical
  // stop: ['<|im_end|>', '\n\n']
};

// prompting tips that could be helpful:
// https://replicate.com/blog/how-to-prompt-llama

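/**
 * Build a chat request to the configured Ollama model from an incoming
 * notification, optionally including the thread's ancestor posts as context.
 * Returns the parsed Ollama response, or undefined when the notification is
 * skipped (fails shouldContinue, fails the whitelist check, or was already
 * answered).
 */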
const generateOllamaRequest = async (
  notification: Notification
): Promise<OllamaChatResponse | undefined> => {
  const {
    whitelistOnly,
    ollamaModel,
    ollamaSystemPrompt,
    ollamaUrl,
    replyWithContext,
  } = envConfig;
  try {
    if (shouldContinue(notification)) {
      if (whitelistOnly && !isFromWhitelistedDomain(notification)) {
        await deleteNotification(notification);
        return;
      }
      if (await alreadyRespondedTo(notification)) {
        return;
      }
      await recordPendingResponse(notification);
      await storeUserData(notification);
      let conversationHistory: PostAncestorsForModel[] = [];
      if (replyWithContext) {
        const contextPosts = await getStatusContext(notification.status.id);
        if (!contextPosts?.ancestors) {
          throw new Error(`Unable to obtain post context ancestors.`);
        }
        conversationHistory = contextPosts.ancestors.map((ancestor) => ({
          account_fqn: ancestor.account.fqn,
          mentions: ancestor.mentions.map((mention) => mention.acct),
          plaintext_content: ancestor.pleroma.content["text/plain"],
        }));
      }

      const userMessage = notification.status.pleroma.content["text/plain"];

      let systemContent = ollamaSystemPrompt;
      if (replyWithContext) {
        systemContent = `${ollamaSystemPrompt}
Previous conversation (JSON format):
${JSON.stringify(conversationHistory, null, 2)}

Instructions:
- Each entry shows: account_fqn (who posted), mentions (tagged users), and plaintext_content (message)
- The first mention is the direct recipient
- Address users with @ before their names
- Use markdown formatting and emojis sparingly`;
      }

      const ollamaRequestBody: OllamaChatRequest = {
        model: ollamaModel,
        messages: [
          { role: "system", content: systemContent },
          { role: "user", content: userMessage },
        ],
        stream: false,
        options: {
          ...ollamaConfig,
          stop: ["<|im_end|>", "\n\n"],
        },
      };

      // Ollama chat completion endpoint
      const response = await fetch(`${ollamaUrl}/api/chat`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify(ollamaRequestBody),
      });
      if (!response.ok) {
        throw new Error(`Ollama chat request failed: ${response.statusText}`);
      }
      const ollamaResponse: OllamaChatResponse = await response.json();

      await storePromptData(notification, ollamaResponse);
      return ollamaResponse;
    }
  } catch (error: any) {
    throw new Error(error.message);
  }
};

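/**
 * Post the generated reply to the instance as a response to the status that
 * triggered the notification, copying the original status's mentions into the
 * recipient list when present, then clear the notification.
 */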
const postReplyToStatus = async (
  notification: Notification,
  ollamaResponseBody: OllamaChatResponse
) => {
  const { pleromaInstanceUrl, bearerToken } = envConfig;

  try {
    let mentions: string[];
    const statusBody: NewStatusBody = {
      content_type: "text/markdown",
      status: ollamaResponseBody.message.content,
      in_reply_to_id: notification.status.id,
    };
    if (
      notification.status.mentions &&
      notification.status.mentions.length > 0
    ) {
      mentions = notification.status.mentions.map((mention) => {
        return mention.acct;
      });
      statusBody.to = mentions;
    }

    const response = await fetch(`${pleromaInstanceUrl}/api/v1/statuses`, {
      method: "POST",
      headers: {
        Authorization: `Bearer ${bearerToken}`,
        "Content-Type": "application/json",
      },
      body: JSON.stringify(statusBody),
    });

    if (!response.ok) {
      throw new Error(`New status request failed: ${response.statusText}`);
    }

    await deleteNotification(notification);
  } catch (error: any) {
    throw new Error(error.message);
  }
};

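/**
 * Ask the model for a standalone post and publish it to the bot's timeline
 * (not in reply to any status).
 */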
const createTimelinePost = async () => {
  const {
    bearerToken,
    ollamaModel,
    ollamaSystemPrompt,
    ollamaUrl,
    pleromaInstanceUrl,
  } = envConfig;
  const ollamaRequestBody: OllamaChatRequest = {
    model: ollamaModel,
    messages: [
      { role: "system", content: ollamaSystemPrompt as string },
      {
        role: "user",
        content:
          "Make a post about something. Keep your tone authentic, as if you are a real person making a post about a topic that interests you on a microblogging platform. This can be about anything like politics, gardening, homesteading, your favorite animal, a fun fact, what happened during your day, seeking companionship, baking, cooking, et cetera. Do not format the post with a title or quotes, nor sign the post with your name. It will be posted to your timeline so everyone will know you said it.",
      },
    ],
    stream: false,
    options: {
      ...ollamaConfig,
      stop: ["<|start_header_id|>", "<|end_header_id|>", "<|eot_id|>"],
    },
  };
  try {
    const response = await fetch(`${ollamaUrl}/api/chat`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(ollamaRequestBody),
    });
    if (!response.ok)
      throw new Error("Error generating ad-hoc Ollama response");

    const ollamaResponse: OllamaChatResponse = await response.json();

    const newStatusBody: NewStatusBody = {
      content_type: "text/markdown",
      status: ollamaResponse.message.content,
    };

    const pleromaResponse = await fetch(
      `${pleromaInstanceUrl}/api/v1/statuses`,
      {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${bearerToken}`,
        },
        body: JSON.stringify(newStatusBody),
      }
    );

    if (!pleromaResponse.ok)
      throw new Error("Error posting ad-hoc Ollama response to Pleroma");
  } catch (error: unknown) {
    if (error instanceof Error) {
      throw new Error(error.message);
    }
  }
};

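// Notification polling loop: fetch new notifications every fetchInterval ms
// and answer each mention with a generated reply.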
let notifications: Notification[] = [];
const beginFetchCycle = async () => {
  setInterval(async () => {
    notifications = await getNotifications();
    if (notifications.length > 0) {
      await Promise.all(
        notifications.map(async (notification) => {
          try {
            const ollamaResponse = await generateOllamaRequest(notification);
            if (ollamaResponse) {
              await postReplyToStatus(notification, ollamaResponse);
            }
          } catch (error: any) {
            throw new Error(error.message);
          }
        })
      );
    }
  }, envConfig.fetchInterval); // lower intervals may cause the bot to respond multiple times to the same message, but we try to mitigate this with the deleteNotification function
};

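/**
 * Publish an unprompted, ad-hoc timeline post on a fixed interval
 * (envConfig.adHocPostInterval).
 */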
const beginStatusPostInterval = async () => {
  setInterval(async () => {
    try {
      await createTimelinePost();
    } catch (error: unknown) {
      if (error instanceof Error) {
        throw new Error(error.message);
      }
    }
  }, envConfig.adHocPostInterval);
};

console.log(
  `Fetching notifications from ${envConfig.pleromaInstanceDomain} every ${
    envConfig.fetchInterval / 1000
  } seconds.`
);
console.log(
  `Making an ad-hoc post to ${envConfig.pleromaInstanceDomain} every ${
    envConfig.adHocPostInterval / 1000 / 60
  } minutes.`
);
console.log(
  `Accepting prompts from: ${envConfig.whitelistedDomains.join(", ")}`
);
console.log(
  `Using model: ${envConfig.ollamaModel}\nConfig: ${JSON.stringify(
    ollamaConfig
  )}`
);
console.log(`System prompt: ${envConfig.ollamaSystemPrompt}`);

await beginFetchCycle();
await beginStatusPostInterval();