From 150e2d638ecd5364d1dbffbbb96eab86666a5a99 Mon Sep 17 00:00:00 2001 From: matty Date: Sat, 2 Aug 2025 23:24:35 +0000 Subject: [PATCH] add configurable ad-hoc post interval --- .env.example | 1 + src/main.ts | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+) diff --git a/.env.example b/.env.example index 87460f4..2069bf3 100644 --- a/.env.example +++ b/.env.example @@ -7,4 +7,5 @@ OLLAMA_URL="http://localhost:11434" # OLLAMA connection URL OLLAMA_SYSTEM_PROMPT="" # system prompt - used to help tune the responses from the AI OLLAMA_MODEL="" # Ollama model for responses e.g dolphin-mistral:latest FETCH_INTERVAL="" # interval for fetching new notifications from the instance, in milliseconds, recommend at least 15000 +RANDOM_POST_INTERVAL="" # interval for ad-hoc timeline posts, in milliseconds, defaults to 3600000 (1 hour) if unset INSTANCE_BEARER_TOKEN="" # instance auth/bearer token (check the "verify_credentials" endpoint request headers in Chrome DevTools if on Soapbox) \ No newline at end of file diff --git a/src/main.ts b/src/main.ts index 14dbbe9..864b6b9 100644 --- a/src/main.ts +++ b/src/main.ts @@ -39,6 +39,9 @@ export const envConfig = { ? parseInt(process.env.FETCH_INTERVAL) : 15000, bearerToken: process.env.INSTANCE_BEARER_TOKEN || "", + adHocPostInterval: process.env.RANDOM_POST_INTERVAL + ? 
parseInt(process.env.RANDOM_POST_INTERVAL) + : 3600000, }; const ollamaConfig: OllamaConfigOptions = { @@ -138,6 +141,57 @@ const postReplyToStatus = async ( } }; +const createTimelinePost = async () => { + const { + bearerToken, + ollamaModel, + ollamaSystemPrompt, + ollamaUrl, + pleromaInstanceUrl, + } = envConfig; + const ollamaRequestBody: OllamaRequest = { + model: ollamaModel, + prompt: "Make a random post about a random topic.", + system: ollamaSystemPrompt, + stream: false, + // options: ollamaConfig, + }; + try { + const response = await fetch(`${ollamaUrl}/api/generate`, { + method: "POST", + body: JSON.stringify(ollamaRequestBody), + }); + if (!response.ok) + throw new Error("Error generating ad-hoc Ollama response"); + + const ollamaResponse: OllamaResponse = await response.json(); + + const newStatusBody: NewStatusBody = { + content_type: "text/markdown", + status: ollamaResponse.response, + }; + + const pleromaResponse = await fetch( + `${pleromaInstanceUrl}/api/v1/statuses`, + { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${bearerToken}`, + }, + body: JSON.stringify(newStatusBody), + } + ); + + if (!pleromaResponse.ok) + throw new Error("Error posting ad-hoc Ollama response to Pleroma"); + } catch (error: unknown) { + if (error instanceof Error) { + throw new Error(error.message); + } + } +}; + let notifications = []; const beginFetchCycle = async () => { setInterval(async () => { @@ -159,6 +213,18 @@ const beginFetchCycle = async () => { }, envConfig.fetchInterval); // lower intervals may cause the bot to respond multiple times to the same message, but we try to mitigate this with the deleteNotification function }; +const beginStatusPostInterval = async () => { + setInterval(async () => { + try { + createTimelinePost(); + } catch (error: unknown) { + if (error instanceof Error) { + throw new Error(error.message); + } + } + }, envConfig.adHocPostInterval); +}; + console.log( `Fetching notifications 
from ${envConfig.pleromaInstanceDomain}, every ${ envConfig.fetchInterval / 1000 @@ -173,4 +239,6 @@ console.log( )}` ); console.log(`System prompt: ${envConfig.ollamaSystemPrompt}`); + await beginFetchCycle(); +await beginStatusPostInterval();