add configurable ad-hoc post interval
@@ -7,4 +7,5 @@ OLLAMA_URL="http://localhost:11434" # OLLAMA connection URL
 OLLAMA_SYSTEM_PROMPT="" # system prompt - used to help tune the responses from the AI
 OLLAMA_MODEL="" # Ollama model for responses e.g dolphin-mistral:latest
 FETCH_INTERVAL="" # interval for fetching new notifications from the instance, in milliseconds, recommend at least 15000
+RANDOM_POST_INTERVAL="" # interval for ad-hoc posts, in milliseconds
 INSTANCE_BEARER_TOKEN="" # instance auth/bearer token (check the "verify_credentials" endpoint request headers in Chrome DevTools if on Soapbox)
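As an illustrative value (not part of this commit), RANDOM_POST_INTERVAL="3600000" would produce roughly one ad-hoc post per hour; when the variable is left empty, the config change in src/main.ts below falls back to the same 3600000 ms default.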
							
								
								
									
src/main.ts
@@ -39,6 +39,9 @@ export const envConfig = {
     ? parseInt(process.env.FETCH_INTERVAL)
     : 15000,
   bearerToken: process.env.INSTANCE_BEARER_TOKEN || "",
+  adHocPostInterval: process.env.RANDOM_POST_INTERVAL
+    ? parseInt(process.env.RANDOM_POST_INTERVAL)
+    : 3600000,
 };
 
 const ollamaConfig: OllamaConfigOptions = {
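parseInt returns NaN for a non-numeric value, and a NaN delay makes setInterval fall back to its minimum, firing far more often than intended. A stricter variant of this parse (a sketch, not part of the commit, assuming the same envConfig shape) could guard against that:

const rawInterval = parseInt(process.env.RANDOM_POST_INTERVAL ?? "", 10); // NaN if unset or invalid
const adHocPostInterval = Number.isNaN(rawInterval) ? 3600000 : rawInterval; // default: one hour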
@@ -138,6 +141,57 @@ const postReplyToStatus = async (
   }
 };
 
+const createTimelinePost = async () => {
+  const {
+    bearerToken,
+    ollamaModel,
+    ollamaSystemPrompt,
+    ollamaUrl,
+    pleromaInstanceUrl,
+  } = envConfig;
+  const ollamaRequestBody: OllamaRequest = {
+    model: ollamaModel,
+    prompt: "Make a random post about a random topic.",
+    system: ollamaSystemPrompt,
+    stream: false,
+    // options: ollamaConfig,
+  };
+  try {
+    const response = await fetch(`${ollamaUrl}/api/generate`, {
+      method: "POST",
+      body: JSON.stringify(ollamaRequestBody),
+    });
+    if (!response.ok)
+      throw new Error("Error generating ad-hoc Ollama response");
+
+    const ollamaResponse: OllamaResponse = await response.json();
+
+    const newStatusBody: NewStatusBody = {
+      content_type: "text/markdown",
+      status: ollamaResponse.response,
+    };
+
+    const pleromaResponse = await fetch(
+      `${pleromaInstanceUrl}/api/v1/statuses`,
+      {
+        method: "POST",
+        headers: {
+          "Content-Type": "application/json",
+          Authorization: `Bearer ${bearerToken}`,
+        },
+        body: JSON.stringify(newStatusBody),
+      }
+    );
+
+    if (!pleromaResponse.ok)
+      throw new Error("Error posting ad-hoc Ollama response to Pleroma");
+  } catch (error: unknown) {
+    if (error instanceof Error) {
+      throw new Error(error.message);
+    }
+  }
+};
+
 let notifications = [];
 const beginFetchCycle = async () => {
   setInterval(async () => {
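createTimelinePost leans on three types defined elsewhere in the repository (OllamaRequest, OllamaResponse, NewStatusBody). A minimal sketch of just the fields this function touches, inferred from the calls above rather than from the actual definitions, which may carry more fields:

interface OllamaRequest {
  model: string;    // Ollama model name, e.g. dolphin-mistral:latest
  prompt: string;   // text the model should respond to
  system: string;   // system prompt used to tune responses
  stream: boolean;  // false: return a single complete response
}

interface OllamaResponse {
  response: string; // generated text, reused as the status body
}

interface NewStatusBody {
  content_type: string; // "text/markdown" here
  status: string;       // post content
}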
@@ -159,6 +213,18 @@ const beginFetchCycle = async () => {
   }, envConfig.fetchInterval); // lower intervals may cause the bot to respond multiple times to the same message, but we try to mitigate this with the deleteNotification function
 };
 
+const beginStatusPostInterval = async () => {
+  setInterval(async () => {
+    try {
+      await createTimelinePost();
+    } catch (error: unknown) {
+      if (error instanceof Error) {
+        throw new Error(error.message);
+      }
+    }
+  }, envConfig.adHocPostInterval);
+};
+
 console.log(
   `Fetching notifications from ${envConfig.pleromaInstanceDomain}, every ${
     envConfig.fetchInterval / 1000
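Rethrowing inside an async setInterval callback only surfaces as an unhandled promise rejection, since nothing awaits the callback. A variant that logs the failure and keeps the posting loop alive could look like this (a sketch, not the committed code):

const beginStatusPostInterval = () => {
  setInterval(() => {
    // log and continue; one failed generation should not stop future posts
    createTimelinePost().catch((error: unknown) => {
      if (error instanceof Error) {
        console.error(`Ad-hoc post failed: ${error.message}`);
      }
    });
  }, envConfig.adHocPostInterval);
};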
@@ -173,4 +239,6 @@ console.log(
   )}`
 );
 console.log(`System prompt: ${envConfig.ollamaSystemPrompt}`);
+
 await beginFetchCycle();
+await beginStatusPostInterval();