Compare commits

3 Commits

SHA1 Message Date
b6ad54f40a way better responsiveness, better system prompt 2025-08-03 14:37:23 +00:00
2f3d16dbc5 slight update to input processing 2025-08-03 14:24:50 +00:00
150e2d638e add configurable ad-hoc post interval 2025-08-02 23:24:35 +00:00
3 changed files with 97 additions and 11 deletions

View File

@@ -7,4 +7,5 @@ OLLAMA_URL="http://localhost:11434" # OLLAMA connection URL
OLLAMA_SYSTEM_PROMPT="" # system prompt - used to help tune the responses from the AI
OLLAMA_MODEL="" # Ollama model for responses, e.g. dolphin-mistral:latest
FETCH_INTERVAL="" # interval for fetching new notifications from the instance, in milliseconds, recommend at least 15000
RANDOM_POST_INTERVAL="" # interval for ad-hoc posts in milliseconds
INSTANCE_BEARER_TOKEN="" # instance auth/bearer token (check the "verify_credentials" endpoint request headers in Chrome DevTools if on Soapbox)
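For reference, a filled-in version of this file might look like the following; the values here are illustrative, with the two interval numbers taken from the fallback defaults in the parsing code below:

OLLAMA_URL="http://localhost:11434"
OLLAMA_SYSTEM_PROMPT="You are a helpful assistant."
OLLAMA_MODEL="dolphin-mistral:latest"
FETCH_INTERVAL="15000"          # 15 seconds
RANDOM_POST_INTERVAL="3600000"  # 1 hour, matching the code's default
INSTANCE_BEARER_TOKEN="<your token here>"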

View File

@@ -7,7 +7,7 @@ import {
OllamaRequest,
OllamaResponse,
} from "../types.js";
import striptags from "striptags";
// import striptags from "striptags";
import { PrismaClient } from "../generated/prisma/client.js";
import {
getInstanceEmojis,
@@ -39,13 +39,17 @@ export const envConfig = {
? parseInt(process.env.FETCH_INTERVAL)
: 15000,
bearerToken: process.env.INSTANCE_BEARER_TOKEN || "",
adHocPostInterval: process.env.RANDOM_POST_INTERVAL
? parseInt(process.env.RANDOM_POST_INTERVAL)
: 3600000,
};
const ollamaConfig: OllamaConfigOptions = {
temperature: 0.2,
top_p: 0.9,
top_k: 30,
temperature: 0.6,
top_p: 0.85,
top_k: 40,
num_ctx: 2048,
repeat_penalty: 1.1,
};
// this could be helpful
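The new values trade some determinism for variety: a higher temperature and top_k widen the sampling pool, the slightly lower top_p trims the long tail, num_ctx caps the context window at 2048 tokens, and repeat_penalty discourages loops. These keys map directly onto the options field that Ollama's /api/generate endpoint accepts; a minimal sketch of the resulting request body (model and prompts are placeholders):

const body: OllamaRequest = {
  model: "dolphin-mistral:latest", // illustrative model name
  prompt: "alice@example.social says: hello",
  system: "You are a helpful assistant.", // placeholder system prompt
  stream: false,
  options: { temperature: 0.6, top_p: 0.85, top_k: 40, num_ctx: 2048, repeat_penalty: 1.1 },
};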
@@ -58,10 +62,10 @@ const generateOllamaRequest = async (
envConfig;
try {
if (
striptags(notification.status.content).includes("!prompt") &&
// striptags(notification.status.content).includes("!prompt") &&
!notification.status.account.bot && // sanity check, sort of
notification.type === "mention" &&
notification.status.visibility !== "private" // for safety, let's only respond to public messages
notification.type === "mention" // &&
// notification.status.visibility !== "private" // for safety, let's only respond to public messages
) {
if (whitelistOnly && !isFromWhitelistedDomain(notification)) {
await deleteNotification(notification);
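With the !prompt and visibility checks commented out, the effective gate reduces to the sketch below, so the bot now answers any mention from a non-bot account, including private ones:

if (!notification.status.account.bot && notification.type === "mention") {
  // generate and post a reply
}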
@@ -72,12 +76,16 @@
}
await recordPendingResponse(notification);
await storeUserData(notification);
// console.log(trimInputData(notification.status.content));
const ollamaRequestBody: OllamaRequest = {
model: ollamaModel,
prompt: trimInputData(notification.status.content),
// prompt: trimInputData(notification.status.content),
prompt: `${notification.status.account.fqn} says: ${trimInputData(
notification.status.content
)}`,
system: ollamaSystemPrompt,
stream: false,
// options: ollamaConfig,
options: ollamaConfig,
};
const response = await fetch(`${ollamaUrl}/api/generate`, {
method: "POST",
@@ -138,6 +146,57 @@
}
};
const createTimelinePost = async () => {
const {
bearerToken,
ollamaModel,
ollamaSystemPrompt,
ollamaUrl,
pleromaInstanceUrl,
} = envConfig;
const ollamaRequestBody: OllamaRequest = {
model: ollamaModel,
prompt: "Say something random.",
system: ollamaSystemPrompt,
stream: false,
// options: ollamaConfig,
};
try {
const response = await fetch(`${ollamaUrl}/api/generate`, {
method: "POST",
body: JSON.stringify(ollamaRequestBody),
});
if (!response.ok)
throw new Error("Error generating ad-hoc Ollama response");
const ollamaResponse: OllamaResponse = await response.json();
const newStatusBody: NewStatusBody = {
content_type: "text/markdown",
status: ollamaResponse.response,
};
const pleromaResponse = await fetch(
`${pleromaInstanceUrl}/api/v1/statuses`,
{
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${bearerToken}`,
},
body: JSON.stringify(newStatusBody),
}
);
if (!pleromaResponse.ok)
throw new Error("Error posting ad-hoc Ollama response to Pleroma");
} catch (error: unknown) {
if (error instanceof Error) {
throw new Error(error.message);
}
}
};
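For context, a non-streaming /api/generate call returns JSON along these lines (abridged, values illustrative); only the response field is used here:

// {
//   "model": "dolphin-mistral:latest",
//   "created_at": "2025-08-03T14:37:23Z",
//   "response": "Did you know honey never spoils?",
//   "done": true
// }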
let notifications = [];
const beginFetchCycle = async () => {
setInterval(async () => {
@@ -159,6 +218,18 @@ const beginFetchCycle = async () => {
}, envConfig.fetchInterval); // lower intervals may cause the bot to respond multiple times to the same message, but we try to mitigate this with the deleteNotification function
};
const beginStatusPostInterval = async () => {
setInterval(async () => {
try {
createTimelinePost();
} catch (error: unknown) {
if (error instanceof Error) {
throw new Error(error.message);
}
}
}, envConfig.adHocPostInterval);
};
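One caveat worth noting: createTimelinePost() is not awaited, so a rejected promise escapes this try/catch entirely. A sketch of a safer variant, which logs errors instead of re-throwing (a throw inside an interval callback would go unhandled anyway):

const beginStatusPostInterval = () => {
  setInterval(async () => {
    try {
      await createTimelinePost(); // awaiting lets the catch below see rejections
    } catch (error: unknown) {
      if (error instanceof Error) console.error(error.message);
    }
  }, envConfig.adHocPostInterval);
};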
console.log(
`Fetching notifications from ${envConfig.pleromaInstanceDomain}, every ${
envConfig.fetchInterval / 1000
@@ -173,4 +244,9 @@ console.log(
)}`
);
console.log(`System prompt: ${envConfig.ollamaSystemPrompt}`);
await beginFetchCycle();
// setInterval(async () => {
// createTimelinePost();
// }, 10000);
await beginStatusPostInterval();

View File

@@ -5,9 +5,18 @@ import { Notification } from "../types.js";
const trimInputData = (input: string): string => {
const strippedInput = striptags(input);
const split = strippedInput.split(" ");
const promptStringIndex = split.indexOf("!prompt");
split.splice(promptStringIndex, 1);
// const promptStringIndex = split.indexOf("!prompt");
const botFqnIndex = split.indexOf("@nice-ai");
const botFqnIndexFull = split.indexOf("@nice-ai@nicecrew.digital");
if (botFqnIndex !== -1) {
split[botFqnIndex] = "Lexi";
}
if (botFqnIndexFull !== -1) {
split[botFqnIndexFull] = "Lexi";
}
// split.splice(promptStringIndex, 1);
return split.join(" "); // return the cleaned text with the bot's handle swapped for "Lexi" (the !prompt handling above is retired)
};
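A quick illustration of the transformation, assuming striptags removes HTML tags:

trimInputData('<p><span>@nice-ai</span> what is the weather like?</p>');
// → "Lexi what is the weather like?"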