diff --git a/.env.example b/.env.example
index 2069bf3..835f81d 100644
--- a/.env.example
+++ b/.env.example
@@ -7,5 +7,5 @@ OLLAMA_URL="http://localhost:11434" # OLLAMA connection URL
 OLLAMA_SYSTEM_PROMPT="" # system prompt - used to help tune the responses from the AI
 OLLAMA_MODEL="" # Ollama model for responses e.g dolphin-mistral:latest
 FETCH_INTERVAL="" # interval for fetching new notifications from the instance, in milliseconds, recommend at least 15000
-RANDOM_POST_INTERVAL="" # interval for ad-hoc posts
+RANDOM_POST_INTERVAL="" # interval for ad-hoc posts in milliseconds
 INSTANCE_BEARER_TOKEN="" # instance auth/bearer token (check the "verify_credentials" endpoint request headers in Chrome DevTools if on Soapbox)
\ No newline at end of file
diff --git a/src/main.ts b/src/main.ts
index 864b6b9..959efdd 100644
--- a/src/main.ts
+++ b/src/main.ts
@@ -45,10 +45,11 @@ export const envConfig = {
 };
 
 const ollamaConfig: OllamaConfigOptions = {
-  temperature: 0.2,
-  top_p: 0.9,
-  top_k: 30,
+  temperature: 0.6,
+  top_p: 0.85,
+  top_k: 40,
   num_ctx: 2048,
+  repeat_penalty: 1.1,
 };
 
 // this could be helpful
@@ -63,8 +64,8 @@ const generateOllamaRequest = async (
   if (
     striptags(notification.status.content).includes("!prompt") &&
     !notification.status.account.bot && // sanity check, sort of
-    notification.type === "mention" &&
-    notification.status.visibility !== "private" // for safety, let's only respond to public messages
+    notification.type === "mention" // &&
+    // notification.status.visibility !== "private" // for safety, let's only respond to public messages
   ) {
     if (whitelistOnly && !isFromWhitelistedDomain(notification)) {
       await deleteNotification(notification);
@@ -75,12 +76,16 @@ const generateOllamaRequest = async (
     }
     await recordPendingResponse(notification);
     await storeUserData(notification);
+    console.log(trimInputData(notification.status.content));
     const ollamaRequestBody: OllamaRequest = {
       model: ollamaModel,
-      prompt: trimInputData(notification.status.content),
+      // prompt: trimInputData(notification.status.content),
+      prompt: `${notification.status.account.fqn} says: ${trimInputData(
+        notification.status.content
+      )}`,
       system: ollamaSystemPrompt,
       stream: false,
-      // options: ollamaConfig,
+      options: ollamaConfig,
     };
     const response = await fetch(`${ollamaUrl}/api/generate`, {
       method: "POST",
@@ -145,14 +150,15 @@ const createTimelinePost = async () => {
   const {
     bearerToken,
     ollamaModel,
-    ollamaSystemPrompt,
+    // ollamaSystemPrompt,
     ollamaUrl,
     pleromaInstanceUrl,
   } = envConfig;
   const ollamaRequestBody: OllamaRequest = {
     model: ollamaModel,
-    prompt: "Make a random post about a random topic.",
-    system: ollamaSystemPrompt,
+    prompt: "Say something random.",
+    system:
+      "You are a friendly AI assistant who loves to educate people on random topics, provide words of encouragement. You like to be as detailed as possible.",
     stream: false,
     // options: ollamaConfig,
   };
@@ -241,4 +247,7 @@ console.log(
 console.log(`System prompt: ${envConfig.ollamaSystemPrompt}`);
 
 await beginFetchCycle();
+// setInterval(async () => {
+//   createTimelinePost();
+// }, 10000);
 await beginStatusPostInterval();
diff --git a/src/util.ts b/src/util.ts
index 0123b24..cd28246 100644
--- a/src/util.ts
+++ b/src/util.ts
@@ -4,9 +4,18 @@ import { envConfig } from "./main.js";
 import { Notification } from "../types.js";
 
 const trimInputData = (input: string): string => {
-  const strippedInput = striptags(input);
+  const strippedInput = striptags(input, [], "\n");
+
   const split = strippedInput.split(" ");
   const promptStringIndex = split.indexOf("!prompt");
+  const botFqnIndex = split.indexOf("@nice-ai");
+  const botFqnIndexFull = split.indexOf("@nice-ai@nicecrew.digital");
+  if (botFqnIndex !== -1) {
+    split[botFqnIndex] = "Lexi";
+  }
+  if (botFqnIndexFull !== -1) {
+    split[botFqnIndexFull] = "Lexi";
+  }
   split.splice(promptStringIndex, 1);
   return split.join(" "); // returns everything after the !prompt
 };