Compare commits

...

6 Commits

SHA1 Message Date
2a53b0a827 seems to work a little better now 2025-10-28 17:22:45 +00:00
051a66ff26 new baseline 2025-10-28 16:56:53 +00:00
ee367a0d9a update systemd service 2025-10-04 01:30:13 +00:00
e696343a73 a crumb of changes 2025-08-12 19:07:22 +00:00
88a0710c55 update system prompt to hopefully make it easier for Lexi to understand 2025-08-04 21:08:47 +00:00
75fa4cea8b jorkin my preanits 2025-08-04 11:56:06 +00:00
3 changed files with 189 additions and 112 deletions

.gitignore

@@ -1,6 +1,6 @@
node_modules
# Keep environment variables out of version control
.env
.env*
*.log
*.db
/dist
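Widening .env to the glob .env* keeps variant env files out of version control as well; for instance (hypothetical file names):

.env             # ignored before and after this change
.env.local       # only matched by .env*
.env.production  # only matched by .env*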


@@ -4,12 +4,10 @@ import {
OllamaConfigOptions,
OllamaChatRequest,
OllamaChatResponse,
PostAncestorsForModel,
// PostAncestorsForModel,
} from "../types.js";
// import striptags from "striptags";
import { PrismaClient } from "../generated/prisma/client.js";
import {
getInstanceEmojis,
deleteNotification,
getNotifications,
getStatusContext,
@@ -19,8 +17,6 @@ import {
isFromWhitelistedDomain,
alreadyRespondedTo,
recordPendingResponse,
// trimInputData,
selectRandomEmoji,
shouldContinue,
} from "./util.js";
@@ -34,7 +30,7 @@ export const envConfig = {
? process.env.WHITELISTED_DOMAINS.split(",")
: [process.env.PLEROMA_INSTANCE_DOMAIN],
ollamaUrl: process.env.OLLAMA_URL || "",
ollamaSystemPrompt: process.env.OLLAMA_SYSTEM_PROMPT,
ollamaSystemPrompt: process.env.OLLAMA_SYSTEM_PROMPT || "",
ollamaModel: process.env.OLLAMA_MODEL || "",
fetchInterval: process.env.FETCH_INTERVAL
? parseInt(process.env.FETCH_INTERVAL)
@@ -48,11 +44,12 @@
};
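envConfig falls back to empty strings or the instance domain when a variable is unset; an illustrative .env to pair with it (variable names from the code above, values hypothetical):

PLEROMA_INSTANCE_DOMAIN=example.social
WHITELISTED_DOMAINS=example.social,friendly.example
OLLAMA_URL=http://localhost:11434
OLLAMA_MODEL=mistral
OLLAMA_SYSTEM_PROMPT=You are a helpful fediverse bot.
FETCH_INTERVAL=15000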
const ollamaConfig: OllamaConfigOptions = {
temperature: 0.6,
top_p: 0.85,
temperature: 0.85, // Increased from 0.6 - more creative and varied
top_p: 0.9, // Slightly increased for more diverse responses
top_k: 40,
num_ctx: 8192,
repeat_penalty: 1.1,
num_ctx: 16384,
repeat_penalty: 1.1, // Reduced from 1.15 - less mechanical
// stop: ['<|im_end|>', '\n\n']
};
// this could be helpful
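For orientation, a sketch of the OllamaConfigOptions shape as this file uses it; the real definition lives in ../types.js, and the comments are assumptions based on Ollama's documented sampling options:

// Sketch only; field meanings assumed from Ollama's model options.
interface OllamaConfigOptions {
  temperature?: number; // 0.85 above: higher values sample more freely
  top_p?: number; // 0.9 above: nucleus-sampling probability mass cutoff
  top_k?: number; // 40 above: restrict sampling to the 40 likeliest tokens
  num_ctx?: number; // 16384 above: context window size in tokens
  repeat_penalty?: number; // 1.1 above: values > 1 penalize repeated tokens
  stop?: string[]; // sequences that end generation, e.g. ["</s>", "[INST]"]
}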
@@ -68,75 +65,128 @@ const generateOllamaRequest = async (
ollamaUrl,
replyWithContext,
} = envConfig;
let shouldDeleteNotification = false;
try {
if (shouldContinue(notification)) {
if (whitelistOnly && !isFromWhitelistedDomain(notification)) {
await deleteNotification(notification);
return;
}
if (await alreadyRespondedTo(notification)) {
return;
}
await recordPendingResponse(notification);
await storeUserData(notification);
let conversationHistory: PostAncestorsForModel[] = [];
if (replyWithContext) {
const contextPosts = await getStatusContext(notification.status.id);
if (!contextPosts?.ancestors || !contextPosts) {
throw new Error(`Unable to obtain post context ancestors.`);
}
conversationHistory = contextPosts.ancestors.map((ancestor) => {
const mentions = ancestor.mentions.map((mention) => mention.acct);
return {
account_fqn: ancestor.account.fqn,
mentions,
plaintext_content: ancestor.pleroma.content["text/plain"],
};
});
// console.log(conversationHistory);
}
// Simplified user message (remove [/INST] as it's not needed for Llama 3)
const userMessage = `${notification.status.account.fqn} says: ${notification.status.pleroma.content["text/plain"]}`;
let systemContent = ollamaSystemPrompt;
if (replyWithContext) {
// Simplified context instructions (avoid heavy JSON; summarize for clarity)
systemContent = `${ollamaSystemPrompt}\n\nPrevious conversation context:\n${conversationHistory
.map(
(post) =>
`${post.account_fqn} (to ${post.mentions.join(", ")}): ${
post.plaintext_content
}`
)
.join(
"\n"
)}\nReply as if you are a party to the conversation. If '@nice-ai' is mentioned, respond directly. Prefix usernames with '@' when addressing them.`;
}
// Switch to chat request format (messages array auto-handles Llama 3 template)
const ollamaRequestBody: OllamaChatRequest = {
model: ollamaModel,
messages: [
{ role: "system", content: systemContent as string },
{ role: "user", content: userMessage },
],
stream: false,
options: ollamaConfig,
};
// Change endpoint to /api/chat
const response = await fetch(`${ollamaUrl}/api/chat`, {
method: "POST",
body: JSON.stringify(ollamaRequestBody),
});
const ollamaResponse: OllamaChatResponse = await response.json();
await storePromptData(notification, ollamaResponse);
return ollamaResponse;
if (!shouldContinue(notification)) {
shouldDeleteNotification = true;
return;
}
if (whitelistOnly && !isFromWhitelistedDomain(notification)) {
shouldDeleteNotification = true;
return;
}
if (await alreadyRespondedTo(notification)) {
shouldDeleteNotification = true;
return;
}
await recordPendingResponse(notification);
await storeUserData(notification);
let conversationContext = "";
if (replyWithContext) {
const contextPosts = await getStatusContext(notification.status.id);
if (!contextPosts?.ancestors) {
throw new Error(`Unable to obtain post context ancestors.`);
}
// Build a human-readable conversation thread
const allPosts = [...contextPosts.ancestors];
// Include descendants (follow-up posts) if available
if (contextPosts.descendants && contextPosts.descendants.length > 0) {
allPosts.push(...contextPosts.descendants);
}
if (allPosts.length > 0) {
const conversationLines = allPosts.map((post) => {
const author = post.account.fqn;
const content = post.pleroma.content["text/plain"];
const replyingTo = post.in_reply_to_account_id
? ` (replying to another message)`
: "";
return `[@${author}${replyingTo}]: ${content}`;
});
conversationContext = `
Previous conversation thread:
${conversationLines.join("\n\n")}
---
`;
}
}
const userMessage = notification.status.pleroma.content["text/plain"];
const originalAuthor = notification.account.fqn;
let systemContent = ollamaSystemPrompt;
if (replyWithContext && conversationContext) {
systemContent = `${ollamaSystemPrompt}
${conversationContext}
Current message from @${originalAuthor}:
"${userMessage}"
Instructions:
- You are replying to @${originalAuthor}
- Address them directly if appropriate
- Use markdown formatting and emojis sparingly`;
}
const ollamaRequestBody: OllamaChatRequest = {
model: ollamaModel,
messages: [
{ role: "system", content: systemContent },
{ role: "user", content: userMessage },
],
stream: false,
options: {
...ollamaConfig,
stop: ["</s>", "[INST]"], // Mistral 0.3 stop tokens
},
};
console.log(
`Generating response for notification ${notification.id} from @${originalAuthor}`
);
// Change endpoint to /api/chat
const response = await fetch(`${ollamaUrl}/api/chat`, {
method: "POST",
body: JSON.stringify(ollamaRequestBody),
});
if (!response.ok) {
throw new Error(`Ollama API request failed: ${response.statusText}`);
}
const ollamaResponse: OllamaChatResponse = await response.json();
await storePromptData(notification, ollamaResponse);
return ollamaResponse;
} catch (error: any) {
throw new Error(error.message);
console.error(
`Error in generateOllamaRequest for notification ${notification.id}:`,
error.message
);
// Delete notification on error to prevent retry loops
shouldDeleteNotification = true;
throw error;
} finally {
if (shouldDeleteNotification) {
try {
await deleteNotification(notification);
} catch (deleteError: any) {
console.error(
`Failed to delete notification ${notification.id}:`,
deleteError.message
);
}
}
}
};
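The rewrite above trades the old nested conditionals for early returns plus a single cleanup path in finally; distilled, the pattern looks like this (passesChecks and respond are hypothetical stand-ins for the real guards and request):

// Sketch: flag-based cleanup with early returns.
async function handleNotification(n: { id: string }) {
  let shouldDelete = false;
  try {
    if (!passesChecks(n)) { // hypothetical guard helper
      shouldDelete = true; // flag it; the finally block cleans up
      return;
    }
    return await respond(n); // hypothetical request helper
  } catch (error) {
    shouldDelete = true; // clean up on failure too, preventing retry loops
    throw error;
  } finally {
    if (shouldDelete) {
      // swallow delete errors so they never mask the original failure
      await deleteNotification(n).catch((e) => console.error(e));
    }
  }
}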
@@ -145,27 +195,28 @@ const postReplyToStatus = async (
ollamaResponseBody: OllamaChatResponse
) => {
const { pleromaInstanceUrl, bearerToken } = envConfig;
const emojiList = await getInstanceEmojis();
let randomEmoji;
if (emojiList) {
randomEmoji = selectRandomEmoji(emojiList);
}
try {
let mentions: string[];
// Only mention the original author who triggered the bot
const originalAuthor = notification.account.acct;
console.log(
`Replying to: @${originalAuthor} (status ID: ${notification.status.id})`
);
// Sanitize LLM output - remove any stray Mistral special tokens
let sanitizedContent = ollamaResponseBody.message.content
.replace(/<\/s>/g, "") // Remove EOS token if it appears
.replace(/\[INST\]/g, "") // Remove instruction start token
.replace(/\[\/INST\]/g, "") // Remove instruction end token
.replace(/<s>/g, "") // Remove BOS token if it appears
.trim();
const statusBody: NewStatusBody = {
content_type: "text/markdown",
status: `${ollamaResponseBody.message.content} :${randomEmoji}:`,
status: sanitizedContent,
in_reply_to_id: notification.status.id,
to: [originalAuthor], // Only send to the person who mentioned the bot
};
if (
notification.status.mentions &&
notification.status.mentions.length > 0
) {
mentions = notification.status.mentions.map((mention) => {
return mention.acct;
});
statusBody.to = mentions;
}
const response = await fetch(`${pleromaInstanceUrl}/api/v1/statuses`, {
method: "POST",
@@ -180,9 +231,23 @@ const postReplyToStatus = async (
throw new Error(`New status request failed: ${response.statusText}`);
}
await deleteNotification(notification);
console.log(`Successfully posted reply to @${originalAuthor}`);
} catch (error: any) {
throw new Error(error.message);
console.error(
`Error posting reply for notification ${notification.id}:`,
error.message
);
throw error;
} finally {
// Always try to delete the notification, even if posting failed
try {
await deleteNotification(notification);
} catch (deleteError: any) {
console.error(
`Failed to delete notification ${notification.id}:`,
deleteError.message
);
}
}
};
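The inline token scrub above could equally live in a small helper; a sketch using the same regexes as the diff (stripMistralTokens is a hypothetical name, not in the repo):

const stripMistralTokens = (text: string): string =>
  text
    .replace(/<\/s>/g, "") // EOS token
    .replace(/\[INST\]/g, "") // instruction start token
    .replace(/\[\/INST\]/g, "") // instruction end token
    .replace(/<s>/g, "") // BOS token
    .trim();

// stripMistralTokens("Sure!</s>") === "Sure!"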
@@ -198,10 +263,17 @@ const createTimelinePost = async () => {
model: ollamaModel,
messages: [
{ role: "system", content: ollamaSystemPrompt as string },
{ role: "user", content: "Say something random." },
{
role: "user",
content:
"Make a post about something. Keep your tone authentic, as if you are a real person making a post about a topic that interests you on a microblogging platform. This can be about anything like politics, gardening, homesteading, your favorite animal, a fun fact, what happened during your day, seeking companionship, baking, cooking, et cetera. Do not format the post with a title or quotes, nor sign the post with your name. It will be posted to your timeline so everyone will know you said it.",
},
],
stream: false,
options: ollamaConfig,
options: {
...ollamaConfig,
stop: ["</s>", "[INST]"], // Mistral 0.3 stop tokens
},
};
try {
const response = await fetch(`${ollamaUrl}/api/chat`, {
@@ -244,18 +316,21 @@ const beginFetchCycle = async () => {
setInterval(async () => {
notifications = await getNotifications();
if (notifications.length > 0) {
await Promise.all(
notifications.map(async (notification) => {
try {
const ollamaResponse = await generateOllamaRequest(notification);
if (ollamaResponse) {
postReplyToStatus(notification, ollamaResponse);
}
} catch (error: any) {
throw new Error(error.message);
// Process notifications sequentially to avoid race conditions
for (const notification of notifications) {
try {
const ollamaResponse = await generateOllamaRequest(notification);
if (ollamaResponse) {
await postReplyToStatus(notification, ollamaResponse);
}
})
);
} catch (error: any) {
console.error(
`Error processing notification ${notification.id}:`,
error.message
);
// Continue processing other notifications even if one fails
}
}
}
}, envConfig.fetchInterval); // lower intervals may cause the bot to respond multiple times to the same message, but we try to mitigate this with the deleteNotification function
};
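Sequential processing trades throughput for safety: one slow generation now blocks the whole queue. If that ever becomes a problem, small batches are a middle ground; a sketch only, since BATCH_SIZE and the batching are not part of this diff:

// Hypothetical alternative: bounded concurrency instead of strictly one-by-one.
const BATCH_SIZE = 3;
for (let i = 0; i < notifications.length; i += BATCH_SIZE) {
  const batch = notifications.slice(i, i + BATCH_SIZE);
  // allSettled keeps one failure from aborting the rest of the batch
  await Promise.allSettled(
    batch.map(async (n) => {
      const res = await generateOllamaRequest(n);
      if (res) await postReplyToStatus(n, res);
    })
  );
}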
@@ -277,6 +352,11 @@ console.log(
envConfig.fetchInterval / 1000
} seconds.`
);
console.log(
`Making ad-hoc post to ${envConfig.pleromaInstanceDomain}, every ${
envConfig.adHocPostInterval / 1000 / 60
} minutes.`
);
console.log(
`Accepting prompts from: ${envConfig.whitelistedDomains.join(", ")}`
);
@@ -288,7 +368,4 @@ console.log(
console.log(`System prompt: ${envConfig.ollamaSystemPrompt}`);
await beginFetchCycle();
// setInterval(async () => {
// createTimelinePost();
// }, 10000);
await beginStatusPostInterval();


@@ -8,7 +8,7 @@ Type=simple
User=bot
Restart=always
RestartSec=3
ExecStart=/usr/bin/screen -L -DmS pleroma-ollama-bot /home/bot/.nvm/versions/node/v22.11.0/bin/npm run start
ExecStart=/home/bot/.nvm/versions/node/v22.11.0/bin/npm run start
WorkingDirectory=/path/to/directory
[Install]
WantedBy=multi-user.target
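After editing the unit (note that WorkingDirectory still carries the /path/to/directory placeholder), the usual reload applies; this assumes the file is installed as pleroma-ollama-bot.service, the name the old screen session used:

sudo systemctl daemon-reload
sudo systemctl restart pleroma-ollama-bot
systemctl status pleroma-ollama-bot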