diff --git a/src/main.ts b/src/main.ts
index af40843..6370c7c 100644
--- a/src/main.ts
+++ b/src/main.ts
@@ -6,10 +6,8 @@ import {
   OllamaChatResponse,
   PostAncestorsForModel,
 } from "../types.js";
-// import striptags from "striptags";
 import { PrismaClient } from "../generated/prisma/client.js";
 import {
-  // getInstanceEmojis,
   deleteNotification,
   getNotifications,
   getStatusContext,
@@ -19,8 +17,6 @@ import {
   isFromWhitelistedDomain,
   alreadyRespondedTo,
   recordPendingResponse,
-  // trimInputData,
-  // selectRandomEmoji,
   shouldContinue,
 } from "./util.js";
 
@@ -34,7 +30,7 @@ export const envConfig = {
     ? process.env.WHITELISTED_DOMAINS.split(",")
     : [process.env.PLEROMA_INSTANCE_DOMAIN],
   ollamaUrl: process.env.OLLAMA_URL || "",
-  ollamaSystemPrompt: process.env.OLLAMA_SYSTEM_PROMPT,
+  ollamaSystemPrompt: process.env.OLLAMA_SYSTEM_PROMPT || "",
   ollamaModel: process.env.OLLAMA_MODEL || "",
   fetchInterval: process.env.FETCH_INTERVAL
     ? parseInt(process.env.FETCH_INTERVAL)
@@ -48,11 +44,12 @@
 };
 
 const ollamaConfig: OllamaConfigOptions = {
-  temperature: 0.9,
-  top_p: 0.85,
-  top_k: 60,
-  num_ctx: 16384, // maximum context window for Llama 3.1
-  repeat_penalty: 1.1,
+  temperature: 0.85, // Increased from 0.6 - more creative and varied
+  top_p: 0.9, // Slightly increased for more diverse responses
+  top_k: 40,
+  num_ctx: 16384,
+  repeat_penalty: 1.1, // Reduced from 1.15 - less mechanical
+  // stop: ['<|im_end|>', '\n\n']
 };
 
 // this could be helpful
@@ -82,47 +79,42 @@ const generateOllamaRequest = async (
   let conversationHistory: PostAncestorsForModel[] = [];
   if (replyWithContext) {
     const contextPosts = await getStatusContext(notification.status.id);
-    if (!contextPosts?.ancestors || !contextPosts) {
+    if (!contextPosts?.ancestors) {
       throw new Error(`Unable to obtain post context ancestors.`);
     }
-    conversationHistory = contextPosts.ancestors.map((ancestor) => {
-      const mentions = ancestor.mentions.map((mention) => mention.acct);
-      return {
-        account_fqn: ancestor.account.fqn,
-        mentions,
-        plaintext_content: ancestor.pleroma.content["text/plain"],
-      };
-    });
-    // console.log(conversationHistory);
+    conversationHistory = contextPosts.ancestors.map((ancestor) => ({
+      account_fqn: ancestor.account.fqn,
+      mentions: ancestor.mentions.map((mention) => mention.acct),
+      plaintext_content: ancestor.pleroma.content["text/plain"],
+    }));
   }
 
-  // Simplified user message (remove [/INST] as it's not needed for Llama 3)
-  const userMessage = `${notification.status.account.fqn} says to you: \"${notification.status.pleroma.content["text/plain"]}\".`;
+  const userMessage = notification.status.pleroma.content["text/plain"];
 
   let systemContent = ollamaSystemPrompt;
   if (replyWithContext) {
-    // Simplified context instructions (avoid heavy JSON; summarize for clarity)
-    systemContent = `${ollamaSystemPrompt}\n\nPrevious conversation context:\n${conversationHistory
-      .map(
-        (post) =>
-          `${post.account_fqn} (said to ${post.mentions.join(", ")}): ${
-            post.plaintext_content
-          }`
-      )
-      .join(
-        "\n"
-      )}\nReply to the user who addressed you (you are Lexi, also known as nice-ai or nice-ai@nicecrew.digital). Examine the context of the entire conversation and make references to topics or information where appropriate. Prefix usernames with '@' when addressing them. Assume if there is no domain in the username, the domain is @nicecrew.digital (for example @matty would be @matty@nicecrew.digital)`;
+    systemContent = `${ollamaSystemPrompt}
+Previous conversation (JSON format):
+${JSON.stringify(conversationHistory, null, 2)}
+
+Instructions:
+- Each entry shows: account_fqn (who posted), mentions (tagged users), and plaintext_content (message)
+- The first mention is the direct recipient
+- Address users with @ before their names
+- Use markdown formatting and emojis sparingly`;
   }
 
-  // Switch to chat request format (messages array auto-handles Llama 3 template)
   const ollamaRequestBody: OllamaChatRequest = {
     model: ollamaModel,
     messages: [
-      { role: "system", content: systemContent as string },
+      { role: "system", content: systemContent },
       { role: "user", content: userMessage },
     ],
     stream: false,
-    options: ollamaConfig,
+    options: {
+      ...ollamaConfig,
+      stop: ["<|im_end|>", "\n\n"],
+    },
   };
 
   // Change endpoint to /api/chat
@@ -145,16 +137,12 @@ const postReplyToStatus = async (
   ollamaResponseBody: OllamaChatResponse
 ) => {
   const { pleromaInstanceUrl, bearerToken } = envConfig;
-  // const emojiList = await getInstanceEmojis();
-  // let randomEmoji;
-  // if (emojiList) {
-  //   randomEmoji = selectRandomEmoji(emojiList);
-  // }
+
   try {
     let mentions: string[];
     const statusBody: NewStatusBody = {
       content_type: "text/markdown",
-      status: `${ollamaResponseBody.message.content}`,
+      status: ollamaResponseBody.message.content,
       in_reply_to_id: notification.status.id,
     };
     if (
@@ -198,10 +186,17 @@ const createTimelinePost = async () => {
     model: ollamaModel,
     messages: [
       { role: "system", content: ollamaSystemPrompt as string },
-      { role: "user", content: "Say something random." },
+      {
+        role: "user",
+        content:
+          "Make a post about something. Keep your tone authentic, as if you are a real person making a post about a topic that interests you on a microblogging platform. This can be about anything like politics, gardening, homesteading, your favorite animal, a fun fact, what happened during your day, seeking companionship, baking, cooking, et cetera. Do not format the post with a title or quotes, nor sign the post with your name. It will be posted to your timeline so everyone will know you said it.",
+      },
     ],
     stream: false,
-    options: ollamaConfig,
+    options: {
+      ...ollamaConfig,
+      stop: ["<|start_header_id|>", "<|end_header_id|>", "<|eot_id|>"],
+    },
   };
   try {
     const response = await fetch(`${ollamaUrl}/api/chat`, {
@@ -277,6 +272,11 @@ console.log(
     envConfig.fetchInterval / 1000
   } seconds.`
 );
+console.log(
+  `Making ad-hoc post to ${envConfig.pleromaInstanceDomain}, every ${
+    envConfig.adHocPostInterval / 1000 / 60
+  } minutes.`
+);
 console.log(
   `Accepting prompts from: ${envConfig.whitelistedDomains.join(", ")}`
 );
@@ -288,7 +288,4 @@ console.log(
 console.log(`System prompt: ${envConfig.ollamaSystemPrompt}`);
 
 await beginFetchCycle();
-// setInterval(async () => {
-//   createTimelinePost();
-// }, 10000);
 await beginStatusPostInterval();
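
For anyone testing this locally, below is a minimal sketch of the non-streaming `/api/chat` round trip the patch migrates to. The model name, messages, and option values are illustrative placeholders, and the inline `ChatRequest`/`ChatResponse` types only approximate the `OllamaChatRequest`/`OllamaChatResponse` shapes imported from `../types.js`:

```ts
// Standalone sketch of Ollama's non-streaming /api/chat round trip.
// Assumes OLLAMA_URL points at a running Ollama instance; the model
// name and messages below are placeholders, not values from this patch.

interface ChatMessage {
  role: "system" | "user" | "assistant";
  content: string;
}

// Rough stand-in for the OllamaChatRequest shape used in main.ts.
interface ChatRequest {
  model: string;
  messages: ChatMessage[];
  stream: boolean;
  options?: Record<string, unknown>;
}

// With stream: false, Ollama returns one JSON object whose `message`
// field holds the assistant's complete reply.
interface ChatResponse {
  message: ChatMessage;
}

const requestBody: ChatRequest = {
  model: "llama3.1", // placeholder model name
  messages: [
    { role: "system", content: "You are a helpful assistant." },
    { role: "user", content: "Say hello in one sentence." },
  ],
  stream: false,
  options: {
    temperature: 0.85,
    top_p: 0.9,
    top_k: 40,
    num_ctx: 16384,
    repeat_penalty: 1.1,
    stop: ["<|im_end|>"], // generation halts at the first stop sequence hit
  },
};

const response = await fetch(`${process.env.OLLAMA_URL}/api/chat`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify(requestBody),
});
const data = (await response.json()) as ChatResponse;
console.log(data.message.content);
```

One thing to watch with the merged options: since generation stops at the first stop sequence encountered, the `"\n\n"` entry in the reply path cuts the model's answer off at its first blank line, so multi-paragraph replies will come back as a single paragraph.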