new baseline

2025-10-28 16:56:53 +00:00
parent ee367a0d9a
commit 051a66ff26


@@ -6,10 +6,8 @@ import {
   OllamaChatResponse,
   PostAncestorsForModel,
 } from "../types.js";
-// import striptags from "striptags";
 import { PrismaClient } from "../generated/prisma/client.js";
 import {
-  // getInstanceEmojis,
   deleteNotification,
   getNotifications,
   getStatusContext,
@@ -19,8 +17,6 @@ import {
   isFromWhitelistedDomain,
   alreadyRespondedTo,
   recordPendingResponse,
-  // trimInputData,
-  // selectRandomEmoji,
   shouldContinue,
 } from "./util.js";
@@ -34,7 +30,7 @@ export const envConfig = {
     ? process.env.WHITELISTED_DOMAINS.split(",")
     : [process.env.PLEROMA_INSTANCE_DOMAIN],
   ollamaUrl: process.env.OLLAMA_URL || "",
-  ollamaSystemPrompt: process.env.OLLAMA_SYSTEM_PROMPT,
+  ollamaSystemPrompt: process.env.OLLAMA_SYSTEM_PROMPT || "",
   ollamaModel: process.env.OLLAMA_MODEL || "",
   fetchInterval: process.env.FETCH_INTERVAL
     ? parseInt(process.env.FETCH_INTERVAL)
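
Note: defaulting `OLLAMA_SYSTEM_PROMPT` to `""` keeps the field typed as `string`, but it also silences misconfiguration. A minimal alternative sketch (the `requireEnv` helper is hypothetical, not part of this commit) that fails fast instead:

```ts
// Hypothetical helper, not in this commit. Throws at startup rather than
// letting an empty prompt reach the model.
const requireEnv = (name: string): string => {
  const value = process.env[name];
  if (!value) throw new Error(`Missing required env var: ${name}`);
  return value;
};

// Usage sketch: ollamaSystemPrompt: requireEnv("OLLAMA_SYSTEM_PROMPT"),
```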
@@ -48,11 +44,12 @@
 };
 
 const ollamaConfig: OllamaConfigOptions = {
-  temperature: 0.9,
-  top_p: 0.85,
-  top_k: 60,
-  num_ctx: 16384, // maximum context window for Llama 3.1
-  repeat_penalty: 1.1,
+  temperature: 0.85, // slightly below the old 0.9; creative but more coherent
+  top_p: 0.9, // slightly increased for more diverse responses
+  top_k: 40,
+  num_ctx: 16384,
+  repeat_penalty: 1.1, // mild penalty; keeps replies from sounding mechanical
+  // stop: ['<|im_end|>', '\n\n']
 };
 
 // this could be helpful
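
For reference, a plausible shape for `OllamaConfigOptions` (an assumption; the actual interface lives in `../types.js` and is not shown in this diff). The field names map onto Ollama's documented generation options:

```ts
// Assumed shape only; the real definition is in ../types.js.
interface OllamaConfigOptions {
  temperature?: number;    // randomness: higher means more varied output
  top_p?: number;          // nucleus sampling cutoff
  top_k?: number;          // sample only from the k most likely tokens
  num_ctx?: number;        // context window size in tokens
  repeat_penalty?: number; // values > 1 discourage verbatim repetition
  stop?: string[];         // strings that terminate generation when emitted
}
```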
@@ -82,47 +79,42 @@ const generateOllamaRequest = async (
   let conversationHistory: PostAncestorsForModel[] = [];
   if (replyWithContext) {
     const contextPosts = await getStatusContext(notification.status.id);
-    if (!contextPosts?.ancestors || !contextPosts) {
+    if (!contextPosts?.ancestors) {
       throw new Error(`Unable to obtain post context ancestors.`);
     }
-    conversationHistory = contextPosts.ancestors.map((ancestor) => {
-      const mentions = ancestor.mentions.map((mention) => mention.acct);
-      return {
-        account_fqn: ancestor.account.fqn,
-        mentions,
-        plaintext_content: ancestor.pleroma.content["text/plain"],
-      };
-    });
-    // console.log(conversationHistory);
+    conversationHistory = contextPosts.ancestors.map((ancestor) => ({
+      account_fqn: ancestor.account.fqn,
+      mentions: ancestor.mentions.map((mention) => mention.acct),
+      plaintext_content: ancestor.pleroma.content["text/plain"],
+    }));
   }
-  // Simplified user message (remove [/INST] as it's not needed for Llama 3)
-  const userMessage = `${notification.status.account.fqn} says to you: \"${notification.status.pleroma.content["text/plain"]}\".`;
+  const userMessage = notification.status.pleroma.content["text/plain"];
   let systemContent = ollamaSystemPrompt;
   if (replyWithContext) {
-    // Simplified context instructions (avoid heavy JSON; summarize for clarity)
-    systemContent = `${ollamaSystemPrompt}\n\nPrevious conversation context:\n${conversationHistory
-      .map(
-        (post) =>
-          `${post.account_fqn} (said to ${post.mentions.join(", ")}): ${
-            post.plaintext_content
-          }`
-      )
-      .join(
-        "\n"
-      )}\nReply to the user who addressed you (you are Lexi, also known as nice-ai or nice-ai@nicecrew.digital). Examine the context of the entire conversation and make references to topics or information where appropriate. Prefix usernames with '@' when addressing them. Assume if there is no domain in the username, the domain is @nicecrew.digital (for example @matty would be @matty@nicecrew.digital)`;
+    systemContent = `${ollamaSystemPrompt}
+
+Previous conversation (JSON format):
+${JSON.stringify(conversationHistory, null, 2)}
+
+Instructions:
+- Each entry shows: account_fqn (who posted), mentions (tagged users), and plaintext_content (message)
+- The first mention is the direct recipient
+- Address users with @ before their names
+- Use markdown formatting and emojis sparingly`;
   }
-  // Switch to chat request format (messages array auto-handles Llama 3 template)
   const ollamaRequestBody: OllamaChatRequest = {
     model: ollamaModel,
     messages: [
-      { role: "system", content: systemContent as string },
+      { role: "system", content: systemContent },
       { role: "user", content: userMessage },
     ],
     stream: false,
-    options: ollamaConfig,
+    options: {
+      ...ollamaConfig,
+      stop: ["<|im_end|>", "\n\n"],
+    },
   };
 
   // Change endpoint to /api/chat
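
To illustrate what the model now receives, here is a sketch of the context the system prompt embeds for a two-post thread. Values are made up; the field set matches the mapping above:

```ts
// Illustrative only: what JSON.stringify(conversationHistory, null, 2)
// serializes for a two-post thread (accounts and text are invented).
const exampleContext: PostAncestorsForModel[] = [
  {
    account_fqn: "alice@example.social",
    mentions: ["nice-ai@nicecrew.digital"],
    plaintext_content: "Hey, what do you think about this?",
  },
  {
    account_fqn: "nice-ai@nicecrew.digital",
    mentions: ["alice@example.social"],
    plaintext_content: "Happy to weigh in!",
  },
];
```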
@@ -145,16 +137,12 @@ const postReplyToStatus = async (
   ollamaResponseBody: OllamaChatResponse
 ) => {
   const { pleromaInstanceUrl, bearerToken } = envConfig;
-  // const emojiList = await getInstanceEmojis();
-  // let randomEmoji;
-  // if (emojiList) {
-  //   randomEmoji = selectRandomEmoji(emojiList);
-  // }
   try {
     let mentions: string[];
     const statusBody: NewStatusBody = {
       content_type: "text/markdown",
-      status: `${ollamaResponseBody.message.content}`,
+      status: ollamaResponseBody.message.content,
       in_reply_to_id: notification.status.id,
     };
     if (
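
The rest of this function (outside the hunk) presumably posts `statusBody` to the instance. A sketch of that call, assuming Pleroma's Mastodon-compatible statuses endpoint (`content_type` is a Pleroma extension for markdown posts):

```ts
// Sketch under the assumption that the bot uses the standard
// POST /api/v1/statuses endpoint Pleroma exposes.
const postStatus = async (statusBody: NewStatusBody): Promise<Response> =>
  fetch(`${pleromaInstanceUrl}/api/v1/statuses`, {
    method: "POST",
    headers: {
      Authorization: `Bearer ${bearerToken}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify(statusBody),
  });
```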
@@ -198,10 +186,17 @@ const createTimelinePost = async () => {
     model: ollamaModel,
     messages: [
       { role: "system", content: ollamaSystemPrompt as string },
-      { role: "user", content: "Say something random." },
+      {
+        role: "user",
+        content:
+          "Make a post about something. Keep your tone authentic, as if you are a real person making a post about a topic that interests you on a microblogging platform. This can be about anything like politics, gardening, homesteading, your favorite animal, a fun fact, what happened during your day, seeking companionship, baking, cooking, et cetera. Do not format the post with a title or quotes, nor sign the post with your name. It will be posted to your timeline so everyone will know you said it.",
+      },
     ],
     stream: false,
-    options: ollamaConfig,
+    options: {
+      ...ollamaConfig,
+      stop: ["<|start_header_id|>", "<|end_header_id|>", "<|eot_id|>"],
+    },
   };
   try {
     const response = await fetch(`${ollamaUrl}/api/chat`, {
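
The hunk cuts off mid-call. For clarity, a sketch of the complete non-streaming exchange per Ollama's documented `/api/chat` API; with `stream: false`, the generated text arrives under `message.content`, which is exactly what `OllamaChatResponse` is used for above:

```ts
// Sketch of the full non-streaming /api/chat round trip.
const response = await fetch(`${ollamaUrl}/api/chat`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify(ollamaRequestBody),
});
const body = (await response.json()) as OllamaChatResponse;
console.log(body.message.content); // the generated post text
```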
@@ -277,6 +272,11 @@ console.log(
     envConfig.fetchInterval / 1000
   } seconds.`
 );
+console.log(
+  `Making ad-hoc post to ${envConfig.pleromaInstanceDomain}, every ${
+    envConfig.adHocPostInterval / 1000 / 60
+  } minutes.`
+);
 console.log(
   `Accepting prompts from: ${envConfig.whitelistedDomains.join(", ")}`
 );
@@ -288,7 +288,4 @@ console.log(
 console.log(`System prompt: ${envConfig.ollamaSystemPrompt}`);
 
 await beginFetchCycle();
-// setInterval(async () => {
-//   createTimelinePost();
-// }, 10000);
 await beginStatusPostInterval();
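
The deleted `setInterval` debug block is superseded by `beginStatusPostInterval`, which is defined elsewhere in the repo. A hypothetical sketch of how it plausibly wires `createTimelinePost` to the configured interval, assuming `envConfig.adHocPostInterval` is in milliseconds:

```ts
// Hypothetical sketch only; the real beginStatusPostInterval is not shown
// in this diff. Schedules ad-hoc timeline posts on the configured interval.
const beginStatusPostInterval = async (): Promise<void> => {
  setInterval(() => {
    createTimelinePost().catch((err) =>
      console.error("Ad-hoc post failed:", err)
    );
  }, envConfig.adHocPostInterval);
};
```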