sanity checking, do not duplicate responses

2025-07-05 03:58:50 +00:00
parent ea5e783ee5
commit b8f6023029
5 changed files with 85 additions and 7 deletions


@@ -0,0 +1,18 @@
-- RedefineTables
PRAGMA defer_foreign_keys=ON;
PRAGMA foreign_keys=OFF;
CREATE TABLE "new_Response" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"pleromaNotificationId" TEXT NOT NULL DEFAULT 'null',
"to" TEXT NOT NULL,
"request" TEXT,
"response" TEXT,
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"processedAt" DATETIME,
"isProcessing" BOOLEAN NOT NULL DEFAULT true
);
INSERT INTO "new_Response" ("createdAt", "id", "pleromaNotificationId", "processedAt", "request", "response", "to") SELECT "createdAt", "id", "pleromaNotificationId", "processedAt", "request", "response", "to" FROM "Response";
DROP TABLE "Response";
ALTER TABLE "new_Response" RENAME TO "Response";
PRAGMA foreign_keys=ON;
PRAGMA defer_foreign_keys=OFF;


@@ -0,0 +1,18 @@
-- RedefineTables
PRAGMA defer_foreign_keys=ON;
PRAGMA foreign_keys=OFF;
CREATE TABLE "new_Response" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"pleromaNotificationId" TEXT NOT NULL DEFAULT 'null',
"to" TEXT NOT NULL DEFAULT 'null',
"request" TEXT NOT NULL DEFAULT 'null',
"response" TEXT NOT NULL DEFAULT 'null',
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"processedAt" DATETIME,
"isProcessing" BOOLEAN NOT NULL DEFAULT true
);
INSERT INTO "new_Response" ("createdAt", "id", "isProcessing", "pleromaNotificationId", "processedAt", "request", "response", "to") SELECT "createdAt", "id", "isProcessing", "pleromaNotificationId", "processedAt", coalesce("request", 'null') AS "request", coalesce("response", 'null') AS "response", "to" FROM "Response";
DROP TABLE "Response";
ALTER TABLE "new_Response" RENAME TO "Response";
PRAGMA foreign_keys=ON;
PRAGMA defer_foreign_keys=OFF;


@@ -14,11 +14,12 @@ datasource db {
 model Response {
   id                    Int       @id @default(autoincrement())
   pleromaNotificationId String    @default("null")
-  to                    String
-  request               String?
-  response              String?
+  to                    String    @default("null")
+  request               String    @default("null")
+  response              String    @default("null")
   createdAt             DateTime  @default(now())
   processedAt           DateTime?
+  isProcessing          Boolean   @default(true)
 }
 
 model User {

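The switch from nullable columns to "null" string defaults lines up with the new flow: a Response row is now created as a bare placeholder before any request or response text exists. A minimal sketch of that lifecycle, using the generated Prisma client from the diff (ids and strings are hypothetical values):

import { PrismaClient } from "../generated/prisma/client.js";

const prisma = new PrismaClient();

// Placeholder row: "to", "request", and "response" fall back to their
// "null" string defaults, and isProcessing starts as true.
await prisma.response.create({
  data: { pleromaNotificationId: "12345" }, // hypothetical id
});

// After the model call completes, fill in the real values and clear the flag.
await prisma.response.updateMany({
  where: { pleromaNotificationId: "12345" },
  data: {
    to: "user@example.social",      // hypothetical
    request: "what is a goose?",    // hypothetical
    response: "A goose is a bird.", // hypothetical
    isProcessing: false,
  },
});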

@@ -3,6 +3,7 @@ import {
   OllamaResponse,
   NewStatusBody,
   Notification,
+  OllamaConfigOptions,
 } from "../types.js";
 import striptags from "striptags";
 import { PrismaClient } from "../generated/prisma/client.js";
@@ -51,7 +52,7 @@ const alreadyRespondedTo = async (
 ): Promise<boolean> => {
   try {
     const duplicate = await prisma.response.findFirst({
-      where: { pleromaNotificationId: notification.status.id },
+      where: { pleromaNotificationId: notification.id, isProcessing: true },
     });
     if (duplicate) {
       return true;
@@ -67,12 +68,13 @@ const storePromptData = async (
   ollamaResponseBody: OllamaResponse
 ) => {
   try {
-    await prisma.response.create({
+    await prisma.response.updateMany({
+      where: { pleromaNotificationId: notification.id },
       data: {
         response: ollamaResponseBody.response,
-        request: striptags(notification.status.content),
+        request: trimInputData(notification.status.content),
         to: notification.account.fqn,
-        pleromaNotificationId: notification.status.id,
+        isProcessing: false,
       },
     });
   } catch (error: any) {
@@ -87,6 +89,18 @@ const trimInputData = (input: string) => {
   return split.slice(promptStringIndex + 1).join(" "); // returns everything after the !prompt
 };
 
+const recordPendingResponse = async (notification: Notification) => {
+  try {
+    await prisma.response.create({
+      data: {
+        pleromaNotificationId: notification.id,
+      },
+    });
+  } catch (error: any) {
+    throw new Error(error.message);
+  }
+};
+
 const generateOllamaRequest = async (
   notification: Notification
 ): Promise<OllamaResponse | undefined> => {
@@ -107,7 +121,12 @@ const generateOllamaRequest = async (
     if (await alreadyRespondedTo(notification)) {
       return;
     }
+    await recordPendingResponse(notification);
     await storeUserData(notification);
+    const ollamaConfig: OllamaConfigOptions = {
+      temperature: 1.2,
+      num_predict: 400,
+    };
     const ollamaRequestBody: OllamaRequest = {
       model: process.env.OLLAMA_MODEL as string,
       system: process.env.OLLAMA_SYSTEM_PROMPT as string,
@@ -115,6 +134,7 @@ const generateOllamaRequest = async (
         notification.status.content
       )}`,
       stream: false,
+      options: ollamaConfig,
     };
     const response = await fetch(`${process.env.OLLAMA_URL}/api/generate`, {
       method: "POST",
@@ -177,6 +197,11 @@ const deleteNotification = async (notification: Notification) => {
   if (!notification.id) {
     return;
   }
+  await prisma.response.updateMany({
+    // not ideal: duplicate notifications can leave several rows with the same id, so update them all (probably won't scale)
+    where: { pleromaNotificationId: notification.id },
+    data: { isProcessing: false },
+  });
   const response = await fetch(
     `${process.env.PLEROMA_INSTANCE_URL}/api/v1/notifications/${notification.id}/dismiss`,
     {
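Net effect of the new pieces above: the notification is claimed before the slow model call, so an overlapping poll that finds a row with isProcessing: true skips it. A condensed sketch of that ordering (the wrapper name is hypothetical; prisma is the client from this file):

import { PrismaClient } from "../generated/prisma/client.js";

const prisma = new PrismaClient();

const handleNotification = async (id: string) => {
  // 1. Sanity check: is this notification already claimed or mid-flight?
  const duplicate = await prisma.response.findFirst({
    where: { pleromaNotificationId: id, isProcessing: true },
  });
  if (duplicate) return;

  // 2. Claim it immediately, before any slow work begins.
  await prisma.response.create({ data: { pleromaNotificationId: id } });

  // 3. ...the Ollama request runs here; a second poll arriving now bails at step 1...

  // 4. Release the claim once the notification is dismissed.
  await prisma.response.updateMany({
    where: { pleromaNotificationId: id },
    data: { isProcessing: false },
  });
};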

types.d.ts

@@ -69,15 +69,28 @@ export interface Mention {
   username: string;
 }
 
+/**
+ * Experimental settings; I wouldn't recommend messing with these if you don't know how they work (I don't either).
+ */
 export interface OllamaConfigOptions {
+  /**
+   * Number of tokens guaranteed to be kept in memory during response generation.
+   * Higher values leave less room for num_ctx.
+   */
   num_keep?: number;
   seed?: number;
+  /**
+   * Maximum number of tokens to generate in the response.
+   */
   num_predict?: number;
   top_k?: number;
   top_p?: number;
   min_p?: number;
   typical_p?: number;
   repeat_last_n?: number;
+  /**
+   * Controls randomness of the output; lower values give more focused, deterministic responses.
+   */
   temperature?: number;
   repeat_penalty?: number;
   presence_penalty?: number;
@@ -88,6 +101,9 @@ export interface OllamaConfigOptions {
   penalize_newline?: boolean;
   stop?: string[];
   numa?: boolean;
+  /**
+   * Size of the context window, in tokens.
+   */
   num_ctx?: number;
   num_batch?: number;
   num_gpu?: number;