sanity checking, do not duplicate responses
prisma/migrations/…/migration.sql
@@ -0,0 +1,18 @@
+-- RedefineTables
+PRAGMA defer_foreign_keys=ON;
+PRAGMA foreign_keys=OFF;
+CREATE TABLE "new_Response" (
+    "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    "pleromaNotificationId" TEXT NOT NULL DEFAULT 'null',
+    "to" TEXT NOT NULL,
+    "request" TEXT,
+    "response" TEXT,
+    "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    "processedAt" DATETIME,
+    "isProcessing" BOOLEAN NOT NULL DEFAULT true
+);
+INSERT INTO "new_Response" ("createdAt", "id", "pleromaNotificationId", "processedAt", "request", "response", "to") SELECT "createdAt", "id", "pleromaNotificationId", "processedAt", "request", "response", "to" FROM "Response";
+DROP TABLE "Response";
+ALTER TABLE "new_Response" RENAME TO "Response";
+PRAGMA foreign_keys=ON;
+PRAGMA defer_foreign_keys=OFF;
prisma/migrations/…/migration.sql
@@ -0,0 +1,18 @@
+-- RedefineTables
+PRAGMA defer_foreign_keys=ON;
+PRAGMA foreign_keys=OFF;
+CREATE TABLE "new_Response" (
+    "id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
+    "pleromaNotificationId" TEXT NOT NULL DEFAULT 'null',
+    "to" TEXT NOT NULL DEFAULT 'null',
+    "request" TEXT NOT NULL DEFAULT 'null',
+    "response" TEXT NOT NULL DEFAULT 'null',
+    "createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    "processedAt" DATETIME,
+    "isProcessing" BOOLEAN NOT NULL DEFAULT true
+);
+INSERT INTO "new_Response" ("createdAt", "id", "isProcessing", "pleromaNotificationId", "processedAt", "request", "response", "to") SELECT "createdAt", "id", "isProcessing", "pleromaNotificationId", "processedAt", coalesce("request", 'null') AS "request", coalesce("response", 'null') AS "response", "to" FROM "Response";
+DROP TABLE "Response";
+ALTER TABLE "new_Response" RENAME TO "Response";
+PRAGMA foreign_keys=ON;
+PRAGMA defer_foreign_keys=OFF;
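Both migrations rewrite the Response table rather than altering it, since SQLite cannot change column nullability in place; the second one additionally tightens "request" and "response" to NOT NULL and backfills any existing NULLs with the placeholder string 'null' via coalesce. A minimal post-migration sanity check, sketched under the assumption of the generated Prisma client this repo already uses (the raw query itself is illustrative, not part of the commit):

import { PrismaClient } from "./generated/prisma/client.js";

const prisma = new PrismaClient();

// Count rows that would violate the new NOT NULL constraints; after the
// coalesce backfill this should always print 0. Note the count may come
// back as a bigint depending on the driver.
const rows = await prisma.$queryRaw<Array<{ n: number }>>`
  SELECT COUNT(*) AS n FROM "Response"
  WHERE "request" IS NULL OR "response" IS NULL
`;
console.log("rows still NULL:", rows[0].n);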
prisma/schema.prisma
@@ -14,11 +14,12 @@ datasource db {
 model Response {
   id                    Int       @id @default(autoincrement())
   pleromaNotificationId String    @default("null")
-  to                    String
-  request               String?
-  response              String?
+  to                    String    @default("null")
+  request               String    @default("null")
+  response              String    @default("null")
   createdAt             DateTime  @default(now())
   processedAt           DateTime?
   isProcessing          Boolean   @default(true)
 }
 
 model User {
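The schema now gives every column besides id either a default or an optional type, so a row can materialize from nothing but a notification id, which is exactly what the new recordPendingResponse in src/main.ts depends on. A minimal sketch of the defaults in action (the id string is hypothetical):

// Create a pending row from just the notification id.
const pending = await prisma.response.create({
  data: { pleromaNotificationId: "AbC123" }, // hypothetical id
});
// The defaults fill in the rest:
// pending.to === "null", pending.request === "null", pending.response === "null",
// pending.isProcessing === true, pending.createdAt === the current timestamp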
							
								
								
									
src/main.ts
@@ -3,6 +3,7 @@ import {
   OllamaResponse,
   NewStatusBody,
   Notification,
+  OllamaConfigOptions,
 } from "../types.js";
 import striptags from "striptags";
 import { PrismaClient } from "../generated/prisma/client.js";
@@ -51,7 +52,7 @@ const alreadyRespondedTo = async (
 ): Promise<boolean> => {
   try {
     const duplicate = await prisma.response.findFirst({
-      where: { pleromaNotificationId: notification.status.id },
+      where: { pleromaNotificationId: notification.id, isProcessing: true },
     });
     if (duplicate) {
       return true;
@@ -67,12 +68,13 @@ const storePromptData = async (
   ollamaResponseBody: OllamaResponse
 ) => {
   try {
-    await prisma.response.create({
+    await prisma.response.updateMany({
+      where: { pleromaNotificationId: notification.id },
       data: {
         response: ollamaResponseBody.response,
-        request: striptags(notification.status.content),
+        request: trimInputData(notification.status.content),
         to: notification.account.fqn,
-        pleromaNotificationId: notification.status.id,
+        isProcessing: false,
       },
     });
   } catch (error: any) {
@@ -87,6 +89,18 @@ const trimInputData = (input: string) => {
   return split.slice(promptStringIndex + 1).join(" "); // returns everything after the !prompt
 };
 
+const recordPendingResponse = async (notification: Notification) => {
+  try {
+    await prisma.response.create({
+      data: {
+        pleromaNotificationId: notification.id,
+      },
+    });
+  } catch (error: any) {
+    throw new Error(error.message);
+  }
+};
+
 const generateOllamaRequest = async (
   notification: Notification
 ): Promise<OllamaResponse | undefined> => {
@@ -107,7 +121,12 @@ const generateOllamaRequest = async (
       if (await alreadyRespondedTo(notification)) {
         return;
       }
+      await recordPendingResponse(notification);
       await storeUserData(notification);
+      const ollamaConfig: OllamaConfigOptions = {
+        temperature: 1.2,
+        num_predict: 400,
+      };
       const ollamaRequestBody: OllamaRequest = {
         model: process.env.OLLAMA_MODEL as string,
         system: process.env.OLLAMA_SYSTEM_PROMPT as string,
@@ -115,6 +134,7 @@ const generateOllamaRequest = async (
           notification.status.content
         )}`,
         stream: false,
+        options: ollamaConfig,
       };
       const response = await fetch(`${process.env.OLLAMA_URL}/api/generate`, {
         method: "POST",
@@ -177,6 +197,11 @@ const deleteNotification = async (notification: Notification) => {
     if (!notification.id) {
       return;
     }
+    await prisma.response.updateMany({
+      // this is probably not the best way to do this, but since we may have duplicate notifications, we have to update all of them - probably won't scale (lmao)
+      where: { pleromaNotificationId: notification.id },
+      data: { isProcessing: false },
+    });
     const response = await fetch(
       `${process.env.PLEROMA_INSTANCE_URL}/api/v1/notifications/${notification.id}/dismiss`,
       {
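The updateMany in deleteNotification is needed because nothing prevents two rows from sharing a pleromaNotificationId, as the in-code comment concedes. A hypothetical alternative, not what this commit does: declaring pleromaNotificationId as @unique in schema.prisma would let a single upsert replace both the pending insert and the flag update, making duplicates impossible at the database level:

// Hypothetical sketch: requires `pleromaNotificationId String @unique`
// in schema.prisma, which this commit does not add.
const markSeen = async (notificationId: string) => {
  await prisma.response.upsert({
    where: { pleromaNotificationId: notificationId },
    create: { pleromaNotificationId: notificationId }, // first sighting: pending row
    update: { isProcessing: false }, // duplicate delivery: just clear the flag
  });
};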
							
								
								
									
types.d.ts
@@ -69,15 +69,28 @@ export interface Mention {
   username: string;
 }
 
+/**
+ * Experimental settings; I wouldn't recommend messing with these if you don't know how they work (I don't either)
+ */
 export interface OllamaConfigOptions {
+  /**
+   * Number of tokens from the prompt guaranteed to be kept in memory during response generation.
+   * Higher values leave less room within num_ctx
+   */
   num_keep?: number;
   seed?: number;
+  /**
+   * Maximum number of tokens the response may contain
+   */
   num_predict?: number;
   top_k?: number;
   top_p?: number;
   min_p?: number;
   typical_p?: number;
   repeat_last_n?: number;
+  /**
+   * Controls how random the response is - lower = more focused, more deterministic response
+   */
   temperature?: number;
   repeat_penalty?: number;
   presence_penalty?: number;
@@ -88,6 +101,9 @@ export interface OllamaConfigOptions {
   penalize_newline?: boolean;
   stop?: string[];
   numa?: boolean;
+  /**
+   * Size of the context window in tokens - how much prompt the model keeps in memory for the response, minus the value of num_keep
+   */
   num_ctx?: number;
   num_batch?: number;
   num_gpu?: number;
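Every field in OllamaConfigOptions is optional, so callers opt into individual knobs; this commit sets only temperature and num_predict. A sketch of the options riding along on a generate request, mirroring the fetch call in src/main.ts (the num_ctx value and the prompt text are illustrative, not taken from the commit):

import { OllamaConfigOptions } from "./types.js";

const options: OllamaConfigOptions = {
  temperature: 1.2, // the value main.ts uses
  num_predict: 400, // cap the response length, as main.ts does
  num_ctx: 8192, // illustrative; the commit leaves this unset
};

const response = await fetch(`${process.env.OLLAMA_URL}/api/generate`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    model: process.env.OLLAMA_MODEL,
    prompt: "!prompt say hello", // illustrative prompt
    stream: false,
    options,
  }),
});
console.log((await response.json()).response);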