Compare commits
54 Commits
9145b07da7
...
main
Author | SHA1 | Date | |
---|---|---|---|
e696343a73 | |||
88a0710c55 | |||
75fa4cea8b | |||
733a41a35c | |||
ed3467b213 | |||
0f178fcfa9 | |||
0bfff52fd0 | |||
8e90e8b71e | |||
566d6ae518 | |||
2ec367f203 | |||
a04cb9a6ad | |||
2111a47411 | |||
11c1332757 | |||
aaf4adcf06 | |||
b6ad54f40a | |||
2f3d16dbc5 | |||
150e2d638e | |||
0c7c176bae | |||
c3d4f1b1ff | |||
57ab59d342 | |||
71ae54930c | |||
3466a984ac | |||
cbf6b1d3eb | |||
e2ce397118 | |||
9a7cd118b3 | |||
7a60a672d4 | |||
419285487a | |||
09722507c6 | |||
41317301bf | |||
00a2eb63bc | |||
6c8f779294 | |||
ff5c7506ff | |||
5c51acc8d1 | |||
d4ee457d74 | |||
b8f6023029 | |||
ea5e783ee5 | |||
eb5282a50d | |||
9ee3663890 | |||
d85acd2179 | |||
856cc84208 | |||
ca4643092f | |||
b4b656f808 | |||
92f1366574 | |||
a64afa7e7b | |||
d63aa365e7 | |||
3759c5aa23 | |||
1a151b197b | |||
70180c5d5f | |||
dac037809c | |||
6088a2cbd3 | |||
ed8d148d0a | |||
379099dc7a | |||
c0ed38ac1a | |||
b295777041 |
@ -1,8 +1,13 @@
|
|||||||
DATABASE_URL="file:../dev.db" # SQLite database relative to the ./prisma path
|
DATABASE_URL="file:../dev.db" # SQLite database relative to the ./prisma path
|
||||||
PLEROMA_INSTANCE_URL="https://instance.tld" # Pleroma instance full URL including scheme
|
PLEROMA_INSTANCE_URL="https://instance.tld" # Pleroma instance full URL including scheme
|
||||||
PLEROMA_INSTANCE_DOMAIN="instance.tld" # used if you want to only want to respond to people from a particular instance
|
PLEROMA_INSTANCE_DOMAIN="instance.tld" # used if you want to only want to respond to people from a particular instance
|
||||||
ONLY_LOCAL_REPLIES="true" # reply to only users locally on your instance
|
PLEROMA_ACCOUNT_ID="" # obtained from /api/v1/accounts/{nickname} - used so we don't spam mentions when not directly addressed
|
||||||
|
REPLY_WITH_CONTEXT="" # set to true or false whether you want the bot to fetch context or not
|
||||||
|
ONLY_WHITELIST="true" # change to "false" if you want to accept prompts from any and all domains - *** USE WITH CAUTION ***
|
||||||
|
WHITELISTED_DOMAINS="" # comma separated list of domains you want to allow the bot to accept prompts from (i.e. poa.st,nicecrew.digital,detroitriotcity.com,decayable.ink)
|
||||||
OLLAMA_URL="http://localhost:11434" # OLLAMA connection URL
|
OLLAMA_URL="http://localhost:11434" # OLLAMA connection URL
|
||||||
OLLAMA_SYSTEM_PROMPT="" # system prompt - used to help tune the responses from the AI
|
OLLAMA_SYSTEM_PROMPT="" # system prompt - used to help tune the responses from the AI
|
||||||
OLLAMA_MODEL="" # Ollama model for responses e.g dolphin-mistral:latest
|
OLLAMA_MODEL="" # Ollama model for responses e.g dolphin-mistral:latest
|
||||||
|
FETCH_INTERVAL="" # interval for fetching new notifications from the instance, in milliseconds, recommend at least 15000
|
||||||
|
RANDOM_POST_INTERVAL="" # interval for ad-hoc posts in milliseconds
|
||||||
INSTANCE_BEARER_TOKEN="" # instance auth/bearer token (check the "verify_credentials" endpoint request headers in Chrome DevTools if on Soapbox)
|
INSTANCE_BEARER_TOKEN="" # instance auth/bearer token (check the "verify_credentials" endpoint request headers in Chrome DevTools if on Soapbox)
|
3
.gitignore
vendored
3
.gitignore
vendored
@ -1,8 +1,9 @@
|
|||||||
node_modules
|
node_modules
|
||||||
# Keep environment variables out of version control
|
# Keep environment variables out of version control
|
||||||
.env
|
.env*
|
||||||
*.log
|
*.log
|
||||||
*.db
|
*.db
|
||||||
/dist
|
/dist
|
||||||
|
screenlog*
|
||||||
|
|
||||||
/generated/prisma
|
/generated/prisma
|
||||||
|
@ -1,15 +1,13 @@
|
|||||||
## Pleroma -> Ollama Bot Setup
|
## Pleroma -> Ollama Bot Setup
|
||||||
|
|
||||||
1. Clone project
|
1. Clone project
|
||||||
2. Install npm 22.11.0 if you don't have it already
|
2. Install Node `v22.11.0` if you don't have it already
|
||||||
|
* If using `nvm`, just `nvm install 22.11.0` and then `nvm use 22.11.0` if necessary
|
||||||
3. `cd` into the project directory
|
3. `cd` into the project directory
|
||||||
4. Run `npm install`
|
4. Run `npm install`
|
||||||
5. Run `npx prisma init --datasource-provider sqlite --output ../generated/prisma`
|
|
||||||
6. Run `npx prisma migrate dev --name init`
|
6. Run `npx prisma migrate dev --name init`
|
||||||
7. To run the software on a cronjob, use `npm run once`
|
7. To start, run `npm run start`
|
||||||
|
|
||||||
### Database Migrations
|
### Database Migrations
|
||||||
|
|
||||||
If you add stuff to the schema, follow the [Prisma development workflow](https://www.prisma.io/docs/orm/prisma-migrate/workflows/development-and-production). This will apply the new schema to the database and generate a new Prisma client with type safety.
|
If you add stuff to the schema, follow the [Prisma development workflow](https://www.prisma.io/docs/orm/prisma-migrate/workflows/development-and-production). This will apply the new schema to the database and generate a new Prisma client with type safety.
|
||||||
|
|
||||||
Setting as a system service will come at some point, or someone could contribute if they wanted.
|
|
48
package-lock.json
generated
48
package-lock.json
generated
@ -1,22 +1,22 @@
|
|||||||
{
|
{
|
||||||
"name": "pleroma-ollama-bot",
|
"name": "pleroma-ollama-bot",
|
||||||
"version": "1.0.0",
|
"version": "1.0.5",
|
||||||
"lockfileVersion": 3,
|
"lockfileVersion": 3,
|
||||||
"requires": true,
|
"requires": true,
|
||||||
"packages": {
|
"packages": {
|
||||||
"": {
|
"": {
|
||||||
"name": "pleroma-ollama-bot",
|
"name": "pleroma-ollama-bot",
|
||||||
"version": "1.0.0",
|
"version": "1.0.5",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@prisma/client": "^6.10.1",
|
"@prisma/client": "^6.10.1",
|
||||||
"@types/node": "^24.0.5",
|
|
||||||
"dotenv": "^17.0.0",
|
"dotenv": "^17.0.0",
|
||||||
"striptags": "^3.2.0",
|
"striptags": "^3.2.0",
|
||||||
"ts-node": "^10.9.2",
|
"ts-node": "^10.9.2",
|
||||||
"typescript": "^5.8.3",
|
"typescript": "^5.8.3"
|
||||||
"ws": "^8.18.3"
|
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
|
"@types/node": "^24.0.10",
|
||||||
|
"@types/ws": "^8.18.1",
|
||||||
"prisma": "^6.10.1"
|
"prisma": "^6.10.1"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@ -164,14 +164,23 @@
|
|||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
"node_modules/@types/node": {
|
"node_modules/@types/node": {
|
||||||
"version": "24.0.5",
|
"version": "24.0.10",
|
||||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-24.0.5.tgz",
|
"resolved": "https://registry.npmjs.org/@types/node/-/node-24.0.10.tgz",
|
||||||
"integrity": "sha512-CXEG9E7GCTOZIre0WdDznmnhvF7xi7AmnP/zF496trmLiqlfdtxp9nPRgLVqfmJ8jgtcKcs0EcvOu2yDZSuvTg==",
|
"integrity": "sha512-ENHwaH+JIRTDIEEbDK6QSQntAYGtbvdDXnMXnZaZ6k13Du1dPMmprkEHIL7ok2Wl2aZevetwTAb5S+7yIF+enA==",
|
||||||
"license": "MIT",
|
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"undici-types": "~7.8.0"
|
"undici-types": "~7.8.0"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
"node_modules/@types/ws": {
|
||||||
|
"version": "8.18.1",
|
||||||
|
"resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.18.1.tgz",
|
||||||
|
"integrity": "sha512-ThVF6DCVhA8kUGy+aazFQ4kXQ7E1Ty7A3ypFOe0IcJV8O/M511G99AW24irKrW56Wt44yG9+ij8FaqoBGkuBXg==",
|
||||||
|
"dev": true,
|
||||||
|
"license": "MIT",
|
||||||
|
"dependencies": {
|
||||||
|
"@types/node": "*"
|
||||||
|
}
|
||||||
|
},
|
||||||
"node_modules/acorn": {
|
"node_modules/acorn": {
|
||||||
"version": "8.15.0",
|
"version": "8.15.0",
|
||||||
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
|
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz",
|
||||||
@ -345,27 +354,6 @@
|
|||||||
"integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==",
|
"integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==",
|
||||||
"license": "MIT"
|
"license": "MIT"
|
||||||
},
|
},
|
||||||
"node_modules/ws": {
|
|
||||||
"version": "8.18.3",
|
|
||||||
"resolved": "https://registry.npmjs.org/ws/-/ws-8.18.3.tgz",
|
|
||||||
"integrity": "sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==",
|
|
||||||
"license": "MIT",
|
|
||||||
"engines": {
|
|
||||||
"node": ">=10.0.0"
|
|
||||||
},
|
|
||||||
"peerDependencies": {
|
|
||||||
"bufferutil": "^4.0.1",
|
|
||||||
"utf-8-validate": ">=5.0.2"
|
|
||||||
},
|
|
||||||
"peerDependenciesMeta": {
|
|
||||||
"bufferutil": {
|
|
||||||
"optional": true
|
|
||||||
},
|
|
||||||
"utf-8-validate": {
|
|
||||||
"optional": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"node_modules/yn": {
|
"node_modules/yn": {
|
||||||
"version": "3.1.1",
|
"version": "3.1.1",
|
||||||
"resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz",
|
"resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz",
|
||||||
|
12
package.json
12
package.json
@ -1,25 +1,25 @@
|
|||||||
{
|
{
|
||||||
"name": "pleroma-ollama-bot",
|
"name": "pleroma-ollama-bot",
|
||||||
"version": "1.0.0",
|
"version": "1.1.0",
|
||||||
"main": "index.js",
|
"main": "index.js",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"once": "tsc && node -r dotenv/config dist/main.js",
|
"start": "tsc && node -r dotenv/config dist/main.js",
|
||||||
"build": "tsc"
|
"build": "tsc"
|
||||||
},
|
},
|
||||||
"type": "module",
|
"type": "module",
|
||||||
"keywords": [],
|
"keywords": [],
|
||||||
"author": "NiceCrew",
|
"author": "NiceCrew",
|
||||||
"description": "A simple bot that responds to activities from Pleroma instances using Ollama's API.",
|
"description": "A simple bot that responds to activities from Pleroma instances using Ollama's API at a configurable interval.",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@prisma/client": "^6.10.1",
|
"@prisma/client": "^6.10.1",
|
||||||
"@types/node": "^24.0.5",
|
|
||||||
"dotenv": "^17.0.0",
|
"dotenv": "^17.0.0",
|
||||||
"striptags": "^3.2.0",
|
"striptags": "^3.2.0",
|
||||||
"ts-node": "^10.9.2",
|
"ts-node": "^10.9.2",
|
||||||
"typescript": "^5.8.3",
|
"typescript": "^5.8.3"
|
||||||
"ws": "^8.18.3"
|
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
|
"@types/node": "^24.0.10",
|
||||||
|
"@types/ws": "^8.18.1",
|
||||||
"prisma": "^6.10.1"
|
"prisma": "^6.10.1"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -0,0 +1,18 @@
|
|||||||
|
-- RedefineTables
|
||||||
|
PRAGMA defer_foreign_keys=ON;
|
||||||
|
PRAGMA foreign_keys=OFF;
|
||||||
|
CREATE TABLE "new_Response" (
|
||||||
|
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||||
|
"pleromaNotificationId" TEXT NOT NULL DEFAULT 'null',
|
||||||
|
"to" TEXT NOT NULL,
|
||||||
|
"request" TEXT,
|
||||||
|
"response" TEXT,
|
||||||
|
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
"processedAt" DATETIME,
|
||||||
|
"isProcessing" BOOLEAN NOT NULL DEFAULT true
|
||||||
|
);
|
||||||
|
INSERT INTO "new_Response" ("createdAt", "id", "pleromaNotificationId", "processedAt", "request", "response", "to") SELECT "createdAt", "id", "pleromaNotificationId", "processedAt", "request", "response", "to" FROM "Response";
|
||||||
|
DROP TABLE "Response";
|
||||||
|
ALTER TABLE "new_Response" RENAME TO "Response";
|
||||||
|
PRAGMA foreign_keys=ON;
|
||||||
|
PRAGMA defer_foreign_keys=OFF;
|
@ -0,0 +1,18 @@
|
|||||||
|
-- RedefineTables
|
||||||
|
PRAGMA defer_foreign_keys=ON;
|
||||||
|
PRAGMA foreign_keys=OFF;
|
||||||
|
CREATE TABLE "new_Response" (
|
||||||
|
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||||
|
"pleromaNotificationId" TEXT NOT NULL DEFAULT 'null',
|
||||||
|
"to" TEXT NOT NULL DEFAULT 'null',
|
||||||
|
"request" TEXT NOT NULL DEFAULT 'null',
|
||||||
|
"response" TEXT NOT NULL DEFAULT 'null',
|
||||||
|
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
"processedAt" DATETIME,
|
||||||
|
"isProcessing" BOOLEAN NOT NULL DEFAULT true
|
||||||
|
);
|
||||||
|
INSERT INTO "new_Response" ("createdAt", "id", "isProcessing", "pleromaNotificationId", "processedAt", "request", "response", "to") SELECT "createdAt", "id", "isProcessing", "pleromaNotificationId", "processedAt", coalesce("request", 'null') AS "request", coalesce("response", 'null') AS "response", "to" FROM "Response";
|
||||||
|
DROP TABLE "Response";
|
||||||
|
ALTER TABLE "new_Response" RENAME TO "Response";
|
||||||
|
PRAGMA foreign_keys=ON;
|
||||||
|
PRAGMA defer_foreign_keys=OFF;
|
@ -0,0 +1,19 @@
|
|||||||
|
-- RedefineTables
|
||||||
|
PRAGMA defer_foreign_keys=ON;
|
||||||
|
PRAGMA foreign_keys=OFF;
|
||||||
|
CREATE TABLE "new_Response" (
|
||||||
|
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||||
|
"pleromaNotificationId" TEXT NOT NULL DEFAULT 'null',
|
||||||
|
"to" TEXT NOT NULL DEFAULT 'null',
|
||||||
|
"request" TEXT NOT NULL DEFAULT 'null',
|
||||||
|
"response" TEXT NOT NULL DEFAULT 'null',
|
||||||
|
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||||
|
"processedAt" DATETIME,
|
||||||
|
"isProcessing" BOOLEAN NOT NULL DEFAULT true,
|
||||||
|
"isComplete" BOOLEAN NOT NULL DEFAULT true
|
||||||
|
);
|
||||||
|
INSERT INTO "new_Response" ("createdAt", "id", "isProcessing", "pleromaNotificationId", "processedAt", "request", "response", "to") SELECT "createdAt", "id", "isProcessing", "pleromaNotificationId", "processedAt", "request", "response", "to" FROM "Response";
|
||||||
|
DROP TABLE "Response";
|
||||||
|
ALTER TABLE "new_Response" RENAME TO "Response";
|
||||||
|
PRAGMA foreign_keys=ON;
|
||||||
|
PRAGMA defer_foreign_keys=OFF;
|
@ -14,11 +14,13 @@ datasource db {
|
|||||||
model Response {
|
model Response {
|
||||||
id Int @id @default(autoincrement())
|
id Int @id @default(autoincrement())
|
||||||
pleromaNotificationId String @default("null")
|
pleromaNotificationId String @default("null")
|
||||||
to String
|
to String @default("null")
|
||||||
request String?
|
request String @default("null")
|
||||||
response String?
|
response String @default("null")
|
||||||
createdAt DateTime @default(now())
|
createdAt DateTime @default(now())
|
||||||
processedAt DateTime?
|
processedAt DateTime?
|
||||||
|
isProcessing Boolean @default(true)
|
||||||
|
isComplete Boolean @default(true)
|
||||||
}
|
}
|
||||||
|
|
||||||
model User {
|
model User {
|
||||||
|
106
src/api.ts
Normal file
106
src/api.ts
Normal file
@ -0,0 +1,106 @@
|
|||||||
|
import { envConfig, prisma } from "./main.js";
|
||||||
|
import { PleromaEmoji, Notification, ContextResponse } from "../types.js";
|
||||||
|
|
||||||
|
const getNotifications = async () => {
|
||||||
|
const { bearerToken, pleromaInstanceUrl } = envConfig;
|
||||||
|
try {
|
||||||
|
const request = await fetch(
|
||||||
|
`${pleromaInstanceUrl}/api/v1/notifications?types[]=mention`,
|
||||||
|
{
|
||||||
|
method: "GET",
|
||||||
|
headers: {
|
||||||
|
Authorization: `Bearer ${bearerToken}`,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
const notifications: Notification[] = await request.json();
|
||||||
|
|
||||||
|
return notifications;
|
||||||
|
} catch (error: any) {
|
||||||
|
throw new Error(error.message);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const getStatusContext = async (statusId: string) => {
|
||||||
|
const { bearerToken, pleromaInstanceUrl } = envConfig;
|
||||||
|
try {
|
||||||
|
const response = await fetch(
|
||||||
|
`${pleromaInstanceUrl}/api/v1/statuses/${statusId}/context`,
|
||||||
|
{
|
||||||
|
method: "GET",
|
||||||
|
headers: {
|
||||||
|
Authorization: `Bearer ${bearerToken}`,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
);
|
||||||
|
if (!response.ok) {
|
||||||
|
throw new Error(
|
||||||
|
`Could not get conversation context: ${response.status} - ${response.statusText}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
const data: ContextResponse = await response.json();
|
||||||
|
return data;
|
||||||
|
} catch (error: unknown) {
|
||||||
|
if (error instanceof Error) {
|
||||||
|
throw new Error(error.message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const getInstanceEmojis = async () => {
|
||||||
|
const { bearerToken, pleromaInstanceUrl } = envConfig;
|
||||||
|
try {
|
||||||
|
const request = await fetch(`${pleromaInstanceUrl}/api/v1/pleroma/emoji`, {
|
||||||
|
method: "GET",
|
||||||
|
headers: {
|
||||||
|
Authorization: `Bearer ${bearerToken}`,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
if (!request.ok) {
|
||||||
|
console.error(`Emoji GET failed: ${request.status}`);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const emojis: PleromaEmoji[] = await request.json();
|
||||||
|
return Object.keys(emojis);
|
||||||
|
} catch (error: any) {
|
||||||
|
console.error(`Could not fetch emojis: ${error.message}`);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const deleteNotification = async (notification: Notification) => {
|
||||||
|
const { pleromaInstanceUrl, bearerToken } = envConfig;
|
||||||
|
try {
|
||||||
|
if (!notification.id) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
await prisma.response.updateMany({
|
||||||
|
// this is probably not the best way to do this, but since we may have duplicate notifications, we have to update all of them - probably won't scale (lmao)
|
||||||
|
where: { pleromaNotificationId: notification.id },
|
||||||
|
data: { isProcessing: false },
|
||||||
|
});
|
||||||
|
const response = await fetch(
|
||||||
|
`${pleromaInstanceUrl}/api/v1/notifications/${notification.id}/dismiss`,
|
||||||
|
{
|
||||||
|
method: "POST",
|
||||||
|
headers: {
|
||||||
|
Authorization: `Bearer ${bearerToken}`,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
);
|
||||||
|
if (!response.ok) {
|
||||||
|
console.error(
|
||||||
|
`Could not delete notification ID: ${notification.id}\nReason: ${response.status} - ${response.statusText}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} catch (error: any) {
|
||||||
|
throw new Error(error.message);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
export {
|
||||||
|
deleteNotification,
|
||||||
|
getInstanceEmojis,
|
||||||
|
getNotifications,
|
||||||
|
getStatusContext,
|
||||||
|
};
|
351
src/main.ts
351
src/main.ts
@ -1,127 +1,137 @@
|
|||||||
import {
|
import {
|
||||||
OllamaRequest,
|
|
||||||
OllamaResponse,
|
|
||||||
NewStatusBody,
|
NewStatusBody,
|
||||||
Notification,
|
Notification,
|
||||||
|
OllamaConfigOptions,
|
||||||
|
OllamaChatRequest,
|
||||||
|
OllamaChatResponse,
|
||||||
|
PostAncestorsForModel,
|
||||||
} from "../types.js";
|
} from "../types.js";
|
||||||
import striptags from "striptags";
|
// import striptags from "striptags";
|
||||||
import { PrismaClient } from "../generated/prisma/client.js";
|
import { PrismaClient } from "../generated/prisma/client.js";
|
||||||
|
import {
|
||||||
|
// getInstanceEmojis,
|
||||||
|
deleteNotification,
|
||||||
|
getNotifications,
|
||||||
|
getStatusContext,
|
||||||
|
} from "./api.js";
|
||||||
|
import { storeUserData, storePromptData } from "./prisma.js";
|
||||||
|
import {
|
||||||
|
isFromWhitelistedDomain,
|
||||||
|
alreadyRespondedTo,
|
||||||
|
recordPendingResponse,
|
||||||
|
// trimInputData,
|
||||||
|
// selectRandomEmoji,
|
||||||
|
shouldContinue,
|
||||||
|
} from "./util.js";
|
||||||
|
|
||||||
const prisma = new PrismaClient();
|
export const prisma = new PrismaClient();
|
||||||
|
|
||||||
const getNotifications = async () => {
|
export const envConfig = {
|
||||||
try {
|
pleromaInstanceUrl: process.env.PLEROMA_INSTANCE_URL || "",
|
||||||
const request = await fetch(
|
pleromaInstanceDomain: process.env.PLEROMA_INSTANCE_DOMAIN || "",
|
||||||
`${process.env.PLEROMA_INSTANCE_URL}/api/v1/notifications?types[]=mention`,
|
whitelistOnly: process.env.ONLY_WHITELIST === "true" ? true : false,
|
||||||
{
|
whitelistedDomains: process.env.WHITELISTED_DOMAINS
|
||||||
method: "GET",
|
? process.env.WHITELISTED_DOMAINS.split(",")
|
||||||
headers: {
|
: [process.env.PLEROMA_INSTANCE_DOMAIN],
|
||||||
Authorization: `Bearer ${process.env.INSTANCE_BEARER_TOKEN}`,
|
ollamaUrl: process.env.OLLAMA_URL || "",
|
||||||
},
|
ollamaSystemPrompt: process.env.OLLAMA_SYSTEM_PROMPT,
|
||||||
}
|
ollamaModel: process.env.OLLAMA_MODEL || "",
|
||||||
);
|
fetchInterval: process.env.FETCH_INTERVAL
|
||||||
|
? parseInt(process.env.FETCH_INTERVAL)
|
||||||
const notifications: Notification[] = await request.json();
|
: 15000,
|
||||||
|
bearerToken: process.env.INSTANCE_BEARER_TOKEN || "",
|
||||||
return notifications;
|
adHocPostInterval: process.env.RANDOM_POST_INTERVAL
|
||||||
} catch (error: any) {
|
? parseInt(process.env.RANDOM_POST_INTERVAL)
|
||||||
throw new Error(error.message);
|
: 3600000,
|
||||||
}
|
botAccountId: process.env.PLEROMA_ACCOUNT_ID,
|
||||||
|
replyWithContext: process.env.REPLY_WITH_CONTEXT === "true" ? true : false,
|
||||||
};
|
};
|
||||||
|
|
||||||
const notifications = await getNotifications();
|
const ollamaConfig: OllamaConfigOptions = {
|
||||||
|
temperature: 0.9,
|
||||||
const storeUserData = async (notification: Notification): Promise<void> => {
|
top_p: 0.85,
|
||||||
try {
|
top_k: 60,
|
||||||
await prisma.user.upsert({
|
num_ctx: 16384, // maximum context window for Llama 3.1
|
||||||
where: { userFqn: notification.status.account.fqn },
|
repeat_penalty: 1.1,
|
||||||
update: {
|
|
||||||
lastRespondedTo: new Date(Date.now()),
|
|
||||||
},
|
|
||||||
create: {
|
|
||||||
userFqn: notification.status.account.fqn,
|
|
||||||
lastRespondedTo: new Date(Date.now()),
|
|
||||||
},
|
|
||||||
});
|
|
||||||
} catch (error: any) {
|
|
||||||
throw new Error(error.message);
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
const alreadyRespondedTo = async (
|
// this could be helpful
|
||||||
notification: Notification
|
// https://replicate.com/blog/how-to-prompt-llama
|
||||||
): Promise<boolean> => {
|
|
||||||
try {
|
|
||||||
const duplicate = await prisma.response.findFirst({
|
|
||||||
where: { pleromaNotificationId: notification.status.id },
|
|
||||||
});
|
|
||||||
if (duplicate) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
} catch (error: any) {
|
|
||||||
throw new Error(error.message);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const storePromptData = async (
|
|
||||||
notification: Notification,
|
|
||||||
ollamaResponseBody: OllamaResponse
|
|
||||||
) => {
|
|
||||||
try {
|
|
||||||
await prisma.response.create({
|
|
||||||
data: {
|
|
||||||
response: ollamaResponseBody.response,
|
|
||||||
request: striptags(notification.status.content),
|
|
||||||
to: notification.account.fqn,
|
|
||||||
pleromaNotificationId: notification.status.id,
|
|
||||||
},
|
|
||||||
});
|
|
||||||
} catch (error: any) {
|
|
||||||
throw new Error(error.message);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const trimInputData = (input: string) => {
|
|
||||||
const strippedInput = striptags(input);
|
|
||||||
const split = strippedInput.split(" ");
|
|
||||||
const promptStringIndex = split.indexOf("!prompt");
|
|
||||||
return split.slice(promptStringIndex + 1).join(" "); // returns everything after the !prompt
|
|
||||||
};
|
|
||||||
|
|
||||||
const generateOllamaRequest = async (
|
const generateOllamaRequest = async (
|
||||||
notification: Notification
|
notification: Notification
|
||||||
): Promise<OllamaResponse | undefined> => {
|
): Promise<OllamaChatResponse | undefined> => {
|
||||||
|
const {
|
||||||
|
whitelistOnly,
|
||||||
|
ollamaModel,
|
||||||
|
ollamaSystemPrompt,
|
||||||
|
ollamaUrl,
|
||||||
|
replyWithContext,
|
||||||
|
} = envConfig;
|
||||||
try {
|
try {
|
||||||
if (
|
if (shouldContinue(notification)) {
|
||||||
striptags(notification.status.content).includes("!prompt") &&
|
if (whitelistOnly && !isFromWhitelistedDomain(notification)) {
|
||||||
!notification.status.account.bot
|
await deleteNotification(notification);
|
||||||
) {
|
|
||||||
if (
|
|
||||||
process.env.ONLY_LOCAL_REPLIES === "true" &&
|
|
||||||
!notification.status.account.fqn.includes(
|
|
||||||
`@${process.env.PLEROMA_INSTANCE_DOMAIN}`
|
|
||||||
)
|
|
||||||
) {
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (await alreadyRespondedTo(notification)) {
|
if (await alreadyRespondedTo(notification)) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
await recordPendingResponse(notification);
|
||||||
await storeUserData(notification);
|
await storeUserData(notification);
|
||||||
const ollamaRequestBody: OllamaRequest = {
|
let conversationHistory: PostAncestorsForModel[] = [];
|
||||||
model: process.env.OLLAMA_MODEL as string,
|
if (replyWithContext) {
|
||||||
system: process.env.OLLAMA_SYSTEM_PROMPT as string,
|
const contextPosts = await getStatusContext(notification.status.id);
|
||||||
prompt: `@${notification.status.account.fqn} says: ${trimInputData(
|
if (!contextPosts?.ancestors || !contextPosts) {
|
||||||
notification.status.content
|
throw new Error(`Unable to obtain post context ancestors.`);
|
||||||
)}`,
|
}
|
||||||
|
conversationHistory = contextPosts.ancestors.map((ancestor) => {
|
||||||
|
const mentions = ancestor.mentions.map((mention) => mention.acct);
|
||||||
|
return {
|
||||||
|
account_fqn: ancestor.account.fqn,
|
||||||
|
mentions,
|
||||||
|
plaintext_content: ancestor.pleroma.content["text/plain"],
|
||||||
|
};
|
||||||
|
});
|
||||||
|
// console.log(conversationHistory);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Simplified user message (remove [/INST] as it's not needed for Llama 3)
|
||||||
|
const userMessage = `${notification.status.account.fqn} says to you: \"${notification.status.pleroma.content["text/plain"]}\".`;
|
||||||
|
|
||||||
|
let systemContent = ollamaSystemPrompt;
|
||||||
|
if (replyWithContext) {
|
||||||
|
// Simplified context instructions (avoid heavy JSON; summarize for clarity)
|
||||||
|
systemContent = `${ollamaSystemPrompt}\n\nPrevious conversation context:\n${conversationHistory
|
||||||
|
.map(
|
||||||
|
(post) =>
|
||||||
|
`${post.account_fqn} (said to ${post.mentions.join(", ")}): ${
|
||||||
|
post.plaintext_content
|
||||||
|
}`
|
||||||
|
)
|
||||||
|
.join(
|
||||||
|
"\n"
|
||||||
|
)}\nReply to the user who addressed you (you are Lexi, also known as nice-ai or nice-ai@nicecrew.digital). Examine the context of the entire conversation and make references to topics or information where appropriate. Prefix usernames with '@' when addressing them. Assume if there is no domain in the username, the domain is @nicecrew.digital (for example @matty would be @matty@nicecrew.digital)`;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Switch to chat request format (messages array auto-handles Llama 3 template)
|
||||||
|
const ollamaRequestBody: OllamaChatRequest = {
|
||||||
|
model: ollamaModel,
|
||||||
|
messages: [
|
||||||
|
{ role: "system", content: systemContent as string },
|
||||||
|
{ role: "user", content: userMessage },
|
||||||
|
],
|
||||||
stream: false,
|
stream: false,
|
||||||
|
options: ollamaConfig,
|
||||||
};
|
};
|
||||||
const response = await fetch(`${process.env.OLLAMA_URL}/api/generate`, {
|
|
||||||
|
// Change endpoint to /api/chat
|
||||||
|
const response = await fetch(`${ollamaUrl}/api/chat`, {
|
||||||
method: "POST",
|
method: "POST",
|
||||||
body: JSON.stringify(ollamaRequestBody),
|
body: JSON.stringify(ollamaRequestBody),
|
||||||
});
|
});
|
||||||
const ollamaResponse: OllamaResponse = await response.json();
|
const ollamaResponse: OllamaChatResponse = await response.json();
|
||||||
|
|
||||||
await storePromptData(notification, ollamaResponse);
|
await storePromptData(notification, ollamaResponse);
|
||||||
return ollamaResponse;
|
return ollamaResponse;
|
||||||
}
|
}
|
||||||
@ -132,13 +142,19 @@ const generateOllamaRequest = async (
|
|||||||
|
|
||||||
const postReplyToStatus = async (
|
const postReplyToStatus = async (
|
||||||
notification: Notification,
|
notification: Notification,
|
||||||
ollamaResponseBody: OllamaResponse
|
ollamaResponseBody: OllamaChatResponse
|
||||||
) => {
|
) => {
|
||||||
|
const { pleromaInstanceUrl, bearerToken } = envConfig;
|
||||||
|
// const emojiList = await getInstanceEmojis();
|
||||||
|
// let randomEmoji;
|
||||||
|
// if (emojiList) {
|
||||||
|
// randomEmoji = selectRandomEmoji(emojiList);
|
||||||
|
// }
|
||||||
try {
|
try {
|
||||||
let mentions: string[];
|
let mentions: string[];
|
||||||
const statusBody: NewStatusBody = {
|
const statusBody: NewStatusBody = {
|
||||||
content_type: "text/markdown",
|
content_type: "text/markdown",
|
||||||
status: ollamaResponseBody.response,
|
status: `${ollamaResponseBody.message.content}`,
|
||||||
in_reply_to_id: notification.status.id,
|
in_reply_to_id: notification.status.id,
|
||||||
};
|
};
|
||||||
if (
|
if (
|
||||||
@ -151,33 +167,128 @@ const postReplyToStatus = async (
|
|||||||
statusBody.to = mentions;
|
statusBody.to = mentions;
|
||||||
}
|
}
|
||||||
|
|
||||||
const response = await fetch(
|
const response = await fetch(`${pleromaInstanceUrl}/api/v1/statuses`, {
|
||||||
`${process.env.PLEROMA_INSTANCE_URL}/api/v1/statuses`,
|
method: "POST",
|
||||||
{
|
headers: {
|
||||||
method: "POST",
|
Authorization: `Bearer ${bearerToken}`,
|
||||||
headers: {
|
"Content-Type": "application/json",
|
||||||
Authorization: `Bearer ${process.env.INSTANCE_BEARER_TOKEN}`,
|
},
|
||||||
"Content-Type": "application/json",
|
body: JSON.stringify(statusBody),
|
||||||
},
|
});
|
||||||
body: JSON.stringify(statusBody),
|
|
||||||
}
|
|
||||||
);
|
|
||||||
|
|
||||||
if (!response.ok) {
|
if (!response.ok) {
|
||||||
throw new Error(`New status request failed: ${response.statusText}`);
|
throw new Error(`New status request failed: ${response.statusText}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
await deleteNotification(notification);
|
||||||
} catch (error: any) {
|
} catch (error: any) {
|
||||||
throw new Error(error.message);
|
throw new Error(error.message);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
if (notifications) {
|
const createTimelinePost = async () => {
|
||||||
await Promise.all(
|
const {
|
||||||
notifications.map(async (notification) => {
|
bearerToken,
|
||||||
const ollamaResponse = await generateOllamaRequest(notification);
|
ollamaModel,
|
||||||
if (ollamaResponse) {
|
ollamaSystemPrompt,
|
||||||
postReplyToStatus(notification, ollamaResponse);
|
ollamaUrl,
|
||||||
|
pleromaInstanceUrl,
|
||||||
|
} = envConfig;
|
||||||
|
const ollamaRequestBody: OllamaChatRequest = {
|
||||||
|
model: ollamaModel,
|
||||||
|
messages: [
|
||||||
|
{ role: "system", content: ollamaSystemPrompt as string },
|
||||||
|
{ role: "user", content: "Say something random." },
|
||||||
|
],
|
||||||
|
stream: false,
|
||||||
|
options: ollamaConfig,
|
||||||
|
};
|
||||||
|
try {
|
||||||
|
const response = await fetch(`${ollamaUrl}/api/chat`, {
|
||||||
|
method: "POST",
|
||||||
|
body: JSON.stringify(ollamaRequestBody),
|
||||||
|
});
|
||||||
|
if (!response.ok)
|
||||||
|
throw new Error("Error generating ad-hoc Ollama response");
|
||||||
|
|
||||||
|
const ollamaResponse: OllamaChatResponse = await response.json();
|
||||||
|
|
||||||
|
const newStatusBody: NewStatusBody = {
|
||||||
|
content_type: "text/markdown",
|
||||||
|
status: ollamaResponse.message.content,
|
||||||
|
};
|
||||||
|
|
||||||
|
const pleromaResponse = await fetch(
|
||||||
|
`${pleromaInstanceUrl}/api/v1/statuses`,
|
||||||
|
{
|
||||||
|
method: "POST",
|
||||||
|
headers: {
|
||||||
|
"Content-Type": "application/json",
|
||||||
|
Authorization: `Bearer ${bearerToken}`,
|
||||||
|
},
|
||||||
|
body: JSON.stringify(newStatusBody),
|
||||||
}
|
}
|
||||||
})
|
);
|
||||||
);
|
|
||||||
}
|
if (!pleromaResponse.ok)
|
||||||
|
throw new Error("Error posting ad-hoc Ollama response to Pleroma");
|
||||||
|
} catch (error: unknown) {
|
||||||
|
if (error instanceof Error) {
|
||||||
|
throw new Error(error.message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let notifications = [];
|
||||||
|
const beginFetchCycle = async () => {
|
||||||
|
setInterval(async () => {
|
||||||
|
notifications = await getNotifications();
|
||||||
|
if (notifications.length > 0) {
|
||||||
|
await Promise.all(
|
||||||
|
notifications.map(async (notification) => {
|
||||||
|
try {
|
||||||
|
const ollamaResponse = await generateOllamaRequest(notification);
|
||||||
|
if (ollamaResponse) {
|
||||||
|
postReplyToStatus(notification, ollamaResponse);
|
||||||
|
}
|
||||||
|
} catch (error: any) {
|
||||||
|
throw new Error(error.message);
|
||||||
|
}
|
||||||
|
})
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}, envConfig.fetchInterval); // lower intervals may cause the bot to respond multiple times to the same message, but we try to mitigate this with the deleteNotification function
|
||||||
|
};
|
||||||
|
|
||||||
|
const beginStatusPostInterval = async () => {
|
||||||
|
setInterval(async () => {
|
||||||
|
try {
|
||||||
|
createTimelinePost();
|
||||||
|
} catch (error: unknown) {
|
||||||
|
if (error instanceof Error) {
|
||||||
|
throw new Error(error.message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}, envConfig.adHocPostInterval);
|
||||||
|
};
|
||||||
|
|
||||||
|
console.log(
|
||||||
|
`Fetching notifications from ${envConfig.pleromaInstanceDomain}, every ${
|
||||||
|
envConfig.fetchInterval / 1000
|
||||||
|
} seconds.`
|
||||||
|
);
|
||||||
|
console.log(
|
||||||
|
`Accepting prompts from: ${envConfig.whitelistedDomains.join(", ")}`
|
||||||
|
);
|
||||||
|
console.log(
|
||||||
|
`Using model: ${envConfig.ollamaModel}\nConfig: ${JSON.stringify(
|
||||||
|
ollamaConfig
|
||||||
|
)}`
|
||||||
|
);
|
||||||
|
console.log(`System prompt: ${envConfig.ollamaSystemPrompt}`);
|
||||||
|
|
||||||
|
await beginFetchCycle();
|
||||||
|
// setInterval(async () => {
|
||||||
|
// createTimelinePost();
|
||||||
|
// }, 10000);
|
||||||
|
await beginStatusPostInterval();
|
||||||
|
42
src/prisma.ts
Normal file
42
src/prisma.ts
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
import { Notification, OllamaChatResponse } from "../types.js";
|
||||||
|
import { trimInputData } from "./util.js";
|
||||||
|
import { prisma } from "./main.js";
|
||||||
|
|
||||||
|
const storePromptData = async (
|
||||||
|
notification: Notification,
|
||||||
|
ollamaResponseBody: OllamaChatResponse
|
||||||
|
) => {
|
||||||
|
try {
|
||||||
|
await prisma.response.updateMany({
|
||||||
|
where: { pleromaNotificationId: notification.id },
|
||||||
|
data: {
|
||||||
|
response: ollamaResponseBody.message.content,
|
||||||
|
request: trimInputData(notification.status.content),
|
||||||
|
to: notification.account.fqn,
|
||||||
|
isProcessing: false,
|
||||||
|
isComplete: true,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
} catch (error: any) {
|
||||||
|
throw new Error(error.message);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const storeUserData = async (notification: Notification): Promise<void> => {
|
||||||
|
try {
|
||||||
|
await prisma.user.upsert({
|
||||||
|
where: { userFqn: notification.status.account.fqn },
|
||||||
|
update: {
|
||||||
|
lastRespondedTo: new Date(Date.now()),
|
||||||
|
},
|
||||||
|
create: {
|
||||||
|
userFqn: notification.status.account.fqn,
|
||||||
|
lastRespondedTo: new Date(Date.now()),
|
||||||
|
},
|
||||||
|
});
|
||||||
|
} catch (error: any) {
|
||||||
|
throw new Error(error.message);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
export { storeUserData, storePromptData };
|
108
src/util.ts
Normal file
108
src/util.ts
Normal file
@ -0,0 +1,108 @@
|
|||||||
|
import striptags from "striptags";
|
||||||
|
import { prisma } from "./main.js";
|
||||||
|
import { envConfig } from "./main.js";
|
||||||
|
import { Notification } from "../types.js";
|
||||||
|
|
||||||
|
const trimInputData = (input: string): string => {
|
||||||
|
const strippedInput = striptags(input);
|
||||||
|
|
||||||
|
const split = strippedInput.split(" ");
|
||||||
|
// const promptStringIndex = split.indexOf("!prompt");
|
||||||
|
const botFqnIndex = split.indexOf("@nice-ai");
|
||||||
|
const botFqnIndexFull = split.indexOf("@nice-ai@nicecrew.digital");
|
||||||
|
if (botFqnIndex !== -1) {
|
||||||
|
split[botFqnIndex] = "Lexi";
|
||||||
|
}
|
||||||
|
if (botFqnIndexFull !== -1) {
|
||||||
|
split[botFqnIndexFull] = "Lexi";
|
||||||
|
}
|
||||||
|
// split.splice(promptStringIndex, 1);
|
||||||
|
return split.join(" "); // returns everything after the !prompt
|
||||||
|
};
|
||||||
|
|
||||||
|
const recordPendingResponse = async (notification: Notification) => {
|
||||||
|
try {
|
||||||
|
await prisma.response.create({
|
||||||
|
data: {
|
||||||
|
pleromaNotificationId: notification.id,
|
||||||
|
isProcessing: true,
|
||||||
|
isComplete: false,
|
||||||
|
},
|
||||||
|
});
|
||||||
|
} catch (error: any) {
|
||||||
|
throw new Error(error.message);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const shouldContinue = (notification: Notification) => {
|
||||||
|
// wow this is bad
|
||||||
|
try {
|
||||||
|
const { botAccountId } = envConfig;
|
||||||
|
const statusContent = trimInputData(notification.status.content);
|
||||||
|
if (
|
||||||
|
// notification.status.visibility !== "private" &&
|
||||||
|
!notification.account.bot &&
|
||||||
|
notification.type === "mention"
|
||||||
|
) {
|
||||||
|
if (notification.status.in_reply_to_account_id === botAccountId) {
|
||||||
|
return true;
|
||||||
|
} else if (
|
||||||
|
notification.status.in_reply_to_account_id !== botAccountId &&
|
||||||
|
statusContent.includes("Lexi")
|
||||||
|
) {
|
||||||
|
return true;
|
||||||
|
} else {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch (error: unknown) {
|
||||||
|
if (error instanceof Error) {
|
||||||
|
throw new Error(error.message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const isFromWhitelistedDomain = (notification: Notification): boolean => {
|
||||||
|
try {
|
||||||
|
const domain = notification.status.account.fqn.split("@")[1];
|
||||||
|
if (envConfig.whitelistedDomains.includes(domain)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
console.log(
|
||||||
|
`Rejecting prompt request from non-whitelisted domain: ${domain}`
|
||||||
|
);
|
||||||
|
return false;
|
||||||
|
} catch (error: any) {
|
||||||
|
console.error(`Error with domain check: ${error.message}`);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const alreadyRespondedTo = async (
|
||||||
|
notification: Notification
|
||||||
|
): Promise<boolean> => {
|
||||||
|
try {
|
||||||
|
const duplicate = await prisma.response.findFirst({
|
||||||
|
where: { pleromaNotificationId: notification.id },
|
||||||
|
});
|
||||||
|
if (duplicate?.isProcessing || duplicate?.isComplete) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
} catch (error: any) {
|
||||||
|
throw new Error(error.message);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const selectRandomEmoji = (emojiList: string[]) => {
|
||||||
|
return emojiList[Math.floor(Math.random() * emojiList.length)];
|
||||||
|
};
|
||||||
|
|
||||||
|
export {
|
||||||
|
alreadyRespondedTo,
|
||||||
|
selectRandomEmoji,
|
||||||
|
trimInputData,
|
||||||
|
recordPendingResponse,
|
||||||
|
isFromWhitelistedDomain,
|
||||||
|
shouldContinue,
|
||||||
|
};
|
14
systemd.service
Normal file
14
systemd.service
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
[Unit]
Description=Pleroma Ollama Bot
Wants=network-online.target
After=network-online.target

[Service]
Type=simple
User=bot
Restart=always
RestartSec=3
ExecStart=/usr/bin/screen -L -DmS pleroma-ollama-bot /home/bot/.nvm/versions/node/v22.11.0/bin/npm run start
WorkingDirectory=/path/to/directory

[Install]
WantedBy=multi-user.target
|
147
types.d.ts
vendored
147
types.d.ts
vendored
@ -1,6 +1,44 @@
|
|||||||
export interface Notification {
|
export interface Notification {
|
||||||
account: Account;
|
account: Account;
|
||||||
status: Status;
|
status: Status;
|
||||||
|
id: string;
|
||||||
|
type: string;
|
||||||
|
created_at: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface ContextResponse {
|
||||||
|
ancestors: ContextObject[];
|
||||||
|
descendents: ContextObject[];
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface PostAncestorsForModel {
|
||||||
|
account_fqn: string;
|
||||||
|
mentions: string[];
|
||||||
|
plaintext_content: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface ContextAccountObject {
|
||||||
|
acct: string;
|
||||||
|
avatar: string;
|
||||||
|
bot: boolean;
|
||||||
|
display_name: string;
|
||||||
|
followers_count: number;
|
||||||
|
following_count: number;
|
||||||
|
fqn: string;
|
||||||
|
id: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface ContextObject {
|
||||||
|
content: string;
|
||||||
|
id: string;
|
||||||
|
in_reply_to_account_id: string | null;
|
||||||
|
in_reply_to_id: string | null;
|
||||||
|
media_attachments: string[];
|
||||||
|
mentions: Mention[];
|
||||||
|
pleroma: PleromaObjectInResponse;
|
||||||
|
visibility: "public" | "private" | "unlisted";
|
||||||
|
uri: string;
|
||||||
|
account: ContextAccountObject;
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface NewStatusBody {
|
export interface NewStatusBody {
|
||||||
@ -33,11 +71,46 @@ export interface OllamaRequest {
|
|||||||
/**
|
/**
|
||||||
* Whatever system prompt you'd like to add to the model to make it more unique, or force it to respond a certain way.
|
* Whatever system prompt you'd like to add to the model to make it more unique, or force it to respond a certain way.
|
||||||
*/
|
*/
|
||||||
system: string;
|
system?: string;
|
||||||
/**
|
/**
|
||||||
* Whether to stream responses from the API, or have it sent all as one payload.
|
* Whether to stream responses from the API, or have it sent all as one payload.
|
||||||
*/
|
*/
|
||||||
stream?: boolean = false; // stream response vs get response in one full message
|
stream?: boolean = false;
|
||||||
|
/**
|
||||||
|
* Ollama configuration options
|
||||||
|
*/
|
||||||
|
options?: OllamaConfigOptions;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface OllamaChatRequest {
|
||||||
|
model: string;
|
||||||
|
messages: OllamaMessages[];
|
||||||
|
stream?: boolean = false;
|
||||||
|
options?: OllamaConfigOptions;
|
||||||
|
}
|
||||||
|
|
||||||
|
export interface OllamaChatResponse {
|
||||||
|
model: string;
|
||||||
|
created_at: string;
|
||||||
|
message: OllamaChatResponseMessage;
|
||||||
|
done_reason: "string";
|
||||||
|
done: boolean;
|
||||||
|
total_duration: number;
|
||||||
|
load_duration: number;
|
||||||
|
prompt_eval_count: number;
|
||||||
|
prompt_eval_duration: number;
|
||||||
|
eval_count: number;
|
||||||
|
eval_duration: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface OllamaChatResponseMessage {
|
||||||
|
role: "assistant";
|
||||||
|
content: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface OllamaMessages {
|
||||||
|
role: "system" | "user";
|
||||||
|
content: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface OllamaResponse {
|
export interface OllamaResponse {
|
||||||
@ -54,8 +127,19 @@ export interface Status {
|
|||||||
created_at: string | Date; // when the post was created
|
created_at: string | Date; // when the post was created
|
||||||
id: string; // ID of the reply itself
|
id: string; // ID of the reply itself
|
||||||
in_reply_to_account_id: string; // account ID of the reply
|
in_reply_to_account_id: string; // account ID of the reply
|
||||||
in_reply_to_id?: string; // status that the user has replied to
|
in_reply_to_id: string; // status that the user has replied to
|
||||||
mentions?: Mention[]; // array of mentions
|
mentions: Mention[]; // array of mentions
|
||||||
|
pleroma: PleromaObjectInResponse;
|
||||||
|
visibility: "private" | "public" | "unlisted";
|
||||||
|
}
|
||||||
|
|
||||||
|
interface PleromaObjectInResponse {
|
||||||
|
content: { "text/plain": string };
|
||||||
|
context: string;
|
||||||
|
conversation_id: number;
|
||||||
|
direct_conversation_id: number | null;
|
||||||
|
local: boolean;
|
||||||
|
in_reply_to_account_acct: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
export interface Mention {
|
export interface Mention {
|
||||||
@ -64,3 +148,58 @@ export interface Mention {
|
|||||||
url: string;
|
url: string;
|
||||||
username: string;
|
username: string;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
export interface PleromaEmoji {
|
||||||
|
[emojiName: string]: PleromaEmojiMetadata;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface PleromaEmojiMetadata {
|
||||||
|
image_url: string;
|
||||||
|
tags: string[];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Experimental settings, I wouldn't recommend messing with these if you don't know how they work (I don't either)
|
||||||
|
*/
|
||||||
|
export interface OllamaConfigOptions {
|
||||||
|
/**
|
||||||
|
* Number of tokens guaranteed to be kept in memory during response generation. Higher values leave less
|
||||||
|
* possible room for num_ctx
|
||||||
|
*/
|
||||||
|
num_keep?: number;
|
||||||
|
seed?: number;
|
||||||
|
/**
|
||||||
|
* Sets maximum of tokens in the response
|
||||||
|
*/
|
||||||
|
num_predict?: number;
|
||||||
|
top_k?: number;
|
||||||
|
top_p?: number;
|
||||||
|
min_p?: number;
|
||||||
|
typical_p?: number;
|
||||||
|
repeat_last_n?: number;
|
||||||
|
/**
|
||||||
|
* How close of a response should the response be to the original prompt - lower = more focused response
|
||||||
|
*/
|
||||||
|
temperature?: number;
|
||||||
|
repeat_penalty?: number;
|
||||||
|
presence_penalty?: number;
|
||||||
|
frequency_penalty?: number;
|
||||||
|
mirostat?: number;
|
||||||
|
mirostat_tau?: number;
|
||||||
|
mirostat_eta?: number;
|
||||||
|
penalize_newline?: boolean;
|
||||||
|
stop?: string[];
|
||||||
|
numa?: boolean;
|
||||||
|
/**
|
||||||
|
* Number of tokens for the prompt to keep in memory for the response, minus the value of num_keep
|
||||||
|
*/
|
||||||
|
num_ctx?: number;
|
||||||
|
num_batch?: number;
|
||||||
|
num_gpu?: number;
|
||||||
|
main_gpu?: number;
|
||||||
|
low_vram?: boolean;
|
||||||
|
vocab_only?: boolean;
|
||||||
|
use_mmap?: boolean;
|
||||||
|
use_mlock?: boolean;
|
||||||
|
num_thread?: number;
|
||||||
|
}
|
||||||
|
Reference in New Issue
Block a user