I don't remember

.gitignore (vendored): 1 line changed

@@ -4,5 +4,6 @@ node_modules
 *.log
 *.db
 /dist
 screenlog*
+/generated/prisma

src/main.ts: 24 lines changed

@@ -1,9 +1,11 @@
 import {
-  OllamaRequest,
-  OllamaResponse,
   NewStatusBody,
   Notification,
   OllamaConfigOptions,
+  // OllamaChatRequest,
+  // OllamaChatResponse,
+  OllamaRequest,
+  OllamaResponse,
 } from "../types.js";
 import striptags from "striptags";
 import { PrismaClient } from "../generated/prisma/client.js";

@@ -31,9 +33,7 @@ export const envConfig = {
     ? process.env.WHITELISTED_DOMAINS.split(",")
     : [process.env.PLEROMA_INSTANCE_DOMAIN],
   ollamaUrl: process.env.OLLAMA_URL || "",
-  ollamaSystemPrompt:
-    process.env.OLLAMA_SYSTEM_PROMPT ||
-    "You are a helpful AI assistant. Answer all questions concisely.",
+  ollamaSystemPrompt: process.env.OLLAMA_SYSTEM_PROMPT,
   ollamaModel: process.env.OLLAMA_MODEL || "",
   fetchInterval: process.env.FETCH_INTERVAL
     ? parseInt(process.env.FETCH_INTERVAL)

@@ -42,9 +42,10 @@ export const envConfig = {
 };
 
 const ollamaConfig: OllamaConfigOptions = {
-  temperature: 1.4,
-  top_k: 100,
-  top_p: 0.8,
+  temperature: 0.2,
+  top_p: 0.9,
+  top_k: 30,
+  num_ctx: 2048,
 };
 
 // this could be helpful

@@ -73,12 +74,10 @@ const generateOllamaRequest = async (
   await storeUserData(notification);
   const ollamaRequestBody: OllamaRequest = {
     model: ollamaModel,
+    prompt: trimInputData(notification.status.content),
     system: ollamaSystemPrompt,
-    prompt: `[INST] @${
-      notification.status.account.fqn
-    } says: ${trimInputData(notification.status.content)} [/INST]`,
     stream: false,
-    options: ollamaConfig,
+    // options: ollamaConfig,
   };
   const response = await fetch(`${ollamaUrl}/api/generate`, {
     method: "POST",

@@ -173,4 +172,5 @@ console.log(
     ollamaConfig
   )}`
 );
+console.log(`System prompt: ${envConfig.ollamaSystemPrompt}`);
 await beginFetchCycle();
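
Taken together, the src/main.ts changes drop the [INST]-wrapped prompt in favor of Ollama's dedicated system field, stop applying a default system prompt, and comment out the sampling options at the call site. A minimal sketch of the payload the bot now POSTs to /api/generate, with placeholder values standing in for envConfig and the notification (the model name and prompt text are illustrative, not from the commit):

// Sketch only: the request shape after this commit, not code from the repo.
interface OllamaRequest {
  model: string;
  prompt: string;
  system?: string; // optional as of this commit
  stream?: boolean;
  options?: Record<string, number>; // stands in for OllamaConfigOptions
}

const body: OllamaRequest = {
  model: "llama3",                          // envConfig.ollamaModel in the real code
  prompt: "what's the weather like?",       // trimInputData(notification.status.content)
  system: process.env.OLLAMA_SYSTEM_PROMPT, // may be undefined now that no default remains
  stream: false,
  // options: { temperature: 0.2, top_p: 0.9, top_k: 30, num_ctx: 2048 },
  // ^ commented out by this commit, so Ollama falls back to the model's defaults
};

const response = await fetch(`${process.env.OLLAMA_URL}/api/generate`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify(body),
});

The /api/generate endpoint and the model/prompt/system/stream/options field names are standard Ollama API; only the placeholder values above are invented.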

types.d.ts (vendored): 33 lines changed

@@ -36,7 +36,7 @@ export interface OllamaRequest {
   /**
    * Whatever system prompt you'd like to add to the model to make it more unique, or force it to respond a certain way.
    */
-  system: string;
+  system?: string;
   /**
    * Whether to stream responses from the API, or have it sent all as one payload.
    */

@@ -47,6 +47,37 @@ export interface OllamaRequest {
   options?: OllamaConfigOptions;
 }
 
+export interface OllamaChatRequest {
+  model: string;
+  messages: OllamaMessages[];
+  stream?: boolean = false;
+  options?: OllamaConfigOptions;
+}
+
+export interface OllamaChatResponse {
+  model: string;
+  created_at: string;
+  message: OllamaChatResponseMessage;
+  done_reason: "string";
+  done: boolean;
+  total_duration: number;
+  load_duration: number;
+  prompt_eval_count: number;
+  prompt_eval_duration: number;
+  eval_count: number;
+  eval_duration: number;
+}
+
+interface OllamaChatResponseMessage {
+  role: "assistant";
+  content: string;
+}
+
+interface OllamaMessages {
+  role: "system" | "user";
+  content: string;
+}
+
 export interface OllamaResponse {
   model: string;
   created_at: Date | string;
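
The new chat-flavored interfaces are only declared here; the matching imports in src/main.ts stay commented out. One thing to note before they get wired up: `stream?: boolean = false;` is not valid TypeScript, since interface members cannot carry initializers, so it would need to become `stream?: boolean;`. Assuming that fix, a hypothetical call against Ollama's /api/chat endpoint using these types could look like:

// Hypothetical usage of the new chat types; nothing in this commit calls /api/chat yet.
interface OllamaMessages {
  role: "system" | "user";
  content: string;
}

interface OllamaChatRequest {
  model: string;
  messages: OllamaMessages[];
  stream?: boolean; // initializer dropped so the interface compiles
}

const chatBody: OllamaChatRequest = {
  model: "llama3", // illustrative; envConfig.ollamaModel in practice
  messages: [
    { role: "system", content: "You are a helpful AI assistant." },
    { role: "user", content: "Why is the sky blue?" },
  ],
  stream: false,
};

const res = await fetch(`${process.env.OLLAMA_URL}/api/chat`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify(chatBody),
});
const reply = await res.json(); // shaped like OllamaChatResponse: { message: { role, content }, ... }
console.log(reply.message.content);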