export interface Notification {
  account: Account;
  status: Status;
  id: string;
  type: string;
  created_at: string;
}

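// Usage sketch: Notification.type is left as an open string above; Pleroma and
// Mastodon commonly use values such as "mention" or "favourite". A reply bot
// would typically filter for mentions first, as in this illustrative helper.
export function mentionsOnly(notifications: Notification[]): Notification[] {
  return notifications.filter((n) => n.type === "mention");
}
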
export interface NewStatusBody {
  content_type: "application/json" | "text/markdown";
  in_reply_to_id?: string;
  media_ids?: string[];
  sensitive?: "true" | "false" | boolean;
  status: string;
  to?: string[];
}

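// Usage sketch with hypothetical IDs and content: a minimal markdown reply.
// Optional fields are simply omitted; `sensitive` accepts both the boolean
// and string forms declared above.
export const exampleReply: NewStatusBody = {
  content_type: "text/markdown",
  in_reply_to_id: "AbCdEf123", // hypothetical status ID
  status: "Thanks for the mention!",
  sensitive: false,
};
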
export interface Account {
  acct: string; // nickname
  bot: boolean;
  display_name: string;
  fqn: string; // user@instance.tld
  id: string; // user ID
  note?: string; // bio
}

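// Sketch: a common display pattern - fall back to the account handle when
// display_name is empty. Illustrative only; not required by the types above.
export function displayName(account: Account): string {
  return account.display_name || account.acct;
}
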
export interface OllamaRequest {
  /**
   * Name of the Ollama model to generate a response from. Must be a valid,
   * locally installed model.
   */
  model: string;
  /**
   * The prompt sent from the end user.
   */
  prompt: string;
  /**
   * System prompt applied to the model to shape its persona, or force it to
   * respond a certain way.
   */
  system: string;
  /**
   * Whether to stream the response from the API, or have it sent as a single
   * payload. Defaults to false.
   */
  stream?: boolean;
  /**
   * Ollama configuration options
   */
  options?: OllamaConfigOptions;
}

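// Usage sketch with a hypothetical model name and prompts: a minimal
// non-streaming request. The model must match one installed locally
// (check with `ollama list`).
export const exampleRequest: OllamaRequest = {
  model: "llama3.1",
  prompt: "Hello there!",
  system: "You are a terse, friendly fediverse bot.",
  stream: false,
};
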
export interface OllamaResponse {
  model: string;
  created_at: Date | string;
  response: string;
  done: boolean;
  done_reason: string;
}

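// Sketch: pairing the two interfaces above against Ollama's /api/generate
// endpoint at its default local address. With `stream: false` the server
// returns one JSON payload, so a plain res.json() suffices; error handling
// is deliberately minimal.
export async function generate(req: OllamaRequest): Promise<OllamaResponse> {
  const res = await fetch("http://localhost:11434/api/generate", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ ...req, stream: false }),
  });
  if (!res.ok) {
    throw new Error(`Ollama request failed: ${res.status}`);
  }
  return (await res.json()) as OllamaResponse;
}
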
export interface Status {
  account: Account;
  content: string; // content of the post
  created_at: string | Date; // when the post was created
  id: string; // ID of the reply itself
  in_reply_to_account_id: string; // ID of the account being replied to
  in_reply_to_id: string; // ID of the status being replied to
  mentions: Mention[]; // array of mentions
  visibility: "private" | "public" | "unlisted";
}

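// Sketch: building a recipient list for a reply from a Status - the original
// author plus everyone already mentioned. Assumes `acct` values work as
// addresses for NewStatusBody.to, which matches the field comments above but
// is not confirmed by this file.
export function replyRecipients(status: Status): string[] {
  const mentioned = status.mentions.map((m) => m.acct);
  return Array.from(new Set([status.account.acct, ...mentioned]));
}
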
export interface Mention {
  acct: string;
  id: string;
  url: string;
  username: string;
}

export interface PleromaEmoji {
  [emojiName: string]: PleromaEmojiMetadata;
}

interface PleromaEmojiMetadata {
  image_url: string;
  tags: string[];
}

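// Sketch: PleromaEmoji is a plain shortcode-to-metadata map, so lookups are
// ordinary property accesses. The shortcode passed in is caller-supplied.
export function emojiUrl(
  emojis: PleromaEmoji,
  shortcode: string,
): string | undefined {
  return emojis[shortcode]?.image_url;
}
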
/**
 * Experimental settings; I wouldn't recommend messing with these if you don't
 * know how they work (I don't either).
 */
export interface OllamaConfigOptions {
  /**
   * Number of tokens from the prompt guaranteed to be kept in memory during
   * response generation. Higher values leave less of num_ctx available for
   * new tokens.
   */
  num_keep?: number;
  seed?: number;
  /**
   * Maximum number of tokens to generate in the response.
   */
  num_predict?: number;
  top_k?: number;
  top_p?: number;
  min_p?: number;
  typical_p?: number;
  repeat_last_n?: number;
  /**
   * Controls the randomness of the response - lower values produce more
   * focused, deterministic output.
   */
  temperature?: number;
  repeat_penalty?: number;
  presence_penalty?: number;
  frequency_penalty?: number;
  mirostat?: number;
  mirostat_tau?: number;
  mirostat_eta?: number;
  penalize_newline?: boolean;
  stop?: string[];
  numa?: boolean;
  /**
   * Size of the context window, in tokens, shared by the prompt and the
   * response (num_keep tokens are reserved out of this budget).
   */
  num_ctx?: number;
  num_batch?: number;
  num_gpu?: number;
  main_gpu?: number;
  low_vram?: boolean;
  vocab_only?: boolean;
  use_mmap?: boolean;
  use_mlock?: boolean;
  num_thread?: number;
}

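// Sketch with illustrative numbers: a conservative options object. temperature
// and num_ctx are the knobs most likely worth touching; everything else can
// usually stay at Ollama's defaults.
export const exampleOptions: OllamaConfigOptions = {
  temperature: 0.7,
  num_ctx: 4096,
  num_predict: 256,
};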