Slight update to input processing.
This commit is contained in:
		
							
								
								
									
										29
									
								
								src/main.ts
									
									
									
									
									
								
							
							
						
						
									
										29
									
								
								src/main.ts
									
									
									
									
									
								
							| @ -45,10 +45,11 @@ export const envConfig = { | ||||
| }; | ||||
|  | ||||
| const ollamaConfig: OllamaConfigOptions = { | ||||
|   temperature: 0.2, | ||||
|   top_p: 0.9, | ||||
|   top_k: 30, | ||||
|   temperature: 0.6, | ||||
|   top_p: 0.85, | ||||
|   top_k: 40, | ||||
|   num_ctx: 2048, | ||||
|   repeat_penalty: 1.1, | ||||
| }; | ||||
|  | ||||
| // this could be helpful | ||||
| @ -63,8 +64,8 @@ const generateOllamaRequest = async ( | ||||
|     if ( | ||||
|       striptags(notification.status.content).includes("!prompt") && | ||||
|       !notification.status.account.bot && // sanity check, sort of | ||||
|       notification.type === "mention" && | ||||
|       notification.status.visibility !== "private" // for safety, let's only respond to public messages | ||||
|       notification.type === "mention" // && | ||||
|       // notification.status.visibility !== "private" // for safety, let's only respond to public messages | ||||
|     ) { | ||||
|       if (whitelistOnly && !isFromWhitelistedDomain(notification)) { | ||||
|         await deleteNotification(notification); | ||||
| @ -75,12 +76,16 @@ const generateOllamaRequest = async ( | ||||
|       } | ||||
|       await recordPendingResponse(notification); | ||||
|       await storeUserData(notification); | ||||
|       console.log(trimInputData(notification.status.content)); | ||||
|       const ollamaRequestBody: OllamaRequest = { | ||||
|         model: ollamaModel, | ||||
|         prompt: trimInputData(notification.status.content), | ||||
|         // prompt: trimInputData(notification.status.content), | ||||
|         prompt: `${notification.status.account.fqn} says: ${trimInputData( | ||||
|           notification.status.content | ||||
|         )}`, | ||||
|         system: ollamaSystemPrompt, | ||||
|         stream: false, | ||||
|         // options: ollamaConfig, | ||||
|         options: ollamaConfig, | ||||
|       }; | ||||
|       const response = await fetch(`${ollamaUrl}/api/generate`, { | ||||
|         method: "POST", | ||||
| @ -145,14 +150,15 @@ const createTimelinePost = async () => { | ||||
|   const { | ||||
|     bearerToken, | ||||
|     ollamaModel, | ||||
|     ollamaSystemPrompt, | ||||
|     // ollamaSystemPrompt, | ||||
|     ollamaUrl, | ||||
|     pleromaInstanceUrl, | ||||
|   } = envConfig; | ||||
|   const ollamaRequestBody: OllamaRequest = { | ||||
|     model: ollamaModel, | ||||
|     prompt: "Make a random post about a random topic.", | ||||
|     system: ollamaSystemPrompt, | ||||
|     prompt: "Say something random.", | ||||
|     system: | ||||
|       "You are a friendly AI assistant who loves to educate people on random topics, provide words of encouragement. You like to be as detailed as possible.", | ||||
|     stream: false, | ||||
|     // options: ollamaConfig, | ||||
|   }; | ||||
| @ -241,4 +247,7 @@ console.log( | ||||
| console.log(`System prompt: ${envConfig.ollamaSystemPrompt}`); | ||||
|  | ||||
| await beginFetchCycle(); | ||||
| // setInterval(async () => { | ||||
| //   createTimelinePost(); | ||||
| // }, 10000); | ||||
| await beginStatusPostInterval(); | ||||
|  | ||||
		Reference in New Issue
	
	Block a user