Update config.template

This commit is contained in:
SpudGunMan
2025-11-08 19:48:23 -08:00
parent d20eab03e9
commit 532efda9e8

View File

@@ -79,31 +79,29 @@ kiwixURL = http://127.0.0.1:8080
# Kiwix library name (e.g., wikipedia_en_100_nopic_2025-09)
kiwixLibraryName = wikipedia_en_100_nopic_2025-09
# Enable ollama LLM see more at https://ollama.com
# Enable local Ollama LLM integration; set to True for any LLM support
ollama = False
# Ollama model to use (defaults to gemma3:270m); gemma2 is good for the older SYSTEM prompt
# ollamaModel = gemma3:latest
# ollamaModel = gemma2:2b
# server instance to use (defaults to local machine install)
# Ollama server instance to use (defaults to local machine install)
ollamaHostName = http://localhost:11434
# Produce LLM replies to messages that aren't commands?
# If False, the LLM only replies to the "ask:" and "askai" commands.
llmReplyToNonCommands = True
# if True, the input is sent raw to the LLM, if False uses SYSTEM prompt
rawLLMQuery = True
# Enable Wikipedia/Kiwix integration with LLM for RAG (Retrieval Augmented Generation)
# When enabled, LLM will automatically search Wikipedia/Kiwix and include context in responses
llmUseWikiContext = False
# Use OpenWebUI instead of direct Ollama API (enables advanced RAG features)
# Use OpenWebUI instead of direct Ollama API / still leave ollama = True
useOpenWebUI = False
# OpenWebUI server URL (e.g., http://localhost:3000)
openWebUIURL = http://localhost:3000
# OpenWebUI API key/token (required when useOpenWebUI is True)
openWebUIAPIKey =
# Ollama model to use (defaults to gemma3:270m); gemma2 is good for the older SYSTEM prompt
# ollamaModel is used for both Ollama and OpenWebUI; when useOpenWebUI is True, it's just the model name
# ollamaModel = gemma3:latest
# ollamaModel = gemma2:2b
# if True, the query is sent raw to the LLM, if False uses internal SYSTEM prompt
rawLLMQuery = True
# If False, the LLM only replies to the "ask:" and "askai" commands; otherwise DMs automatically go to the LLM
llmReplyToNonCommands = True
# Enable Wikipedia/Kiwix integration with LLM for RAG (Retrieval Augmented Generation)
# When enabled, LLM will automatically search Wikipedia/Kiwix and include context in responses
llmUseWikiContext = False
# StoreForward Enabled and Limits
StoreForward = True
StoreLimit = 3