Compare commits

...

20 Commits

Author SHA1 Message Date
SpudGunMan
71733de05f enhance DM/Channel logic for llm
fix logic for DM or Channel Sending
2024-09-10 12:59:00 -07:00
SpudGunMan
d5e48a3e36 Update README.md 2024-09-10 00:56:16 -07:00
SpudGunMan
e36755a21d Update llm.py
fix typo
2024-09-10 00:52:20 -07:00
SpudGunMan
9620164884 Update locationdata.py
enhance for LLM
2024-09-10 00:46:43 -07:00
SpudGunMan
711844cc83 enhanceMessages
fixes a few things I didnt wrap up and also enhances this suggestion https://github.com/SpudGunMan/meshing-around/issues/59
2024-09-10 00:46:32 -07:00
SpudGunMan
6eb82b26a7 Update mesh_bot.py 2024-09-09 23:27:12 -07:00
SpudGunMan
b12cf6219a responseDelay 2024-09-06 09:31:30 -07:00
SpudGunMan
7e46305277 responseDelay 2024-09-06 09:30:07 -07:00
SpudGunMan
819c37bdec networkErrorFix 2024-09-05 23:01:46 -07:00
SpudGunMan
c80690d66e Update README.md 2024-09-05 09:38:56 -07:00
SpudGunMan
084f879537 Update settings.py 2024-09-05 09:09:38 -07:00
SpudGunMan
7afd6bbbe9 Update README.md 2024-09-05 00:54:32 -07:00
SpudGunMan
46641e8a86 Update README.md 2024-09-05 00:51:01 -07:00
SpudGunMan
010b386ce1 Update README.md 2024-09-05 00:43:02 -07:00
SpudGunMan
a73b320715 Update llm.py 2024-09-05 00:22:33 -07:00
SpudGunMan
5f822a6230 Update mesh_bot.py 2024-09-04 22:27:15 -07:00
SpudGunMan
024fac90cd Update install.sh 2024-09-04 19:50:49 -07:00
SpudGunMan
133bc36cca Update install.sh 2024-09-04 19:22:21 -07:00
SpudGunMan
51a7ff2820 checkPip 2024-09-04 19:17:37 -07:00
SpudGunMan
bc96f8df49 installOllama 2024-09-04 18:42:04 -07:00
7 changed files with 127 additions and 50 deletions

View File

@@ -8,7 +8,7 @@ The feature-rich bot requires the internet for full functionality. These respond
Along with network testing, this bot has a lot of other fun features, like simple mail messaging you can leave for another device, and when that device is seen, it can send the mail as a DM. Or a scheduler to send weather or a reminder weekly for the VHF net.
The bot is also capable of using dual radio/nodes, so you can monitor two networks at the same time and send messages to nodes using the same `bbspost @nodeNumber #message` or `bbspost @nodeShportName #message` function. There is a small message board to fit in the constraints of Meshtastic for posting bulletin messages with `bbspost $subject #message`.
The bot is also capable of using dual radio/nodes, so you can monitor two networks at the same time and send messages to nodes using the same `bbspost @nodeNumber #message` or `bbspost @nodeShortName #message` function. There is a small message board to fit in the constraints of Meshtastic for posting bulletin messages with `bbspost $subject #message`.
Look up data using wiki results or interact with [Ollama](https://ollama.com) LLM AI see the [OllamaDocs](https://github.com/ollama/ollama/tree/main/docs) If Ollama is enabled you can DM the bot directly. The default model for mesh-bot which is currently `gemma2:2b`
@@ -32,7 +32,7 @@ Any messages that are over 160 characters are chunked into 160 message bytes to
- `bbshelp` returns the following
- `bbslist` list the messages by ID and subject
- `bbsread` read a message example use: `bbsread #1`
- `bbspost` post a message to public board or send a DM example use: `bbspost $subject #message, or bbspost @nodeNumber #message or bbspost @nodeShportName #message`
- `bbspost` post a message to public board or send a DM example use: `bbspost $subject #message, or bbspost @nodeNumber #message or bbspost @nodeShortName #message`
- `bbsdelete` delete a message example use: `bbsdelete #4`
- Other functions
- `whereami` returns the address of location of sender if known
@@ -159,11 +159,14 @@ Ollama Settings, for Ollama to work the command line `ollama run 'model'` needs
Enable History, set via code readme Ollama Config in [Settings](https://github.com/SpudGunMan/meshing-around?tab=readme-ov-file#configurations) and [llm.py](https://github.com/SpudGunMan/meshing-around/blob/eb3bbdd3c5e0f16fe3c465bea30c781bd132d2d3/modules/llm.py#L12)
Tested models are `llama3.1, gemma2 (and variants), phi3.5, mistral` other models may not handle the template as well.
```
# Enable ollama LLM see more at https://ollama.com
ollama = True
# Ollama model to use (defaults to llama3.1)
ollamaModel = gemma2:2b
# Ollama model to use (defaults to gemma2:2b)
ollamaModel = gemma2
#ollamaModel = llama3.1
```
also see llm.py for changing the defaults of

View File

@@ -3,10 +3,23 @@
# install.sh
cd "$(dirname "$0")"
printf "\nMeshing Around Installer\n"
# add user to groups for serial access
printf "\nAdding user to dialout and tty groups for serial access\n"
sudo usermod -a -G dialout $USER
sudo usermod -a -G tty $USER
# check for pip
if ! command -v pip &> /dev/null
then
printf "pip not found, please install pip with your OS\n"
sudo apt-get install python3-pip
else
printf "python pip found\n"
fi
# generate config file, check if it exists
if [ -f config.ini ]; then
printf "\nConfig file already exists, moving to backup config.old\n"
@@ -97,6 +110,25 @@ if [ $bot == "n" ]; then
fi
fi
printf "\nOptionally if you want to install the LLM Ollama components we will execute the following commands\n"
printf "\ncurl -fsSL https://ollama.com/install.sh | sh\n"
# ask if the user wants to install the LLM Ollama components
echo "Do you want to install the LLM Ollama components? (y/n)"
read ollama
if [ $ollama == "y" ]; then
curl -fsSL https://ollama.com/install.sh | sh
# ask if want to install gemma2:2b
printf "\n Ollama install done now we can install the Gemma2:2b components, multi GB download\n"
echo "Do you want to install the Gemma2:2b components? (y/n)"
read gemma
if [ $gemma == "y" ]; then
ollama pull gemma2:2b
fi
fi
printf "\nGoodbye!"
exit 0

View File

@@ -8,6 +8,8 @@ from pubsub import pub # pip install pubsub
from modules.log import *
from modules.system import *
responseDelay = 0.7 # delay in seconds for response to avoid message collision
def auto_response(message, snr, rssi, hop, message_from_id, channel_number, deviceID):
#Auto response to messages
message_lower = message.lower()
@@ -57,8 +59,8 @@ def auto_response(message, snr, rssi, hop, message_from_id, channel_number, devi
# run the first command after sorting
bot_response = command_handler[cmds[0]['cmd']]()
# wait a 700ms to avoid message collision from lora-ack
time.sleep(0.7)
# wait a responseDelay to avoid message collision from lora-ack
time.sleep(responseDelay)
return bot_response
@@ -104,30 +106,49 @@ def handle_wiki(message):
return get_wikipedia_summary(search)
else:
return "Please add a search term example:wiki: travelling gnome"
llmRunCounter = 0
llmTotalRuntime = []
llmLocationTable = {}
def handle_llm(message_from_id, channel_number, deviceID, message, publicChannel):
global llmRunCounter, llmTotalRuntime, llmLocationTable
if location_enabled:
location = get_node_location(message_from_id, deviceID)
# if message_from_id is in the llmLocationTable use the location from the table to save on API calls
if message_from_id in llmLocationTable:
location = llmLocationTable[message_from_id]
location_name = llmLocationTable[message_from_id]
else:
location_name = where_am_i(str(location[0]), str(location[1]), short = True)
llmLocationTable.append({message_from_id: location_name})
if NO_DATA_NOGPS in location_name:
location_name = "no location provided "
if NO_DATA_NOGPS in location_name:
location_name = "no location provided "
else:
location_name = "no location provided "
location_name = "no location provided"
if "ask:" in message.lower():
user_input = message.split(":")[1]
elif "askai" in message.lower():
user_input = message.replace("askai", "")
else:
# likely a DM
user_input = message
# if the message_from_id is not in the llmLocationTable send the welcome message
if not message_from_id in llmLocationTable:
if (channel_number == publicChannel and antiSpam) or useDMForResponse:
# send via DM
send_message(welcome_message, channel_number, message_from_id, deviceID)
time.sleep(responseDelay)
else:
# send via channel
send_message(welcome_message, channel_number, 0, deviceID)
time.sleep(responseDelay)
# add the node to the llmLocationTable for future use
llmLocationTable[message_from_id] = location_name
user_input = user_input.strip()
if len(user_input) < 1:
@@ -138,16 +159,24 @@ def handle_llm(message_from_id, channel_number, deviceID, message, publicChannel
averageRuntime = sum(llmTotalRuntime) / len(llmTotalRuntime)
if averageRuntime > 25:
msg = f"Please wait, average query time is: {int(averageRuntime)} seconds"
if channel_number == publicChannel:
if (channel_number == publicChannel and antiSpam) or useDMForResponse:
# send via DM
send_message(msg, channel_number, message_from_id, deviceID)
time.sleep(responseDelay)
else:
# send via channel
send_message(msg, channel_number, 0, deviceID)
time.sleep(responseDelay)
else:
msg = "Please wait, response could take 3+ minutes. Fund the SysOp's GPU budget!"
if channel_number == publicChannel:
send_message(msg, channel_number, message_from_id, deviceID)
else:
send_message(msg, channel_number, 0, deviceID)
msg = "Please wait, response could take 30+ seconds. Fund the SysOp's GPU budget!"
if (channel_number == publicChannel and antiSpam) or useDMForResponse:
# send via DM
send_message(msg, channel_number, message_from_id, deviceID)
time.sleep(responseDelay)
else:
# send via channel
send_message(msg, channel_number, 0, deviceID)
time.sleep(responseDelay)
start = time.time()
@@ -338,8 +367,8 @@ def onReceive(packet, interface):
msg = bbs_check_dm(message_from_id)
if msg:
# wait a 700ms to avoid message collision from lora-ack.
time.sleep(0.7)
# wait a responseDelay to avoid message collision from lora-ack.
time.sleep(responseDelay)
logger.info(f"System: BBS DM Found: {msg[1]} For: {get_name_from_number(message_from_id, 'long', rxNode)}")
message = "Mail: " + msg[1] + " From: " + get_name_from_number(msg[2], 'long', rxNode)
bbs_delete_dm(msg[0], msg[1])
@@ -458,8 +487,8 @@ def onReceive(packet, interface):
# repeat the message on the other device
if repeater_enabled and interface2_enabled:
# wait a 700ms to avoid message collision from lora-ack.
time.sleep(0.7)
# wait a responseDelay to avoid message collision from lora-ack.
time.sleep(responseDelay)
rMsg = (f"{message_string} From:{get_name_from_number(message_from_id, 'short', rxNode)}")
# if channel found in the repeater list repeat the message
if str(channel_number) in repeater_channels:

View File

@@ -86,6 +86,11 @@ def llm_query(input, nodeID=0, location_name=None):
if llmContext_fromGoogle:
# grab some context from the internet using google search hits (if available)
# localization details at https://pypi.org/project/googlesearch-python/
# remove common words from the search query
# commonWordsList = ["is", "for", "the", "of", "and", "in", "on", "at", "to", "with", "by", "from", "as", "a", "an", "that", "this", "these", "those", "there", "here", "where", "when", "why", "how", "what", "which", "who", "whom", "whose", "whom"]
# sanitizedSearch = ' '.join([word for word in input.split() if word.lower() not in commonWordsList])
try:
googleSearch = search(input, advanced=True, num_results=googleSearchResults)
if googleSearch:
@@ -95,18 +100,18 @@ def llm_query(input, nodeID=0, location_name=None):
else:
googleResults = ['no other context provided']
except Exception as e:
logger.debug(f"System: LLM Query: context gathering error: {e}")
logger.debug(f"System: LLM Query: context gathering failed, likely due to network issues")
googleResults = ['no other context provided']
if googleResults:
logger.debug(f"System: External LLM Query: {input} From:{nodeID} with context from google")
logger.debug(f"System: LLM Query: {input} From:{nodeID} with context from google")
else:
logger.debug(f"System: External LLM Query: {input} From:{nodeID}")
logger.debug(f"System: LLM Query: {input} From:{nodeID}")
response = ""
result = ""
location_name += f" at the current time of {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
location_name += f" at the current time of {datetime.now().strftime('%Y-%m-%d %H:%M:%S %Z')}"
try:
result = chain_prompt_model.invoke({"input": input, "llmModel": llmModel, "userID": nodeID, \

View File

@@ -22,28 +22,33 @@ def where_am_i(lat=0, lon=0, short=False):
# initialize Nominatim API
geolocator = Nominatim(user_agent="mesh-bot")
# Nomatim API call to get address
if short:
location = geolocator.reverse(lat + ", " + lon)
address = location.raw['address']
address_components = ['city', 'state', 'county', 'country']
whereIam = f"City: {address.get('city', '')} State: {address.get('state', '')} County: {address.get('county', '')} Country: {address.get('country', '')}"
try:
# Nomatim API call to get address
if short:
location = geolocator.reverse(lat + ", " + lon)
address = location.raw['address']
address_components = ['city', 'state', 'county', 'country']
whereIam = f"City: {address.get('city', '')}. State: {address.get('state', '')}. County: {address.get('county', '')}. Country: {address.get('country', '')}."
return whereIam
if float(lat) == latitudeValue and float(lon) == longitudeValue:
# redacted address when no GPS and using default location
location = geolocator.reverse(lat + ", " + lon)
address = location.raw['address']
address_components = ['city', 'state', 'postcode', 'county', 'country']
whereIam += ' '.join([address.get(component, '') for component in address_components if component in address])
whereIam += " Grid: " + grid
else:
location = geolocator.reverse(lat + ", " + lon)
address = location.raw['address']
address_components = ['house_number', 'road', 'city', 'state', 'postcode', 'county', 'country']
whereIam += ' '.join([address.get(component, '') for component in address_components if component in address])
whereIam += " Grid: " + grid
return whereIam
except Exception as e:
logger.debug("Location:Error fetching location data with whereami, likely network error")
return ERROR_FETCHING_DATA
if float(lat) == latitudeValue and float(lon) == longitudeValue:
# redacted address when no GPS and using default location
location = geolocator.reverse(lat + ", " + lon)
address = location.raw['address']
address_components = ['city', 'state', 'postcode', 'county', 'country']
whereIam += ' '.join([address.get(component, '') for component in address_components if component in address])
whereIam += " Grid: " + grid
else:
location = geolocator.reverse(lat + ", " + lon)
address = location.raw['address']
address_components = ['house_number', 'road', 'city', 'state', 'postcode', 'county', 'country']
whereIam += ' '.join([address.get(component, '') for component in address_components if component in address])
whereIam += " Grid: " + grid
return whereIam
def get_tide(lat=0, lon=0):
station_id = ""

View File

@@ -96,6 +96,7 @@ try:
welcome_message = config['general'].get('welcome_message', WELCOME_MSG)
welcome_message = (f"{welcome_message}").replace('\\n', '\n') # allow for newlines in the welcome message
motd_enabled = config['general'].getboolean('motdEnabled', True)
MOTD = config['general'].get('motd', MOTD)
dad_jokes_enabled = config['general'].getboolean('DadJokes', False)
solar_conditions_enabled = config['general'].getboolean('spaceWeather', True)
wikipedia_enabled = config['general'].getboolean('wikipedia', False)

View File

@@ -8,6 +8,8 @@ from pubsub import pub # pip install pubsub
from modules.log import *
from modules.system import *
responseDelay = 0.7 # delay in seconds for response to avoid message collision
def auto_response(message, snr, rssi, hop, message_from_id, channel_number, deviceID):
# Auto response to messages
message_lower = message.lower()
@@ -37,8 +39,8 @@ def auto_response(message, snr, rssi, hop, message_from_id, channel_number, devi
# run the first command after sorting
bot_response = command_handler[cmds[0]['cmd']]()
# wait a 700ms to avoid message collision from lora-ack
time.sleep(0.7)
# wait a responseDelay to avoid message collision from lora-ack
time.sleep(responseDelay)
return bot_response
@@ -252,8 +254,8 @@ def onReceive(packet, interface):
# repeat the message on the other device
if repeater_enabled and interface2_enabled:
# wait a 700ms to avoid message collision from lora-ack.
time.sleep(0.7)
# wait a responseDelay to avoid message collision from lora-ack.
time.sleep(responseDelay)
rMsg = (f"{message_string} From:{get_name_from_number(message_from_id, 'short', rxNode)}")
# if channel found in the repeater list repeat the message
if str(channel_number) in repeater_channels: