Compare commits

...

138 Commits

Author SHA1 Message Date
SpudGunMan 71733de05f enhance DM/Channel logic for llm
fix logic for DM or Channel Sending
2024-09-10 12:59:00 -07:00
SpudGunMan d5e48a3e36 Update README.md 2024-09-10 00:56:16 -07:00
SpudGunMan e36755a21d Update llm.py
fix typo
2024-09-10 00:52:20 -07:00
SpudGunMan 9620164884 Update locationdata.py
enhance for LLM
2024-09-10 00:46:43 -07:00
SpudGunMan 711844cc83 enhanceMessages
fixes a few things I didnt wrap up and also enhances this suggestion https://github.com/SpudGunMan/meshing-around/issues/59
2024-09-10 00:46:32 -07:00
SpudGunMan 6eb82b26a7 Update mesh_bot.py 2024-09-09 23:27:12 -07:00
SpudGunMan b12cf6219a responseDelay 2024-09-06 09:31:30 -07:00
SpudGunMan 7e46305277 responseDelay 2024-09-06 09:30:07 -07:00
SpudGunMan 819c37bdec networkErrorFix 2024-09-05 23:01:46 -07:00
SpudGunMan c80690d66e Update README.md 2024-09-05 09:38:56 -07:00
SpudGunMan 084f879537 Update settings.py 2024-09-05 09:09:38 -07:00
SpudGunMan 7afd6bbbe9 Update README.md 2024-09-05 00:54:32 -07:00
SpudGunMan 46641e8a86 Update README.md 2024-09-05 00:51:01 -07:00
SpudGunMan 010b386ce1 Update README.md 2024-09-05 00:43:02 -07:00
SpudGunMan a73b320715 Update llm.py 2024-09-05 00:22:33 -07:00
SpudGunMan 5f822a6230 Update mesh_bot.py 2024-09-04 22:27:15 -07:00
SpudGunMan 024fac90cd Update install.sh 2024-09-04 19:50:49 -07:00
SpudGunMan 133bc36cca Update install.sh 2024-09-04 19:22:21 -07:00
SpudGunMan 51a7ff2820 checkPip 2024-09-04 19:17:37 -07:00
SpudGunMan bc96f8df49 installOllama 2024-09-04 18:42:04 -07:00
Kelly 979f197476 Merge pull request #57 from SpudGunMan/llmLocationAware
LLM location aware enhancement
2024-09-04 17:18:03 -07:00
SpudGunMan 1677b69363 comments# 2024-09-04 15:10:38 -07:00
SpudGunMan d627f694df typo 2024-09-04 15:05:15 -07:00
SpudGunMan 4c52cba21f SpErr 2024-09-04 15:00:44 -07:00
SpudGunMan 597fdd1695 Update llm.py 2024-09-04 14:53:33 -07:00
SpudGunMan 9031704b9b Update llm.py 2024-09-04 14:51:50 -07:00
SpudGunMan 510a5c5007 Update llm.py 2024-09-04 14:42:23 -07:00
SpudGunMan 469e76c50b Update llm.py 2024-09-04 13:14:28 -07:00
SpudGunMan f6c6c58c17 Update README.md 2024-09-04 11:06:09 -07:00
SpudGunMan e546866f78 Update llm.py 2024-09-04 09:40:57 -07:00
SpudGunMan 081566b5d9 lower value to speed up query 2024-09-04 09:40:04 -07:00
SpudGunMan ec078666ae saveSomeAPIcalls 2024-09-04 00:50:57 -07:00
SpudGunMan 1ce394c7a1 Update for LLM 2024-09-04 00:20:06 -07:00
SpudGunMan 2fc3930b43 Update mesh_bot.py 2024-09-03 23:22:02 -07:00
SpudGunMan 9fa9da5e74 Update mesh_bot.py 2024-09-03 23:20:23 -07:00
SpudGunMan d6ad0b5e94 Update mesh_bot.py 2024-09-03 23:20:13 -07:00
SpudGunMan 15dc50804f Update mesh_bot.py 2024-09-03 23:17:34 -07:00
SpudGunMan 63c3e35064 Update llm.py 2024-09-03 23:12:32 -07:00
SpudGunMan 297930c4d1 Update llm.py 2024-09-03 23:09:45 -07:00
SpudGunMan 098c344047 Update llm.py 2024-09-03 23:08:37 -07:00
SpudGunMan 4f74677d14 Update mesh_bot.py 2024-09-03 23:05:34 -07:00
SpudGunMan 0869b19408 addTimeAware
include the current date in the awareness of location
2024-09-03 23:05:00 -07:00
SpudGunMan 9b02611700 LocationAware 2024-09-03 23:02:04 -07:00
SpudGunMan 5daa71e6c1 llmLocationAware
enhance with local data to the AI
2024-09-03 22:52:27 -07:00
SpudGunMan aa5f2f66f8 Update llm.py 2024-09-03 21:10:11 -07:00
SpudGunMan 92d04f81c3 contextFromGoogle 2024-09-03 21:08:06 -07:00
SpudGunMan 5d53db4211 enhance 2024-09-03 17:13:04 -07:00
SpudGunMan eb3bbdd3c5 Update llm.py 2024-09-03 00:48:06 -07:00
SpudGunMan 1ac816ca37 Update README.md 2024-09-03 00:42:38 -07:00
SpudGunMan 33cf18cde5 enhance wiki 2024-09-03 00:29:41 -07:00
SpudGunMan 0c0d53dd78 Update README.md 2024-09-02 23:25:53 -07:00
Kelly 1959ee7560 Merge pull request #53 from mrpatrick1991/docker
Docker
2024-09-02 23:22:51 -07:00
Matthew Patrick ee13401b5a Update config.template
reset to be identical to main branch
2024-09-02 12:37:30 -06:00
Matthew Patrick 78b1cf4af5 edit docs and make dockerfile use config.ini not config.template 2024-09-02 12:35:44 -06:00
Matthew Patrick 0599260e31 created docker file
docker file and entry point script which copies the values in config.template to the container.
2024-09-02 12:18:20 -06:00
SpudGunMan 08dd921088 gemma2:2b 2024-09-02 11:09:03 -07:00
SpudGunMan e66e938d7d Update README.md 2024-09-02 11:04:23 -07:00
SpudGunMan b5b7d2a9d2 Update llm.py 2024-09-02 10:58:13 -07:00
SpudGunMan 46298d555b enhance 2024-09-02 10:47:39 -07:00
SpudGunMan 8fb34b5fde Update config.template 2024-09-02 10:46:31 -07:00
SpudGunMan 28f8986837 Update README.md 2024-09-02 10:46:12 -07:00
SpudGunMan e968173f61 Update pong_bot.py 2024-09-01 21:53:01 -07:00
SpudGunMan f703a8868b Update mesh_bot.py 2024-09-01 21:51:21 -07:00
SpudGunMan 0a29e5f156 Update mesh_bot.py 2024-09-01 11:16:01 -07:00
SpudGunMan c5c28ee042 Update llm.py 2024-09-01 10:57:44 -07:00
SpudGunMan 44ca43399d Update config.template 2024-09-01 09:01:49 -07:00
SpudGunMan 13a47d822d Update config.template 2024-09-01 09:01:00 -07:00
SpudGunMan 5621cd90bb Update config.template 2024-09-01 09:00:44 -07:00
SpudGunMan 9f7055ffd2 model to settings for LLM 2024-09-01 08:59:40 -07:00
SpudGunMan 37a9fc2eb0 Update system.py 2024-09-01 01:12:52 -07:00
SpudGunMan 923325874c Update README.md 2024-09-01 01:10:31 -07:00
SpudGunMan 7ca0c4d744 Update README.md 2024-09-01 01:10:02 -07:00
Kelly a584a71429 Merge pull request #52 from SpudGunMan/llm
Ollama Module
2024-09-01 01:06:44 -07:00
SpudGunMan 70f47635b4 Update system.py 2024-09-01 01:04:47 -07:00
SpudGunMan 8e35d77e07 Update system.py 2024-09-01 01:00:33 -07:00
SpudGunMan 7024f2d472 Update system.py 2024-09-01 00:58:52 -07:00
SpudGunMan 7e2dd4c7ff Update mesh_bot.py 2024-09-01 00:55:34 -07:00
SpudGunMan f20d83ca8c Update README.md 2024-09-01 00:48:45 -07:00
SpudGunMan f31f920137 Update system.py 2024-09-01 00:43:20 -07:00
SpudGunMan 0f428438a3 Update mesh_bot.py 2024-09-01 00:28:28 -07:00
SpudGunMan b7882b0322 Update mesh_bot.py 2024-09-01 00:17:11 -07:00
SpudGunMan 3a417a9281 Update mesh_bot.py 2024-09-01 00:11:37 -07:00
SpudGunMan 748085c2be Update mesh_bot.py 2024-09-01 00:09:51 -07:00
SpudGunMan 6a3f56f95f enhance 2024-08-31 23:56:55 -07:00
SpudGunMan f6d6fb7185 enhance 2024-08-31 23:55:33 -07:00
SpudGunMan 7865263c1c Update mesh_bot.py 2024-08-31 23:46:12 -07:00
SpudGunMan 2cf51d5a09 Update system.py 2024-08-31 23:37:23 -07:00
SpudGunMan f993be950f LLM module 2024-08-31 23:35:03 -07:00
SpudGunMan 52c4c49bab enhance 2024-08-31 23:29:41 -07:00
SpudGunMan 60fdc7b7ea Update system.py 2024-08-31 22:57:37 -07:00
SpudGunMan a330cff3e5 Update system.py 2024-08-31 22:56:05 -07:00
SpudGunMan 9ffbac7420 Update system.py
random fix
2024-08-31 22:55:12 -07:00
SpudGunMan 7909707894 config enable llm 2024-08-31 22:41:43 -07:00
SpudGunMan 8d8014b157 Update bbstools.py 2024-08-31 22:20:27 -07:00
SpudGunMan a459b7a393 R&R 2024-08-31 22:11:39 -07:00
SpudGunMan 7d405dc0c2 Update settings.py 2024-08-29 02:42:17 -07:00
SpudGunMan 3decf8749b Update settings.py 2024-08-29 02:41:06 -07:00
SpudGunMan ba6869ec76 Update system.py 2024-08-28 23:31:32 -07:00
SpudGunMan 33cb70ea17 Update mesh_bot.py 2024-08-28 23:25:21 -07:00
SpudGunMan 69f1b7471f Update mesh_bot.py 2024-08-28 23:22:34 -07:00
SpudGunMan 76a7d1dba7 wikipedia
is this needed? who knows its meshing about!
2024-08-28 23:10:36 -07:00
SpudGunMan 9f0d3c9d3b Update README.md 2024-08-28 12:54:08 -07:00
SpudGunMan ff6292160f Update mesh_bot.py 2024-08-28 12:43:27 -07:00
SpudGunMan 52dcb7972f Update mesh_bot.py 2024-08-28 12:28:55 -07:00
SpudGunMan 10e2b0ee59 Update system.py 2024-08-27 20:41:35 -07:00
SpudGunMan 473eccbdea fix BLE 2024-08-27 20:31:00 -07:00
SpudGunMan f6b2e0a506 Update README.md 2024-08-27 19:27:07 -07:00
SpudGunMan 22e16db1f2 typos 2024-08-27 18:10:29 -07:00
SpudGunMan 2c71ca9b8a Update README.md 2024-08-27 18:07:11 -07:00
SpudGunMan 023189bca9 Update README.md 2024-08-27 17:19:18 -07:00
SpudGunMan 8447985b98 Update mesh_bot.py 2024-08-27 17:19:14 -07:00
SpudGunMan ad123dc93c schedule 2024-08-27 16:58:06 -07:00
SpudGunMan 22983133ee Update mesh_bot.py 2024-08-27 16:44:22 -07:00
SpudGunMan 60c4a885fd Revert "Update mesh_bot.py"
This reverts commit 95d6d7b7d5.
2024-08-27 16:39:16 -07:00
SpudGunMan 95d6d7b7d5 Update mesh_bot.py 2024-08-27 16:24:44 -07:00
SpudGunMan 37a86b7e2b Update system.py 2024-08-27 16:19:52 -07:00
SpudGunMan c4ef1251c9 enhance code with inital brodcaster
https://github.com/SpudGunMan/meshing-around/issues/51 referenced in this enhancement. this is partially implemented for now in code
2024-08-27 16:06:52 -07:00
SpudGunMan 9d7e42aa60 onDisconnect
add monitor for ondisconnect
2024-08-27 13:08:59 -07:00
SpudGunMan 8536e354ad Update locationdata.py 2024-08-23 22:29:08 -07:00
SpudGunMan e3faf676cd Update system.py 2024-08-23 22:24:04 -07:00
SpudGunMan 630e016805 Update locationdata.py 2024-08-23 22:24:00 -07:00
SpudGunMan 23b8b8135c Update system.py 2024-08-21 23:13:50 -07:00
SpudGunMan 7f0b4c079a Update README.md 2024-08-21 22:56:42 -07:00
SpudGunMan 47649cdedc Update system.py 2024-08-21 22:48:44 -07:00
SpudGunMan 7915798ca2 Update system.py 2024-08-21 22:46:58 -07:00
SpudGunMan 86cd88910a Update system.py 2024-08-21 22:13:21 -07:00
SpudGunMan 229ccc75f0 Update log.py 2024-08-21 22:00:56 -07:00
SpudGunMan 6f3e3a7957 Update system.py 2024-08-21 21:54:51 -07:00
SpudGunMan 1f1996b909 Update locationdata.py 2024-08-21 21:50:03 -07:00
SpudGunMan c2069da919 Update locationdata.py 2024-08-21 21:49:29 -07:00
SpudGunMan 458957ddfb ohmyglob 2024-08-21 21:45:12 -07:00
SpudGunMan 95c266fbf3 typo 2024-08-21 21:43:58 -07:00
SpudGunMan 4857940165 Update mesh_bot.py 2024-08-21 21:41:05 -07:00
SpudGunMan 4c780d09e7 fix 2024-08-21 21:40:17 -07:00
SpudGunMan d616867cd1 Update mesh_bot.py 2024-08-21 21:38:27 -07:00
SpudGunMan 909c4ad3bc Update locationdata.py 2024-08-21 21:31:58 -07:00
SpudGunMan 44eff643a9 Update locationdata.py 2024-08-21 21:27:32 -07:00
SpudGunMan a223e57690 Update system.py 2024-08-21 20:04:16 -07:00
14 changed files with 683 additions and 107 deletions
+18
View File
@@ -0,0 +1,18 @@
FROM python:3.10-slim
ENV PYTHONUNBUFFERED=1
RUN apt-get update && apt-get install -y gettext && rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY . /app
COPY requirements.txt .
RUN pip install -r requirements.txt
COPY . .
COPY config.ini /app/config.ini
COPY entrypoint.sh /app/entrypoint.sh
RUN chmod +x /app/entrypoint.sh
ENTRYPOINT ["/app/entrypoint.sh"]
+81 -12
View File
@@ -6,9 +6,11 @@ Random Mesh Scripts for Network Testing and BBS Activities for Use with [Meshtas
## mesh_bot.sh
The feature-rich bot requires the internet for full functionality. These responder bots will trap keywords like ping and respond to a DM (direct message) with pong! The script will also monitor the group channels for keywords to trap. You can also `Ping @Data to Echo` as an example.
Along with network testing, this bot has a lot of other fun features, like simple mail messaging you can leave for another device, and when that device is seen, it can send the mail as a DM.
Along with network testing, this bot has a lot of other fun features, like simple mail messaging you can leave for another device, and when that device is seen, it can send the mail as a DM. Or a scheduler to send weather or a reminder weekly for the VHF net.
The bot is also capable of using dual radio/nodes, so you can monitor two networks at the same time and send messages to nodes using the same `bbspost @nodeNumber #message` or `bbspost @nodeShportName #message` function. There is a small message board to fit in the constraints of Meshtastic for posting bulletin messages with `bbspost $subject #message`.
The bot is also capable of using dual radio/nodes, so you can monitor two networks at the same time and send messages to nodes using the same `bbspost @nodeNumber #message` or `bbspost @nodeShortName #message` function. There is a small message board to fit in the constraints of Meshtastic for posting bulletin messages with `bbspost $subject #message`.
Look up data using wiki results or interact with [Ollama](https://ollama.com) LLM AI see the [OllamaDocs](https://github.com/ollama/ollama/tree/main/docs) If Ollama is enabled you can DM the bot directly. The default model for mesh-bot which is currently `gemma2:2b`
The bot will report on anyone who is getting close to the configured lat/long, if in a remote location.
@@ -18,7 +20,9 @@ The bot can also be used to monitor a radio frequency and let you know when high
Any messages that are over 160 characters are chunked into 160 message bytes to help traverse hops, in testing, this keeps delivery success higher.
Full list of commands for the bot.
[Donate$](https://www.paypal.com/donate?token=ZpiU7zDh-AQDyK76nWmWPQLf04iOm-Iyr3f85lpubt37NWGRYtfe11UyC0LmY1wdcC20UubWo4Kec-_G) via PayPal if you like the project!
## Full list of commands for the bot
- Various solar details for radio propagation (spaceWeather module)
- `sun` and `moon` return info on rise and set local time
@@ -28,14 +32,16 @@ Full list of commands for the bot.
- `bbshelp` returns the following
- `bbslist` list the messages by ID and subject
- `bbsread` read a message example use: `bbsread #1`
- `bbspost` post a message to public board or send a DM example use: `bbspost $subject #message, or bbspost @nodeNumber #message or bbspost @nodeShportName #message`
- `bbspost` post a message to public board or send a DM example use: `bbspost $subject #message, or bbspost @nodeNumber #message or bbspost @nodeShortName #message`
- `bbsdelete` delete a message example use: `bbsdelete #4`
- Other functions
- `whereami` returns the address of location of sender if known
- `tide` returns the local tides, NOAA data source
- `wx` and `wxc` returns local weather forecast, (wxc is metric value), NOAA or Open Meteo for weather forcasting.
- `wx` and `wxc` returns local weather forecast, (wxc is metric value), NOAA or Open Meteo for weather forecasting.
- `wxa` and `wxalert` return NOAA alerts. Short title or expanded details
- `joke` tells a joke
- `wiki: ` will search wikipedia, return the first few sentences of first result if a match `wiki: lora radio`
- `askai` and `ask:` will ask Ollama LLM AI for a response `askai what temp do I cook chicken`
- `messages` Replay the last messages heard, like Store and Forward
- `motd` or to set the message `motd $New Message Of the day`
- `lheard` returns the last 5 heard nodes with SNR, can also use `sitrep`
@@ -51,13 +57,20 @@ The project is written on Linux on a Pi and should work anywhere [Meshtastic](ht
Clone the project with `git clone https://github.com/spudgunman/meshing-around`
code is under a lot of development, so check back often with `git pull`
Copy [config.template](config.template) to `config.ini` and edit for your needs.
`pip install -r requirements.txt`
Optionally:
- `install.sh` will automate optional venv and requirements installation.
- `launch.sh` will activate and launch the app in the venv if built.
For Docker:
Check you have serial port properly shared and the GPU if using LLM with [NVidia](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/docker-specialized.html)
- `git clone https://github.com/spudgunman/meshing-around`
- `cd meshing-around && docker build -t meshing-around`
- `docker run meshing-around`
### Configurations
Copy the [config.template](config.template) to `config.ini` and set the appropriate interface for your method (serial/ble/tcp). While BLE and TCP will work, they are not as reliable as serial connections. There is a watchdog to reconnect tcp if possible.
Copy the [config.template](config.template) to `config.ini` and set the appropriate interface for your method (serial/ble/tcp). While BLE and TCP will work, they are not as reliable as serial connections. There is a watchdog to reconnect tcp if possible. To get BLE mac `meshtastic --ble-scan` **NOTE** I have only tested with a single BLE device and the code is written to only have one interface be a BLE port
```
#config.ini
@@ -84,7 +97,7 @@ Setting the default channel is the channel that won't be spammed by the bot. It'
respond_by_dm_only = True
defaultChannel = 0
```
The weather forcasting defaults to NOAA but for outside the USA you can set UseMeteoWxAPI `True` to use a world weather API. The lat and lon are for defaults when a node has no location data to use.
The weather forecasting defaults to NOAA but for outside the USA you can set UseMeteoWxAPI `True` to use a world weather API. The lat and lon are for defaults when a node has no location data to use.
```
[location]
enabled = True
@@ -102,7 +115,7 @@ enabled = False
DadJokes = False
StoreForward = False
```
Sentry Bot detects anyone comeing close to the bot-node
Sentry Bot detects anyone coming close to the bot-node
```
# detect anyone close to the bot
SentryEnabled = True
@@ -131,8 +144,8 @@ A module allowing a Hamlib compatible radio to connect to the bot, when function
[radioMon]
enabled = False
rigControlServerAddress = localhost:4532
# channel to brodcast to can be 2,3
sigWatchBrodcastCh = 2
# channel to broadcast to can be 2,3
sigWatchBroadcastCh = 2
# minimum SNR as reported by radio via hamlib
signalDetectionThreshold = -10
# hold time for high SNR
@@ -141,8 +154,55 @@ signalHoldTime = 10
signalCooldown = 5
signalCycleLimit = 5
```
Ollama Settings, for Ollama to work the command line `ollama run 'model'` needs to work properly. Check that you have enough RAM and your GPU are working as expected. The default model for this project, is set to `gemma2:2b` (run `ollama pull gemma2:2b` on command line, to download and setup) however I have found gemma2:2b to be lighter, faster and seems better overall vs llama3.1 (`ollama pull llama3.1`)
- From the command terminal of your system with mesh-bot, download the default model for mesh-bot which is currently `ollama pull gemma2:2b`
Enable History, set via code readme Ollama Config in [Settings](https://github.com/SpudGunMan/meshing-around?tab=readme-ov-file#configurations) and [llm.py](https://github.com/SpudGunMan/meshing-around/blob/eb3bbdd3c5e0f16fe3c465bea30c781bd132d2d3/modules/llm.py#L12)
Tested models are `llama3.1, gemma2 (and variants), phi3.5, mistral` other models may not handle the template as well.
```
# Enable ollama LLM see more at https://ollama.com
ollama = True
# Ollama model to use (defaults to gemma2:2b)
ollamaModel = gemma2
#ollamaModel = llama3.1
```
also see llm.py for changing the defaults of
```
# LLM System Variables
llmEnableHistory = False # enable history for the LLM model to use in responses adds to compute time
llmContext_fromGoogle = True # enable context from google search results adds to compute time but really helps with responses accuracy
googleSearchResults = 3 # number of google search results to include in the context more results = more compute time
llm_history_limit = 6 # limit the history to 6 messages (come in pairs) more results = more compute time
```
Logging messages to disk or Syslog to disk uses the python native logging function. Take a look at the [/modules/log.py](/modules/log.py) you can set the file logger for syslog to INFO for example to not log DEBUG messages to file log, or modify the stdOut level.
```
[general]
# logging to file of the non Bot messages
LogMessagesToFile = True
# Logging of system messages to file
SyslogToFile = True
```
Example to log to disk only INFO and higher (ignore DEBUG)
```
*log.py
file_handler.setLevel(logging.INFO) # DEBUG used by default for system logs to disk example here shows INFO
```
The Scheduler is enabled in the [settings.py](modules/settings.py) by setting `scheduler_enabled = True` the actions and settings are via code only at this time. see [mesh_bot.py](mesh_bot.py) around line [425](https://github.com/SpudGunMan/meshing-around/blob/22983133ee4db3df34f66699f565e506de296197/mesh_bot.py#L425-L435) to edit schedule its most flexible to edit raw code right now. See https://schedule.readthedocs.io/en/stable/ for more.
```
# Send WX every Morning at 08:00 using handle_wxc function to channel 2 on device 1
#schedule.every().day.at("08:00").do(lambda: send_message(handle_wxc(0, 1, 'wx'), 2, 0, 1))
# Send a Net Starting Now Message Every Wednesday at 19:00 using send_message function to channel 2 on device 1
#schedule.every().wednesday.at("19:00").do(lambda: send_message("Net Starting Now", 2, 0, 1))
```
# requirements
Python 3.4 and likely higher is needed, developed on latest release.
Python 3.10 minimally is needed, developed on latest release.
The following can also be installed with `pip install -r requirements.txt` or using the install.sh script for venv and automation
@@ -160,6 +220,8 @@ pip install maidenhead
pip install beautifulsoup4
pip install dadjokes
pip install geopy
pip install schedule
pip install wikipedia
```
The following is needed for open-meteo use
```
@@ -167,6 +229,13 @@ pip install openmeteo_requests
pip install retry_requests
pip install numpy
```
The following is for the Ollama LLM
```
pip install langchain
pip install langchain-ollama
pip install ollama
pip install googlesearch-python
```
To enable emoji in the Debian console, install the fonts `sudo apt-get install fonts-noto-color-emoji`
@@ -176,6 +245,6 @@ I used ideas and snippets from other responder bots and want to call them out!
- https://github.com/pdxlocations/meshtastic-Python-Examples
- https://github.com/geoffwhittington/meshtastic-matrix-relay
GitHub user PiDiBi looking at test functions and other suggestions like wxc, CPU use, and alerting ideas
GitHub user mrpatrick1991 For Docker configs, PiDiBi looking at test functions and other suggestions like wxc, CPU use, and alerting ideas
Discord and Mesh user Cisien, and github Hailo1999, for testing and ideas!
+10 -5
View File
@@ -34,6 +34,12 @@ welcome_message = MeshBot, here for you like a friend who is not. Try sending: p
DadJokes = True
# enable or disable the Solar module
spaceWeather = True
# enable or disable the wikipedia search module
wikipedia = True
# Enable ollama LLM see more at https://ollama.com
ollama = False
# Ollama model to use (defaults to gemma2:2b)
# ollamaModel = llama3.1
# StoreForward Enabled and Limits
StoreForward = True
StoreLimit = 3
@@ -46,7 +52,6 @@ LogMessagesToFile = False
# Logging of system messages to file
SyslogToFile = False
[sentry]
# detect anyone close to the bot
SentryEnabled = True
@@ -75,7 +80,7 @@ lon = -123.0
NOAAforecastDuration = 4
# number of weather alerts to display
NOAAalertCount = 2
# use Open-Meteo API for weather data not NOAA usefull for non US locations
# use Open-Meteo API for weather data not NOAA useful for non US locations
UseMeteoWxAPI = False
# Default to metric units rather than imperial
useMetric = False
@@ -93,12 +98,12 @@ repeater_channels =
# using Hamlib rig control will monitor and alert on channel use
enabled = False
rigControlServerAddress = localhost:4532
# brodcast to all nodes on the channel can alsp be = 2,3
sigWatchBrodcastCh = 2
# broadcast to all nodes on the channel can also be = 2,3
sigWatchBroadcastCh = 2
# minimum SNR as reported by radio via hamlib
signalDetectionThreshold = -10
# hold time for high SNR
signalHoldTime = 10
# the following are combined to reset the monitor
signalCooldown = 5
signalCycleLimit = 5
signalCycleLimit = 5
+6
View File
@@ -0,0 +1,6 @@
#!/bin/bash
# Substitute environment variables in the config file
envsubst < /app/config.ini > /app/config.tmp && mv /app/config.tmp /app/config.ini
exec python /app/mesh_bot.py
+32
View File
@@ -3,10 +3,23 @@
# install.sh
cd "$(dirname "$0")"
printf "\nMeshing Around Installer\n"
# add user to groups for serial access
printf "\nAdding user to dialout and tty groups for serial access\n"
sudo usermod -a -G dialout $USER
sudo usermod -a -G tty $USER
# check for pip
if ! command -v pip &> /dev/null
then
printf "pip not found, please install pip with your OS\n"
sudo apt-get install python3-pip
else
printf "python pip found\n"
fi
# generate config file, check if it exists
if [ -f config.ini ]; then
printf "\nConfig file already exists, moving to backup config.old\n"
@@ -97,6 +110,25 @@ if [ $bot == "n" ]; then
fi
fi
printf "\nOptionally if you want to install the LLM Ollama components we will execute the following commands\n"
printf "\ncurl -fsSL https://ollama.com/install.sh | sh\n"
# ask if the user wants to install the LLM Ollama components
echo "Do you want to install the LLM Ollama components? (y/n)"
read ollama
if [ $ollama == "y" ]; then
curl -fsSL https://ollama.com/install.sh | sh
# ask if want to install gemma2:2b
printf "\n Ollama install done now we can install the Gemma2:2b components, multi GB download\n"
echo "Do you want to install the Gemma2:2b components? (y/n)"
read gemma
if [ $gemma == "y" ]; then
ollama pull gemma2:2b
fi
fi
printf "\nGoodbye!"
exit 0
+195 -17
View File
@@ -8,6 +8,8 @@ from pubsub import pub # pip install pubsub
from modules.log import *
from modules.system import *
responseDelay = 0.7 # delay in seconds for response to avoid message collision
def auto_response(message, snr, rssi, hop, message_from_id, channel_number, deviceID):
#Auto response to messages
message_lower = message.lower()
@@ -18,10 +20,13 @@ def auto_response(message, snr, rssi, hop, message_from_id, channel_number, devi
"pong": lambda: "🏓PING!!",
"motd": lambda: handle_motd(message),
"bbshelp": bbs_help,
"wxalert": lambda: handle_wxalert(message_from_id, deviceID),
"wxa": lambda: handle_wxalert(message_from_id, deviceID),
"wxalert": lambda: handle_wxalert(message_from_id, deviceID, message),
"wxa": lambda: handle_wxalert(message_from_id, deviceID, message),
"wxc": lambda: handle_wxc(message_from_id, deviceID, 'wxc'),
"wx": lambda: handle_wxc(message_from_id, deviceID, 'wx'),
"wiki:": lambda: handle_wiki(message),
"ask:": lambda: handle_llm(message_from_id, channel_number, deviceID, message, publicChannel),
"askai": lambda: handle_llm(message_from_id, channel_number, deviceID, message, publicChannel),
"joke": tell_joke,
"bbslist": bbs_list_messages,
"bbspost": lambda: handle_bbspost(message, message_from_id, deviceID),
@@ -54,8 +59,8 @@ def auto_response(message, snr, rssi, hop, message_from_id, channel_number, devi
# run the first command after sorting
bot_response = command_handler[cmds[0]['cmd']]()
# wait a 700ms to avoid message collision from lora-ack
time.sleep(0.7)
# wait a responseDelay to avoid message collision from lora-ack
time.sleep(responseDelay)
return bot_response
@@ -80,14 +85,111 @@ def handle_motd(message):
else:
return MOTD
def handle_wxalert(message_from_id, deviceID):
def handle_wxalert(message_from_id, deviceID, message):
if use_meteo_wxApi:
return "wxalert is not supported"
else:
location = get_node_location(message_from_id, deviceID)
weatherAlert = getActiveWeatherAlertsDetail(str(location[0]), str(location[1]))
if "wxalert" in message:
# Detailed weather alert
weatherAlert = getActiveWeatherAlertsDetail(str(location[0]), str(location[1]))
else:
weatherAlert = getWeatherAlerts(str(location[0]), str(location[1]))
return weatherAlert
def handle_wiki(message):
# location = get_node_location(message_from_id, deviceID)
if "wiki:" in message.lower():
search = message.split(":")[1]
search = search.strip()
return get_wikipedia_summary(search)
else:
return "Please add a search term example:wiki: travelling gnome"
llmRunCounter = 0
llmTotalRuntime = []
llmLocationTable = {}
def handle_llm(message_from_id, channel_number, deviceID, message, publicChannel):
global llmRunCounter, llmTotalRuntime, llmLocationTable
if location_enabled:
location = get_node_location(message_from_id, deviceID)
# if message_from_id is in the llmLocationTable use the location from the table to save on API calls
if message_from_id in llmLocationTable:
location_name = llmLocationTable[message_from_id]
else:
location_name = where_am_i(str(location[0]), str(location[1]), short = True)
if NO_DATA_NOGPS in location_name:
location_name = "no location provided "
else:
location_name = "no location provided"
if "ask:" in message.lower():
user_input = message.split(":")[1]
elif "askai" in message.lower():
user_input = message.replace("askai", "")
else:
# likely a DM
user_input = message
# if the message_from_id is not in the llmLocationTable send the welcome message
if not message_from_id in llmLocationTable:
if (channel_number == publicChannel and antiSpam) or useDMForResponse:
# send via DM
send_message(welcome_message, channel_number, message_from_id, deviceID)
time.sleep(responseDelay)
else:
# send via channel
send_message(welcome_message, channel_number, 0, deviceID)
time.sleep(responseDelay)
# add the node to the llmLocationTable for future use
llmLocationTable[message_from_id] = location_name
user_input = user_input.strip()
if len(user_input) < 1:
return "Please ask a question"
# information for the user on how long the query will take on average
if llmRunCounter > 0:
averageRuntime = sum(llmTotalRuntime) / len(llmTotalRuntime)
if averageRuntime > 25:
msg = f"Please wait, average query time is: {int(averageRuntime)} seconds"
if (channel_number == publicChannel and antiSpam) or useDMForResponse:
# send via DM
send_message(msg, channel_number, message_from_id, deviceID)
time.sleep(responseDelay)
else:
# send via channel
send_message(msg, channel_number, 0, deviceID)
time.sleep(responseDelay)
else:
msg = "Please wait, response could take 30+ seconds. Fund the SysOp's GPU budget!"
if (channel_number == publicChannel and antiSpam) or useDMForResponse:
# send via DM
send_message(msg, channel_number, message_from_id, deviceID)
time.sleep(responseDelay)
else:
# send via channel
send_message(msg, channel_number, 0, deviceID)
time.sleep(responseDelay)
start = time.time()
#response = asyncio.run(llm_query(user_input, message_from_id))
response = llm_query(user_input, message_from_id, location_name)
# handle the runtime counter
end = time.time()
llmRunCounter += 1
llmTotalRuntime.append(end - start)
return response
def handle_wxc(message_from_id, deviceID, cmd):
location = get_node_location(message_from_id, deviceID)
if use_meteo_wxApi and not "wxc" in cmd and not use_metric:
@@ -196,10 +298,37 @@ def handle_testing(hop, snr, rssi):
else:
return "🏓Testing 1,2,3 " + hop
def onDisconnect(interface):
global retry_int1, retry_int2
rxType = type(interface).__name__
if rxType == 'SerialInterface':
rxInterface = interface.__dict__.get('devPath', 'unknown')
logger.critical(f"System: Lost Connection to Device {rxInterface}")
if port1 in rxInterface:
retry_int1 = True
elif interface2_enabled and port2 in rxInterface:
retry_int2 = True
if rxType == 'TCPInterface':
rxHost = interface.__dict__.get('hostname', 'unknown')
logger.critical(f"System: Lost Connection to Device {rxHost}")
if hostname1 in rxHost and interface1_type == 'tcp':
retry_int1 = True
elif interface2_enabled and hostname2 in rxHost and interface2_type == 'tcp':
retry_int2 = True
if rxType == 'BLEInterface':
logger.critical(f"System: Lost Connection to Device BLE")
if interface1_type == 'ble':
retry_int1 = True
elif interface2_enabled and interface2_type == 'ble':
retry_int2 = True
def onReceive(packet, interface):
# extract interface details from interface object
rxType = type(interface).__name__
rxNode = 0
#logger.debug(f"System: Packet Received on {rxType}")
# Debug print the interface object
#for item in interface.__dict__.items(): print (item)
@@ -217,6 +346,12 @@ def onReceive(packet, interface):
elif interface2_enabled and hostname2 in rxHost and interface2_type == 'tcp':
rxNode = 2
if rxType == 'BLEInterface':
if interface1_type == 'ble':
rxNode = 1
elif interface2_enabled and interface2_type == 'ble':
rxNode = 2
# Debug print the packet for debugging
#print(f"Packet Received\n {packet} \n END of packet \n")
message_from_id = 0
@@ -232,14 +367,16 @@ def onReceive(packet, interface):
msg = bbs_check_dm(message_from_id)
if msg:
# wait a 700ms to avoid message collision from lora-ack.
time.sleep(0.7)
# wait a responseDelay to avoid message collision from lora-ack.
time.sleep(responseDelay)
logger.info(f"System: BBS DM Found: {msg[1]} For: {get_name_from_number(message_from_id, 'long', rxNode)}")
message = "Mail: " + msg[1] + " From: " + get_name_from_number(msg[2], 'long', rxNode)
bbs_delete_dm(msg[0], msg[1])
send_message(message, channel_number, message_from_id, rxNode)
# check for a message packet and process it
snr = 0
rssi = 0
try:
if 'decoded' in packet and packet['decoded']['portnum'] == 'TEXT_MESSAGE_APP':
message_bytes = packet['decoded']['payload']
@@ -297,10 +434,16 @@ def onReceive(packet, interface):
"From: " + CustomFormatter.white + f"{get_name_from_number(message_from_id, 'long', rxNode)}")
# respond with DM
send_message(auto_response(message_string, snr, rssi, hop, message_from_id, channel_number, rxNode), channel_number, message_from_id, rxNode)
else:
# respond with welcome message on DM
logger.warning(f"Device:{rxNode} Ignoring DM: {message_string} From: {get_name_from_number(message_from_id, 'long', rxNode)}")
send_message(welcome_message, channel_number, message_from_id, rxNode)
else:
if llm_enabled:
llm = handle_llm(message_from_id, channel_number, rxNode, message_string, publicChannel)
send_message(llm, channel_number, message_from_id, rxNode)
else:
# respond with welcome message on DM
logger.warning(f"Device:{rxNode} Ignoring DM: {message_string} From: {get_name_from_number(message_from_id, 'long', rxNode)}")
send_message(welcome_message, channel_number, message_from_id, rxNode)
# log the message to the message log
msgLogger.info(f"Device:{rxNode} Channel:{channel_number} | {get_name_from_number(message_from_id, 'long', rxNode)} | " + message_string.replace('\n', '-nl-'))
else:
# message is on a channel
@@ -344,8 +487,8 @@ def onReceive(packet, interface):
# repeat the message on the other device
if repeater_enabled and interface2_enabled:
# wait a 700ms to avoid message collision from lora-ack.
time.sleep(0.7)
# wait a responseDelay to avoid message collision from lora-ack.
time.sleep(responseDelay)
rMsg = (f"{message_string} From:{get_name_from_number(message_from_id, 'short', rxNode)}")
# if channel found in the repeater list repeat the message
if str(channel_number) in repeater_channels:
@@ -362,8 +505,13 @@ def onReceive(packet, interface):
async def start_rx():
print (CustomFormatter.bold_white + f"\nMeshtastic Autoresponder Bot CTL+C to exit\n" + CustomFormatter.reset)
if llm_enabled:
logger.debug(f"System: Ollama LLM Enabled, loading model {llmModel} please wait")
llm_query(" ", myNodeNum1)
logger.debug(f"System: LLM model {llmModel} loaded")
# Start the receive subscriber using pubsub via meshtastic library
pub.subscribe(onReceive, 'meshtastic.receive')
pub.subscribe(onDisconnect, 'meshtastic.connection.lost')
logger.info(f"System: Autoresponder Started for Device1 {get_name_from_number(myNodeNum1, 'long', 1)},"
f"{get_name_from_number(myNodeNum1, 'short', 1)}. NodeID: {myNodeNum1}, {decimal_to_hex(myNodeNum1)}")
if interface2_enabled:
@@ -384,6 +532,8 @@ async def start_rx():
logger.debug(f"System: Location Telemetry Enabled using NOAA API")
if dad_jokes_enabled:
logger.debug(f"System: Dad Jokes Enabled!")
if wikipedia_enabled:
logger.debug(f"System: Wikipedia search Enabled")
if motd_enabled:
logger.debug(f"System: MOTD Enabled using {MOTD}")
if sentry_enabled:
@@ -394,8 +544,36 @@ async def start_rx():
logger.debug(f"System: Respond by DM only")
if repeater_enabled and interface2_enabled:
logger.debug(f"System: Repeater Enabled for Channels: {repeater_channels}")
if radio_dectection_enabled:
logger.debug(f"System: Radio Detection Enabled using rigctld at {rigControlServerAddress} brodcasting to channels: {sigWatchBrodcastCh} for {get_freq_common_name(get_hamlib('f'))}")
if radio_detection_enabled:
logger.debug(f"System: Radio Detection Enabled using rigctld at {rigControlServerAddress} brodcasting to channels: {sigWatchBroadcastCh} for {get_freq_common_name(get_hamlib('f'))}")
if scheduler_enabled:
# Examples of using the scheduler, Times here are in 24hr format
# https://schedule.readthedocs.io/en/stable/
# Good Morning Every day at 09:00 using send_message function to channel 2 on device 1
#schedule.every().day.at("09:00").do(lambda: send_message("Good Morning", 2, 0, 1))
# Send WX every Morning at 08:00 using handle_wxc function to channel 2 on device 1
#schedule.every().day.at("08:00").do(lambda: send_message(handle_wxc(0, 1, 'wx'), 2, 0, 1))
# Send a Net Starting Now Message Every Wednesday at 19:00 using send_message function to channel 2 on device 1
#schedule.every().wednesday.at("19:00").do(lambda: send_message("Net Starting Now", 2, 0, 1))
# Send a Welcome Notice for group on the 15th and 25th of the month at 12:00 using send_message function to channel 2 on device 1
#schedule.every().day.at("12:00").do(lambda: send_message("Welcome to the group", 2, 0, 1)).day(15, 25)
# Send a joke every 6 hours using tell_joke function to channel 2 on device 1
#schedule.every(6).hours.do(lambda: send_message(tell_joke(), 2, 0, 1))
# Send the Welcome Message every other day at 08:00 using send_message function to channel 2 on device 1
#schedule.every(2).days.at("08:00").do(lambda: send_message(welcome_message, 2, 0, 1))
# Send the MOTD every day at 13:00 using send_message function to channel 2 on device 1
#schedule.every().day.at("13:00").do(lambda: send_message(MOTD, 2, 0, 1))
#
logger.debug("System: Starting the broadcast scheduler")
await BroadcastScheduler()
# here we go loopty loo
while True:
@@ -406,7 +584,7 @@ async def start_rx():
async def main():
meshRxTask = asyncio.create_task(start_rx())
watchdogTask = asyncio.create_task(watchdog())
if radio_dectection_enabled:
if radio_detection_enabled:
hamlibTask = asyncio.create_task(handleSignalWatcher())
await asyncio.wait([meshRxTask, watchdogTask, hamlibTask])
else:
+3 -3
View File
@@ -18,14 +18,14 @@ def load_bbsdb():
bbs_messages = pickle.load(f)
except:
bbs_messages = [[1, "Welcome to meshBBS", "Welcome to the BBS, please post a message!",0]]
logger.debug("\nSystem: Creating new bbsdb.pkl")
logger.debug("System: Creating new bbsdb.pkl")
with open('bbsdb.pkl', 'wb') as f:
pickle.dump(bbs_messages, f)
def save_bbsdb():
global bbs_messages
# save the bbs messages to the database file
logger.debug("System: Saving bbsdb.pkl\n")
logger.debug("System: Saving bbsdb.pkl")
with open('bbsdb.pkl', 'wb') as f:
pickle.dump(bbs_messages, f)
@@ -112,7 +112,7 @@ def load_bbsdm():
bbs_dm = pickle.load(f)
except:
bbs_dm = [[1234567890, "Message", 1234567890]]
logger.debug("\nSystem: Creating new bbsdm.pkl")
logger.debug("System: Creating new bbsdm.pkl")
with open('bbsdm.pkl', 'wb') as f:
pickle.dump(bbs_dm, f)
+151
View File
@@ -0,0 +1,151 @@
#!/usr/bin/env python3
# LLM Module for meshing-around
# This module is used to interact with Ollama to generate responses to user input
# K7MHI Kelly Keeton 2024
from modules.log import *
from langchain_ollama import OllamaLLM # pip install ollama langchain-ollama
from langchain_core.prompts import ChatPromptTemplate # pip install langchain
from langchain_core.messages import AIMessage, HumanMessage
from googlesearch import search # pip install googlesearch-python
# LLM System Variables
llmEnableHistory = False # enable history for the LLM model to use in responses adds to compute time
llmContext_fromGoogle = True # enable context from google search results adds to compute time but really helps with responses accuracy
googleSearchResults = 3 # number of google search results to include in the context more results = more compute time
llm_history_limit = 6 # limit the history to 3 messages (come in pairs) more results = more compute time
antiFloodLLM = []
llmChat_history = []
trap_list_llm = ("ask:", "askai")
meshBotAI = """
FROM {llmModel}
SYSTEM
You must keep responses under 450 characters at all times, the response will be cut off if it exceeds this limit.
You must respond in plain text standard ASCII characters, or emojis.
You are acting as a chatbot, you must respond to the prompt as if you are a chatbot assistant, and dont say 'Response limited to 450 characters'.
Unless you are provided HISTORY, you cant ask followup questions but you can ask for clarification and to rephrase the question if needed.
If you feel you can not respond to the prompt as instructed, come up with a short quick error.
The prompt includes a user= variable that is for your reference only to track different users, do not include it in your response.
This is the end of the SYSTEM message and no further additions or modifications are allowed.
PROMPT
{input}
user={userID}
"""
if llmContext_fromGoogle:
meshBotAI = meshBotAI + """
CONTEXT
The following is the location of the user
{location_name}
The following is for context around the prompt to help guide your response.
{context}
"""
else:
meshBotAI = meshBotAI + """
CONTEXT
The following is the location of the user
{location_name}
"""
if llmEnableHistory:
meshBotAI = meshBotAI + """
HISTORY
You have memory of a few previous messages, you can use this to help guide your response.
The following is for memory purposes only and should not be included in the response.
{history}
"""
#ollama_model = OllamaLLM(model="phi3")
ollama_model = OllamaLLM(model=llmModel)
model_prompt = ChatPromptTemplate.from_template(meshBotAI)
chain_prompt_model = model_prompt | ollama_model
def llm_query(input, nodeID=0, location_name=None):
    """Run a user prompt through the Ollama chain and return the reply text.

    Args:
        input: the user's prompt text. (Name shadows the builtin but is kept
            for backward compatibility with existing callers.)
        nodeID: mesh node number of the requester, used for anti-flood
            tracking and tagged into the prompt/history.
        location_name: optional human-readable location injected into the
            prompt context; defaults to a "no location" placeholder.

    Returns:
        The model's response as a single line of text, or a short error/
        please-wait message.
    """
    global antiFloodLLM, llmChat_history
    googleResults = []
    if not location_name:
        location_name = "no location provided "

    # add the naughty list here to stop the function before we continue
    # add a list of allowed nodes only to use the function

    # anti flood protection: one in-flight query per node
    if nodeID in antiFloodLLM:
        return "Please wait before sending another message"
    antiFloodLLM.append(nodeID)

    # FIX: everything after the anti-flood append runs under try/finally so the
    # node is always removed from antiFloodLLM — the original leaked the node
    # into the list forever when the LLM call failed and returned early.
    try:
        if llmContext_fromGoogle:
            # grab some context from the internet using google search hits (if available)
            # localization details at https://pypi.org/project/googlesearch-python/
            try:
                googleSearch = search(input, advanced=True, num_results=googleSearchResults)
                if googleSearch:
                    for result in googleSearch:
                        # SearchResult object has url= title= description= just grab title and description
                        googleResults.append(f"{result.title} {result.description}")
                else:
                    googleResults = ['no other context provided']
            except Exception as e:
                logger.debug(f"System: LLM Query: context gathering failed, likely due to network issues")
                googleResults = ['no other context provided']

        if googleResults:
            logger.debug(f"System: LLM Query: {input} From:{nodeID} with context from google")
        else:
            logger.debug(f"System: LLM Query: {input} From:{nodeID}")

        response = ""
        result = ""
        location_name += f" at the current time of {datetime.now().strftime('%Y-%m-%d %H:%M:%S %Z')}"
        try:
            result = chain_prompt_model.invoke({"input": input, "llmModel": llmModel, "userID": nodeID, \
                "history": llmChat_history, "context": googleResults, "location_name": location_name})
            #logger.debug(f"System: LLM Response: " + result.strip().replace('\n', ' '))
        except Exception as e:
            logger.warning(f"System: LLM failure: {e}")
            return "I am having trouble processing your request, please try again later."

        response = result.strip().replace('\n', ' ')

        # Store history of the conversation, with limit to prevent template growing too large causing speed issues
        if len(llmChat_history) > llm_history_limit:
            # FIX: drop the oldest Human/AI *pair*. The original popped index 0
            # then index 1, which removed entries 0 and 2 and split a pair.
            llmChat_history.pop(0)
            llmChat_history.pop(0)
        inputWithUserID = input + f" user={nodeID}"
        llmChat_history.append(HumanMessage(content=inputWithUserID))
        llmChat_history.append(AIMessage(content=response))

        return response
    finally:
        # done with the query (or it failed) — always clear the anti-flood entry
        antiFloodLLM.remove(nodeID)
# import subprocess
# def get_ollama_cpu():
# try:
# psOutput = subprocess.run(['ollama', 'ps'], capture_output=True, text=True)
# if "GPU" in psOutput.stdout:
# logger.debug(f"System: Ollama process with GPU")
# else:
# logger.debug(f"System: Ollama process with CPU, query time will be slower")
# except Exception as e:
# logger.debug(f"System: Ollama process not found, {e}")
# return False
+37 -22
View File
@@ -11,7 +11,7 @@ from modules.log import *
trap_list_location = ("whereami", "tide", "moon", "wx", "wxc", "wxa", "wxalert")
def where_am_i(lat=0, lon=0):
def where_am_i(lat=0, lon=0, short=False):
whereIam = ""
grid = mh.to_maiden(float(lat), float(lon))
@@ -22,22 +22,33 @@ def where_am_i(lat=0, lon=0):
# initialize Nominatim API
geolocator = Nominatim(user_agent="mesh-bot")
# Nomatim API call to get address
if float(lat) == latitudeValue and float(lon) == longitudeValue:
# redacted address when no GPS and using default location
location = geolocator.reverse(lat + ", " + lon)
address = location.raw['address']
address_components = ['city', 'state', 'postcode', 'county', 'country']
whereIam += ' '.join([address.get(component, '') for component in address_components if component in address])
whereIam += " Grid: " + grid
return whereIam
else:
location = geolocator.reverse(lat + ", " + lon)
address = location.raw['address']
address_components = ['house_number', 'road', 'city', 'state', 'postcode', 'county', 'country']
whereIam += ' '.join([address.get(component, '') for component in address_components if component in address])
whereIam += " Grid: " + grid
try:
# Nomatim API call to get address
if short:
location = geolocator.reverse(lat + ", " + lon)
address = location.raw['address']
address_components = ['city', 'state', 'county', 'country']
whereIam = f"City: {address.get('city', '')}. State: {address.get('state', '')}. County: {address.get('county', '')}. Country: {address.get('country', '')}."
return whereIam
if float(lat) == latitudeValue and float(lon) == longitudeValue:
# redacted address when no GPS and using default location
location = geolocator.reverse(lat + ", " + lon)
address = location.raw['address']
address_components = ['city', 'state', 'postcode', 'county', 'country']
whereIam += ' '.join([address.get(component, '') for component in address_components if component in address])
whereIam += " Grid: " + grid
else:
location = geolocator.reverse(lat + ", " + lon)
address = location.raw['address']
address_components = ['house_number', 'road', 'city', 'state', 'postcode', 'county', 'country']
whereIam += ' '.join([address.get(component, '') for component in address_components if component in address])
whereIam += " Grid: " + grid
return whereIam
except Exception as e:
logger.debug("Location:Error fetching location data with whereami, likely network error")
return ERROR_FETCHING_DATA
def get_tide(lat=0, lon=0):
station_id = ""
@@ -192,7 +203,9 @@ def abbreviate_weather(row):
"West": "W",
"precipitation": "precip",
"showers": "shwrs",
"thunderstorms": "t-storms"
"thunderstorms": "t-storms",
"quarters": "qtrs",
"quarter": "qtr"
}
line = row
@@ -209,14 +222,15 @@ def getWeatherAlerts(lat=0, lon=0):
alert_url = "https://api.weather.gov/alerts/active.atom?point=" + str(lat) + "," + str(lon)
#alert_url = "https://api.weather.gov/alerts/active.atom?area=WA"
#logger.debug("Location:Fetching weather alerts from NOAA for " + str(lat) + ", " + str(lon))
try:
alert_data = requests.get(alert_url, timeout=urlTimeoutSeconds)
if not alert_data.ok:
logger.error("Location:Error fetching weather alerts from NOAA")
logger.warning("Location:Error fetching weather alerts from NOAA")
return ERROR_FETCHING_DATA
except (requests.exceptions.RequestException):
logger.error("Location:Error fetching weather alerts from NOAA")
logger.warning("Location:Error fetching weather alerts from NOAA")
return ERROR_FETCHING_DATA
alerts = ""
@@ -248,19 +262,20 @@ def getActiveWeatherAlertsDetail(lat=0, lon=0):
# get the latest details of weather alerts from NOAA
alerts = ""
if float(lat) == 0 and float(lon) == 0:
logger.error("Location:No GPS data, try sending location for weather alerts")
logger.warning("Location:No GPS data, try sending location for weather alerts")
return NO_DATA_NOGPS
alert_url = "https://api.weather.gov/alerts/active.atom?point=" + str(lat) + "," + str(lon)
#alert_url = "https://api.weather.gov/alerts/active.atom?area=WA"
#logger.debug("Location:Fetching weather alerts detailed from NOAA for " + str(lat) + ", " + str(lon))
try:
alert_data = requests.get(alert_url, timeout=urlTimeoutSeconds)
if not alert_data.ok:
logger.error("Location:Error fetching weather alerts detailed from NOAA")
logger.warning("Location:Error fetching weather alerts from NOAA")
return ERROR_FETCHING_DATA
except (requests.exceptions.RequestException):
logger.error("Location:Error fetching weather alerts detailed from NOAA")
logger.warning("Location:Error fetching weather alerts from NOAA")
return ERROR_FETCHING_DATA
alerts = ""
+2 -2
View File
@@ -60,13 +60,13 @@ logger.addHandler(stdout_handler)
if syslog_to_file:
# Create file handler for logging to a file
file_handler = logging.FileHandler('system{}.log'.format(today.strftime('%Y_%m_%d')))
file_handler.setLevel(logging.DEBUG) # DEBUG used for system logs
file_handler.setLevel(logging.DEBUG) # DEBUG used by default for system logs to disk
file_handler.setFormatter(logging.Formatter(logFormat))
logger.addHandler(file_handler)
if log_messages_to_file:
# Create file handler for logging to a file
file_handler = logging.FileHandler('messages{}.log'.format(today.strftime('%Y_%m_%d')))
file_handler.setLevel(logging.INFO) # INFO used for messages
file_handler.setLevel(logging.INFO) # INFO used for messages to disk
file_handler.setFormatter(logging.Formatter(msgLogFormat))
msgLogger.addHandler(file_handler)
+16 -7
View File
@@ -24,6 +24,11 @@ max_retry_count1 = 4 # max retry count for interface 1
max_retry_count2 = 4 # max retry count for interface 2
retry_int1 = False
retry_int2 = False
scheduler_enabled = False # enable the scheduler currently config via code only
wiki_return_limit = 3 # limit the number of sentences returned off the first paragraph first hit
llmRunCounter = 0
llmTotalRuntime = []
llmLocationTable = []
# Read the config file, if it does not exist, create basic config file
config = configparser.ConfigParser()
@@ -84,15 +89,19 @@ try:
publicChannel = config['general'].getint('defaultChannel', 0) # the meshtastic public channel
zuluTime = config['general'].getboolean('zuluTime', False) # aka 24 hour time
log_messages_to_file = config['general'].getboolean('LogMessagesToFile', True) # default True
syslog_to_file = config['general'].getboolean('SyslogToFile', False) # default True
syslog_to_file = config['general'].getboolean('SyslogToFile', False)
urlTimeoutSeconds = config['general'].getint('urlTimeout', 10) # default 10 seconds
store_forward_enabled = config['general'].getboolean('StoreForward', True) # default False
store_forward_enabled = config['general'].getboolean('StoreForward', True)
storeFlimit = config['general'].getint('StoreLimit', 3) # default 3 messages for S&F
welcome_message = config['general'].get(f'welcome_message', WELCOME_MSG)
welcome_message = config['general'].get('welcome_message', WELCOME_MSG)
welcome_message = (f"{welcome_message}").replace('\\n', '\n') # allow for newlines in the welcome message
motd_enabled = config['general'].getboolean('motdEnabled', True)
dad_jokes_enabled = config['general'].getboolean('DadJokes', True)
solar_conditions_enabled = config['general'].getboolean('spaceWeather', True)
MOTD = config['general'].get('motd', MOTD)
dad_jokes_enabled = config['general'].getboolean('DadJokes', False)
solar_conditions_enabled = config['general'].getboolean('spaceWeather', True)
wikipedia_enabled = config['general'].getboolean('wikipedia', False)
llm_enabled = config['general'].getboolean('ollama', False) # https://ollama.com
llmModel = config['general'].get('ollamaModel', 'gemma2:2b') # default gemma2:2b
sentry_enabled = config['sentry'].getboolean('SentryEnabled', False) # default False
secure_channel = config['sentry'].getint('SentryChannel', 2) # default 2
@@ -117,9 +126,9 @@ try:
repeater_enabled = config['repeater'].getboolean('enabled', False)
repeater_channels = config['repeater'].get('repeater_channels', '').split(',')
radio_dectection_enabled = config['radioMon'].getboolean('enabled', False)
radio_detection_enabled = config['radioMon'].getboolean('enabled', False)
rigControlServerAddress = config['radioMon'].get('rigControlServerAddress', 'localhost:4532') # default localhost:4532
sigWatchBrodcastCh = config['radioMon'].get('sigWatchBrodcastCh', '2').split(',') # default Channel 2
sigWatchBroadcastCh = config['radioMon'].get('sigWatchBroadcastCh', '2').split(',') # default Channel 2
signalDetectionThreshold = config['radioMon'].getint('signalDetectionThreshold', -10) # default -10 dBm
signalHoldTime = config['radioMon'].getint('signalHoldTime', 10) # default 10 seconds
signalCooldown = config['radioMon'].getint('signalCooldown', 5) # default 1 second
+82 -32
View File
@@ -64,6 +64,25 @@ if dad_jokes_enabled:
trap_list = trap_list + ("joke",)
help_message = help_message + ", joke"
# Wikipedia Search Configuration
if wikipedia_enabled:
import wikipedia # pip install wikipedia
trap_list = trap_list + ("wiki:",)
help_message = help_message + ", wiki:"
# LLM Configuration
if llm_enabled:
from modules.llm import * # from the spudgunman/meshing-around repo
trap_list = trap_list + trap_list_llm # items ask:
help_message = help_message + ", askai"
# Scheduled Broadcast Configuration
if scheduler_enabled:
import schedule # pip install schedule
# Reminder Scheduler is enabled every Monday at noon send a log message
schedule.every().monday.at("12:00").do(lambda: logger.info("System: Scheduled Broadcast Reminder"))
# Sentry Configuration
if sentry_enabled:
from math import sqrt
import geopy.distance # pip install geopy
@@ -74,11 +93,17 @@ if store_forward_enabled:
help_message = help_message + ", messages"
# Radio Monitor Configuration
if radio_dectection_enabled:
if radio_detection_enabled:
from modules.radio import * # from the spudgunman/meshing-around repo
# BLE dual interface prevention
if interface1_type == 'ble' and interface2_type == 'ble':
logger.critical(f"System: BLE Interface1 and Interface2 cannot both be BLE. Exiting")
exit()
# Interface1 Configuration
try:
logger.debug(f"System: Initializing Interface1")
if interface1_type == 'serial':
interface1 = meshtastic.serial_interface.SerialInterface(port1)
elif interface1_type == 'tcp':
@@ -89,11 +114,12 @@ try:
logger.critical(f"System: Interface Type: {interface1_type} not supported. Validate your config against config.template Exiting")
exit()
except Exception as e:
logger.critical(f"System: script abort. Initalizing Interface1 {e}")
logger.critical(f"System: script abort. Initializing Interface1 {e}")
exit()
# Interface2 Configuration
if interface2_enabled:
logger.debug(f"System: Initializing Interface2")
try:
if interface2_type == 'serial':
interface2 = meshtastic.serial_interface.SerialInterface(port2)
@@ -105,7 +131,7 @@ if interface2_enabled:
logger.critical(f"System: Interface Type: {interface2_type} not supported. Validate your config against config.template Exiting")
exit()
except Exception as e:
logger.critical(f"System: script abort. Initalizing Interface2 {e}")
logger.critical(f"System: script abort. Initializing Interface2 {e}")
exit()
#Get the node number of the device, check if the device is connected
@@ -204,6 +230,7 @@ def get_node_list(nodeInt=1):
node_list1 = []
node_list2 = []
short_node_list = []
last_heard = 0
if nodeInt == 1:
if interface1.nodes:
for node in interface1.nodes.values():
@@ -230,10 +257,8 @@ def get_node_list(nodeInt=1):
node_name = get_name_from_number(node['num'], 'long', nodeInt)
snr = node.get('snr', 0)
# issue where lastHeard is not always present, also had issues with None
# issue where lastHeard is not always present
last_heard = node.get('lastHeard', 0)
if last_heard is None:
last_heard = 0
# make a list of nodes with last heard time and SNR
item = (node_name, last_heard, snr)
@@ -244,13 +269,15 @@ def get_node_list(nodeInt=1):
try:
#print (f"Node List: {node_list1[:5]}\n")
node_list1.sort(key=lambda x: x[1], reverse=True)
node_list1.sort(key=lambda x: x[1] if x[1] is not None else 0, reverse=True)
#print (f"Node List: {node_list1[:5]}\n")
node_list2.sort(key=lambda x: x[1], reverse=True)
if interface2_enabled:
node_list2.sort(key=lambda x: x[1] if x[1] is not None else 0, reverse=True)
except Exception as e:
logger.error(f"System: Error sorting node list: {e}")
#print (f"Node List1: {node_list1[:5]}\n")
#print (f"Node List2: {node_list2[:5]}\n")
logger.debug(f"Node List1: {node_list1[:5]}\n")
if interface2_enabled:
logger.debug(f"Node List2: {node_list2[:5]}\n")
node_list = ERROR_FETCHING_DATA
try:
@@ -322,6 +349,7 @@ def get_node_location(number, nodeInt=1, channel=0):
else:
logger.warning(f"System: No nodes found")
return position
return position
def get_closest_nodes(nodeInt=1,returnCount=3):
node_list = []
@@ -336,7 +364,7 @@ def get_closest_nodes(nodeInt=1,returnCount=3):
longitude = node['position']['longitude']
#lastheard time in unix time
lastheard = node['lastHeard']
lastheard = node.get('lastHeard', 0)
#if last heard is over 24 hours ago, ignore the node
if lastheard < (time.time() - 86400):
continue
@@ -377,7 +405,7 @@ def get_closest_nodes(nodeInt=1,returnCount=3):
longitude = node['position']['longitude']
#lastheard time in unix time
lastheard = node['lastHeard']
lastheard = node.get('lastHeard', 0)
#if last heard is over 24 hours ago, ignore the node
if lastheard < (time.time() - 86400):
continue
@@ -400,7 +428,7 @@ def get_closest_nodes(nodeInt=1,returnCount=3):
return ERROR_FETCHING_DATA
def send_message(message, ch, nodeid=0, nodeInt=1):
if message == "":
if message == "" or message == None or len(message) == 0:
return
# if message over MESSAGE_CHUNK_SIZE characters, split it into multiple messages
if len(message) > MESSAGE_CHUNK_SIZE:
@@ -415,7 +443,7 @@ def send_message(message, ch, nodeid=0, nodeInt=1):
for word in split_message:
if len(line + word) < MESSAGE_CHUNK_SIZE:
if word == 'NEWLINE':
if 'NEWLINE' in word or '\n' in word or '\r' in word:
# chunk by newline if it exists
message_list.append(line)
line = ''
@@ -426,6 +454,7 @@ def send_message(message, ch, nodeid=0, nodeInt=1):
line = word + ' '
message_list.append(line) # needed add contents of the last 'line' into the list
message_list = [m.replace('NEWLINE', '') for m in message_list]
for m in message_list:
if nodeid == 0:
@@ -468,14 +497,30 @@ def tell_joke():
else:
return ''
def messageTrap(msg):
    """Return True when any space-delimited word of msg equals a trap word (case-insensitive)."""
    lowered_traps = {trap.lower() for trap in trap_list}
    return any(word.lower() in lowered_traps for word in msg.split(" "))
def get_wikipedia_summary(search_term):
    """Look up search_term on Wikipedia and return a short plain-text summary.

    Tries an exact-title summary first (auto_suggest off); on disambiguation
    or page errors, falls back to the top search hit. Returns
    ERROR_FETCHING_DATA when nothing usable is found.
    """
    wikipedia_search = wikipedia.search(search_term, results=3)
    wikipedia_suggest = wikipedia.suggest(search_term)
    #wikipedia_aroundme = wikipedia.geosearch(location[0], location[1], results=3)
    #logger.debug(f"System: Wikipedia Nearby:{wikipedia_aroundme}")
    if len(wikipedia_search) == 0:
        logger.warning(f"System: No Wikipedia Results for:{search_term}")
        return ERROR_FETCHING_DATA

    try:
        logger.debug(f"System: Searching Wikipedia for:{search_term}, First Result:{wikipedia_search[0]}, Suggest Word:{wikipedia_suggest}")
        summary = wikipedia.summary(search_term, sentences=wiki_return_limit, auto_suggest=False, redirect=True)
    except wikipedia.DisambiguationError as e:
        logger.warning(f"System: Disambiguation Error for:{search_term} trying {wikipedia_search[0]}")
        # FIX: the fallback lookup can itself raise (another disambiguation or
        # a page error) — the original let that propagate and crash the caller.
        try:
            summary = wikipedia.summary(wikipedia_search[0], sentences=wiki_return_limit, auto_suggest=True, redirect=True)
        except Exception as e:
            logger.error(f"System: Error with Wikipedia for:{search_term} {e}")
            return ERROR_FETCHING_DATA
    except wikipedia.PageError as e:
        logger.warning(f"System: Wikipedia Page Error for:{search_term} {e} trying {wikipedia_search[0]}")
        # FIX: guard the fallback here too, for the same reason as above
        try:
            summary = wikipedia.summary(wikipedia_search[0], sentences=wiki_return_limit, auto_suggest=True, redirect=True)
        except Exception as e:
            logger.error(f"System: Error with Wikipedia for:{search_term} {e}")
            return ERROR_FETCHING_DATA
    except Exception as e:
        logger.error(f"System: Error with Wikipedia for:{search_term} {e}")
        return ERROR_FETCHING_DATA
    return summary
def messageTrap(msg):
# Check if the message contains a trap word
@@ -488,7 +533,7 @@ def messageTrap(msg):
def exit_handler():
# Close the interface and save the BBS messages
logger.debug(f"\nSystem: Closing Autoresponder\n")
logger.debug(f"System: Closing Autoresponder")
try:
interface1.close()
logger.debug(f"System: Interface1 Closed")
@@ -506,8 +551,14 @@ def exit_handler():
asyncLoop.close()
exit (0)
async def BroadcastScheduler():
    """Run the `schedule` library's pending broadcast jobs, polling once a second.

    Loops forever; yields to the event loop between polls via asyncio.sleep.
    """
    poll_interval_seconds = 1
    while True:
        schedule.run_pending()
        await asyncio.sleep(poll_interval_seconds)
async def handleSignalWatcher():
global lastHamLibAlert, antiSpam, sigWatchBrodcastCh
global lastHamLibAlert, antiSpam, sigWatchBroadcastCh
# monitor rigctld for signal strength and frequency
while True:
msg = await signalWatcher()
@@ -518,21 +569,21 @@ async def handleSignalWatcher():
if time.time() - lastHamLibAlert > 60:
lastHamLibAlert = time.time()
# if sigWatchBrodcastCh list contains multiple channels, broadcast to all
if type(sigWatchBrodcastCh) is list:
for ch in sigWatchBrodcastCh:
if type(sigWatchBroadcastCh) is list:
for ch in sigWatchBroadcastCh:
if antiSpam and ch != publicChannel:
send_message(msg, int(ch), 0, 1)
if interface2_enabled:
send_message(msg, int(ch), 0, 2)
else:
logger.error(f"System: antiSpam prevented Alert from Hamlib {msg}")
logger.warning(f"System: antiSpam prevented Alert from Hamlib {msg}")
else:
if antiSpam and sigWatchBrodcastCh != publicChannel:
send_message(msg, int(sigWatchBrodcastCh), 0, 1)
if antiSpam and sigWatchBroadcastCh != publicChannel:
send_message(msg, int(sigWatchBroadcastCh), 0, 1)
if interface2_enabled:
send_message(msg, int(sigWatchBrodcastCh), 0, 2)
send_message(msg, int(sigWatchBroadcastCh), 0, 2)
else:
logger.error(f"System: antiSpam prevented Alert from Hamlib {msg}")
logger.warning(f"System: antiSpam prevented Alert from Hamlib {msg}")
await asyncio.sleep(1)
pass
@@ -619,7 +670,6 @@ async def watchdog():
with contextlib.redirect_stdout(None):
interface1.localNode.getMetadata()
print(f"System: if you see this upgrade python to >3.4")
#if "device_state_version:" not in meta:
except Exception as e:
logger.error(f"System: communicating with interface1, trying to reconnect: {e}")
retry_int1 = True
+43 -6
View File
@@ -8,6 +8,8 @@ from pubsub import pub # pip install pubsub
from modules.log import *
from modules.system import *
responseDelay = 0.7 # delay in seconds for response to avoid message collision
def auto_response(message, snr, rssi, hop, message_from_id, channel_number, deviceID):
# Auto response to messages
message_lower = message.lower()
@@ -37,8 +39,8 @@ def auto_response(message, snr, rssi, hop, message_from_id, channel_number, devi
# run the first command after sorting
bot_response = command_handler[cmds[0]['cmd']]()
# wait a 700ms to avoid message collision from lora-ack
time.sleep(0.7)
# wait a responseDelay to avoid message collision from lora-ack
time.sleep(responseDelay)
return bot_response
@@ -85,6 +87,32 @@ def handle_testing(hop, snr, rssi):
else:
return "🏓Testing 1,2,3 " + hop
def onDisconnect(interface):
    """Pubsub callback for 'meshtastic.connection.lost'.

    Works out which configured device dropped from the interface class,
    logs it, and sets the matching retry flag for the watchdog reconnect.
    """
    global retry_int1, retry_int2
    ifaceClass = type(interface).__name__
    if ifaceClass == 'SerialInterface':
        devPath = interface.__dict__.get('devPath', 'unknown')
        logger.critical(f"System: Lost Connection to Device {devPath}")
        # match the serial path against the configured ports
        if port1 in devPath:
            retry_int1 = True
        elif interface2_enabled and port2 in devPath:
            retry_int2 = True
    if ifaceClass == 'TCPInterface':
        host = interface.__dict__.get('hostname', 'unknown')
        logger.critical(f"System: Lost Connection to Device {host}")
        # match hostname, but only for interfaces configured as tcp
        if hostname1 in host and interface1_type == 'tcp':
            retry_int1 = True
        elif interface2_enabled and hostname2 in host and interface2_type == 'tcp':
            retry_int2 = True
    if ifaceClass == 'BLEInterface':
        # BLE carries no path/host to disambiguate; rely on configured type
        logger.critical(f"System: Lost Connection to Device BLE")
        if interface1_type == 'ble':
            retry_int1 = True
        elif interface2_enabled and interface2_type == 'ble':
            retry_int2 = True
def onReceive(packet, interface):
# extract interface details from the interface object
rxType = type(interface).__name__
@@ -106,11 +134,19 @@ def onReceive(packet, interface):
elif interface2_enabled and hostname2 in rxHost and interface2_type == 'tcp':
rxNode = 2
if rxType == 'BLEInterface':
if interface1_type == 'ble':
rxNode = 1
elif interface2_enabled and interface2_type == 'ble':
rxNode = 2
# Debug print the packet for debugging
#print(f"Packet Received\n {packet} \n END of packet \n")
message_from_id = 0
# check for a message packet and process it
snr = 0
rssi = 0
try:
if 'decoded' in packet and packet['decoded']['portnum'] == 'TEXT_MESSAGE_APP':
message_bytes = packet['decoded']['payload']
@@ -218,8 +254,8 @@ def onReceive(packet, interface):
# repeat the message on the other device
if repeater_enabled and interface2_enabled:
# wait a 700ms to avoid message collision from lora-ack.
time.sleep(0.7)
# wait a responseDelay to avoid message collision from lora-ack.
time.sleep(responseDelay)
rMsg = (f"{message_string} From:{get_name_from_number(message_from_id, 'short', rxNode)}")
# if channel found in the repeater list repeat the message
if str(channel_number) in repeater_channels:
@@ -238,6 +274,7 @@ async def start_rx():
print (CustomFormatter.bold_white + f"\nMeshtastic Autoresponder Bot CTL+C to exit\n" + CustomFormatter.reset)
# Start the receive subscriber using pubsub via meshtastic library
pub.subscribe(onReceive, 'meshtastic.receive')
pub.subscribe(onDisconnect, 'meshtastic.connection.lost')
logger.info(f"System: Autoresponder Started for Device1 {get_name_from_number(myNodeNum1, 'long', 1)},"
f"{get_name_from_number(myNodeNum1, 'short', 1)}. NodeID: {myNodeNum1}, {decimal_to_hex(myNodeNum1)}")
if interface2_enabled:
@@ -253,8 +290,8 @@ async def start_rx():
logger.debug(f"System: Respond by DM only")
if repeater_enabled and interface2_enabled:
logger.debug(f"System: Repeater Enabled for Channels: {repeater_channels}")
if radio_dectection_enabled:
logger.debug(f"System: Radio Detection Enabled using rigctld at {rigControlServerAddress} brodcasting to channels: {sigWatchBrodcastCh} for {get_freq_common_name(get_hamlib('f'))}")
if radio_detection_enabled:
logger.debug(f"System: Radio Detection Enabled using rigctld at {rigControlServerAddress} brodcasting to channels: {sigWatchBroadcastCh} for {get_freq_common_name(get_hamlib('f'))}")
# here we go loopty loo
while True:
+7 -1
View File
@@ -10,4 +10,10 @@ dadjokes
openmeteo_requests
retry_requests
numpy
geopy
geopy
schedule
wikipedia
langchain
langchain-ollama
ollama
googlesearch-python