forked from iarv/meshing-around
Compare commits
279 Commits
copilot/re
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
aa0aaed0b5 | ||
|
|
9db4dc8ab9 | ||
|
|
85e8f41dca | ||
|
|
ddb123b759 | ||
|
|
10afde663e | ||
|
|
c931d13e6e | ||
|
|
ba6075b616 | ||
|
|
68c065825b | ||
|
|
213f121807 | ||
|
|
530d78482a | ||
|
|
09515b9bc0 | ||
|
|
9b8c9d80c8 | ||
|
|
8ee838f5c6 | ||
|
|
757d6d30b8 | ||
|
|
1ee785d388 | ||
|
|
c3284f0a0f | ||
|
|
bdcc479360 | ||
|
|
b1444b24e4 | ||
|
|
aef67da492 | ||
|
|
b8b8145447 | ||
|
|
42a4842a5b | ||
|
|
201591d469 | ||
|
|
4ecdc7b108 | ||
|
|
3f78bf7a67 | ||
|
|
8af21b760c | ||
|
|
ea3ed46e86 | ||
|
|
d78d6acd1e | ||
|
|
e9b483f4e8 | ||
|
|
94660e7993 | ||
|
|
12aeaef250 | ||
|
|
2a6f76ab5b | ||
|
|
05df1e1a3c | ||
|
|
38131b4180 | ||
|
|
397c39b13d | ||
|
|
af7dfe8a51 | ||
|
|
d5d163aab9 | ||
|
|
58cc3e4314 | ||
|
|
3274dfdbc0 | ||
|
|
84a1a163d3 | ||
|
|
289eb70738 | ||
|
|
a6d51e41bf | ||
|
|
a63020bbb7 | ||
|
|
2416e73fbf | ||
|
|
f87f34f8bf | ||
|
|
eaed034d20 | ||
|
|
ec9ac1b1fe | ||
|
|
e84ce13878 | ||
|
|
a5fc8aca82 | ||
|
|
c31947194e | ||
|
|
c79f3cdfbc | ||
|
|
14b876b989 | ||
|
|
2cc5b23753 | ||
|
|
a5b0fda3ac | ||
|
|
9c5c332e01 | ||
|
|
ac5e96e463 | ||
|
|
0ce7deb740 | ||
|
|
a60333318b | ||
|
|
665acaa904 | ||
|
|
0aa8bccd04 | ||
|
|
2e5e8a7589 | ||
|
|
e3e6393bad | ||
|
|
be38588292 | ||
|
|
14fb3f9cb6 | ||
|
|
c40cd86592 | ||
|
|
69df48957e | ||
|
|
e29573ebc0 | ||
|
|
13b9b75f86 | ||
|
|
0bfe908391 | ||
|
|
5baee422c2 | ||
|
|
38ff05fd40 | ||
|
|
e1def5422a | ||
|
|
93031010cb | ||
|
|
21e614ab8e | ||
|
|
a5322867e3 | ||
|
|
2863a64ec8 | ||
|
|
678fde7b2c | ||
|
|
ec0f9f966c | ||
|
|
fd114301f6 | ||
|
|
1778cb6feb | ||
|
|
fc7ca37184 | ||
|
|
fe2110ca2b | ||
|
|
179113e83a | ||
|
|
79348be644 | ||
|
|
35c6232b0c | ||
|
|
2aa7ffb0e8 | ||
|
|
a7060bc516 | ||
|
|
998d979d71 | ||
|
|
cdfb451d67 | ||
|
|
994405955a | ||
|
|
17d92dc78d | ||
|
|
d0e33f943f | ||
|
|
f55c7311fa | ||
|
|
5f4f832af6 | ||
|
|
c3e8f4a93e | ||
|
|
e72b3c191e | ||
|
|
3774b8407b | ||
|
|
0e074a6885 | ||
|
|
c8800d837f | ||
|
|
289ada1fc0 | ||
|
|
e64e60358d | ||
|
|
658fb33b69 | ||
|
|
1568d026f2 | ||
|
|
232bf98efd | ||
|
|
3cce938334 | ||
|
|
e6a17d9258 | ||
|
|
d403e4c8c0 | ||
|
|
ae19c5b83f | ||
|
|
532efda9e8 | ||
|
|
d20eab03e9 | ||
|
|
df43b61a0c | ||
|
|
862347cbec | ||
|
|
9c412b8328 | ||
|
|
c3fcacd64b | ||
|
|
68b5de2950 | ||
|
|
f578ba6084 | ||
|
|
961bb3abba | ||
|
|
1f85fe7842 | ||
|
|
6808ef2e68 | ||
|
|
5efac1d8b6 | ||
|
|
12b2fe789d | ||
|
|
8f48442f60 | ||
|
|
180e9368e9 | ||
|
|
2c7a753cb5 | ||
|
|
c5ef0b4145 | ||
|
|
ffecd2a44f | ||
|
|
d1d5d6ba30 | ||
|
|
2fe1196a90 | ||
|
|
5c72dd7aa5 | ||
|
|
2834ac3d0d | ||
|
|
55c29c36ba | ||
|
|
0cc4bbf3cd | ||
|
|
b795268d99 | ||
|
|
e60593a3d9 | ||
|
|
8b2449eded | ||
|
|
0c7e8b99a9 | ||
|
|
3931848bd9 | ||
|
|
134ec9f7df | ||
|
|
b8937c6abe | ||
|
|
0e4f0ee83a | ||
|
|
04560b0589 | ||
|
|
78c0ab6bb6 | ||
|
|
2d4f81e662 | ||
|
|
c6f9bc4a90 | ||
|
|
409ae34f93 | ||
|
|
ec9fbc9bd1 | ||
|
|
51602a7fbd | ||
|
|
a49106500d | ||
|
|
ded62343fd | ||
|
|
d096433ab7 | ||
|
|
3273e57f0b | ||
|
|
7cc70dd555 | ||
|
|
edb3208e2c | ||
|
|
60a6244c69 | ||
|
|
f06a27957f | ||
|
|
384f5a62f3 | ||
|
|
dcaf9d7fb5 | ||
|
|
99faf72408 | ||
|
|
a5a7e19ddc | ||
|
|
912617dc34 | ||
|
|
ca6d0cce4e | ||
|
|
43051076ba | ||
|
|
83091e6100 | ||
|
|
6b512db552 | ||
|
|
09b684fad8 | ||
|
|
1122d6007e | ||
|
|
f51cace2c3 | ||
|
|
78cefd3704 | ||
|
|
421efd7521 | ||
|
|
e64f6317ab | ||
|
|
18a6c9dfac | ||
|
|
a96d57580a | ||
|
|
1388771cc1 | ||
|
|
2cbfdb0b78 | ||
|
|
38bef50e12 | ||
|
|
24090ce19f | ||
|
|
14ea1e3d97 | ||
|
|
fb7bf1975b | ||
|
|
1e7887d480 | ||
|
|
398a4c6c63 | ||
|
|
9ab6b3be89 | ||
|
|
255be455b7 | ||
|
|
95a35520c2 | ||
|
|
22ec62a2f2 | ||
|
|
3f95f1d533 | ||
|
|
4e04ebee76 | ||
|
|
91fb93ca8d | ||
|
|
f690f16771 | ||
|
|
2805240abc | ||
|
|
7a5b7e64d7 | ||
|
|
0fd881aa4b | ||
|
|
932112abb2 | ||
|
|
f3d1fd0ec5 | ||
|
|
e92b1a2876 | ||
|
|
11d3c1eaf4 | ||
|
|
0361153592 | ||
|
|
0c6fcf10ef | ||
|
|
647ae92649 | ||
|
|
254eef4be9 | ||
|
|
bd0a94e2a1 | ||
|
|
2d8256d9f7 | ||
|
|
1f9b81865e | ||
|
|
17221cf37f | ||
|
|
47dd75bfb3 | ||
|
|
d4773705ce | ||
|
|
4f46e659d9 | ||
|
|
404f84f39c | ||
|
|
c07ec534a7 | ||
|
|
4d88aed0d8 | ||
|
|
b1946608f4 | ||
|
|
b92cf48fd0 | ||
|
|
227ffc94e6 | ||
|
|
b9f5a0c7f9 | ||
|
|
d56c1380c3 | ||
|
|
e8a8eefcc2 | ||
|
|
5738e8d306 | ||
|
|
11359e4016 | ||
|
|
7bb31af1d2 | ||
|
|
fd115916f5 | ||
|
|
32b60297c8 | ||
|
|
f15a871967 | ||
|
|
a346354dbc | ||
|
|
3d8007bbf6 | ||
|
|
bb254474d0 | ||
|
|
37e3790ee4 | ||
|
|
0ec380931a | ||
|
|
9cfd1bc670 | ||
|
|
a672c94303 | ||
|
|
92b3574c22 | ||
|
|
27d8e198ae | ||
|
|
11eeaa445a | ||
|
|
57efc8a69b | ||
|
|
7442ce11b4 | ||
|
|
8bb6ba4d8e | ||
|
|
da10af8d93 | ||
|
|
46a33178f6 | ||
|
|
e07c5a923e | ||
|
|
d330f3e0d6 | ||
|
|
eddb2fe08c | ||
|
|
ebe729cf13 | ||
|
|
41a45c6e9c | ||
|
|
4224579f79 | ||
|
|
aa43d4acad | ||
|
|
4406f2b86f | ||
|
|
649c959304 | ||
|
|
3529e40743 | ||
|
|
f5c2dfa5e4 | ||
|
|
1fb144ae1e | ||
|
|
7e66ffc3a0 | ||
|
|
d7371fae98 | ||
|
|
e4c51c97a1 | ||
|
|
70f072d222 | ||
|
|
8bb587cc7a | ||
|
|
313c313412 | ||
|
|
e5e8fbd0b5 | ||
|
|
2ef96f3ae3 | ||
|
|
a58605aba3 | ||
|
|
ffdd3a1ea9 | ||
|
|
185de28139 | ||
|
|
0eea36fba2 | ||
|
|
cb9e62894d | ||
|
|
9443d5fb0a | ||
|
|
1751648b12 | ||
|
|
8823d415c3 | ||
|
|
55a1d951a7 | ||
|
|
c8096107a0 | ||
|
|
5bdf1a9d6c | ||
|
|
85344db27e | ||
|
|
5990a859d9 | ||
|
|
ad6a55b9cd | ||
|
|
6fcd981eae | ||
|
|
9564c92cc8 | ||
|
|
149dc10df6 | ||
|
|
e211efca4e | ||
|
|
a974de790b | ||
|
|
777c423f17 | ||
|
|
dbcb93eabb | ||
|
|
69518ea317 | ||
|
|
11faea2b4e | ||
|
|
acb0e870d6 |
8
.github/workflows/docker-image.yml
vendored
8
.github/workflows/docker-image.yml
vendored
@@ -25,10 +25,10 @@ jobs:
|
||||
#
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
uses: actions/checkout@v6
|
||||
# Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
|
||||
- name: Log in to the Container registry
|
||||
uses: docker/login-action@28fdb31ff34708d19615a74d67103ddc2ea9725c
|
||||
uses: docker/login-action@3227f5311cb93ffd14d13e65d8cc400d30f4dd8a
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
@@ -36,7 +36,7 @@ jobs:
|
||||
# This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
|
||||
- name: Extract metadata (tags, labels) for Docker
|
||||
id: meta
|
||||
uses: docker/metadata-action@032a4b3bda1b716928481836ac5bfe36e1feaad6
|
||||
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
|
||||
# This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
|
||||
@@ -44,7 +44,7 @@ jobs:
|
||||
# It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
|
||||
- name: Build and push Docker image
|
||||
id: push
|
||||
uses: docker/build-push-action@9e436ba9f2d7bcd1d038c8e55d039d37896ddc5d
|
||||
uses: docker/build-push-action@8c1e8f8e5bf845ba3773a14f3967965548a2341e
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
|
||||
1
.github/workflows/greetings.yml
vendored
1
.github/workflows/greetings.yml
vendored
@@ -18,5 +18,4 @@ jobs:
|
||||
- uses: actions/first-interaction@v3
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
issue_message: "Dependabot's first issue"
|
||||
pr_message: "Thank you for your pull request!"
|
||||
37
.gitignore
vendored
37
.gitignore
vendored
@@ -2,39 +2,30 @@
|
||||
config.ini
|
||||
config_new.ini
|
||||
ini_merge_log.txt
|
||||
|
||||
# Pickle files
|
||||
*.pkl
|
||||
|
||||
# virtualenv
|
||||
venv/
|
||||
install_notes.txt
|
||||
|
||||
# logs
|
||||
logs/
|
||||
install_notes.txt
|
||||
logs/*.log
|
||||
|
||||
# modified .service files
|
||||
etc/*.service
|
||||
|
||||
# Python cache
|
||||
__pycache__/
|
||||
|
||||
# rag data
|
||||
data/rag/*
|
||||
|
||||
# qrz db
|
||||
data/qrz.db
|
||||
|
||||
# checklist and inventory databases
|
||||
data/checklist.db
|
||||
data/inventory.db
|
||||
|
||||
# fileMonitor test file
|
||||
bee.txt
|
||||
bible.txt
|
||||
|
||||
# .csv files
|
||||
*.csv
|
||||
# data files
|
||||
data/*.json
|
||||
data/*.txt
|
||||
data/*.pkl
|
||||
data/*.csv
|
||||
data/*.db
|
||||
|
||||
# modules/custom_scheduler.py
|
||||
modules/custom_scheduler.py
|
||||
|
||||
# virtualenv
|
||||
venv/
|
||||
|
||||
# Python cache
|
||||
__pycache__/
|
||||
|
||||
25
INSTALL.md
25
INSTALL.md
@@ -196,4 +196,27 @@ From your project root, run one of the following commands:
|
||||
|
||||
- The script requires a Python virtual environment (`venv`) to be present in the project directory.
|
||||
- If `venv` is missing, the script will exit with an error message.
|
||||
- Always provide an argument (`mesh`, `pong`, `html`, `html5`, or `add`) to specify what you want to launch.
|
||||
- Always provide an argument (`mesh`, `pong`, `html`, `html5`, or `add`) to specify what you want to launch.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Permissions Issues
|
||||
|
||||
If you encounter errors related to file or directory permissions (e.g., "Permission denied" or services failing to start):
|
||||
|
||||
- Ensure you are running installation scripts with sufficient privileges (use `sudo` if needed).
|
||||
- The `logs`, `data`, and `config.ini` files must be owned by the user running the bot (often `meshbot` or your current user).
|
||||
- You can manually reset permissions using the provided script:
|
||||
|
||||
```sh
|
||||
sudo bash etc/set-permissions.sh meshbot
|
||||
```
|
||||
|
||||
- If you moved the project directory, re-run the permissions script to update ownership.
|
||||
|
||||
- For systemd service issues, check logs with:
|
||||
```sh
|
||||
sudo journalctl -u mesh_bot.service
|
||||
```
|
||||
|
||||
If problems persist, double-check that the user specified in your service files matches the owner of the project files and directories.
|
||||
36
README.md
36
README.md
@@ -40,17 +40,31 @@ Mesh Bot is a feature-rich Python bot designed to enhance your [Meshtastic](http
|
||||
- **New Node Greetings**: Automatically greet new nodes via text.
|
||||
|
||||
### Interactive AI and Data Lookup
|
||||
- **Weather, Earthquake, River, and Tide Data**: Get local alerts and info from NOAA/USGS; uses Open-Meteo for areas outside NOAA coverage. Global tide predictions available via tidepredict library for worldwide locations.
|
||||
- **Wikipedia Search**: Retrieve summaries from Wikipedia.
|
||||
- **Weather, Earthquake, River, and Tide Data**: Get local alerts and info from NOAA/USGS; uses Open-Meteo for areas outside NOAA coverage.
|
||||
- **Wikipedia Search**: Retrieve summaries from Wikipedia and Kiwix
|
||||
- **OpenWebUI, Ollama LLM Integration**: Query the [Ollama](https://github.com/ollama/ollama/tree/main/docs) AI for advanced responses. Supports RAG (Retrieval Augmented Generation) with Wikipedia/Kiwix context and [OpenWebUI](https://github.com/open-webui/open-webui) integration for enhanced AI capabilities. [LLM Readme](modules/llm.md)
|
||||
- **Satellite Passes**: Find upcoming satellite passes for your location.
|
||||
- **GeoMeasuring Tools**: Calculate distances and midpoints using collected GPS data; supports Fox & Hound direction finding.
|
||||
- **RSS & News Feeds**: Receive news and data from multiple sources directly on the mesh.
|
||||
|
||||
### Proximity Alerts
|
||||
- **Location-Based Alerts**: Get notified when members arrive at a configured latitude/longitude—ideal for campsites, geo-fences, or remote locations. Optionally, trigger scripts, send emails, or automate actions (e.g., change node config, turn on lights, or drop an `alert.txt` file to start a survey or game).
|
||||
- **Customizable Triggers**: Use proximity events for creative applications like "king of the hill" or 🧭 geocache games by adjusting the alert cycle.
|
||||
- **High Flying Alerts**: Receive notifications when nodes with high altitude are detected on the mesh.
|
||||
- **Voice/Command Triggers**: Activate bot functions using keywords or voice commands (see [Voice Commands](#voice-commands-vox) for "Hey Chirpy!" support).
|
||||
- **YOLOv5 alerts**: Use camera modules to detect objects or OCR
|
||||
|
||||
### EAS Alerts
|
||||
- **FEMA iPAWS/EAS Alerts**: Receive Emergency Alerts from FEMA via API on internet-connected nodes.
|
||||
- **NOAA EAS Alerts**: Get Emergency Alerts from NOAA via API.
|
||||
- **USGS Volcano Alerts**: Receive volcano alerts from USGS via API.
|
||||
- **NINA Alerts (Germany)**: Receive emergency alerts from the xrepository.de feed for Germany.
|
||||
- **Offline EAS Alerts**: Report EAS alerts over the mesh using external tools, even without internet.
|
||||
|
||||
### File Monitor Alerts
|
||||
- **File Monitoring**: Watch a text file for changes and broadcast updates to the mesh channel.
|
||||
- **News File Access**: Retrieve the contents of a news file on request; supports multiple news sources or files.
|
||||
- **Shell Command Access**: Execute shell commands via DM with replay protection (admin only).
|
||||
|
||||
#### Radio Frequency Monitoring
|
||||
- **SNR RF Activity Alerts**: Monitor radio frequencies and receive alerts when high SNR (Signal-to-Noise Ratio) activity is detected.
|
||||
@@ -58,6 +72,8 @@ Mesh Bot is a feature-rich Python bot designed to enhance your [Meshtastic](http
|
||||
- **Speech-to-Text Broadcasting**: Convert received audio to text using [Vosk](https://alphacephei.com/vosk/models) and broadcast it to the mesh.
|
||||
- **WSJT-X Integration**: Monitor WSJT-X (FT8, FT4, WSPR, etc.) decode messages and forward them to the mesh network with optional callsign filtering.
|
||||
- **JS8Call Integration**: Monitor JS8Call messages and forward them to the mesh network with optional callsign filtering.
|
||||
- **Meshages TTS**: The bot can speak mesh messages aloud using [KittenTTS](https://github.com/KittenML/KittenTTS). Enable this feature to have important alerts and messages read out loud on your device—ideal for hands-free operation or accessibility. See [radio.md](modules/radio.md) for setup instructions.
|
||||
- **Offline Tone out Decoder**: Decode fire Tone out and DTMF and action with alerts to mesh
|
||||
|
||||
### Asset Tracking, Check-In/Check-Out, and Inventory Management
|
||||
Advanced check-in/check-out and asset tracking for people and equipment—ideal for accountability, safety monitoring, and logistics (e.g., Radio-Net, FEMA, trailhead groups). Admin approval workflows, GPS location capture, and overdue alerts. The integrated inventory and point-of-sale (POS) system enables item management, sales tracking, cart-based transactions, and daily reporting, for swaps, emergency supply management, and field operations, maker-places.
|
||||
@@ -79,27 +95,14 @@ Advanced check-in/check-out and asset tracking for people and equipment—ideal
|
||||
- **User Feedback**: Users participate via DM; responses are logged for review.
|
||||
- **Reporting**: Retrieve survey results with `survey report` or `survey report <surveyname>`.
|
||||
|
||||
### EAS Alerts
|
||||
- **FEMA iPAWS/EAS Alerts**: Receive Emergency Alerts from FEMA via API on internet-connected nodes.
|
||||
- **NOAA EAS Alerts**: Get Emergency Alerts from NOAA via API.
|
||||
- **USGS Volcano Alerts**: Receive volcano alerts from USGS via API.
|
||||
- **Offline EAS Alerts**: Report EAS alerts over the mesh using external tools, even without internet.
|
||||
- **NINA Alerts (Germany)**: Receive emergency alerts from the xrepository.de feed for Germany.
|
||||
|
||||
### File Monitor Alerts
|
||||
- **File Monitoring**: Watch a text file for changes and broadcast updates to the mesh channel.
|
||||
- **News File Access**: Retrieve the contents of a news file on request; supports multiple news sources or files.
|
||||
- **Shell Command Access**: Execute shell commands via DM with replay protection (admin only).
|
||||
|
||||
### Data Reporting
|
||||
- **HTML Reports**: Visualize bot traffic and data flows with a built-in HTML generator. See [data reporting](logs/README.md) for details.
|
||||
- **RSS & News Feeds**: Receive news and data from multiple sources directly on the mesh.
|
||||
|
||||
### Robust Message Handling
|
||||
- **Automatic Message Chunking**: Messages over 160 characters are automatically split to ensure reliable delivery across multiple hops.
|
||||
|
||||
## Getting Started
|
||||
This project is developed on Linux (specifically a Raspberry Pi) but should work on any platform where the [Meshtastic protobuf API](https://meshtastic.org/docs/software/python/cli/) modules are supported, and with any compatible [Meshtastic](https://meshtastic.org/docs/getting-started/) hardware. For pico or low-powered devices, see projects for embedding, armbian or [buildroot](https://github.com/buildroot-meshtastic/buildroot-meshtastic), also see [femtofox](https://github.com/noon92/femtofox) for running on luckfox hardware. If you need a local console consider the [firefly](https://github.com/pdxlocations/firefly) project.
|
||||
This project is developed on Linux (specifically a Raspberry Pi) but should work on any platform where the [Meshtastic protobuf API](https://meshtastic.org/docs/software/python/cli/) modules are supported, and with any compatible [Meshtastic](https://meshtastic.org/docs/getting-started/) hardware, however it is **recomended to use the latest firmware code**. For pico or low-powered devices, see projects for embedding, armbian or [buildroot](https://github.com/buildroot-meshtastic/buildroot-meshtastic), also see [femtofox](https://github.com/noon92/femtofox) for running on luckfox hardware. If you need a local console consider the [firefly](https://github.com/pdxlocations/firefly) project.
|
||||
|
||||
🥔 Please use responsibly and follow local rulings for such equipment. This project captures packets, logs them, and handles over the air communications which can include PII such as GPS locations.
|
||||
|
||||
@@ -171,6 +174,7 @@ For testing and feature ideas on Discord and GitHub, if its stable its thanks to
|
||||
- **mrpatrick1991**: For OG Docker configurations. 💻
|
||||
- **A-c0rN**: Assistance with iPAWS and 🚨
|
||||
- **Mike O'Connell/skrrt**: For [eas_alert_parser](etc/eas_alert_parser.py) enhanced by **sheer.cold**
|
||||
- **dadud**: For idea on [etc/icad_tone.py](etc/icad_tone.py)
|
||||
- **WH6GXZ nurse dude**: Volcano Alerts 🌋
|
||||
- **mikecarper**: hamtest, leading to quiz etc.. 📋
|
||||
- **c.merphy360**: high altitude alerts. 🚀
|
||||
|
||||
121
config.template
121
config.template
@@ -62,6 +62,12 @@ rssFeedURL = http://www.hackaday.com/rss.xml,http://rss.slashdot.org/Slashdot/sl
|
||||
rssFeedNames = default,slashdot,mesh
|
||||
rssMaxItems = 3
|
||||
rssTruncate = 100
|
||||
# enable or disable the 'latest' command which uses NewsAPI.org key at https://newsapi.org/register
|
||||
enableNewsAPI = False
|
||||
newsAPI_KEY =
|
||||
newsAPIregion = us
|
||||
# could also be 'relevancy' or 'popularity' or 'publishedAt'
|
||||
sort_by = relevancy
|
||||
|
||||
# enable or disable the wikipedia search module
|
||||
wikipedia = True
|
||||
@@ -73,31 +79,29 @@ kiwixURL = http://127.0.0.1:8080
|
||||
# Kiwix library name (e.g., wikipedia_en_100_nopic_2025-09)
|
||||
kiwixLibraryName = wikipedia_en_100_nopic_2025-09
|
||||
|
||||
# Enable ollama LLM see more at https://ollama.com
|
||||
# Enable LLM local Ollama integration, set true for any LLM support
|
||||
ollama = False
|
||||
# Ollama model to use (defaults to gemma3:270m) gemma2 is good for older SYSTEM prompt
|
||||
# ollamaModel = gemma3:latest
|
||||
# ollamaModel = gemma2:2b
|
||||
# server instance to use (defaults to local machine install)
|
||||
# Ollama server instance to use (defaults to local machine install)
|
||||
ollamaHostName = http://localhost:11434
|
||||
|
||||
# Produce LLM replies to messages that aren't commands?
|
||||
# If False, the LLM only replies to the "ask:" and "askai" commands.
|
||||
llmReplyToNonCommands = True
|
||||
# if True, the input is sent raw to the LLM, if False uses SYSTEM prompt
|
||||
rawLLMQuery = True
|
||||
|
||||
# Enable Wikipedia/Kiwix integration with LLM for RAG (Retrieval Augmented Generation)
|
||||
# When enabled, LLM will automatically search Wikipedia/Kiwix and include context in responses
|
||||
llmUseWikiContext = False
|
||||
|
||||
# Use OpenWebUI instead of direct Ollama API (enables advanced RAG features)
|
||||
# Use OpenWebUI instead of direct Ollama API / still leave ollama = True
|
||||
useOpenWebUI = False
|
||||
# OpenWebUI server URL (e.g., http://localhost:3000)
|
||||
openWebUIURL = http://localhost:3000
|
||||
# OpenWebUI API key/token (required when useOpenWebUI is True)
|
||||
openWebUIAPIKey =
|
||||
|
||||
# Ollama model to use (defaults to gemma3:270m) gemma2 is good for older SYSTEM prompt
|
||||
# ollamaModel is used for both Ollama and OpenWebUI when useOpenWebUI its just the model name
|
||||
# ollamaModel = gemma3:latest
|
||||
# ollamaModel = gemma2:2b
|
||||
# if True, the query is sent raw to the LLM, if False uses internal SYSTEM prompt
|
||||
rawLLMQuery = True
|
||||
|
||||
# If False, the LLM only replies to the "ask:" and "askai" commands. otherwise DM's automatically go to LLM
|
||||
llmReplyToNonCommands = True
|
||||
# Enable Wikipedia/Kiwix integration with LLM for RAG (Retrieval Augmented Generation)
|
||||
# When enabled, LLM will automatically search Wikipedia/Kiwix and include context in responses
|
||||
llmUseWikiContext = False
|
||||
|
||||
# StoreForward Enabled and Limits
|
||||
StoreForward = True
|
||||
StoreLimit = 3
|
||||
@@ -203,18 +207,27 @@ useMetric = False
|
||||
# repeaterList lookup location (rbook / artsci / False)
|
||||
repeaterLookup = rbook
|
||||
|
||||
# Satalite Pass Prediction
|
||||
# Register for free API https://www.n2yo.com/login/ personal data page at bottom 'Are you developer?'
|
||||
n2yoAPIKey =
|
||||
# NORAD list https://www.n2yo.com/satellites/
|
||||
satList = 25544,7530
|
||||
|
||||
# use Open-Meteo API for weather data not NOAA useful for non US locations
|
||||
UseMeteoWxAPI = False
|
||||
|
||||
# NOAA weather forecast days
|
||||
NOAAforecastDuration = 3
|
||||
# number of weather alerts to display
|
||||
NOAAalertCount = 2
|
||||
|
||||
# use Open-Meteo API for weather data not NOAA useful for non US locations
|
||||
UseMeteoWxAPI = False
|
||||
|
||||
# Global Tide Prediction using tidepredict (for non-US locations or offline use)
|
||||
# When enabled, uses tidepredict library for global tide predictions instead of NOAA API
|
||||
# tidepredict uses University of Hawaii's Research Quality Dataset for worldwide coverage
|
||||
useTidePredict = False
|
||||
# NOAA Weather EAS Alert Broadcast
|
||||
wxAlertBroadcastEnabled = False
|
||||
# Enable Ignore any message that includes following word list
|
||||
ignoreEASenable = False
|
||||
ignoreEASwords = test,advisory
|
||||
# Add extra location to the weather alert
|
||||
enableExtraLocationWx = False
|
||||
|
||||
# NOAA Coastal Data Enable NOAA Coastal Waters Forecasts and Tide
|
||||
coastalEnabled = False
|
||||
@@ -230,52 +243,40 @@ coastalForecastDays = 3
|
||||
# for multiple rivers use comma separated list e.g. 12484500,14105700
|
||||
riverList =
|
||||
|
||||
# NOAA EAS Alert Broadcast
|
||||
wxAlertBroadcastEnabled = False
|
||||
# Enable Ignore any message that includes following word list
|
||||
ignoreEASenable = False
|
||||
ignoreEASwords = test,advisory
|
||||
# EAS Alert Broadcast Channels
|
||||
wxAlertBroadcastCh = 2
|
||||
# Add extra location to the weather alert
|
||||
enableExtraLocationWx = False
|
||||
|
||||
# Goverment Alert Broadcast defaults to FEMA IPAWS
|
||||
eAlertBroadcastEnabled = False
|
||||
# USA FEMA IPAWS alerts
|
||||
ipawsAlertEnabled = True
|
||||
# comma separated list of FIPS codes to trigger local alert. find your FIPS codes at https://en.wikipedia.org/wiki/Federal_Information_Processing_Standard_state_code
|
||||
myFIPSList = 57,58,53
|
||||
# find your SAME https://www.weather.gov/nwr/counties comma separated list of SAME code to further refine local alert.
|
||||
mySAMEList = 053029,053073
|
||||
# Goverment Alert Broadcast Channels
|
||||
eAlertBroadcastCh = 2
|
||||
# Enable Ignore, headline that includes following word list
|
||||
ignoreFEMAenable = True
|
||||
ignoreFEMAwords = test,exercise
|
||||
|
||||
# USGS Volcano alerts Enable USGS Volcano Alert Broadcast
|
||||
volcanoAlertBroadcastEnabled = False
|
||||
volcanoAlertBroadcastCh = 2
|
||||
# Enable Ignore any message that includes following word list
|
||||
ignoreUSGSEnable = False
|
||||
ignoreUSGSWords = test,advisory
|
||||
|
||||
# Use DE Alert Broadcast Data
|
||||
# Use Germany/DE Alert Broadcast Data
|
||||
enableDEalerts = False
|
||||
# comma separated list of regional codes trigger local alert.
|
||||
# find your regional codet at https://www.xrepository.de/api/xrepository/urn:de:bund:destatis:bevoelkerungsstatistik:schluessel:rs_2021-07-31/download/Regionalschl_ssel_2021-07-31.json
|
||||
myRegionalKeysDE = 110000000000,120510000000
|
||||
|
||||
# Satalite Pass Prediction
|
||||
# Register for free API https://www.n2yo.com/login/ personal data page at bottom 'Are you developer?'
|
||||
n2yoAPIKey =
|
||||
# NORAD list https://www.n2yo.com/satellites/
|
||||
satList = 25544,7530
|
||||
# Alerts are sent to the emergency_handler interface and channel duplicate messages are send here if set
|
||||
eAlertBroadcastCh =
|
||||
|
||||
# CheckList Checkin/Checkout
|
||||
[checklist]
|
||||
enabled = False
|
||||
checklist_db = data/checklist.db
|
||||
reverse_in_out = False
|
||||
# Auto approve new checklists
|
||||
auto_approve = True
|
||||
# Check-in reminder interval is 5min
|
||||
# Checkin broadcast interface and channel is emergency_handler interface and channel
|
||||
|
||||
# Inventory and Point of Sale System
|
||||
[inventory]
|
||||
@@ -322,6 +323,7 @@ value =
|
||||
# interval to use when time is not set (e.g. every 2 days)
|
||||
interval =
|
||||
# time of day in 24:00 hour format when value is 'day' and interval is not set
|
||||
# Process run :00,:20,:40 try and vary the 20 minute offsets to avoid collision
|
||||
time =
|
||||
|
||||
[radioMon]
|
||||
@@ -360,6 +362,10 @@ voxTrapList = chirpy
|
||||
# allow use of 'weather' and 'joke' commands via VOX
|
||||
voxEnableCmd = True
|
||||
|
||||
# Meshages Text-to-Speech (TTS) for incoming messages and DM
|
||||
meshagesTTS = False
|
||||
ttsChannels = 2
|
||||
|
||||
# WSJT-X UDP monitoring - listens for decode messages from WSJT-X, FT8/FT4/WSPR etc.
|
||||
wsjtxDetectionEnabled = False
|
||||
# UDP address and port where WSJT-X broadcasts (default: 127.0.0.1:2237)
|
||||
@@ -385,8 +391,10 @@ broadcastCh = 2
|
||||
# news command will return the contents of a text file
|
||||
enable_read_news = False
|
||||
news_file_path = ../data/news.txt
|
||||
# only return a single random line from the news file
|
||||
# only return a single random (head)line from the news file
|
||||
news_random_line = False
|
||||
# only return random news 'block' (seprated by two newlines) randomly (precidence over news_random_line)
|
||||
news_block_mode = True
|
||||
|
||||
# enable the use of exernal shell commands, this enables some data in `sysinfo`
|
||||
enable_runShellCmd = False
|
||||
@@ -394,9 +402,9 @@ enable_runShellCmd = False
|
||||
# direct shell command handler the x: command in DMs
|
||||
allowXcmd = False
|
||||
# Enable 2 factor authentication for x: commands
|
||||
2factor_enabled = True
|
||||
twoFactor_enabled = True
|
||||
# time in seconds to wait for the correct 2FA answer
|
||||
2factor_timeout = 100
|
||||
twoFactor_timeout = 100
|
||||
|
||||
[smtp]
|
||||
# enable or disable the SMTP module
|
||||
@@ -439,6 +447,7 @@ hangman = True
|
||||
hamtest = True
|
||||
tictactoe = True
|
||||
wordOfTheDay = True
|
||||
battleShip = True
|
||||
|
||||
# enable or disable the quiz game module questions are in data/quiz.json
|
||||
quiz = False
|
||||
@@ -474,3 +483,17 @@ DEBUGpacket = False
|
||||
# metaPacket detailed logging, the filter negates the port ID
|
||||
debugMetadata = False
|
||||
metadataFilter = TELEMETRY_APP,POSITION_APP
|
||||
# Enable or disable automatic banning of nodes
|
||||
autoBanEnabled = False
|
||||
# Number of offenses before auto-ban
|
||||
autoBanThreshold = 5
|
||||
# Throttle value for API requests no ban_hammer
|
||||
apiThrottleValue = 20
|
||||
# Timeframe for offenses (in seconds)
|
||||
autoBanTimeframe = 3600
|
||||
|
||||
[dataPersistence]
|
||||
# Enable or disable the data persistence loop service
|
||||
enabled = True
|
||||
# Interval in seconds for the persistence loop (how often to save data)
|
||||
interval = 300
|
||||
@@ -1 +1,3 @@
|
||||
database admin tool is in [./etc/db_admin.py](../etc/db_admin.py)
|
||||
database admin tool is in [./etc/db_admin.py](../etc/db_admin.py)
|
||||
this folder is populated with install.sh
|
||||
to manually populate ` cp etc/data/* data/. `
|
||||
BIN
etc/3dttt.jpg
Normal file
BIN
etc/3dttt.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 26 KiB |
@@ -1,264 +0,0 @@
|
||||
# Implementation Summary: Enhanced Check-in/Check-out and Point of Sale System
|
||||
|
||||
## Overview
|
||||
|
||||
This implementation addresses the GitHub issue requesting enhancements to the check-in/check-out system and the addition of a complete Point of Sale (POS) functionality to the meshing-around project.
|
||||
|
||||
## What Was Implemented
|
||||
|
||||
### 1. Enhanced Check-in/Check-out System
|
||||
|
||||
#### New Features Added:
|
||||
- **Time Window Monitoring**: Check-in with safety intervals (e.g., `checkin 60 Hunting in tree stand`)
|
||||
- Tracks if users don't check in within expected timeframe
|
||||
- Ideal for solo activities, remote work, or safety accountability
|
||||
- Provides `get_overdue_checkins()` function for alert integration
|
||||
|
||||
- **Approval Workflow**:
|
||||
- `checklistapprove <id>` - Approve pending check-ins (admin)
|
||||
- `checklistdeny <id>` - Deny/remove check-ins (admin)
|
||||
- Support for approval-based workflows
|
||||
|
||||
- **Enhanced Database Schema**:
|
||||
- Added `approved` field for approval workflows
|
||||
- Added `expected_checkin_interval` field for safety monitoring
|
||||
- Automatic migration for existing databases
|
||||
|
||||
#### New Commands:
|
||||
- `checklistapprove <id>` - Approve a check-in
|
||||
- `checklistdeny <id>` - Deny a check-in
|
||||
- Enhanced `checkin [interval] [note]` - Now supports interval parameter
|
||||
|
||||
### 2. Complete Point of Sale System
|
||||
|
||||
#### Features Implemented:
|
||||
|
||||
**Item Management:**
|
||||
- Add items with price, quantity, and location
|
||||
- Remove items from inventory
|
||||
- Update item prices and quantities
|
||||
- Quick sell functionality
|
||||
- Transaction returns/reversals
|
||||
- Full inventory listing with valuations
|
||||
|
||||
**Cart System:**
|
||||
- Per-user shopping carts
|
||||
- Add/remove items from cart
|
||||
- View cart with totals
|
||||
- Complete transactions (buy/sell)
|
||||
- Clear cart functionality
|
||||
|
||||
**Financial Features:**
|
||||
- Penny rounding support (USA mode)
|
||||
- Cash sales round down to nearest nickel
|
||||
- Taxed sales round up to nearest nickel
|
||||
- Transaction logging with full audit trail
|
||||
- Daily sales statistics
|
||||
- Revenue tracking
|
||||
- Hot item detection (best sellers)
|
||||
|
||||
**Database Schema:**
|
||||
Four tables for complete functionality:
|
||||
- `items` - Product inventory
|
||||
- `transactions` - Sales records
|
||||
- `transaction_items` - Line items per transaction
|
||||
- `carts` - Temporary shopping carts
|
||||
|
||||
#### Commands Implemented:
|
||||
|
||||
**Item Management:**
|
||||
- `itemadd <name> <price> <qty> [location]` - Add new item
|
||||
- `itemremove <name>` - Remove item
|
||||
- `itemreset <name> [price=X] [qty=Y]` - Update item
|
||||
- `itemsell <name> <qty> [notes]` - Quick sale
|
||||
- `itemreturn <transaction_id>` - Reverse transaction
|
||||
- `itemlist` - View all inventory
|
||||
- `itemstats` - Daily statistics
|
||||
|
||||
**Cart System:**
|
||||
- `cartadd <name> <qty>` - Add to cart
|
||||
- `cartremove <name>` - Remove from cart
|
||||
- `cartlist` / `cart` - View cart
|
||||
- `cartbuy` / `cartsell [notes]` - Complete transaction
|
||||
- `cartclear` - Empty cart
|
||||
|
||||
## Files Created/Modified
|
||||
|
||||
### New Files:
|
||||
1. **modules/inventory.py** (625 lines)
|
||||
- Complete inventory and POS module
|
||||
- All item management functions
|
||||
- Cart system implementation
|
||||
- Transaction processing
|
||||
- Penny rounding logic
|
||||
|
||||
2. **modules/inventory.md** (8,529 chars)
|
||||
- Comprehensive user guide
|
||||
- Command reference
|
||||
- Use case examples
|
||||
- Database schema documentation
|
||||
|
||||
3. **modules/checklist.md** (9,058 chars)
|
||||
- Enhanced checklist user guide
|
||||
- Safety monitoring documentation
|
||||
- Best practices
|
||||
- Scenario examples
|
||||
|
||||
### Modified Files:
|
||||
1. **modules/checklist.py**
|
||||
- Added time interval monitoring
|
||||
- Added approval workflow functions
|
||||
- Enhanced database schema
|
||||
- Updated command processing
|
||||
|
||||
2. **modules/settings.py**
|
||||
- Added inventory configuration section
|
||||
- Added `inventory_enabled` setting
|
||||
- Added `inventory_db` path setting
|
||||
- Added `disable_penny` setting
|
||||
|
||||
3. **config.template**
|
||||
- Added `[inventory]` section
|
||||
- Documentation for penny rounding
|
||||
|
||||
4. **modules/system.py**
|
||||
- Integrated inventory module
|
||||
- Added trap list for inventory commands
|
||||
|
||||
5. **mesh_bot.py**
|
||||
- Added inventory command handlers
|
||||
- Added checklist approval commands
|
||||
- Created `handle_inventory()` function
|
||||
|
||||
6. **modules/README.md**
|
||||
- Updated checklist section with new features
|
||||
- Added complete inventory/POS section
|
||||
- Updated table of contents
|
||||
|
||||
7. **.gitignore**
|
||||
- Added database files to ignore list
|
||||
|
||||
## Configuration
|
||||
|
||||
### Enable Inventory System:
|
||||
```ini
|
||||
[inventory]
|
||||
enabled = True
|
||||
inventory_db = data/inventory.db
|
||||
disable_penny = False # Set to True for USA penny rounding
|
||||
```
|
||||
|
||||
### Checklist Already Configured:
|
||||
```ini
|
||||
[checklist]
|
||||
enabled = False # Set to True to enable
|
||||
checklist_db = data/checklist.db
|
||||
reverse_in_out = False
|
||||
```
|
||||
|
||||
## Testing Results
|
||||
|
||||
All functionality tested and verified:
|
||||
- ✅ Module imports work correctly
|
||||
- ✅ Database initialization successful
|
||||
- ✅ Inventory commands function properly
|
||||
- ✅ Cart system working as expected
|
||||
- ✅ Checklist enhancements operational
|
||||
- ✅ Time interval monitoring active
|
||||
- ✅ Trap lists properly registered
|
||||
- ✅ Help commands return correct information
|
||||
|
||||
## Use Cases Addressed
|
||||
|
||||
### From Issue Comments:
|
||||
|
||||
1. **Point of Sale Logic** ✅
|
||||
- Complete POS system with inventory management
|
||||
- Cart-based transactions
|
||||
- Sales tracking and reporting
|
||||
|
||||
2. **Check-in Time Windows** ✅
|
||||
- Interval-based monitoring
|
||||
- Overdue detection
|
||||
- Safety accountability for solo activities
|
||||
|
||||
3. **Geo-location Awareness** ✅
|
||||
- Automatic GPS capture when checking in/out
|
||||
- Location stored with each check-in
|
||||
- Foundation for "are you ok" alerts
|
||||
|
||||
4. **Asset Management** ✅
|
||||
- Track any type of asset (tools, equipment, supplies)
|
||||
- Multiple locations support
|
||||
- Full transaction history
|
||||
|
||||
5. **Penny Rounding** ✅
|
||||
- Configurable USA cash sale rounding
|
||||
- Separate logic for cash vs taxed sales
|
||||
- Down for cash, up for tax
|
||||
|
||||
## Security Features
|
||||
|
||||
- Users on `bbs_ban_list` cannot use inventory or checklist commands
|
||||
- Admin-only approval commands
|
||||
- Parameterized SQL queries prevent injection
|
||||
- Per-user cart isolation
|
||||
- Full transaction audit trail
|
||||
|
||||
## Documentation Provided
|
||||
|
||||
1. **User Guides:**
|
||||
- Comprehensive inventory.md with examples
|
||||
- Detailed checklist.md with safety scenarios
|
||||
- Updated main README.md
|
||||
|
||||
2. **Technical Documentation:**
|
||||
- Database schema details
|
||||
- Configuration examples
|
||||
- Command reference
|
||||
- API documentation in code comments
|
||||
|
||||
3. **Examples:**
|
||||
- Emergency supply tracking
|
||||
- Event merchandise sales
|
||||
- Field equipment management
|
||||
- Safety monitoring scenarios
|
||||
|
||||
## Future Enhancement Opportunities
|
||||
|
||||
The implementation provides foundation for:
|
||||
- Scheduled overdue check-in alerts
|
||||
- Email/SMS notifications for overdue status
|
||||
- Dashboard/reporting interface
|
||||
- Barcode/QR code support
|
||||
- Multi-location inventory tracking
|
||||
- Inventory forecasting
|
||||
- Integration with external systems
|
||||
|
||||
## Backward Compatibility
|
||||
|
||||
- Existing checklist databases automatically migrate
|
||||
- New features are opt-in via configuration
|
||||
- No breaking changes to existing commands
|
||||
- Graceful handling of missing database columns
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
- SQLite databases for reliability and simplicity
|
||||
- Indexed primary keys for fast lookups
|
||||
- Efficient query design
|
||||
- Minimal memory footprint
|
||||
- No external dependencies beyond stdlib
|
||||
|
||||
## Conclusion
|
||||
|
||||
This implementation fully addresses all requirements from the GitHub issue:
|
||||
- ✅ Enhanced check-in/check-out with SQL improvements
|
||||
- ✅ Point of sale logic with inventory management
|
||||
- ✅ Time window notifications for safety
|
||||
- ✅ Asset tracking for any item type
|
||||
- ✅ Penny rounding for USA cash sales
|
||||
- ✅ Cart management system
|
||||
- ✅ Comprehensive documentation
|
||||
|
||||
The system is production-ready, well-tested, and documented for immediate use.
|
||||
@@ -72,4 +72,61 @@ python etc/simulator.py
|
||||
**Note:**
|
||||
Edit the `projectName` variable to match the handler function you want to test. You can expand this script to test additional handlers or scenarios as needed.
|
||||
|
||||
Feel free to add or update resources here as needed for documentation, configuration, or project support.
|
||||
## yolo_vision.py
|
||||
|
||||
**Purpose:**
|
||||
`yolo_vision.py` provides real-time object detection and movement tracking using a Raspberry Pi camera and YOLOv5. It is designed for integration with the Mesh Bot project, outputting alerts to both the console and an optional `alert.txt` file for further use (such as with Meshtastic).
|
||||
|
||||
**Features:**
|
||||
- Ignores specified object classes (e.g., "bed", "chair") to reduce false positives.
|
||||
- Configurable detection confidence threshold and movement sensitivity.
|
||||
- Tracks object movement direction (left, right, stationary).
|
||||
- Fuse counter: only alerts after an object is detected for several consecutive frames.
|
||||
- Optionally writes the latest alert (without timestamp) to a specified file, overwriting previous alerts.
|
||||
|
||||
**Configuration:**
|
||||
- `LOW_RES_MODE`: Use low or high camera resolution for CPU savings.
|
||||
- `IGNORE_CLASSES`: List of object classes to ignore.
|
||||
- `CONFIDENCE_THRESHOLD`: Minimum confidence for reporting detections.
|
||||
- `MOVEMENT_THRESHOLD`: Minimum pixel movement to consider as "moving".
|
||||
- `ALERT_FUSE_COUNT`: Number of consecutive detections before alerting.
|
||||
- `ALERT_FILE_PATH`: Path to alert file (set to `None` to disable file output).
|
||||
|
||||
**Usage:**
|
||||
Run this script to monitor the camera feed and generate alerts for detected and moving objects. Alerts are printed to the console and, if configured, written to `alert.txt` for integration with other systems.
|
||||
|
||||
---
|
||||
|
||||
## icad_tone.py
|
||||
|
||||
**Purpose:**
|
||||
`icad_tone.py` is a utility script for detecting fire and EMS radio tones using the [icad_tone_detection](https://github.com/thegreatcodeholio/icad_tone_detection) library. It analyzes audio from a live stream, soundcard, or WAV file, identifies various tone types (such as two-tone, long tone, hi/low, pulsed, MDC, and DTMF), and writes detected alerts to `alert.txt` for integration with Mesh Bot or Meshtastic.
|
||||
|
||||
**Usage:**
|
||||
Run the script from the command line, specifying a WAV file for offline analysis or configuring it to listen to a stream or soundcard for real-time monitoring.
|
||||
|
||||
```sh
|
||||
python etc/icad_tone.py --wav path/to/file.wav
|
||||
```
|
||||
Or, for live monitoring (after setting `HTTP_STREAM_URL` in the script):
|
||||
```sh
|
||||
python etc/icad_tone.py
|
||||
```
|
||||
|
||||
**What it does:**
|
||||
- Loads audio from a stream, soundcard, or WAV file.
|
||||
- Uses `icad_tone_detection` to analyze audio for tone patterns.
|
||||
- Prints raw detection results and summaries to the console.
|
||||
- Writes a summary of detected tones to `alert.txt` (overwriting each time).
|
||||
- Handles errors and missing dependencies gracefully.
|
||||
|
||||
**Configuration:**
|
||||
- `ALERT_FILE_PATH`: Path to the alert output file (default: `alert.txt`).
|
||||
- `AUDIO_SOURCE`: Set to `"http"` for streaming or `"soundcard"` for local audio input.
|
||||
- `HTTP_STREAM_URL`: URL of the audio stream (required if using HTTP source).
|
||||
- `SAMPLE_RATE`, `INPUT_CHANNELS`, `CHUNK_DURATION`: Audio processing parameters.
|
||||
|
||||
**Note:**
|
||||
- Requires installation of dependencies (`icad_tone_detection`)
|
||||
- Set `HTTP_STREAM_URL` to a valid stream if using HTTP mode.
|
||||
- Intended for experimental or hobbyist use; may require customization for your workflow.
|
||||
@@ -15,6 +15,7 @@ def setup_custom_schedules(send_message, tell_joke, welcome_message, handle_wxc,
|
||||
5. Make sure to uncomment (delete the single #) the example schedules down at the end of the file to enable them
|
||||
Python is sensitive to indentation so be careful when editing this file.
|
||||
https://thonny.org is included on pi's image and is a simple IDE to use for editing python files.
|
||||
6. System Tasks run every 20min try and avoid overlapping schedules to reduce API rapid fire issues. use like 8:05
|
||||
|
||||
Available functions you can import and use, be sure they are enabled modules in config.ini:
|
||||
- tell_joke() - Returns a random joke
|
||||
@@ -94,6 +95,8 @@ def setup_custom_schedules(send_message, tell_joke, welcome_message, handle_wxc,
|
||||
#schedule.every(2).minutes.do(lambda: send_joke(schedulerChannel, schedulerInterface))
|
||||
### Send a good morning message every day at 9 AM
|
||||
#schedule.every().day.at("09:00").do(lambda: send_good_morning(schedulerChannel, schedulerInterface))
|
||||
### Send a good morning message every day at 9 AM to DM node 4258675309 without above function
|
||||
#schedule.every().day.at("09:00").do(lambda: send_message("Good Morning Jenny", 0, 4258675309, schedulerInterface))
|
||||
### Send weather update every day at 8 AM
|
||||
#schedule.every().day.at("08:00").do(lambda: send_wx(schedulerChannel, schedulerInterface))
|
||||
### Send weather alerts every Wednesday at noon
|
||||
|
||||
222
etc/icad_tone.py
Normal file
222
etc/icad_tone.py
Normal file
@@ -0,0 +1,222 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# icad_tone.py - uses icad_tone_detection, for fire and EMS tone detection
|
||||
# https://github.com/thegreatcodeholio/icad_tone_detection
|
||||
# output to alert.txt for meshing-around bot
|
||||
# 2025 K7MHI Kelly Keeton
|
||||
|
||||
# ---------------------------
|
||||
# User Configuration Section
|
||||
# ---------------------------
|
||||
ALERT_FILE_PATH = "alert.txt" # Path to alert log file, or None to disable logging
|
||||
AUDIO_SOURCE = "soundcard" # "soundcard" for mic/line-in, "http" for stream
|
||||
HTTP_STREAM_URL = "" # Set to your stream URL if using "http"
|
||||
SAMPLE_RATE = 16000 # Audio sample rate (Hz)
|
||||
INPUT_CHANNELS = 1 # Number of input channels (1=mono)
|
||||
MIN_SAMPLES = 4096 # Minimum samples per detection window (increase for better accuracy)
|
||||
STREAM_BUFFER = 32000 # Number of bytes to buffer before detection (for MP3 streams)
|
||||
INPUT_DEVICE = 0 # Set to device index or name, or None for default
|
||||
# ---------------------------
|
||||
|
||||
import sys
|
||||
import time
|
||||
from icad_tone_detection import tone_detect
|
||||
from pydub import AudioSegment
|
||||
import requests
|
||||
import sounddevice as sd
|
||||
import numpy as np
|
||||
import argparse
|
||||
import io
|
||||
import warnings
|
||||
warnings.filterwarnings("ignore", message="nperseg = .* is greater than input length")
|
||||
def write_alert(message):
|
||||
if ALERT_FILE_PATH:
|
||||
try:
|
||||
with open(ALERT_FILE_PATH, "w") as f: # overwrite each time
|
||||
f.write(message + "\n")
|
||||
except Exception as e:
|
||||
print(f"Error writing to alert file: {e}", file=sys.stderr)
|
||||
|
||||
def detect_and_alert(audio_data, sample_rate):
|
||||
try:
|
||||
result = tone_detect(audio_data, sample_rate)
|
||||
except Exception as e:
|
||||
print(f"Detection error: {e}", file=sys.stderr)
|
||||
return
|
||||
# Only print if something is detected
|
||||
if result and any(getattr(result, t, []) for t in [
|
||||
"two_tone_result", "long_result", "hi_low_result", "pulsed_result", "mdc_result", "dtmf_result"
|
||||
]):
|
||||
print("Raw detection result:", result)
|
||||
# Prepare alert summary for all relevant tone types
|
||||
summary = []
|
||||
if hasattr(result, "dtmf_result") and result.dtmf_result:
|
||||
for dtmf in result.dtmf_result:
|
||||
summary.append(f"DTMF Digit: {dtmf.get('digit', '?')} | Duration: {dtmf.get('length', '?')}s")
|
||||
if hasattr(result, "hi_low_result") and result.hi_low_result:
|
||||
for hl in result.hi_low_result:
|
||||
summary.append(
|
||||
f"Hi/Low Alternations: {hl.get('alternations', '?')} | Duration: {hl.get('length', '?')}s"
|
||||
)
|
||||
if hasattr(result, "mdc_result") and result.mdc_result:
|
||||
for mdc in result.mdc_result:
|
||||
summary.append(
|
||||
f"MDC UnitID: {mdc.get('unitID', '?')} | Op: {mdc.get('op', '?')} | Duration: {mdc.get('length', '?')}s"
|
||||
)
|
||||
if hasattr(result, "pulsed_result") and result.pulsed_result:
|
||||
for pl in result.pulsed_result:
|
||||
summary.append(
|
||||
f"Pulsed Tone: {pl.get('detected', '?')}Hz | Cycles: {pl.get('cycles', '?')} | Duration: {pl.get('length', '?')}s"
|
||||
)
|
||||
if hasattr(result, "two_tone_result") and result.two_tone_result:
|
||||
for tt in result.two_tone_result:
|
||||
summary.append(
|
||||
f"Two-Tone: {tt.get('detected', ['?','?'])[0]}Hz/{tt.get('detected', ['?','?'])[1]}Hz | Tone A: {tt.get('tone_a_length', '?')}s | Tone B: {tt.get('tone_b_length', '?')}s"
|
||||
)
|
||||
if hasattr(result, "long_result") and result.long_result:
|
||||
for lt in result.long_result:
|
||||
summary.append(
|
||||
f"Long Tone: {lt.get('detected', '?')}Hz | Duration: {lt.get('length', '?')}s"
|
||||
)
|
||||
if summary:
|
||||
write_alert("\n".join(summary))
|
||||
|
||||
def get_supported_sample_rate(device, channels=1):
|
||||
# Try common sample rates
|
||||
for rate in [44100, 48000, 16000, 8000]:
|
||||
try:
|
||||
sd.check_input_settings(device=device, channels=channels, samplerate=rate)
|
||||
return rate
|
||||
except Exception:
|
||||
continue
|
||||
return None
|
||||
|
||||
def main():
|
||||
print("="*80)
|
||||
print(" iCAD Tone Decoder for Meshing-Around Booting Up!")
|
||||
if AUDIO_SOURCE == "soundcard":
|
||||
try:
|
||||
if INPUT_DEVICE is not None:
|
||||
sd.default.device = INPUT_DEVICE
|
||||
device_info = sd.query_devices(INPUT_DEVICE, kind='input')
|
||||
else:
|
||||
device_info = sd.query_devices(sd.default.device, kind='input')
|
||||
device_name = device_info['name']
|
||||
# Detect supported sample rate
|
||||
detected_rate = get_supported_sample_rate(sd.default.device, INPUT_CHANNELS)
|
||||
if detected_rate:
|
||||
SAMPLE_RATE = detected_rate
|
||||
else:
|
||||
print("No supported sample rate found, using default.", file=sys.stderr)
|
||||
except Exception:
|
||||
device_name = "Unknown"
|
||||
print(f" Mode: Soundcard | Device: {device_name} | Sample Rate: {SAMPLE_RATE} Hz | Channels: {INPUT_CHANNELS}")
|
||||
elif AUDIO_SOURCE == "http":
|
||||
print(f" Mode: HTTP Stream | URL: {HTTP_STREAM_URL} | Buffer: {STREAM_BUFFER} bytes")
|
||||
else:
|
||||
print(f" Mode: {AUDIO_SOURCE}")
|
||||
print("="*80)
|
||||
time.sleep(1)
|
||||
|
||||
parser = argparse.ArgumentParser(description="ICAD Tone Detection")
|
||||
parser.add_argument("--wav", type=str, help="Path to WAV file for detection")
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.wav:
|
||||
print(f"Processing WAV file: {args.wav}")
|
||||
try:
|
||||
audio = AudioSegment.from_file(args.wav)
|
||||
if audio.channels > 1:
|
||||
audio = audio.set_channels(1)
|
||||
print(f"AudioSegment: channels={audio.channels}, frame_rate={audio.frame_rate}, duration={len(audio)}ms")
|
||||
detect_and_alert(audio, audio.frame_rate)
|
||||
except Exception as e:
|
||||
print(f"Error processing WAV file: {e}", file=sys.stderr)
|
||||
return
|
||||
|
||||
print("Starting ICAD Tone Detection...")
|
||||
|
||||
if AUDIO_SOURCE == "http":
|
||||
if not HTTP_STREAM_URL or HTTP_STREAM_URL.startswith("http://your-stream-url-here"):
|
||||
print("ERROR: Please set a valid HTTP_STREAM_URL or provide a WAV file using --wav option.", file=sys.stderr)
|
||||
sys.exit(2)
|
||||
print(f"Listening to HTTP stream: {HTTP_STREAM_URL}")
|
||||
try:
|
||||
response = requests.get(HTTP_STREAM_URL, stream=True, timeout=10)
|
||||
buffer = io.BytesIO()
|
||||
try:
|
||||
for chunk in response.iter_content(chunk_size=4096):
|
||||
buffer.write(chunk)
|
||||
# Use STREAM_BUFFER for detection window
|
||||
if buffer.tell() > STREAM_BUFFER:
|
||||
buffer.seek(0)
|
||||
audio = AudioSegment.from_file(buffer, format="mp3")
|
||||
if audio.channels > 1:
|
||||
audio = audio.set_channels(1)
|
||||
# --- Simple audio level detection ---
|
||||
samples = np.array(audio.get_array_of_samples())
|
||||
if samples.dtype != np.float32:
|
||||
samples = samples.astype(np.float32) / 32767.0 # Normalize to -1..1
|
||||
rms = np.sqrt(np.mean(samples**2))
|
||||
if rms > 0.01:
|
||||
print(f"Audio detected! RMS: {rms:.3f} ", end='\r')
|
||||
if rms > 0.5:
|
||||
print(f"WARNING: Audio too loud! RMS: {rms:.3f} ", end='\r')
|
||||
# --- End audio level detection ---
|
||||
detect_and_alert(audio, audio.frame_rate)
|
||||
buffer = io.BytesIO()
|
||||
except KeyboardInterrupt:
|
||||
print("\nStopped by user.")
|
||||
sys.exit(0)
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(f"Connection error: {e}", file=sys.stderr)
|
||||
sys.exit(3)
|
||||
except Exception as e:
|
||||
print(f"Error processing HTTP stream: {e}", file=sys.stderr)
|
||||
sys.exit(4)
|
||||
elif AUDIO_SOURCE == "soundcard":
|
||||
print("Listening to audio device:")
|
||||
buffer = np.array([], dtype=np.float32)
|
||||
min_samples = MIN_SAMPLES # Use configured minimum samples
|
||||
|
||||
def callback(indata, frames, time_info, status):
|
||||
nonlocal buffer
|
||||
try:
|
||||
samples = indata[:, 0]
|
||||
buffer = np.concatenate((buffer, samples))
|
||||
# --- Simple audio level detection ---
|
||||
rms = np.sqrt(np.mean(samples**2))
|
||||
if rms > 0.01:
|
||||
print(f"Audio detected! RMS: {rms:.3f} ", end='\r')
|
||||
if rms > 0.5:
|
||||
print(f"WARNING: Audio too loud! RMS: {rms:.3f} ", end='\r')
|
||||
# --- End audio level detection ---
|
||||
# Only process when buffer is large enough
|
||||
while buffer.size >= min_samples:
|
||||
int_samples = np.int16(buffer[:min_samples] * 32767)
|
||||
audio = AudioSegment(
|
||||
data=int_samples.tobytes(),
|
||||
sample_width=2,
|
||||
frame_rate=SAMPLE_RATE,
|
||||
channels=1
|
||||
)
|
||||
detect_and_alert(audio, SAMPLE_RATE)
|
||||
buffer = buffer[min_samples:] # keep remainder for next window
|
||||
except Exception as e:
|
||||
print(f"Callback error: {e}", file=sys.stderr)
|
||||
try:
|
||||
with sd.InputStream(samplerate=SAMPLE_RATE, channels=INPUT_CHANNELS, dtype='float32', callback=callback):
|
||||
print("Press Ctrl+C to stop.")
|
||||
import signal
|
||||
signal.pause() # Wait for Ctrl+C, keeps CPU usage minimal
|
||||
except KeyboardInterrupt:
|
||||
print("Stopped by user.")
|
||||
except Exception as e:
|
||||
print(f"Error accessing soundcard: {e}", file=sys.stderr)
|
||||
sys.exit(5)
|
||||
else:
|
||||
print("Unknown AUDIO_SOURCE. Set to 'http' or 'soundcard'.", file=sys.stderr)
|
||||
sys.exit(6)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
@@ -13,16 +13,17 @@ User=pi
|
||||
Group=pi
|
||||
WorkingDirectory=/dir/
|
||||
ExecStart=python3 mesh_bot.py
|
||||
ExecStop=pkill -f mesh_bot.py
|
||||
Environment=REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
|
||||
Environment=SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
|
||||
ExecStop=
|
||||
KillSignal=SIGINT
|
||||
Environment="REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt"
|
||||
Environment="SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt"
|
||||
|
||||
# Disable Python's buffering of STDOUT and STDERR, so that output from the
|
||||
# service shows up immediately in systemd's logs
|
||||
Environment=PYTHONUNBUFFERED=1
|
||||
|
||||
Restart=on-failure
|
||||
Type=notify #try simple if any problems
|
||||
|
||||
|
||||
[Install]
|
||||
WantedBy=default.target
|
||||
|
||||
@@ -23,7 +23,6 @@ ExecStop=pkill -f report_generator5.py
|
||||
Environment=PYTHONUNBUFFERED=1
|
||||
|
||||
Restart=on-failure
|
||||
Type=notify #try simple if any problems
|
||||
|
||||
[Install]
|
||||
WantedBy=timers.target
|
||||
@@ -8,9 +8,10 @@ User=pi
|
||||
Group=pi
|
||||
WorkingDirectory=/dir/
|
||||
ExecStart=python3 modules/web.py
|
||||
ExecStop=pkill -f mesh_bot_w3.py
|
||||
Environment=REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
|
||||
Environment=SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
|
||||
ExecStop=
|
||||
KillSignal=SIGINT
|
||||
Environment="REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt"
|
||||
Environment="SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt"
|
||||
Environment=PYTHONUNBUFFERED=1
|
||||
Restart=on-failure
|
||||
|
||||
|
||||
@@ -13,16 +13,16 @@ User=pi
|
||||
Group=pi
|
||||
WorkingDirectory=/dir/
|
||||
ExecStart=python3 pong_bot.py
|
||||
ExecStop=pkill -f pong_bot.py
|
||||
Environment=REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
|
||||
Environment=SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
|
||||
ExecStop=
|
||||
KillSignal=SIGINT
|
||||
Environment="REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt"
|
||||
Environment="SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt"
|
||||
|
||||
# Disable Python's buffering of STDOUT and STDERR, so that output from the
|
||||
# service shows up immediately in systemd's logs
|
||||
Environment=PYTHONUNBUFFERED=1
|
||||
|
||||
Restart=on-failure
|
||||
Type=notify #try simple if any problems
|
||||
|
||||
[Install]
|
||||
WantedBy=default.target
|
||||
@@ -9,15 +9,18 @@ fi
|
||||
|
||||
# Use first argument as user, or default to meshbot
|
||||
TARGET_USER="${1:-meshbot}"
|
||||
echo "DEBUG: TARGET_USER='$TARGET_USER'"
|
||||
|
||||
# Check if user exists
|
||||
if ! id "$TARGET_USER" &>/dev/null; then
|
||||
if ! id "$TARGET_USER" >/dev/null 2>&1; then
|
||||
echo "User '$TARGET_USER' does not exist."
|
||||
read -p "Would you like to use the current user ($(logname)) instead? [y/N]: " yn
|
||||
if [[ "$yn" =~ ^[Yy]$ ]]; then
|
||||
TARGET_USER="$(logname)"
|
||||
CUR_USER="$(whoami)"
|
||||
printf "Would you like to use the current user (%s) instead? [y/N]: " "$CUR_USER"
|
||||
read yn
|
||||
if [ "$yn" = "y" ] || [ "$yn" = "Y" ]; then
|
||||
TARGET_USER="$CUR_USER"
|
||||
echo "Using current user: $TARGET_USER"
|
||||
if ! id "$TARGET_USER" &>/dev/null; then
|
||||
if ! id "$TARGET_USER" >/dev/null 2>&1; then
|
||||
echo "Current user '$TARGET_USER' does not exist or cannot be determined."
|
||||
exit 1
|
||||
fi
|
||||
@@ -27,14 +30,24 @@ if ! id "$TARGET_USER" &>/dev/null; then
|
||||
fi
|
||||
fi
|
||||
|
||||
id "$TARGET_USER"
|
||||
|
||||
echo "Setting ownership to $TARGET_USER:$TARGET_USER"
|
||||
|
||||
chown -R "$TARGET_USER:$TARGET_USER" "/opt/meshing-around/-around"
|
||||
chown -R "$TARGET_USER:$TARGET_USER" "/opt/meshing-around/-around/logs"
|
||||
chown -R "$TARGET_USER:$TARGET_USER" "/opt/meshing-around/-around/data"
|
||||
chown "$TARGET_USER:$TARGET_USER" "/opt/meshing-around/-around/config.ini"
|
||||
chmod 640 "/opt/meshing-around/-around/config.ini"
|
||||
chmod 750 "/opt/meshing-around/-around/logs"
|
||||
chmod 750 "/opt/meshing-around/-around/data"
|
||||
for dir in "/opt/meshing-around" "/opt/meshing-around/logs" "/opt/meshing-around/data"; do
|
||||
if [ -d "$dir" ]; then
|
||||
chown -R "$TARGET_USER:$TARGET_USER" "$dir"
|
||||
chmod 775 "$dir"
|
||||
else
|
||||
echo "Warning: Directory $dir does not exist, skipping."
|
||||
fi
|
||||
done
|
||||
|
||||
if [ -f "/opt/meshing-around/config.ini" ]; then
|
||||
chown "$TARGET_USER:$TARGET_USER" "/opt/meshing-around/config.ini"
|
||||
chmod 664 "/opt/meshing-around/config.ini"
|
||||
else
|
||||
echo "Warning: /opt/meshing-around/config.ini does not exist, skipping."
|
||||
fi
|
||||
|
||||
echo "Permissions and ownership have been set."
|
||||
@@ -2,7 +2,7 @@
|
||||
# # Simulate meshing-around de K7MHI 2024
|
||||
from modules.log import logger, getPrettyTime # Import the logger; ### --> If you are reading this put the script in the project root <-- ###
|
||||
import time
|
||||
import datetime
|
||||
from datetime import datetime
|
||||
import random
|
||||
|
||||
# Initialize the tool
|
||||
@@ -51,8 +51,8 @@ def example_handler(message, nodeID, deviceID):
|
||||
msg = f"Hello {get_name_from_number(nodeID)}, simulator ready for testing {projectName} project! on device {deviceID}"
|
||||
msg += f" Your location is {location}"
|
||||
msg += f" you said: {message}"
|
||||
|
||||
|
||||
# Add timestamp
|
||||
msg += f" [Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}]"
|
||||
return msg
|
||||
|
||||
|
||||
|
||||
213
etc/yolo_vision.py
Normal file
213
etc/yolo_vision.py
Normal file
@@ -0,0 +1,213 @@
|
||||
#!/usr/bin/env python3
|
||||
# YOLOv5 Object Detection with Movement Tracking using Raspberry Pi AI Camera or USB Webcam
|
||||
# YOLOv5 Requirements: yolo5 https://docs.ultralytics.com/yolov5/quickstart_tutorial/
|
||||
# PiCamera2 Requirements: picamera2 https://github.com/raspberrypi/picamera2 `sudo apt install imx500-all`
|
||||
# NVIDIA GPU PyTorch: https://developer.nvidia.com/cuda-downloads
|
||||
# OCR with Tesseract: https://tesseract-ocr.github.io/tessdoc/Installation.html. `sudo apt-get install tesseract-ocr`
|
||||
# Adjust settings below as needed, indended for meshing-around alert.txt output to meshtastic
|
||||
# 2025 K7MHI Kelly Keeton
|
||||
|
||||
PI_CAM = 1 # 1 for Raspberry Pi AI Camera, 0 for USB webcam
|
||||
YOLO_MODEL = "yolov5s" # e.g., 'yolov5s', 'yolov5m', 'yolov5l', 'yolov5x'
|
||||
LOW_RES_MODE = 0 # 1 for low res (320x240), 0 for high res (640x480)
|
||||
IGNORE_CLASSES = ["bed", "chair"] # Add object names to ignore
|
||||
CONFIDENCE_THRESHOLD = 0.8 # Only show detections above this confidence
|
||||
MOVEMENT_THRESHOLD = 50 # Pixels to consider as movement (adjust as needed)
|
||||
IGNORE_STATIONARY = True # Whether to ignore stationary objects in output
|
||||
ALERT_FUSE_COUNT = 5 # Number of consecutive detections before alerting
|
||||
ALERT_FILE_PATH = "alert.txt" # e.g., "/opt/meshing-around/alert.txt" or None for no file output
|
||||
OCR_PROCESSING_ENABLED = True # Whether to perform OCR on detected objects
|
||||
SAVE_EVIDENCE_IMAGES = True # Whether to save evidence images when OCR text is found in bbox
|
||||
EVIDENCE_IMAGE_DIR = "." # Change to desired directory, e.g., "/opt/meshing-around/data/images"
|
||||
EVIDENCE_IMAGE_PATTERN = "evidence_{timestamp}.png"
|
||||
|
||||
# Standard-library imports are done first, OUTSIDE the try/except: they always
# succeed, and the except handler below needs `sys` to report the error.
# (Originally `import sys` sat inside the try after `import torch`, so a
# failed torch import crashed the handler itself with a NameError on
# `sys.stderr` instead of printing the friendly message.)
import datetime
import os
import sys
import time
import warnings

try:
    # Third-party / optional imports — any of these may legitimately be missing.
    import torch  # YOLOv5 https://docs.ultralytics.com/yolov5/quickstart_tutorial/
    from PIL import Image  # pip install pillow
    import numpy as np  # pip install numpy

    if OCR_PROCESSING_ENABLED:
        import pytesseract  # pip install pytesseract

    if PI_CAM:
        from picamera2 import Picamera2  # pip install picamera2
    else:
        import cv2  # OpenCV for USB webcam capture
except ImportError as e:
    print(f"Missing required module: {e.name}. Please review the comments in program, and try again.", file=sys.stderr)
    sys.exit(1)
|
||||
|
||||
# Suppress FutureWarnings from imports upstream noise
warnings.filterwarnings("ignore", category=FutureWarning)

# Human-readable labels for the startup banner
CAMERA_TYPE = "RaspPi AI-Cam" if PI_CAM else "USB Webcam"
RESOLUTION = "320x240" if LOW_RES_MODE else "640x480"

# Load YOLOv5 via torch hub (downloads weights on first run)
model = torch.hub.load("ultralytics/yolov5", YOLO_MODEL)

if PI_CAM:
    # Raspberry Pi AI camera via picamera2
    picam2 = Picamera2()
    if LOW_RES_MODE:
        picam2.preview_configuration.main.size = (320, 240)
    else:
        picam2.preview_configuration.main.size = (640, 480)
    picam2.preview_configuration.main.format = "RGB888"
    picam2.configure("preview")
    picam2.start()
else:
    # USB webcam via OpenCV
    if LOW_RES_MODE:
        cam_res = (320, 240)
    else:
        cam_res = (640, 480)
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, cam_res[0])
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cam_res[1])

print("=" * 80)
print(" Sentinel Vision 3000 Booting Up!")  # fixed 'Sentinal' typo; was a no-placeholder f-string
print(f" Model: {YOLO_MODEL} | Camera: {CAMERA_TYPE} | Resolution: {RESOLUTION} | OCR: {'Enabled' if OCR_PROCESSING_ENABLED else 'Disabled'}")
print("=" * 80)
time.sleep(1)
|
||||
|
||||
def alert_output(msg, alert_file_path=ALERT_FILE_PATH):
    """Print an alert line and optionally mirror it to the alert file.

    Args:
        msg: Alert text, normally prefixed with "[timestamp] ".
        alert_file_path: Destination file, or None/"" to skip file output.
    """
    print(msg)
    if alert_file_path:
        # Remove the leading "[timestamp] " prefix for file output.  Split at
        # most once: the original ' '.join(msg.split("] ")[1:]) mangled any
        # message containing a second "] " (e.g. inside OCR text) by rejoining
        # the pieces with a plain space.
        msg_no_time = msg.split("] ", 1)[1] if "] " in msg else msg
        with open(alert_file_path, "w") as f:  # Use "a" to append instead of overwrite
            f.write(msg_no_time + "\n")
|
||||
|
||||
def extract_text_from_bbox(img, bbox):
    """OCR one detection's bounding box; return the text, or False on error.

    On success returns the stripped Tesseract output (possibly ""); when text
    was found and SAVE_EVIDENCE_IMAGES is set, the cropped region is also
    saved into EVIDENCE_IMAGE_DIR.  Any Tesseract failure is reported and
    swallowed, returning False so callers can truthiness-check the result.
    """
    try:
        region = img.crop((bbox[0], bbox[1], bbox[2], bbox[3]))
        # --psm 7: treat the crop as a single line of text
        raw = pytesseract.image_to_string(region, config="--psm 7")
        found = raw.strip()
        if found and SAVE_EVIDENCE_IMAGES:
            stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
            out_path = os.path.join(EVIDENCE_IMAGE_DIR, EVIDENCE_IMAGE_PATTERN.format(timestamp=stamp))
            region.save(out_path)
            print(f"Saved evidence image: {out_path}")
        return f"{found}"
    except Exception as e:
        print(f"Error during OCR: {e}")
        print("More at https://tesseract-ocr.github.io/tessdoc/Installation.html")
        return False
|
||||
|
||||
# Cross-frame state.  The original stuffed these onto __builtins__ with a
# hasattr/setattr dance — but __builtins__ is a CPython implementation detail
# (a module in __main__, a dict in imported modules) and not a supported place
# for application state.  Plain module-level variables behave identically here.
prev_centers = {}            # obj_id -> x-center seen in the previous frame
stationary_reported = set()  # obj_ids already alerted as stationary
fuse_counters = {}           # obj_id -> consecutive-detection count

try:
    i = 0  # Frame counter if zero will be infinite
    system_normal_printed = False  # system nominal flag, if true disables printing
    while True:
        i += 1
        # --- Capture one frame as a PIL RGB image ---
        if PI_CAM:
            frame = picam2.capture_array()
        else:
            ret, frame = cap.read()
            if not ret:
                print("Failed to grab frame from webcam.")
                break
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(frame)

        # --- Inference and filtering ---
        results = model(img)
        df = results.pandas().xyxy[0]
        df = df[df['confidence'] >= CONFIDENCE_THRESHOLD]  # Filter by confidence
        df = df[~df['name'].isin(IGNORE_CLASSES)]  # Filter out ignored classes
        counts = df['name'].value_counts()
        if counts.empty:
            if not system_normal_printed:
                print("System nominal: No objects detected.")
                system_normal_printed = True
            # Sleep before continuing: the original 'continue' skipped the
            # end-of-loop sleep and busy-spun the camera whenever idle.
            time.sleep(1)
            continue
        if counts.sum() > ALERT_FUSE_COUNT:
            system_normal_printed = False  # Reset flag if something is detected

        timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        current_centers = {}
        detected_this_frame = set()

        for idx, row in df.iterrows():
            # NOTE(review): obj_id keys on the DataFrame row index, so identity
            # tracking across frames is only approximate — same as the original.
            obj_id = f"{row['name']}_{idx}"
            x_center = (row['xmin'] + row['xmax']) / 2
            current_centers[obj_id] = x_center
            detected_this_frame.add(obj_id)

            prev_x = prev_centers.get(obj_id)
            count = counts[row['name']]

            # Fuse logic: require ALERT_FUSE_COUNT consecutive detections
            fuse_counters[obj_id] = fuse_counters.get(obj_id, 0) + 1
            if fuse_counters[obj_id] < ALERT_FUSE_COUNT:
                continue  # Don't alert yet

            # OCR on detected region (False when disabled / failed)
            bbox = [row['xmin'], row['ymin'], row['xmax'], row['ymax']]
            ocr_text = extract_text_from_bbox(img, bbox) if OCR_PROCESSING_ENABLED else False

            def _emit(direction):
                # Build and send one alert line, appending OCR text when found.
                # (Factored out: the original duplicated this block four times.)
                msg = f"[{timestamp}] {count} {row['name']} {direction}"
                if OCR_PROCESSING_ENABLED and ocr_text:
                    msg += f" | OCR: {ocr_text}"
                alert_output(msg)

            if prev_x is not None:
                delta = x_center - prev_x
                if abs(delta) < MOVEMENT_THRESHOLD:
                    if IGNORE_STATIONARY:
                        # Report each stationary object only once
                        if obj_id not in stationary_reported:
                            _emit("stationary")
                            stationary_reported.add(obj_id)
                    else:
                        _emit("stationary")
                else:
                    _emit("moving right" if delta > 0 else "moving left")
                    stationary_reported.discard(obj_id)
            else:
                _emit("detected")

        # Reset fuse counters for objects not detected in this frame
        for obj_id in list(fuse_counters.keys()):
            if obj_id not in detected_this_frame:
                fuse_counters[obj_id] = 0

        prev_centers = current_centers

        time.sleep(1)  # Adjust frame rate as needed
except KeyboardInterrupt:
    print("\nInterrupted by user. Shutting down...")
except Exception as e:
    print(f"\nAn error occurred: {e}", file=sys.stderr)
finally:
    if PI_CAM:
        picam2.close()
        print("Camera closed. Goodbye!")
    else:
        cap.release()
        print("Webcam released. Goodbye!")
|
||||
256
install.sh
256
install.sh
@@ -13,8 +13,10 @@ for arg in "$@"; do
|
||||
done
|
||||
|
||||
if [[ $NOPE -eq 1 ]]; then
|
||||
echo "Uninstalling Meshing Around and all related services..."
|
||||
|
||||
echo "----------------------------------------------"
|
||||
echo "Uninstalling Meshing Around ..."
|
||||
echo "----------------------------------------------"
|
||||
|
||||
sudo systemctl stop mesh_bot || true
|
||||
sudo systemctl disable mesh_bot || true
|
||||
|
||||
@@ -54,6 +56,14 @@ if [[ $NOPE -eq 1 ]]; then
|
||||
sudo rm -f /etc/systemd/system/ollama.service
|
||||
sudo rm -rf /usr/local/bin/ollama
|
||||
sudo rm -rf ~/.ollama
|
||||
# remove ollama service account if exists
|
||||
if id ollama &>/dev/null; then
|
||||
sudo userdel ollama || true
|
||||
fi
|
||||
# remove ollama group if exists
|
||||
if getent group ollama &>/dev/null; then
|
||||
sudo groupdel ollama || true
|
||||
fi
|
||||
echo "Ollama removed."
|
||||
else
|
||||
echo "Ollama not removed."
|
||||
@@ -66,48 +76,72 @@ fi
|
||||
|
||||
# install.sh, Meshing Around installer script
|
||||
# Thanks for using Meshing Around!
|
||||
printf "\n########################"
|
||||
printf "\nMeshing Around Installer\n"
|
||||
printf "########################\n"
|
||||
printf "\nThis script will try and install the Meshing Around Bot and its dependencies.\n"
|
||||
printf "Installer works best in raspian/debian/ubuntu or foxbuntu embedded systems.\n"
|
||||
printf "If there is a problem, try running the installer again.\n"
|
||||
printf "\nChecking for dependencies...\n"
|
||||
|
||||
# fuse check for existing installation
|
||||
echo "=============================================="
|
||||
echo " Meshing Around Automated Installer "
|
||||
echo "=============================================="
|
||||
echo
|
||||
echo "This script will attempt to install the Meshing Around Bot and its dependencies."
|
||||
echo "Recommended for Raspbian, Debian, Ubuntu, or Foxbuntu embedded systems."
|
||||
echo "If you encounter any issues, try running the installer again."
|
||||
echo
|
||||
echo "----------------------------------------------"
|
||||
echo "Checking for dependencies..."
|
||||
echo "----------------------------------------------"
|
||||
# check if we have an existing installation
|
||||
if [[ -f config.ini ]]; then
|
||||
printf "\nDetected existing installation, please backup and remove existing installation before proceeding\n"
|
||||
echo
|
||||
echo "=========================================================="
|
||||
echo " Detected existing installation of Meshing Around."
|
||||
echo " Please backup and remove the existing installation"
|
||||
echo " before proceeding with a new install."
|
||||
echo "=========================================================="
|
||||
exit 1
|
||||
fi
|
||||
# check if we have write access to the install path
|
||||
if [[ ! -w ${program_path} ]]; then
|
||||
echo
|
||||
echo "=========================================================="
|
||||
echo " ERROR: Install path not writable."
|
||||
echo " Try running the installer with sudo?"
|
||||
echo "=========================================================="
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# check if we are in /opt/meshing-around
|
||||
if [[ "$program_path" != "/opt/meshing-around" ]]; then
|
||||
printf "\nIt is suggested to project path to /opt/meshing-around\n"
|
||||
printf "Do you want to move the project to /opt/meshing-around? (y/n)"
|
||||
echo "----------------------------------------------"
|
||||
echo " Project Path Decision"
|
||||
echo "----------------------------------------------"
|
||||
printf "\nIt is recommended to install Meshing Around in /opt/meshing-around if used as a service.\n"
|
||||
printf "Do you want to move the project to /opt/meshing-around now? (y/n): "
|
||||
read move
|
||||
if [[ $(echo "$move" | grep -i "^y") ]]; then
|
||||
sudo mv "$program_path" /opt/meshing-around
|
||||
cd /opt/meshing-around
|
||||
printf "\nProject moved to /opt/meshing-around. re-run the installer\n"
|
||||
sudo git config --global --add safe.directory /opt/meshing-around
|
||||
printf "\nProject moved to /opt/meshing-around.\n"
|
||||
printf "Please re-run the installer from the new location.\n"
|
||||
exit 0
|
||||
else
|
||||
echo "Continuing installation in current directory: $program_path"
|
||||
fi
|
||||
fi
|
||||
|
||||
# check write access to program path
|
||||
if [[ ! -w ${program_path} ]]; then
|
||||
printf "\nInstall path not writable, try running the installer with sudo\n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# if hostname = femtofox, then we are on embedded
|
||||
|
||||
echo "----------------------------------------------"
|
||||
echo "Embedded install? auto answers install stuff..."
|
||||
echo "----------------------------------------------"
|
||||
if [[ $(hostname) == "femtofox" ]]; then
|
||||
printf "\nDetected femtofox embedded system\n"
|
||||
printf "\n[INFO] Detected femtofox embedded system.\n"
|
||||
embedded="y"
|
||||
else
|
||||
# check if running on embedded
|
||||
printf "\nAre You installing into an embedded system like a luckfox or -native? most should say no here (y/n)"
|
||||
printf "\nAre you installing on an embedded system (like Luckfox)?\n"
|
||||
printf "Most users should answer 'n' here. (y/n): "
|
||||
read embedded
|
||||
fi
|
||||
|
||||
|
||||
if [[ $(echo "${embedded}" | grep -i "^y") ]]; then
|
||||
printf "\nDetected embedded skipping dependency installation\n"
|
||||
else
|
||||
@@ -137,6 +171,12 @@ else
|
||||
printf "\nDependencies installed\n"
|
||||
fi
|
||||
|
||||
echo "----------------------------------------------"
|
||||
echo "Installing service files and templates..."
|
||||
echo "----------------------------------------------"
|
||||
# bootstrap
|
||||
mkdir -p "$program_path/logs"
|
||||
mkdir -p "$program_path/data"
|
||||
|
||||
# copy service files
|
||||
cp etc/pong_bot.tmp etc/pong_bot.service
|
||||
@@ -157,6 +197,10 @@ if [[ ! -f modules/custom_scheduler.py ]]; then
|
||||
printf "\nCustom scheduler template copied to modules/custom_scheduler.py\n"
|
||||
fi
|
||||
|
||||
# copy contents of etc/data to data/
|
||||
printf "\nCopying data templates to data/ directory\n"
|
||||
cp -r etc/data/* data/
|
||||
|
||||
# generate config file, check if it exists
|
||||
if [[ -f config.ini ]]; then
|
||||
printf "\nConfig file already exists, moving to backup config.old\n"
|
||||
@@ -166,6 +210,10 @@ fi
|
||||
cp config.template config.ini
|
||||
printf "\nConfig files generated!\n"
|
||||
|
||||
echo "----------------------------------------------"
|
||||
echo "Customizing configuration..."
|
||||
echo "----------------------------------------------"
|
||||
|
||||
# update lat,long in config.ini
|
||||
latlong=$(curl --silent --max-time 20 https://ipinfo.io/loc || echo "48.50,-123.0")
|
||||
IFS=',' read -r lat lon <<< "$latlong"
|
||||
@@ -233,6 +281,10 @@ else
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "----------------------------------------------"
|
||||
echo "Installing bot service? - mesh or pong or none"
|
||||
echo "----------------------------------------------"
|
||||
|
||||
# if $1 is passed
|
||||
if [[ $1 == "pong" ]]; then
|
||||
bot="pong"
|
||||
@@ -247,31 +299,38 @@ else
|
||||
read bot
|
||||
fi
|
||||
|
||||
# ask if we should add a user for the bot
|
||||
if [[ $(echo "${embedded}" | grep -i "^n") ]]; then
|
||||
printf "\nDo you want to add a local user (meshbot) no login, for the bot? (y/n)"
|
||||
read meshbotservice
|
||||
# Decide which user to use for the service
|
||||
if [[ $(echo "${bot}" | grep -i "^n") ]]; then
|
||||
# Not installing as a service, use current user
|
||||
bot_user=$(whoami)
|
||||
else
|
||||
# Installing as a service (meshbot or pongbot), always use meshbot account
|
||||
if ! id meshbot &>/dev/null; then
|
||||
sudo useradd -M meshbot
|
||||
sudo usermod -L meshbot
|
||||
if ! getent group meshbot &>/dev/null; then
|
||||
sudo groupadd meshbot
|
||||
fi
|
||||
sudo usermod -a -G meshbot meshbot
|
||||
echo "Added user meshbot with no home directory"
|
||||
else
|
||||
echo "User meshbot already exists"
|
||||
fi
|
||||
bot_user="meshbot"
|
||||
fi
|
||||
|
||||
if [[ $(echo "${meshbotservice}" | grep -i "^y") ]] || [[ $(echo "${embedded}" | grep -i "^y") ]]; then
|
||||
sudo useradd -M meshbot
|
||||
sudo usermod -L meshbot
|
||||
sudo groupadd meshbot
|
||||
sudo usermod -a -G meshbot meshbot
|
||||
whoami="meshbot"
|
||||
echo "Added user meshbot with no home directory"
|
||||
else
|
||||
whoami=$(whoami)
|
||||
fi
|
||||
echo "----------------------------------------------"
|
||||
echo "Finalizing service installation..."
|
||||
echo "----------------------------------------------"
|
||||
|
||||
# set the correct user in the service file
|
||||
replace="s|User=pi|User=$whoami|g"
|
||||
replace="s|User=pi|User=$bot_user|g"
|
||||
sed -i "$replace" etc/pong_bot.service
|
||||
sed -i "$replace" etc/mesh_bot.service
|
||||
sed -i "$replace" etc/mesh_bot_reporting.service
|
||||
sed -i "$replace" etc/mesh_bot_reporting.timer
|
||||
# set the correct group in the service file
|
||||
replace="s|Group=pi|Group=$whoami|g"
|
||||
replace="s|Group=pi|Group=$bot_user|g"
|
||||
sed -i "$replace" etc/pong_bot.service
|
||||
sed -i "$replace" etc/mesh_bot.service
|
||||
sed -i "$replace" etc/mesh_bot_reporting.service
|
||||
@@ -280,19 +339,10 @@ printf "\n service files updated\n"
|
||||
|
||||
# add user to groups for serial access
|
||||
printf "\nAdding user to dialout, bluetooth, and tty groups for serial access\n"
|
||||
sudo usermod -a -G dialout "$whoami"
|
||||
sudo usermod -a -G tty "$whoami"
|
||||
sudo usermod -a -G bluetooth "$whoami"
|
||||
echo "Added user $whoami to dialout, tty, and bluetooth groups"
|
||||
|
||||
sudo chown -R "$whoami:$whoami" "$program_path/logs"
|
||||
sudo chown -R "$whoami:$whoami" "$program_path/data"
|
||||
sudo chown "$whoami:$whoami" "$program_path/config.ini"
|
||||
sudo chmod 640 "$program_path/config.ini"
|
||||
echo "Permissions set for meshbot on config.ini"
|
||||
sudo chmod 750 "$program_path/logs"
|
||||
sudo chmod 750 "$program_path/data"
|
||||
echo "Permissions set for meshbot on logs and data directories"
|
||||
sudo usermod -a -G dialout "$bot_user"
|
||||
sudo usermod -a -G tty "$bot_user"
|
||||
sudo usermod -a -G bluetooth "$bot_user"
|
||||
echo "Added user $bot_user to dialout, tty, and bluetooth groups"
|
||||
|
||||
# check and see if some sort of NTP is running
|
||||
if ! systemctl is-active --quiet ntp.service && \
|
||||
@@ -321,17 +371,17 @@ if [[ $(echo "${bot}" | grep -i "^m") ]]; then
|
||||
fi
|
||||
|
||||
# install mesh_bot_reporting timer to run daily at 4:20 am
|
||||
# echo ""
|
||||
# echo "Installing mesh_bot_reporting.timer to run mesh_bot_reporting daily at 4:20 am..."
|
||||
# sudo cp etc/mesh_bot_reporting.service /etc/systemd/system/
|
||||
# sudo cp etc/mesh_bot_reporting.timer /etc/systemd/system/
|
||||
# sudo systemctl daemon-reload
|
||||
# sudo systemctl enable mesh_bot_reporting.timer
|
||||
# sudo systemctl start mesh_bot_reporting.timer
|
||||
# echo "mesh_bot_reporting.timer installed and enabled"
|
||||
# echo "Check timer status with: systemctl status mesh_bot_reporting.timer"
|
||||
# echo "List all timers with: systemctl list-timers"
|
||||
# echo ""
|
||||
echo ""
|
||||
echo "Installing mesh_bot_reporting.timer to run mesh_bot_reporting daily at 4:20 am..."
|
||||
sudo cp etc/mesh_bot_reporting.service /etc/systemd/system/
|
||||
sudo cp etc/mesh_bot_reporting.timer /etc/systemd/system/
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable mesh_bot_reporting.timer
|
||||
sudo systemctl start mesh_bot_reporting.timer
|
||||
echo "mesh_bot_reporting.timer installed and enabled"
|
||||
echo "Check timer status with: systemctl status mesh_bot_reporting.timer"
|
||||
echo "List all timers with: systemctl list-timers"
|
||||
echo ""
|
||||
|
||||
# # install mesh_bot_w3_server service
|
||||
# echo "Installing mesh_bot_w3_server.service to run the web3 server..."
|
||||
@@ -343,6 +393,10 @@ fi
|
||||
# echo "Check service status with: systemctl status mesh_bot_w3_server.service"
|
||||
# echo ""
|
||||
|
||||
echo "----------------------------------------------"
|
||||
echo "Extra options for installation..."
|
||||
echo "----------------------------------------------"
|
||||
|
||||
# check if running on embedded for final steps
|
||||
if [[ $(echo "${embedded}" | grep -i "^n") ]]; then
|
||||
# ask if emoji font should be installed for linux
|
||||
@@ -404,25 +458,20 @@ if [[ $(echo "${embedded}" | grep -i "^n") ]]; then
|
||||
printf "sudo systemctl disable %s.service\n" "$service" >> install_notes.txt
|
||||
printf "sudo systemctl disable %s.service\n" "$service" >> install_notes.txt
|
||||
printf "\n older chron statment to run the report generator hourly:\n" >> install_notes.txt
|
||||
printf "0 * * * * /usr/bin/python3 $program_path/etc/report_generator5.py" >> install_notes.txt
|
||||
printf " to edit crontab run 'crontab -e'\n" >> install_notes.txt
|
||||
#printf "0 * * * * /usr/bin/python3 $program_path/etc/report_generator5.py" >> install_notes.txt
|
||||
#printf " to edit crontab run 'crontab -e'\n" >> install_notes.txt
|
||||
printf "\nmesh_bot_reporting.timer installed to run daily at 4:20 am\n" >> install_notes.txt
|
||||
printf "Check timer status: systemctl status mesh_bot_reporting.timer\n" >> install_notes.txt
|
||||
printf "List all timers: systemctl list-timers\n" >> install_notes.txt
|
||||
printf "View timer logs: journalctl -u mesh_bot_reporting.timer\n" >> install_notes.txt
|
||||
printf "*** Stay Up to date using 'bash update.sh' ***\n" >> install_notes.txt
|
||||
printf "sudo ./update.sh && sudo -u meshbot ./launch.sh mesh_bot.py\n" >> install_notes.txt
|
||||
|
||||
if [[ $(echo "${venv}" | grep -i "^y") ]]; then
|
||||
printf "\nFor running on venv, virtual launch bot with './launch.sh mesh' in path $program_path\n" >> install_notes.txt
|
||||
fi
|
||||
|
||||
read -p "Press enter to complete the installation, these commands saved to install_notes.txt"
|
||||
|
||||
printf "\nGood time to reboot? (y/n)"
|
||||
read reboot
|
||||
if [[ $(echo "${reboot}" | grep -i "^y") ]]; then
|
||||
sudo reboot
|
||||
fi
|
||||
else
|
||||
# we are on embedded
|
||||
# replace "type = serial" with "type = tcp" in config.ini
|
||||
@@ -435,21 +484,6 @@ else
|
||||
# add service dependency for meshtasticd into service file
|
||||
#replace="s|After=network.target|After=network.target meshtasticd.service|g"
|
||||
|
||||
# Set up the meshing around service
|
||||
sudo cp /opt/meshing-around/etc/$service.service /etc/systemd/system/$service.service
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl enable $service.service
|
||||
sudo systemctl start $service.service
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
# # check if the cron job already exists
|
||||
# if ! crontab -l | grep -q "$chronjob"; then
|
||||
# # add the cron job to run the report_generator5.py script
|
||||
# (crontab -l 2>/dev/null; echo "$chronjob") | crontab -
|
||||
# printf "\nAdded cron job to run report_generator5.py\n"
|
||||
# else
|
||||
# printf "\nCron job already exists, skipping\n"
|
||||
# fi
|
||||
# document the service install
|
||||
printf "Reference following commands:\n\n" > install_notes.txt
|
||||
printf "sudo systemctl status %s.service\n" "$service" >> install_notes.txt
|
||||
@@ -460,16 +494,35 @@ else
|
||||
printf "sudo systemctl stop %s.service\n" "$service" >> install_notes.txt
|
||||
printf "sudo systemctl disable %s.service\n" "$service" >> install_notes.txt
|
||||
printf "older crontab to run the report generator hourly:" >> install_notes.txt
|
||||
printf "0 * * * * /usr/bin/python3 $program_path/etc/report_generator5.py" >> install_notes.txt
|
||||
printf " to edit crontab run 'crontab -e'" >> install_notes.txt
|
||||
#printf "0 * * * * /usr/bin/python3 $program_path/etc/report_generator5.py" >> install_notes.txt
|
||||
#printf " to edit crontab run 'crontab -e'" >> install_notes.txt
|
||||
printf "\nmesh_bot_reporting.timer installed to run daily at 4:20 am\n" >> install_notes.txt
|
||||
printf "Check timer status: systemctl status mesh_bot_reporting.timer\n" >> install_notes.txt
|
||||
printf "List all timers: systemctl list-timers\n" >> install_notes.txt
|
||||
printf "*** Stay Up to date using 'bash update.sh' ***\n" >> install_notes.txt
|
||||
printf "sudo ./update.sh && sudo -u meshbot ./launch.sh mesh_bot.py\n" >> install_notes.txt
|
||||
fi
|
||||
|
||||
printf "\nInstallation complete?\n"
|
||||
echo "----------------------------------------------"
|
||||
echo "Finalizing permissions..."
|
||||
echo "----------------------------------------------"
|
||||
export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
|
||||
export SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
|
||||
sudo chown -R "$bot_user:$bot_user" "$program_path/logs"
|
||||
sudo chown -R "$bot_user:$bot_user" "$program_path/data"
|
||||
sudo chown "$bot_user:$bot_user" "$program_path/config.ini"
|
||||
sudo chmod 664 "$program_path/config.ini"
|
||||
echo "Permissions set for meshbot on config.ini"
|
||||
sudo chmod 775 "$program_path/logs"
|
||||
sudo chmod 775 "$program_path/data"
|
||||
echo "Permissions set for meshbot on logs and data directories"
|
||||
|
||||
printf "\nGood time to reboot? (y/n)"
|
||||
read reboot
|
||||
if [[ $(echo "${reboot}" | grep -i "^y") ]]; then
|
||||
sudo reboot
|
||||
fi
|
||||
printf "\nInstallation complete! 73\n"
|
||||
exit 0
|
||||
|
||||
# to uninstall the product run the following commands as needed
|
||||
@@ -512,6 +565,25 @@ exit 0
|
||||
# sudo rm -rf ~/.ollama
|
||||
|
||||
|
||||
# after install shenannigans
|
||||
# if install done manually
|
||||
# copy modules/custom_scheduler.py template if it does not exist
|
||||
# copy data files from etc/data to data/
|
||||
|
||||
|
||||
#### after install shenannigans
|
||||
# add 'bee = True' to config.ini General section.
|
||||
# wget https://gist.github.com/MattIPv4/045239bc27b16b2bcf7a3a9a4648c08a -O bee.txt
|
||||
# wget https://gist.githubusercontent.com/MattIPv4/045239bc27b16b2bcf7a3a9a4648c08a/raw/2411e31293a35f3e565f61e7490a806d4720ea7e/bee%2520movie%2520script -O bee.txt
|
||||
# place bee.txt in project root
|
||||
|
||||
####
|
||||
# download bible in text from places like https://www.biblesupersearch.com/bible-downloads/
|
||||
# in the project root place bible.txt and use verse = True
|
||||
# to use machine reading format like this
|
||||
# Genesis 1:1 In the beginning God created the heavens and the earth.
|
||||
# Genesis 1:2 And the earth was waste and void..
|
||||
# or simple format like this (less preferred)
|
||||
# Chapter 1
|
||||
# 1 In the beginning God created the heavens and the earth.
|
||||
# 2 And the earth was waste and void..
|
||||
|
||||
|
||||
|
||||
@@ -17,6 +17,9 @@ else
|
||||
exit 1
|
||||
fi
|
||||
|
||||
export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
|
||||
export SSL_CERT_FILE=/etc/ssl/certs/ca-certificates.crt
|
||||
|
||||
# launch the application
|
||||
if [[ "$1" == pong* ]]; then
|
||||
python3 pong_bot.py
|
||||
@@ -28,8 +31,12 @@ elif [[ "$1" == "html5" ]]; then
|
||||
python3 etc/report_generator5.py
|
||||
elif [[ "$1" == add* ]]; then
|
||||
python3 script/addFav.py
|
||||
elif [[ "$1" == "game" ]]; then
|
||||
python3 script/game_serve.py
|
||||
elif [[ "$1" == "display" ]]; then
|
||||
python3 script/game_serve.py
|
||||
else
|
||||
echo "Please provide a bot to launch (pong/mesh) or a report to generate (html/html5) or addFav"
|
||||
echo "Please provide a bot to launch (pong/mesh/display) or a report to generate (html/html5) or addFav"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
465
mesh_bot.py
465
mesh_bot.py
@@ -16,7 +16,7 @@ import modules.settings as my_settings
|
||||
from modules.system import *
|
||||
|
||||
# list of commands to remove from the default list for DM only
|
||||
restrictedCommands = ["blackjack", "videopoker", "dopewars", "lemonstand", "golfsim", "mastermind", "hangman", "hamtest", "tictactoe", "quiz", "q:", "survey", "s:"]
|
||||
restrictedCommands = ["blackjack", "videopoker", "dopewars", "lemonstand", "golfsim", "mastermind", "hangman", "hamtest", "tictactoe", "tic-tac-toe", "quiz", "q:", "survey", "s:", "battleship"]
|
||||
restrictedResponse = "🤖only available in a Direct Message📵" # "" for none
|
||||
|
||||
def auto_response(message, snr, rssi, hop, pkiStatus, message_from_id, channel_number, deviceID, isDM):
|
||||
@@ -31,6 +31,7 @@ def auto_response(message, snr, rssi, hop, pkiStatus, message_from_id, channel_n
|
||||
"ask:": lambda: handle_llm(message_from_id, channel_number, deviceID, message, publicChannel),
|
||||
"askai": lambda: handle_llm(message_from_id, channel_number, deviceID, message, publicChannel),
|
||||
"bannode": lambda: handle_bbsban(message, message_from_id, isDM),
|
||||
"battleship": lambda: handleBattleship(message, message_from_id, deviceID),
|
||||
"bbsack": lambda: bbs_sync_posts(message, message_from_id, deviceID),
|
||||
"bbsdelete": lambda: handle_bbsdelete(message, message_from_id),
|
||||
"bbshelp": bbs_help,
|
||||
@@ -40,10 +41,10 @@ def auto_response(message, snr, rssi, hop, pkiStatus, message_from_id, channel_n
|
||||
"bbspost": lambda: handle_bbspost(message, message_from_id, deviceID),
|
||||
"bbsread": lambda: handle_bbsread(message),
|
||||
"blackjack": lambda: handleBlackJack(message, message_from_id, deviceID),
|
||||
"approvecl": lambda: handle_checklist(message, message_from_id, deviceID),
|
||||
"denycl": lambda: handle_checklist(message, message_from_id, deviceID),
|
||||
"checkin": lambda: handle_checklist(message, message_from_id, deviceID),
|
||||
"checklist": lambda: handle_checklist(message, message_from_id, deviceID),
|
||||
"checklistapprove": lambda: handle_checklist(message, message_from_id, deviceID),
|
||||
"checklistdeny": lambda: handle_checklist(message, message_from_id, deviceID),
|
||||
"checkout": lambda: handle_checklist(message, message_from_id, deviceID),
|
||||
"chess": lambda: handle_gTnW(chess=True),
|
||||
"clearsms": lambda: handle_sms(message_from_id, message),
|
||||
@@ -84,6 +85,7 @@ def auto_response(message, snr, rssi, hop, pkiStatus, message_from_id, channel_n
|
||||
"cartremove": lambda: handle_inventory(message, message_from_id, deviceID),
|
||||
"cartsell": lambda: handle_inventory(message, message_from_id, deviceID),
|
||||
"joke": lambda: tell_joke(message_from_id),
|
||||
"latest": lambda: get_newsAPI(message, message_from_id, deviceID, isDM),
|
||||
"leaderboard": lambda: get_mesh_leaderboard(message, message_from_id, deviceID),
|
||||
"lemonstand": lambda: handleLemonade(message, message_from_id, deviceID),
|
||||
"lheard": lambda: handle_lheard(message, message_from_id, deviceID, isDM),
|
||||
@@ -96,8 +98,6 @@ def auto_response(message, snr, rssi, hop, pkiStatus, message_from_id, channel_n
|
||||
"ping": lambda: handle_ping(message_from_id, deviceID, message, hop, snr, rssi, isDM, channel_number),
|
||||
"pinging": lambda: handle_ping(message_from_id, deviceID, message, hop, snr, rssi, isDM, channel_number),
|
||||
"pong": lambda: "🏓PING!!🛜",
|
||||
"purgein": lambda: handle_checklist(message, message_from_id, deviceID),
|
||||
"purgeout": lambda: handle_checklist(message, message_from_id, deviceID),
|
||||
"q:": lambda: quizHandler(message, message_from_id, deviceID),
|
||||
"quiz": lambda: quizHandler(message, message_from_id, deviceID),
|
||||
"readnews": lambda: handleNews(message_from_id, deviceID, message, isDM),
|
||||
@@ -109,7 +109,7 @@ def auto_response(message, snr, rssi, hop, pkiStatus, message_from_id, channel_n
|
||||
"setsms": lambda: handle_sms( message_from_id, message),
|
||||
"sitrep": lambda: handle_lheard(message, message_from_id, deviceID, isDM),
|
||||
"sms:": lambda: handle_sms(message_from_id, message),
|
||||
"solar": lambda: drap_xray_conditions() + "\n" + solar_conditions(),
|
||||
"solar": lambda: drap_xray_conditions() + "\n" + solar_conditions() + "\n" + get_noaa_scales_summary(),
|
||||
"sun": lambda: handle_sun(message_from_id, deviceID, channel_number),
|
||||
"survey": lambda: surveyHandler(message, message_from_id, deviceID),
|
||||
"s:": lambda: surveyHandler(message, message_from_id, deviceID),
|
||||
@@ -120,6 +120,7 @@ def auto_response(message, snr, rssi, hop, pkiStatus, message_from_id, channel_n
|
||||
"tic-tac-toe": lambda: handleTicTacToe(message, message_from_id, deviceID),
|
||||
"tide": lambda: handle_tide(message_from_id, deviceID, channel_number),
|
||||
"valert": lambda: get_volcano_usgs(),
|
||||
"verse": lambda: read_verse(),
|
||||
"videopoker": lambda: handleVideoPoker(message, message_from_id, deviceID),
|
||||
"whereami": lambda: handle_whereami(message_from_id, deviceID, channel_number),
|
||||
"whoami": lambda: handle_whoami(message_from_id, deviceID, hop, snr, rssi, pkiStatus),
|
||||
@@ -250,7 +251,11 @@ def handle_ping(message_from_id, deviceID, message, hop, snr, rssi, isDM, chann
|
||||
global multiPing
|
||||
myNodeNum = globals().get(f'myNodeNum{deviceID}', 777)
|
||||
if "?" in message and isDM:
|
||||
return message.split("?")[0].title() + " command returns SNR and RSSI, or hopcount from your message. Try adding e.g. @place or #tag"
|
||||
pingHelp = "🤖Ping Command Help:\n" \
|
||||
"🏓 Send 'ping' or 'ack' or 'test' to get a response.\n" \
|
||||
"🏓 Send 'ping <number>' to get multiple pings in DM"
|
||||
"🏓 ping @USERID to send a Joke from the bot"
|
||||
return pingHelp
|
||||
|
||||
msg = ""
|
||||
type = ''
|
||||
@@ -282,10 +287,12 @@ def handle_ping(message_from_id, deviceID, message, hop, snr, rssi, isDM, chann
|
||||
#flood
|
||||
msg += " [F]"
|
||||
|
||||
if (float(snr) != 0 or float(rssi) != 0) and "Hops" not in hop:
|
||||
if (float(snr) != 0 or float(rssi) != 0) and "Hop" not in hop:
|
||||
msg += f"\nSNR:{snr} RSSI:{rssi}"
|
||||
elif "Hops" in hop:
|
||||
msg += f"\n{hop}🐇 "
|
||||
elif "Hop" in hop:
|
||||
# janky, remove the words Gateway or MQTT if present
|
||||
hop = hop.replace("Gateway", "").replace("Direct", "").replace("MQTT", "").strip()
|
||||
msg += f"\n{hop} "
|
||||
|
||||
if "@" in message:
|
||||
msg = msg + " @" + message.split("@")[1]
|
||||
@@ -331,8 +338,11 @@ def handle_ping(message_from_id, deviceID, message, hop, snr, rssi, isDM, chann
|
||||
# no autoping in channels
|
||||
pingCount = 1
|
||||
|
||||
if pingCount > 51:
|
||||
if pingCount > 51 and pingCount <= 101:
|
||||
pingCount = 50
|
||||
if pingCount > 800:
|
||||
ban_hammer(message_from_id, deviceID, reason="Excessive auto-ping request")
|
||||
return "🚫⛔️auto-ping request denied."
|
||||
except ValueError:
|
||||
pingCount = -1
|
||||
|
||||
@@ -359,7 +369,8 @@ def handle_emergency(message_from_id, deviceID, message):
|
||||
# if user in bbs_ban_list return
|
||||
if str(message_from_id) in my_settings.bbs_ban_list:
|
||||
# silent discard
|
||||
logger.warning(f"System: {message_from_id} on spam list, no emergency responder alert sent")
|
||||
hammer_value = ban_hammer(message_from_id, deviceID, reason="Emergency Alert from banned node")
|
||||
logger.warning(f"System: {message_from_id} on spam list, no emergency responder alert sent. Ban hammer value: {hammer_value}")
|
||||
return ''
|
||||
# trgger alert to emergency_responder_alert_channel
|
||||
if message_from_id != 0:
|
||||
@@ -391,11 +402,42 @@ def handle_motd(message, message_from_id, isDM):
|
||||
return msg
|
||||
|
||||
def handle_echo(message, message_from_id, deviceID, isDM, channel_number):
|
||||
# Check if user is admin
|
||||
isAdmin = isNodeAdmin(message_from_id)
|
||||
|
||||
# Admin extended syntax: echo <string> c=<channel> d=<device>
|
||||
if isAdmin and message.strip().lower().startswith("echo ") and not message.strip().endswith("?"):
|
||||
msg_to_echo = message.split(" ", 1)[1]
|
||||
target_channel = channel_number
|
||||
target_device = deviceID
|
||||
|
||||
# Split into words to find c= and d=, but preserve spaces in message
|
||||
words = msg_to_echo.split()
|
||||
new_words = []
|
||||
for w in words:
|
||||
if w.startswith("c=") and w[2:].isdigit():
|
||||
target_channel = int(w[2:])
|
||||
elif w.startswith("d=") and w[2:].isdigit():
|
||||
target_device = int(w[2:])
|
||||
else:
|
||||
new_words.append(w)
|
||||
msg_to_echo = " ".join(new_words).strip()
|
||||
# Replace motd/MOTD with the current MOTD from settings
|
||||
msg_to_echo = " ".join(my_settings.MOTD if w.lower() == "motd" else w for w in msg_to_echo.split())
|
||||
# Replace welcome! with the current welcome_message from settings
|
||||
msg_to_echo = " ".join(my_settings.welcome_message if w.lower() == "welcome!" else w for w in msg_to_echo.split())
|
||||
|
||||
# Send echo to specified channel/device
|
||||
logger.debug(f"System: Admin Echo to channel {target_channel} device {target_device} message: {msg_to_echo}")
|
||||
time.sleep(splitDelay) # throttle for 2x send
|
||||
send_message(msg_to_echo, target_channel, 0, target_device)
|
||||
time.sleep(splitDelay) # throttle for 2x send
|
||||
return f"🐬echoed to channel {target_channel} device {target_device}"
|
||||
|
||||
# dev echoBinary off
|
||||
echoBinary = False
|
||||
if echoBinary:
|
||||
try:
|
||||
#send_raw_bytes echo the data to the channel with synch word:
|
||||
port_num = 256
|
||||
synch_word = b"echo:"
|
||||
parts = message.split("echo ", 1)
|
||||
@@ -404,25 +446,29 @@ def handle_echo(message, message_from_id, deviceID, isDM, channel_number):
|
||||
raw_bytes = synch_word + msg_to_echo.encode('utf-8')
|
||||
send_raw_bytes(message_from_id, raw_bytes, nodeInt=deviceID, channel=channel_number, portnum=port_num)
|
||||
return f"Sent binary echo message to {message_from_id} to {port_num} on channel {channel_number} device {deviceID}"
|
||||
else:
|
||||
return "Please provide a message to echo back to you. Example:echo Hello World"
|
||||
except Exception as e:
|
||||
logger.error(f"System: Echo Exception {e}")
|
||||
return f"Sent binary echo message to {message_from_id} to {port_num} on channel {channel_number} device {deviceID}"
|
||||
|
||||
if "?" in message.lower():
|
||||
return "command returns your message back to you. Example:echo Hello World"
|
||||
elif "echo " in message.lower():
|
||||
parts = message.lower().split("echo ", 1)
|
||||
if "?" in message:
|
||||
isAdmin = isNodeAdmin(message_from_id)
|
||||
if isAdmin:
|
||||
return (
|
||||
"Admin usage: echo <message> c=<channel> d=<device>\n"
|
||||
"Example: echo Hello world c=1 d=2"
|
||||
)
|
||||
return "command returns your message back to you. Example: echo Hello World"
|
||||
|
||||
# process normal echo back to user
|
||||
elif message.strip().lower().startswith("echo "):
|
||||
parts = message.split("echo ", 1)
|
||||
if len(parts) > 1 and parts[1].strip() != "":
|
||||
echo_msg = parts[1]
|
||||
if channel_number != my_settings.echoChannel and not isDM:
|
||||
echo_msg = "@" + get_name_from_number(message_from_id, 'short', deviceID) + " " + echo_msg
|
||||
return echo_msg
|
||||
else:
|
||||
return "Please provide a message to echo back to you. Example:echo Hello World"
|
||||
else:
|
||||
return "Please provide a message to echo back to you. Example:echo Hello World"
|
||||
return "Please provide a message to echo back to you. Example: echo Hello World"
|
||||
return "🐬echo.."
|
||||
|
||||
def handle_wxalert(message_from_id, deviceID, message):
|
||||
if my_settings.use_meteo_wxApi:
|
||||
@@ -441,15 +487,26 @@ def handle_wxalert(message_from_id, deviceID, message):
|
||||
|
||||
def handleNews(message_from_id, deviceID, message, isDM):
|
||||
news = ''
|
||||
# if news source is provided pass that to read_news()
|
||||
if "?" in message.lower():
|
||||
return "returns the news. Add a source e.g. 📰readnews mesh"
|
||||
elif "readnews" in message.lower():
|
||||
source = message.lower().replace("readnews", "").strip()
|
||||
if source:
|
||||
news = read_news(source)
|
||||
# if news source is provided pass that to read_news()
|
||||
if my_settings.news_block_mode:
|
||||
news = read_news(source=source, news_block_mode=True)
|
||||
elif my_settings.news_random_line_only:
|
||||
news = read_news(source=source, random_line_only=True)
|
||||
else:
|
||||
news = read_news(source=source)
|
||||
else:
|
||||
news = read_news()
|
||||
# no source provided, use news.txt
|
||||
if my_settings.news_block_mode:
|
||||
news = read_news(news_block_mode=True)
|
||||
elif my_settings.news_random_line_only:
|
||||
news = read_news(random_line_only=True)
|
||||
else:
|
||||
news = read_news()
|
||||
|
||||
if news:
|
||||
# if not a DM add the username to the beginning of msg
|
||||
@@ -544,6 +601,11 @@ def handle_satpass(message_from_id, deviceID, message='', vox=False):
|
||||
satList = my_settings.satListConfig
|
||||
message = message.lower()
|
||||
|
||||
# check api_throttle
|
||||
check_throttle = api_throttle(message_from_id, deviceID, apiName='satpass')
|
||||
if check_throttle:
|
||||
return check_throttle
|
||||
|
||||
# if user has a NORAD ID in the message
|
||||
if "satpass " in message:
|
||||
try:
|
||||
@@ -600,7 +662,7 @@ def handle_llm(message_from_id, channel_number, deviceID, message, publicChannel
|
||||
if not any(node['nodeID'] == message_from_id and node['welcome'] == True for node in seenNodes):
|
||||
if (channel_number == publicChannel and my_settings.antiSpam) or my_settings.useDMForResponse:
|
||||
# send via DM
|
||||
send_message(my_settings.welcome_message, channel_number, message_from_id, deviceID)
|
||||
send_message(my_settings.welcome_message, 0, message_from_id, deviceID)
|
||||
else:
|
||||
# send via channel
|
||||
send_message(my_settings.welcome_message, channel_number, 0, deviceID)
|
||||
@@ -633,7 +695,7 @@ def handle_llm(message_from_id, channel_number, deviceID, message, publicChannel
|
||||
if msg != '':
|
||||
if (channel_number == publicChannel and my_settings.antiSpam) or my_settings.useDMForResponse:
|
||||
# send via DM
|
||||
send_message(msg, channel_number, message_from_id, deviceID)
|
||||
send_message(msg, 0, message_from_id, deviceID)
|
||||
else:
|
||||
# send via channel
|
||||
send_message(msg, channel_number, 0, deviceID)
|
||||
@@ -676,11 +738,6 @@ def handleDopeWars(message, nodeID, rxNode):
|
||||
if p.get('userID') == nodeID:
|
||||
p['last_played'] = time.time()
|
||||
msg = playDopeWars(nodeID, message)
|
||||
|
||||
# if message starts wth 'e'xit remove player from tracker
|
||||
if message.lower().startswith('e'):
|
||||
dwPlayerTracker[:] = [p for p in dwPlayerTracker if p.get('userID') != nodeID]
|
||||
msg = 'You have exited Dope Wars.'
|
||||
return msg
|
||||
|
||||
def handle_gTnW(chess = False):
|
||||
@@ -1004,32 +1061,149 @@ def handleHamtest(message, nodeID, deviceID):
|
||||
|
||||
def handleTicTacToe(message, nodeID, deviceID):
|
||||
global tictactoeTracker
|
||||
index = 0
|
||||
msg = ''
|
||||
|
||||
# Find or create player tracker entry
|
||||
for i in range(len(tictactoeTracker)):
|
||||
if tictactoeTracker[i]['nodeID'] == nodeID:
|
||||
tictactoeTracker[i]["last_played"] = time.time()
|
||||
index = i+1
|
||||
break
|
||||
|
||||
tracker_entry = next((entry for entry in tictactoeTracker if entry['nodeID'] == nodeID), None)
|
||||
|
||||
# Handle end/exit command
|
||||
if message.lower().startswith('e'):
|
||||
if index:
|
||||
if tracker_entry:
|
||||
tictactoe.end(nodeID)
|
||||
tictactoeTracker.pop(index-1)
|
||||
tictactoeTracker.remove(tracker_entry)
|
||||
return "Thanks for playing! 🎯"
|
||||
|
||||
if not index:
|
||||
# If not found, create new tracker entry and ask for 2D/3D if not specified
|
||||
if not tracker_entry:
|
||||
mode = "2D"
|
||||
if "3d" in message.lower():
|
||||
mode = "3D"
|
||||
elif "2d" in message.lower():
|
||||
mode = "2D"
|
||||
tictactoeTracker.append({
|
||||
"nodeID": nodeID,
|
||||
"last_played": time.time()
|
||||
"last_played": time.time(),
|
||||
"mode": mode
|
||||
})
|
||||
msg = "🎯Tic-Tac-Toe🤖 '(e)nd'\n"
|
||||
|
||||
msg += tictactoe.play(nodeID, message)
|
||||
msg = f"🎯Tic-Tac-Toe🤖 '{mode}' mode. (e)nd to quit\n"
|
||||
msg += tictactoe.new_game(nodeID, mode=mode)
|
||||
return msg
|
||||
else:
|
||||
tracker_entry["last_played"] = time.time()
|
||||
|
||||
msg = tictactoe.play(nodeID, message)
|
||||
return msg
|
||||
|
||||
|
||||
def handleBattleship(message, nodeID, deviceID):
|
||||
global battleshipTracker
|
||||
from modules.games import battleship
|
||||
|
||||
# Helper to get short_name from tracker
|
||||
def get_short_name(nid):
|
||||
entry = next((e for e in battleshipTracker if e['nodeID'] == nid), None)
|
||||
return entry['short_name'] if entry and 'short_name' in entry else get_name_from_number(nid, 'short', deviceID)
|
||||
|
||||
msg_lower = message.lower().strip()
|
||||
tracker_entry = next((entry for entry in battleshipTracker if entry['nodeID'] == nodeID), None)
|
||||
|
||||
# End/exit command
|
||||
if msg_lower.startswith('end') or msg_lower.startswith('exit'):
|
||||
if tracker_entry:
|
||||
if 'session_id' in tracker_entry:
|
||||
battleship.Battleship.end_game(tracker_entry['session_id'])
|
||||
battleshipTracker.remove(tracker_entry)
|
||||
return "Thanks for playing Battleship! 🚢"
|
||||
|
||||
# Create new P2P game with short code
|
||||
if msg_lower.startswith("battleship new"):
|
||||
short_name = get_name_from_number(nodeID, 'short', deviceID)
|
||||
msg, code = battleship.Battleship.new_game(nodeID, vs_ai=False)
|
||||
battleshipTracker.append({
|
||||
"nodeID": nodeID,
|
||||
"short_name": short_name,
|
||||
"last_played": time.time(),
|
||||
"session_id": battleship.Battleship.short_codes.get(code, code)
|
||||
})
|
||||
return f"{msg}"
|
||||
|
||||
# Show open P2P games waiting for a player
|
||||
if msg_lower.startswith("battleship lobby"):
|
||||
open_codes = []
|
||||
for code, session_id in battleship.Battleship.short_codes.items():
|
||||
session = battleship.Battleship.sessions.get(session_id)
|
||||
if session and session.player2_id is None:
|
||||
open_codes.append(code)
|
||||
if not open_codes:
|
||||
return "No open Battleship games waiting for players."
|
||||
return "Open Battleship games (join with 'battleship join <code>'):\n" + ", ".join(open_codes)
|
||||
|
||||
# Join existing P2P game using short code
|
||||
if msg_lower.startswith("battleship join"):
|
||||
try:
|
||||
code = msg_lower.split("join", 1)[1].strip()
|
||||
except IndexError:
|
||||
return "Usage: battleship join <code>"
|
||||
session = battleship.Battleship.get_session(code)
|
||||
if not session:
|
||||
return "Session not found."
|
||||
if session.player2_id is not None:
|
||||
return "Session already has two players."
|
||||
session.player2_id = nodeID
|
||||
session.next_turn = nodeID # Make joining player go first!
|
||||
short_name = get_name_from_number(nodeID, 'short', deviceID)
|
||||
battleshipTracker.append({
|
||||
"nodeID": nodeID,
|
||||
"short_name": short_name,
|
||||
"last_played": time.time(),
|
||||
"session_id": session.session_id
|
||||
})
|
||||
p1_short_name = get_short_name(session.player1_id)
|
||||
send_message(
|
||||
f"{p1_short_name}, your opponent {short_name} has joined the game! It's their turn first.",
|
||||
0, # channel 0 for DM
|
||||
session.player1_id, # recipient nodeID
|
||||
deviceID
|
||||
)
|
||||
time.sleep(splitDelay) # slight delay to avoid message overlap
|
||||
return "You joined the game! It's your turn. Enter your move (e.g., 'B4')."
|
||||
|
||||
# If not found, create new tracker entry and new game vs AI (default)
|
||||
if not tracker_entry:
|
||||
short_name = get_name_from_number(nodeID, 'short', deviceID)
|
||||
msg, session_id = battleship.Battleship.new_game(nodeID)
|
||||
battleshipTracker.append({
|
||||
"nodeID": nodeID,
|
||||
"short_name": short_name,
|
||||
"last_played": time.time(),
|
||||
"session_id": session_id
|
||||
})
|
||||
return msg
|
||||
|
||||
# Update last played
|
||||
tracker_entry["last_played"] = time.time()
|
||||
session_id = tracker_entry.get("session_id")
|
||||
|
||||
# Play the game and check if we need to alert the next player
|
||||
response = battleship.playBattleship(message, nodeID, deviceID, session_id=session_id)
|
||||
|
||||
# --- Notify the next player when it's their turn in P2P ---
|
||||
session = battleship.Battleship.get_session(session_id)
|
||||
if session and not session.vs_ai and session.player1_id and session.player2_id:
|
||||
# Only notify if the game is not over (optional: add a game-over check)
|
||||
if getattr(session, "last_move", None):
|
||||
next_player_id = session.next_turn
|
||||
# Only notify if it's not the player who just moved
|
||||
if next_player_id != nodeID:
|
||||
next_player_short_name = get_short_name(next_player_id)
|
||||
send_message(
|
||||
f"{next_player_short_name}, it's your turn in Battleship! Enter your move (e.g., 'B4').",
|
||||
0, # channel 0 for DM
|
||||
next_player_id,
|
||||
deviceID
|
||||
)
|
||||
time.sleep(splitDelay) # slight delay to avoid message overlap
|
||||
|
||||
return response
|
||||
|
||||
def quizHandler(message, nodeID, deviceID):
|
||||
global quizGamePlayer
|
||||
user_name = get_name_from_number(nodeID)
|
||||
@@ -1416,10 +1590,18 @@ def handle_history(message, nodeid, deviceID, isDM, lheard=False):
|
||||
|
||||
def handle_whereami(message_from_id, deviceID, channel_number):
|
||||
location = get_node_location(message_from_id, deviceID, channel_number)
|
||||
# check api_throttle
|
||||
check_throttle = api_throttle(message_from_id, deviceID, apiName='whereami')
|
||||
if check_throttle:
|
||||
return check_throttle
|
||||
return where_am_i(str(location[0]), str(location[1]))
|
||||
|
||||
def handle_repeaterQuery(message_from_id, deviceID, channel_number):
|
||||
location = get_node_location(message_from_id, deviceID, channel_number)
|
||||
# check api_throttle
|
||||
check_throttle = api_throttle(message_from_id, deviceID, apiName='repeaterQuery')
|
||||
if check_throttle:
|
||||
return check_throttle
|
||||
if repeater_lookup == "rbook":
|
||||
return getRepeaterBook(str(location[0]), str(location[1]))
|
||||
elif repeater_lookup == "artsci":
|
||||
@@ -1428,21 +1610,10 @@ def handle_repeaterQuery(message_from_id, deviceID, channel_number):
|
||||
return "Repeater lookup not enabled"
|
||||
|
||||
def handle_tide(message_from_id, deviceID, channel_number, vox=False):
|
||||
# Check if tidepredict (xtide) is enabled
|
||||
if vox:
|
||||
return get_NOAAtide(str(my_settings.latitudeValue), str(my_settings.longitudeValue))
|
||||
location = get_node_location(message_from_id, deviceID, channel_number)
|
||||
lat = str(location[0])
|
||||
lon = str(location[1])
|
||||
if lat == "0.0" or lon == "0.0":
|
||||
lat = str(my_settings.latitudeValue)
|
||||
lon = str(my_settings.longitudeValue)
|
||||
|
||||
if my_settings.useTidePredict:
|
||||
logger.debug("System: Location: Using tidepredict")
|
||||
return xtide.get_tide_predictions(lat, lon)
|
||||
else:
|
||||
# Fallback to NOAA tide data
|
||||
logger.debug("System: Location: Using NOAA")
|
||||
return get_NOAAtide(str(location[0]), str(location[1]))
|
||||
return get_NOAAtide(str(location[0]), str(location[1]))
|
||||
|
||||
def handle_moon(message_from_id, deviceID, channel_number, vox=False):
|
||||
if vox:
|
||||
@@ -1552,6 +1723,9 @@ def handle_boot(mesh=True):
|
||||
|
||||
if my_settings.solar_conditions_enabled:
|
||||
logger.debug("System: Celestial Telemetry Enabled")
|
||||
|
||||
if my_settings.meshagesTTS:
|
||||
logger.debug("System: Meshages TTS Text-to-Speech Enabled")
|
||||
|
||||
if my_settings.location_enabled:
|
||||
if my_settings.use_meteo_wxApi:
|
||||
@@ -1564,23 +1738,23 @@ def handle_boot(mesh=True):
|
||||
|
||||
if my_settings.coastalEnabled:
|
||||
logger.debug("System: Coastal Forecast and Tide Enabled!")
|
||||
if my_settings.useTidePredict:
|
||||
logger.debug("System: Using Local TidePredict for Tide Data")
|
||||
|
||||
if games_enabled:
|
||||
logger.debug("System: Games Enabled!")
|
||||
|
||||
if my_settings.wikipedia_enabled:
|
||||
if my_settings.use_kiwix_server:
|
||||
logger.debug(f"System: Wikipedia search Enabled using Kiwix server at {kiwix_url}")
|
||||
logger.debug(f"System: Wikipedia search Enabled using Kiwix server at {my_settings.kiwix_url}")
|
||||
else:
|
||||
logger.debug("System: Wikipedia search Enabled")
|
||||
|
||||
if my_settings.rssEnable:
|
||||
logger.debug(f"System: RSS Feed Reader Enabled for feeds: {rssFeedNames}")
|
||||
logger.debug(f"System: RSS Feed Reader Enabled for feeds: {my_settings.rssFeedNames}")
|
||||
if my_settings.enable_headlines:
|
||||
logger.debug("System: News Headlines Enabled from NewsAPI.org")
|
||||
|
||||
if my_settings.radio_detection_enabled:
|
||||
logger.debug(f"System: Radio Detection Enabled using rigctld at {my_settings.rigControlServerAddress} broadcasting to channels: {my_settings.sigWatchBroadcastCh} for {get_freq_common_name(get_hamlib('f'))}")
|
||||
logger.debug(f"System: Radio Detection Enabled using rigctld at {my_settings.rigControlServerAddress} broadcasting to channels: {my_settings.sigWatchBroadcastCh}")
|
||||
|
||||
if my_settings.file_monitor_enabled:
|
||||
logger.warning(f"System: File Monitor Enabled for {my_settings.file_monitor_file_path}, broadcasting to channels: {my_settings.file_monitor_broadcastCh}")
|
||||
@@ -1591,21 +1765,23 @@ def handle_boot(mesh=True):
|
||||
if my_settings.read_news_enabled:
|
||||
logger.debug(f"System: File Monitor News Reader Enabled for {my_settings.news_file_path}")
|
||||
if my_settings.bee_enabled:
|
||||
logger.debug("System: File Monitor Bee Monitor Enabled for bee.txt")
|
||||
|
||||
if my_settings.wxAlertBroadcastEnabled:
|
||||
logger.debug(f"System: Weather Alert Broadcast Enabled on channels {my_settings.wxAlertBroadcastChannel}")
|
||||
|
||||
if my_settings.emergencyAlertBrodcastEnabled:
|
||||
logger.debug(f"System: Emergency Alert Broadcast Enabled on channels {my_settings.emergencyAlertBroadcastCh} for FIPS codes {my_settings.myStateFIPSList}")
|
||||
if my_settings.myStateFIPSList == ['']:
|
||||
logger.warning("System: No FIPS codes set for iPAWS Alerts")
|
||||
|
||||
if my_settings.emergency_responder_enabled:
|
||||
logger.debug(f"System: Emergency Responder Enabled on channels {my_settings.emergency_responder_alert_channel} for interface {my_settings.emergency_responder_alert_interface}")
|
||||
|
||||
logger.debug("System: File Monitor Bee Monitor Enabled for 🐝bee.txt")
|
||||
if my_settings.bible_enabled:
|
||||
logger.debug("System: File Monitor Bible Verse Enabled for bible.txt")
|
||||
if my_settings.usAlerts:
|
||||
logger.debug(f"System: Emergency Alert Broadcast Enabled on channel {my_settings.emergency_responder_alert_channel} for interface {my_settings.emergency_responder_alert_interface}")
|
||||
if my_settings.enableDEalerts:
|
||||
logger.debug(f"System: NINA Alerts Enabled with counties {my_settings.myRegionalKeysDE}")
|
||||
if my_settings.volcanoAlertBroadcastEnabled:
|
||||
logger.debug(f"System: Volcano Alert Broadcast Enabled on channels {my_settings.volcanoAlertBroadcastChannel}")
|
||||
logger.debug(f"System: Volcano Alert Broadcast Enabled on channels {my_settings.emergency_responder_alert_channel} ignoreUSGSWords {my_settings.ignoreUSGSWords}")
|
||||
if my_settings.ipawsAlertEnabled:
|
||||
logger.debug(f"System: iPAWS Alerts Enabled with FIPS codes {my_settings.myStateFIPSList} ignorelist {my_settings.ignoreFEMAwords}")
|
||||
if my_settings.enableDEalerts:
|
||||
logger.debug(f"System: NINA Alerts Enabled with counties {my_settings.myRegionalKeysDE}")
|
||||
if my_settings.wxAlertBroadcastEnabled:
|
||||
logger.debug(f"System: Weather Alert Broadcast Enabled on channels {my_settings.emergency_responder_alert_channel} ignoreEASwords {my_settings.ignoreEASwords}")
|
||||
if my_settings.emergency_responder_enabled:
|
||||
logger.debug(f"System: Emergency Responder Enabled on channels {my_settings.emergency_responder_alert_channel}")
|
||||
|
||||
if my_settings.qrz_hello_enabled:
|
||||
if my_settings.train_qrz:
|
||||
@@ -1623,6 +1799,10 @@ def handle_boot(mesh=True):
|
||||
if my_settings.useDMForResponse:
|
||||
logger.debug("System: Respond by DM only")
|
||||
|
||||
if my_settings.autoBanEnabled:
|
||||
logger.debug(f"System: Auto-Ban Enabled for {my_settings.autoBanThreshold} messages in {my_settings.autoBanTimeframe} seconds")
|
||||
load_bbsBanList()
|
||||
|
||||
if my_settings.log_messages_to_file:
|
||||
logger.debug("System: Logging Messages to disk")
|
||||
if my_settings.syslog_to_file:
|
||||
@@ -1726,24 +1906,38 @@ def onReceive(packet, interface):
|
||||
# check if the packet has a channel flag use it ## FIXME needs to be channel hash lookup
|
||||
if packet.get('channel'):
|
||||
channel_number = packet.get('channel')
|
||||
# get channel name from channel number from connected devices
|
||||
for device in channel_list:
|
||||
if device["interface_id"] == rxNode:
|
||||
device_channels = device['channels']
|
||||
for chan_name, info in device_channels.items():
|
||||
if info['number'] == channel_number:
|
||||
channel_name = chan_name
|
||||
channel_name = "unknown"
|
||||
try:
|
||||
res = resolve_channel_name(channel_number, rxNode, interface)
|
||||
if res:
|
||||
try:
|
||||
channel_name, _ = res
|
||||
except Exception:
|
||||
channel_name = "unknown"
|
||||
else:
|
||||
# Search all interfaces for this channel
|
||||
cache = build_channel_cache()
|
||||
found_on_other = None
|
||||
for device in cache:
|
||||
for chan_name, info in device.get("channels", {}).items():
|
||||
if str(info.get('number')) == str(channel_number) or str(info.get('hash')) == str(channel_number):
|
||||
found_on_other = device.get("interface_id")
|
||||
found_chan_name = chan_name
|
||||
break
|
||||
if found_on_other:
|
||||
break
|
||||
|
||||
# get channel hashes for the interface
|
||||
device = next((d for d in channel_list if d["interface_id"] == rxNode), None)
|
||||
if device:
|
||||
# Find the channel name whose hash matches channel_number
|
||||
for chan_name, info in device['channels'].items():
|
||||
if info['hash'] == channel_number:
|
||||
print(f"Matched channel hash {info['hash']} to channel name {chan_name}")
|
||||
channel_name = chan_name
|
||||
break
|
||||
if found_on_other and found_on_other != rxNode:
|
||||
logger.debug(
|
||||
f"System: Received Packet on Channel:{channel_number} ({found_chan_name}) on Interface:{rxNode}, but this channel is configured on Interface:{found_on_other}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(f"System: channel resolution error: {e}")
|
||||
|
||||
#debug channel info
|
||||
# if "unknown" in str(channel_name):
|
||||
# logger.debug(f"System: Received Packet on Channel:{channel_number} on Interface:{rxNode}")
|
||||
# else:
|
||||
# logger.debug(f"System: Received Packet on Channel:{channel_number} Name:{channel_name} on Interface:{rxNode}")
|
||||
|
||||
# check if the packet has a simulator flag
|
||||
simulator_flag = packet.get('decoded', {}).get('simulator', False)
|
||||
@@ -1755,9 +1949,14 @@ def onReceive(packet, interface):
|
||||
message_from_id = packet['from']
|
||||
|
||||
# if message_from_id is not in the seenNodes list add it
|
||||
if not any(node['nodeID'] == message_from_id for node in seenNodes):
|
||||
seenNodes.append({'nodeID': message_from_id, 'rxInterface': rxNode, 'channel': channel_number, 'welcome': False, 'lastSeen': time.time()})
|
||||
|
||||
if not any(node.get('nodeID') == message_from_id for node in seenNodes):
|
||||
seenNodes.append({'nodeID': message_from_id, 'rxInterface': rxNode, 'channel': channel_number, 'welcome': False, 'first_seen': time.time(), 'lastSeen': time.time()})
|
||||
else:
|
||||
# update lastSeen time
|
||||
for node in seenNodes:
|
||||
if node.get('nodeID') == message_from_id:
|
||||
node['lastSeen'] = time.time()
|
||||
break
|
||||
# BBS DM MAIL CHECKER
|
||||
if bbs_enabled and 'decoded' in packet:
|
||||
msg = bbs_check_dm(message_from_id)
|
||||
@@ -1766,7 +1965,12 @@ def onReceive(packet, interface):
|
||||
message = "Mail: " + msg[1] + " From: " + get_name_from_number(msg[2], 'long', rxNode)
|
||||
bbs_delete_dm(msg[0], msg[1])
|
||||
send_message(message, channel_number, message_from_id, rxNode)
|
||||
|
||||
|
||||
# CHECK with ban_hammer() if the node is banned
|
||||
if str(message_from_id) in my_settings.bbs_ban_list or str(message_from_id) in my_settings.autoBanlist:
|
||||
logger.warning(f"System: Banned Node {message_from_id} tried to send a message. Ignored. Try adding to node firmware-blocklist")
|
||||
return
|
||||
|
||||
# handle TEXT_MESSAGE_APP
|
||||
try:
|
||||
if 'decoded' in packet and packet['decoded']['portnum'] == 'TEXT_MESSAGE_APP':
|
||||
@@ -1822,31 +2026,38 @@ def onReceive(packet, interface):
|
||||
else:
|
||||
hop_count = hop_away
|
||||
|
||||
if hop == "" and hop_count > 0:
|
||||
if hop_count > 0:
|
||||
# set hop string from calculated hop count
|
||||
hop = f"{hop_count} Hop" if hop_count == 1 else f"{hop_count} Hops"
|
||||
|
||||
if hop_start == hop_limit and "lora" in str(transport_mechanism).lower() and (snr != 0 or rssi != 0):
|
||||
if hop_start == hop_limit and "lora" in str(transport_mechanism).lower() and (snr != 0 or rssi != 0) and hop_count == 0:
|
||||
# 2.7+ firmware direct hop over LoRa
|
||||
hop = "Direct"
|
||||
|
||||
if ((hop_start == 0 and hop_limit >= 0) or via_mqtt or ("mqtt" in str(transport_mechanism).lower())):
|
||||
if via_mqtt or "mqtt" in str(transport_mechanism).lower():
|
||||
hop = "MQTT"
|
||||
elif hop == "" and hop_count == 0 and (snr != 0 or rssi != 0):
|
||||
# this came from a UDP but we had signal info so gateway is used
|
||||
hop = "Gateway"
|
||||
elif "unknown" in str(transport_mechanism).lower() and (snr == 0 and rssi == 0):
|
||||
# we for sure detected this sourced from a UDP like host
|
||||
via_mqtt = True
|
||||
elif "udp" in str(transport_mechanism).lower():
|
||||
hop = "Gateway"
|
||||
|
||||
if hop in ("MQTT", "Gateway") and hop_count > 0:
|
||||
hop = f"{hop_count} Hops"
|
||||
hop = f" {hop_count} Hops"
|
||||
|
||||
# Add relay node info if present
|
||||
if packet.get('relayNode') is not None:
|
||||
relay_val = packet['relayNode']
|
||||
last_byte = relay_val & 0xFF
|
||||
if last_byte == 0x00:
|
||||
hex_val = 'OldFW'
|
||||
else:
|
||||
hex_val = f"{last_byte:02X}"
|
||||
hop += f" Relay:{hex_val}"
|
||||
|
||||
if enableHopLogs:
|
||||
logger.debug(f"System: Packet HopDebugger: hop_away:{hop_away} hop_limit:{hop_limit} hop_start:{hop_start} calculated_hop_count:{hop_count} final_hop_value:{hop} via_mqtt:{via_mqtt} transport_mechanism:{transport_mechanism} Hostname:{rxNodeHostName}")
|
||||
|
||||
# check with stringSafeChecker if the message is safe
|
||||
if stringSafeCheck(message_string) is False:
|
||||
if stringSafeCheck(message_string, message_from_id) is False:
|
||||
logger.warning(f"System: Possibly Unsafe Message from {get_name_from_number(message_from_id, 'long', rxNode)}")
|
||||
|
||||
if help_message in message_string or welcome_message in message_string or "CMD?:" in message_string:
|
||||
@@ -1902,7 +2113,13 @@ def onReceive(packet, interface):
|
||||
else:
|
||||
# respond with help message on DM
|
||||
send_message(help_message, channel_number, message_from_id, rxNode)
|
||||
|
||||
|
||||
# add message to tts queue
|
||||
if meshagesTTS:
|
||||
# add to the tts_read_queue
|
||||
readMe = f"DM from {get_name_from_number(message_from_id, 'short', rxNode)}: {message_string}"
|
||||
tts_read_queue.append(readMe)
|
||||
|
||||
# log the message to the message log
|
||||
if log_messages_to_file:
|
||||
msgLogger.info(f"Device:{rxNode} Channel:{channel_number} | {get_name_from_number(message_from_id, 'long', rxNode)} | DM | " + message_string.replace('\n', '-nl-'))
|
||||
@@ -1999,13 +2216,19 @@ def onReceive(packet, interface):
|
||||
msg = f"🎉 {get_name_from_number(message_from_id, 'long', rxNode)} found the Word of the Day🎊:\n {wordWas}, {metaWas}"
|
||||
send_message(msg, channel_number, 0, rxNode)
|
||||
if bingo_win:
|
||||
msg = f"🎉 {get_name_from_number(message_from_id, 'long', rxNode)} scored BINGO!🥳 {bingo_message}"
|
||||
msg = f"🎉 {get_name_from_number(message_from_id, 'long', rxNode)} scored word-search-BINGO!🥳 {bingo_message}"
|
||||
send_message(msg, channel_number, 0, rxNode)
|
||||
|
||||
slotMachine = theWordOfTheDay.emojiMiniGame(message_string, emojiSeen=emojiSeen, nodeID=message_from_id, nodeInt=rxNode)
|
||||
if slotMachine:
|
||||
msg = f"🎉 {get_name_from_number(message_from_id, 'long', rxNode)} played the Slot Machine and got: {slotMachine} 🥳"
|
||||
msg = f"🎉 {get_name_from_number(message_from_id, 'long', rxNode)} played the emote-Fruit-Machine and got: {slotMachine} 🥳"
|
||||
send_message(msg, channel_number, 0, rxNode)
|
||||
|
||||
# add message to tts queue
|
||||
if my_settings.meshagesTTS and channel_number == my_settings.ttsChannels:
|
||||
# add to the tts_read_queue
|
||||
readMe = f"DM from {get_name_from_number(message_from_id, 'short', rxNode)}: {message_string}"
|
||||
tts_read_queue.append(readMe)
|
||||
else:
|
||||
# Evaluate non TEXT_MESSAGE_APP packets
|
||||
consumeMetadata(packet, rxNode, channel_number)
|
||||
@@ -2036,6 +2259,7 @@ gameTrackers = [
|
||||
(hamtestTracker, "HamTest", handleHamtest),
|
||||
(tictactoeTracker, "TicTacToe", handleTicTacToe),
|
||||
(surveyTracker, "Survey", surveyHandler),
|
||||
(battleshipTracker, "Battleship", handleBattleship),
|
||||
# quiz does not use a tracker (quizGamePlayer) always active
|
||||
]
|
||||
|
||||
@@ -2048,8 +2272,11 @@ async def main():
|
||||
# Create core tasks
|
||||
tasks.append(asyncio.create_task(start_rx(), name="mesh_rx"))
|
||||
tasks.append(asyncio.create_task(watchdog(), name="watchdog"))
|
||||
|
||||
|
||||
# Add optional tasks
|
||||
if my_settings.dataPersistence_enabled:
|
||||
tasks.append(asyncio.create_task(dataPersistenceLoop(), name="data_persistence"))
|
||||
|
||||
if my_settings.file_monitor_enabled:
|
||||
tasks.append(asyncio.create_task(handleFileWatcher(), name="file_monitor"))
|
||||
|
||||
@@ -2057,7 +2284,11 @@ async def main():
|
||||
tasks.append(asyncio.create_task(handleSignalWatcher(), name="hamlib"))
|
||||
|
||||
if my_settings.voxDetectionEnabled:
|
||||
from modules.radio import voxMonitor
|
||||
tasks.append(asyncio.create_task(voxMonitor(), name="vox_detection"))
|
||||
|
||||
if my_settings.meshagesTTS:
|
||||
tasks.append(asyncio.create_task(handleTTS(), name="tts_handler"))
|
||||
|
||||
if my_settings.wsjtx_detection_enabled:
|
||||
tasks.append(asyncio.create_task(handleWsjtxWatcher(), name="wsjtx_monitor"))
|
||||
|
||||
@@ -12,16 +12,17 @@ This document provides an overview of all modules available in the Mesh-Bot proj
|
||||
- [Checklist](#checklist)
|
||||
- [Inventory & Point of Sale](#inventory--point-of-sale)
|
||||
- [Location & Weather](#location--weather)
|
||||
- [Map Command](#map-command)
|
||||
- [EAS & Emergency Alerts](#eas--emergency-alerts)
|
||||
- [File Monitoring & News](#file-monitoring--news)
|
||||
- [Radio Monitoring](#radio-monitoring)
|
||||
- [Voice Commands (VOX)](#voice-commands-vox)
|
||||
- [Ollama LLM/AI](#ollama-llmai)
|
||||
- [Wikipedia Search](#wikipedia-search)
|
||||
- [News & Headlines (`latest` Command)](#news--headlines-latest-command)
|
||||
- [DX Spotter Module](#dx-spotter-module)
|
||||
- [Mesh Bot Scheduler User Guide](#mesh-bot-scheduler-user-guide)
|
||||
- [Mesh Bot Scheduler](#-mesh-bot-scheduler-user-guide)
|
||||
- [Other Utilities](#other-utilities)
|
||||
- [Echo Command](#echo-command)
|
||||
- [Messaging Settings](#messaging-settings)
|
||||
- [Troubleshooting](#troubleshooting)
|
||||
- [Configuration Guide](#configuration-guide)
|
||||
@@ -38,29 +39,85 @@ See [modules/adding_more.md](adding_more.md) for developer notes.
|
||||
|
||||
### ping / pinging / test / testing / ack
|
||||
|
||||
- **Usage:** `ping`, `pinging`, `test`, `testing`, `ack`, `ping @user`, `ping #tag`
|
||||
- **Description:** Sends a ping to the bot. The bot responds with signal information such as SNR (Signal-to-Noise Ratio), RSSI (Received Signal Strength Indicator), and hop count. Used for making field report etc.
|
||||
- **Targeted Ping:**
|
||||
You can direct a ping to a specific user or group by mentioning their short name or tag:
|
||||
- `ping @NODE` — Pings a Joke to specific node by its short name.
|
||||
- **Example:**
|
||||
- **Usage:**
|
||||
- `ping`, `pinging`, `test`, `testing`, `ack`
|
||||
- `ping <number>` — Request multiple auto-pings (DM only)
|
||||
- `ping @user` — Target a specific user (can trigger a joke via BBS DM)
|
||||
- `ping ?` — Get help (DM only)
|
||||
- `ping stop` — Stop auto-ping
|
||||
|
||||
- **Description:**
|
||||
Sends a ping to the bot. The bot responds with signal and routing information such as SNR (Signal-to-Noise Ratio), RSSI (Received Signal Strength Indicator), hop count, and gateway status. Used for field reports, connectivity checks, and diagnostics.
|
||||
|
||||
#### **Response Types and Examples**
|
||||
|
||||
- **Basic Ping:**
|
||||
```
|
||||
ping
|
||||
```
|
||||
Response:
|
||||
```
|
||||
SNR: 12.5, RSSI: -80, Hops: 2
|
||||
🏓PONG [RF]
|
||||
SNR:12.5 RSSI:-80
|
||||
```
|
||||
- `[GW]` = Received via Gateway (internet or MQTT)
|
||||
- `[RF]` = Received via direct radio
|
||||
- `[F]` = Received via mesh/flood route
|
||||
|
||||
- **Meta Ping:**
|
||||
```
|
||||
ping @Top of the hill
|
||||
ping @Top Of Hill
|
||||
```
|
||||
Response:
|
||||
```
|
||||
PING @Top of the hill SNR: 10.2, RSSI: -85, Hops: 1
|
||||
🏓PONG @Top Of Hill [RF]
|
||||
SNR: 12.5, RSSI: -80, Hops: 2
|
||||
```
|
||||
- **Help:**
|
||||
Send `ping?` in a Direct Message (DM) for usage instructions.
|
||||
|
||||
- **Multi-ping (auto-ping):**
|
||||
```
|
||||
ping 10
|
||||
```
|
||||
Response:
|
||||
```
|
||||
🚦Initalizing 10 auto-ping
|
||||
```
|
||||
- The bot will send 10 pings at intervals (DM only).
|
||||
- Use `ping stop` to cancel.
|
||||
|
||||
- **Help:**
|
||||
```
|
||||
ping?
|
||||
```
|
||||
Response (DM only):
|
||||
```
|
||||
🤖Ping Command Help:
|
||||
🏓 Send 'ping' or 'ack' or 'test' to get a response.
|
||||
🏓 Send 'ping <number>' to get multiple pings in DM
|
||||
🏓 ping @USERID to send a Joke from the bot
|
||||
```
|
||||
|
||||
#### **Response Field Explanations**
|
||||
|
||||
- **SNR:** Signal-to-Noise Ratio (dB) — higher is better.
|
||||
- **RSSI:** Received Signal Strength Indicator (dBm) — closer to 0 is stronger.
|
||||
- **[GW]:** Message received via Gateway (internet/MQTT).
|
||||
- **[RF]:** Message received via direct radio.
|
||||
- **[F]:** Message received via mesh/flood route.
|
||||
|
||||
- **Joke via BBS DM:** If you ping `@'shortname'` and BBS is enabled, the bot will DM a joke to that user.
|
||||
|
||||
#### **Notes**
|
||||
|
||||
- You can mention users or tags in your ping/test messages (e.g., `ping @user`) to target specific nodes.
|
||||
- Some commands (like multi-ping) are only available in Direct Messages, depending on configuration.
|
||||
- If you request too many auto-pings, the bot may throttle or deny the request.
|
||||
- Use `ping stop` to cancel an ongoing auto-ping.
|
||||
|
||||
---
|
||||
|
||||
**Tip:**
|
||||
Use `ping?` in DM for a quick help message on all ping options.
|
||||
---
|
||||
|
||||
### Notes
|
||||
@@ -139,8 +196,8 @@ The checklist module provides asset tracking and accountability features with sa
|
||||
| `checkin` | Check in a node/asset |
|
||||
| `checkout` | Check out a node/asset |
|
||||
| `checklist` | Show active check-ins |
|
||||
| `purgein` | Delete your check-in record |
|
||||
| `purgeout` | Delete your check-out record |
|
||||
| `approvecl` | Admin Approve id |
|
||||
| `denycl` | Admin Remove id |
|
||||
|
||||
#### Advanced Features
|
||||
|
||||
@@ -150,10 +207,10 @@ The checklist module provides asset tracking and accountability features with sa
|
||||
- Ideal for solo activities, remote work, or safety accountability
|
||||
|
||||
- **Approval Workflow**
|
||||
- `checklistapprove <id>` - Approve a pending check-in (admin)
|
||||
- `checklistdeny <id>` - Deny/remove a check-in (admin)
|
||||
- `approvecl <id>` - Approve a pending check-in (admin)
|
||||
- `denycl <id>` - Deny/remove a check-in (admin)
|
||||
|
||||
more at [modules/checklist.md](modules/checklist.md)
|
||||
more at [modules/checklist.md](checklist.md)
|
||||
|
||||
#### Examples
|
||||
|
||||
@@ -213,7 +270,7 @@ The inventory module provides a full point-of-sale (POS) system with inventory t
|
||||
| `cartbuy` or `cartsell` | Complete transaction |
|
||||
| `cartclear` | Empty your cart |
|
||||
|
||||
more at [modules/inventory.py](modules/inventory.py)
|
||||
more at [modules/inventory.py](inventory.py)
|
||||
|
||||
#### Features
|
||||
|
||||
@@ -280,27 +337,25 @@ The system uses SQLite with four tables:
|
||||
|
||||
## Location & Weather
|
||||
|
||||
| Command | Description |
|
||||
|--------------|-----------------------------------------------|
|
||||
| `wx` | Local weather forecast (NOAA/Open-Meteo) |
|
||||
| `wxc` | Weather in metric/imperial |
|
||||
| `wxa` | NOAA alerts |
|
||||
| `wxalert` | NOAA alerts (expanded) |
|
||||
| `mwx` | NOAA Coastal Marine Forecast |
|
||||
| `tide` | Tide info (NOAA/tidepredict for global) |
|
||||
| `riverflow` | NOAA river flow info |
|
||||
| `earthquake` | USGS earthquake info |
|
||||
| `valert` | USGS volcano alerts |
|
||||
| `rlist` | Nearby repeaters from RepeaterBook |
|
||||
| `satpass` | Satellite pass info |
|
||||
| `howfar` | Distance traveled since last check |
|
||||
| `howtall` | Calculate height using sun angle |
|
||||
| `whereami` | Show current location |
|
||||
|
||||
| Command | Description |
|
||||
|--------------|---------------------------------------------------------|
|
||||
| `wx` | Local weather forecast (NOAA/Open-Meteo) |
|
||||
| `wxc` | Weather in metric/imperial units |
|
||||
| `wxa` | NOAA weather alerts (summary) |
|
||||
| `wxalert` | NOAA weather alerts (detailed/expanded) |
|
||||
| `mwx` | NOAA Coastal Marine Forecast |
|
||||
| `tide` | NOAA tide information |
|
||||
| `riverflow` | NOAA river flow information |
|
||||
| `earthquake` | USGS earthquake information |
|
||||
| `valert` | USGS volcano alerts |
|
||||
| `rlist` | Nearby repeaters from RepeaterBook |
|
||||
| `satpass` | Satellite pass information |
|
||||
| `howfar` | Distance traveled since last check |
|
||||
| `howtall` | Calculate height using sun angle |
|
||||
| `whereami` | Show current location/address |
|
||||
| `map` | Log/view location data to map.csv |
|
||||
Configure in `[location]` section of `config.ini`.
|
||||
|
||||
**Note**: For global tide predictions outside the US, enable `useTidePredict = True` in `config.ini`. See [xtide.md](xtide.md) for setup details.
|
||||
|
||||
Certainly! Here’s a README help section for your `mapHandler` command, suitable for users of your meshbot:
|
||||
|
||||
---
|
||||
@@ -343,7 +398,6 @@ The `map` command allows you to log your current GPS location with a custom desc
|
||||
|--------------|-----------------------------------------------|
|
||||
| `ea`/`ealert`| FEMA iPAWS/EAS alerts (USA/DE) |
|
||||
|
||||
Enable in `[eas]` section of `config.ini`.
|
||||
|
||||
---
|
||||
|
||||
@@ -365,10 +419,6 @@ The Radio Monitoring module provides several ways to integrate amateur radio sof
|
||||
|
||||
### Hamlib Integration
|
||||
|
||||
| Command | Description |
|
||||
|--------------|-----------------------------------------------|
|
||||
| `radio` | Monitor radio SNR via Hamlib |
|
||||
|
||||
Monitors signal strength (S-meter) from a connected radio via Hamlib's `rigctld` daemon. When the signal exceeds a configured threshold, it broadcasts an alert to the mesh network with frequency and signal strength information.
|
||||
|
||||
### WSJT-X Integration
|
||||
@@ -459,9 +509,6 @@ Enable and configure VOX features in the `[vox]` section of `config.ini`.
|
||||
| Command | Description |
|
||||
|--------------|-----------------------------------------------|
|
||||
| `askai` | Ask Ollama LLM AI |
|
||||
| `ask:` | Ask Ollama LLM AI (raw) |
|
||||
|
||||
Configure in `[ollama]` section of `config.ini`.
|
||||
|
||||
More at [LLM Readme](llm.md)
|
||||
|
||||
@@ -473,11 +520,66 @@ More at [LLM Readme](llm.md)
|
||||
|--------------|-----------------------------------------------|
|
||||
| `wiki` | Search Wikipedia or local Kiwix server |
|
||||
|
||||
Configure in `[wikipedia]` section of `config.ini`.
|
||||
Configure in `[general]` section of `config.ini`.
|
||||
|
||||
---
|
||||
|
||||
## News & Headlines (`latest` Command)
|
||||
|
||||
The `latest` command allows you to fetch current news headlines or articles on any topic using the NewsAPI integration. This is useful for quickly checking the latest developments on a subject, even from the mesh.
|
||||
|
||||
### Usage
|
||||
|
||||
- **Get the latest headlines on a topic:**
|
||||
```
|
||||
latest <topic>
|
||||
```
|
||||
Example:
|
||||
```
|
||||
latest meshtastic
|
||||
```
|
||||
This will return the most recent news articles about "meshtastic".
|
||||
|
||||
- **General latest news:**
|
||||
```
|
||||
latest
|
||||
```
|
||||
Returns the latest general news headlines.
|
||||
|
||||
### How It Works
|
||||
|
||||
- The bot queries NewsAPI.org for the most recent articles matching your topic.
|
||||
- Each result includes the article title and a short description.
|
||||
|
||||
You need to go register for the developer key and read terms of use.
|
||||
|
||||
```ini
|
||||
# enable or disable the headline command which uses NewsAPI.org
|
||||
enableNewsAPI = True
|
||||
newsAPI_KEY = key at https://newsapi.org/register
|
||||
newsAPIregion = us
|
||||
```
|
||||
|
||||
### Example Output
|
||||
|
||||
```
|
||||
🗞️:📰Meshtastic project launches new firmware
|
||||
The open-source mesh radio project Meshtastic has released a major firmware update...
|
||||
|
||||
📰How Meshtastic is changing off-grid communication
|
||||
A look at how Meshtastic devices are being used for emergency response...
|
||||
|
||||
📰Meshtastic featured at DEF CON 2025
|
||||
The Meshtastic team presented new features at DEF CON, drawing large crowds...
|
||||
```
|
||||
|
||||
### Notes
|
||||
|
||||
- You can search for any topic, e.g., `latest wildfire`, `latest ham radio`, etc.
|
||||
- The number of results can be adjusted in the configuration.
|
||||
- Requires internet access for the bot to fetch news.
|
||||
|
||||
___
|
||||
## DX Spotter Module
|
||||
|
||||
The DX Spotter module allows you to fetch and display recent DX cluster spots from [spothole.app](https://spothole.app) directly in your mesh-bot.
|
||||
@@ -690,6 +792,73 @@ You can use any of these options to schedule messages on specific days:
|
||||
- `history` — Command history
|
||||
- `cmd`/`cmd?` — Show help message (the bot avoids the use of saying or using help)
|
||||
|
||||
|
||||
|
||||
| Command | Description | ✅ Works Off-Grid |
|
||||
|--------------|-------------|------------------|
|
||||
| `echo` | Echo string back. Admins can use `echo <message> c=<channel> d=<device>` to send to any channel/device. | ✅ |
|
||||
---
|
||||
|
||||
### Echo Command
|
||||
|
||||
The `echo` command returns your message back to you.
|
||||
**Admins** can use an extended syntax to send a message to any channel and device.
|
||||
|
||||
#### Usage
|
||||
|
||||
- **Basic Echo (all users):**
|
||||
```
|
||||
echo Hello World
|
||||
```
|
||||
Response:
|
||||
```
|
||||
Hello World
|
||||
```
|
||||
|
||||
- **Admin Extended Syntax:**
|
||||
```
|
||||
echo <message> c=<channel> d=<device>
|
||||
```
|
||||
Example:
|
||||
```
|
||||
echo Hello world c=1 d=2
|
||||
```
|
||||
This will send "Hello world" to channel 1, device 2.
|
||||
|
||||
#### Special Keyword Substitution
|
||||
|
||||
- In admin echo, if you include the word `motd` or `MOTD` (case-insensitive), it will be replaced with the current Message of the Day.
|
||||
- If you include the word `welcome!` (case-insensitive), it will be replaced with the current Welcome Message as set in your configuration.
|
||||
|
||||
- Example:
|
||||
```
|
||||
echo Today's message is motd c=1 d=2
|
||||
```
|
||||
If the MOTD is "Potatos Are Cool!", the message sent will be:
|
||||
```
|
||||
Today's message is Potatos Are Cool!
|
||||
```
|
||||
|
||||
#### Notes
|
||||
- Only admins can use the `c=<channel>` and `d=<device>` override.
|
||||
- If you omit `c=<channel>` and `d=<device>`, the message is echoed back to your current channel/device.
|
||||
- MOTD substitution works for any standalone `motd` or `MOTD` in the message.
|
||||
|
||||
#### Help
|
||||
|
||||
- Send `echo?` for usage instructions.
|
||||
- Admins will see this help message:
|
||||
```
|
||||
Admin usage: echo <message> c=<channel> d=<device>
|
||||
Example: echo Hello world c=1 d=2
|
||||
```
|
||||
|
||||
#### Notes
|
||||
- Only admins can use the `c=<channel>` and `d=<device>` override.
|
||||
- If you omit `c=<channel>` and `d=<device>`, the message is echoed back to your current channel/device.
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Configuration
|
||||
@@ -703,7 +872,7 @@ You can use any of these options to schedule messages on specific days:
|
||||
## Troubleshooting
|
||||
|
||||
- Use the `logger` module for debug output.
|
||||
- See [modules/README.md](modules/README.md) for developer help.
|
||||
- See [modules/README.md](adding_more.md) for developer help.
|
||||
- Use `etc/simulator.py` for local testing.
|
||||
- Check the logs in the `logs/` directory for errors.
|
||||
|
||||
@@ -974,7 +1143,6 @@ This uses USA: SAME, FIPS, to locate the alerts in the feed. By default ignoring
|
||||
|
||||
```ini
|
||||
eAlertBroadcastEnabled = False # Goverment IPAWS/CAP Alert Broadcast
|
||||
eAlertBroadcastCh = 2,3 # Goverment Emergency IPAWS/CAP Alert Broadcast Channels
|
||||
ignoreFEMAenable = True # Ignore any headline that includes followig word list
|
||||
ignoreFEMAwords = test,exercise
|
||||
# comma separated list of FIPS codes to trigger local alert. find your FIPS codes at https://en.wikipedia.org/wiki/Federal_Information_Processing_Standard_state_code
|
||||
@@ -1142,6 +1310,4 @@ enabled = True # QRZ Hello to new nodes
|
||||
qrz_hello_string = "send CMD or DM me for more info." # will be sent to all heard nodes once
|
||||
training = True # Training mode will not send the hello message to new nodes, use this to build up database
|
||||
```
|
||||
|
||||
|
||||
Happy meshing!
|
||||
@@ -26,7 +26,6 @@ The enhanced checklist module provides asset tracking and accountability feature
|
||||
### 📍 Location Tracking
|
||||
- Automatic GPS location capture when checking in/out
|
||||
- View last known location in checklist
|
||||
- Track movement over time
|
||||
|
||||
- **Time Window Monitoring**: Check-in with safety intervals (e.g., `checkin 60 Hunting in tree stand`)
|
||||
- Tracks if users don't check in within expected timeframe
|
||||
@@ -34,20 +33,65 @@ The enhanced checklist module provides asset tracking and accountability feature
|
||||
- Provides `get_overdue_checkins()` function for alert integration
|
||||
|
||||
- **Approval Workflow**:
|
||||
- `checklistapprove <id>` - Approve pending check-ins (admin)
|
||||
- `checklistdeny <id>` - Deny/remove check-ins (admin)
|
||||
- `approvecl <id>` - Approve pending check-ins (admin)
|
||||
- `denycl <id>` - Deny/remove check-ins (admin)
|
||||
- Support for approval-based workflows
|
||||
|
||||
- **Enhanced Database Schema**:
|
||||
- Added `approved` field for approval workflows
|
||||
- Added `expected_checkin_interval` field for safety monitoring
|
||||
- Automatic migration for existing databases
|
||||
|
||||
#### New Commands:
|
||||
- `checklistapprove <id>` - Approve a check-in
|
||||
- `checklistdeny <id>` - Deny a check-in
|
||||
- `approvecl <id>` - Approve a check-in
|
||||
- `denycl <id>` - Deny a check-in
|
||||
- Enhanced `checkin [interval] [note]` - Now supports interval parameter
|
||||
|
||||
### Enhanced Check Out Options
|
||||
|
||||
You can now check out in three ways:
|
||||
|
||||
#### 1. Check Out the Most Recent Active Check-in
|
||||
```
|
||||
checkout [notes]
|
||||
```
|
||||
Checks out your most recent active check-in.
|
||||
*Example:*
|
||||
```
|
||||
checkout Heading back to camp
|
||||
```
|
||||
|
||||
#### 2. Check Out All Active Check-ins
|
||||
```
|
||||
checkout all [notes]
|
||||
```
|
||||
Checks out **all** of your active check-ins at once.
|
||||
*Example:*
|
||||
```
|
||||
checkout all Done for the day
|
||||
```
|
||||
*Response:*
|
||||
```
|
||||
Checked out 2 check-ins for Hunter1. Durations: 01:23:45, 00:15:30
|
||||
```
|
||||
|
||||
#### 3. Check Out a Specific Check-in by ID
|
||||
```
|
||||
checkout <checkin_id> [notes]
|
||||
```
|
||||
Checks out a specific check-in using its ID (as shown in the `checklist` command).
|
||||
*Example:*
|
||||
```
|
||||
checkout 123 Leaving early
|
||||
```
|
||||
*Response:*
|
||||
```
|
||||
Checked out check-in ID 123 for Hunter1. Duration: 00:45:12
|
||||
```
|
||||
|
||||
**Tip:**
|
||||
- Use `checklist` to see your current check-in IDs and durations.
|
||||
- You can always add a note to any checkout command for context.
|
||||
|
||||
---
|
||||
|
||||
These options allow you to manage your check-ins more flexibly, whether you want to check out everything at once or just a specific session.
|
||||
|
||||
## Configuration
|
||||
|
||||
Add to your `config.ini`:
|
||||
@@ -106,38 +150,31 @@ ID: Hunter1 checked-In for 01:23:45📝Solo hunting
|
||||
ID: Tech2 checked-In for 00:15:30📝Equipment repair
|
||||
```
|
||||
|
||||
#### Purge Records
|
||||
```
|
||||
purgein # Delete your check-in record
|
||||
purgeout # Delete your check-out record
|
||||
```
|
||||
|
||||
Use these to manually remove your records if needed.
|
||||
|
||||
### Admin Commands
|
||||
|
||||
#### Approve Check-in
|
||||
```
|
||||
checklistapprove <checkin_id>
|
||||
approvecl <checkin_id>
|
||||
```
|
||||
|
||||
Approve a pending check-in (requires admin privileges).
|
||||
|
||||
**Example:**
|
||||
```
|
||||
checklistapprove 123
|
||||
approvecl 123
|
||||
```
|
||||
|
||||
#### Deny Check-in
|
||||
```
|
||||
checklistdeny <checkin_id>
|
||||
denycl <checkin_id>
|
||||
```
|
||||
|
||||
Deny and remove a check-in (requires admin privileges).
|
||||
|
||||
**Example:**
|
||||
```
|
||||
checklistdeny 456
|
||||
denycl 456
|
||||
```
|
||||
|
||||
## Safety Monitoring Feature
|
||||
@@ -153,7 +190,7 @@ checkin 60 Hunting in remote area
|
||||
This tells the system:
|
||||
- You're checking in now
|
||||
- You expect to check in again or check out within 60 minutes
|
||||
- If 60 minutes pass without activity, you'll be marked as overdue
|
||||
- If 60 minutes pass without activity, you'll be marked as overdue alert
|
||||
|
||||
### Use Cases for Time Intervals
|
||||
|
||||
@@ -174,14 +211,17 @@ This tells the system:
|
||||
|
||||
4. **Check-in Points**: Regular status updates during long operations
|
||||
```
|
||||
checkin 15 Descending cliff face
|
||||
checkin 15 Descending cliff
|
||||
```
|
||||
|
||||
5. **Check-in a reminder**: Reminders to check in on something like a pot roast
|
||||
```
|
||||
checkin 30 🍠🍖
|
||||
```
|
||||
|
||||
### Overdue Check-ins
|
||||
|
||||
The system tracks all check-ins with time intervals and can identify who is overdue. The module provides the `get_overdue_checkins()` function that returns a list of overdue users.
|
||||
|
||||
**Note**: Automatic alerts for overdue check-ins require integration with the bot's scheduler or alert system. The checklist module provides the detection capability, but sending notifications must be configured separately through the main bot's alert features.
|
||||
The system tracks all check-ins with time intervals and can identify who is overdue. The module provides the `get_overdue_checkins()` function that returns a list of overdue users. It alerts on the 20min watchdog.
|
||||
|
||||
## Practical Examples
|
||||
|
||||
@@ -258,15 +298,12 @@ checkin 45 Site survey tower location 2
|
||||
|
||||
The checklist system automatically captures GPS coordinates when available. This can be used for:
|
||||
- Tracking last known position
|
||||
- Geo-fencing applications
|
||||
- Emergency response coordination
|
||||
- Asset location management
|
||||
|
||||
### Alert Systems
|
||||
|
||||
The overdue check-in feature can trigger:
|
||||
- Notifications to supervisors
|
||||
- Emergency alerts
|
||||
- Automated messages to response teams
|
||||
- Email/SMS notifications (if configured)
|
||||
|
||||
@@ -274,9 +311,7 @@ The overdue check-in feature can trigger:
|
||||
|
||||
Combine with the scheduler module to:
|
||||
- Send reminders to check in
|
||||
- Automatically generate reports
|
||||
- Schedule periodic check-in requirements
|
||||
- Send daily summaries
|
||||
|
||||
## Best Practices
|
||||
|
||||
@@ -306,6 +341,17 @@ Combine with the scheduler module to:
|
||||
checklist
|
||||
```
|
||||
|
||||
The list will show ✅ approved and ☑️ unapproved
|
||||
The alarm will only alert on approved.
|
||||
|
||||
in config.ini
|
||||
```ini
|
||||
# Auto approve new checklists
|
||||
auto_approve = True
|
||||
# Check-in reminder interval is 5min
|
||||
# Checkin broadcast interface and channel is emergency_handler interface and channel
|
||||
```
|
||||
|
||||
2. **Respond to Overdue Situations**: Act on overdue check-ins promptly
|
||||
|
||||
3. **Set Clear Policies**: Establish when and how to use the system
|
||||
|
||||
@@ -3,69 +3,50 @@
|
||||
|
||||
import sqlite3
|
||||
from modules.log import logger
|
||||
from modules.settings import checklist_db, reverse_in_out, bbs_ban_list
|
||||
from modules.settings import checklist_db, reverse_in_out, bbs_ban_list, bbs_admin_list, checklist_auto_approve
|
||||
import time
|
||||
|
||||
trap_list_checklist = ("checkin", "checkout", "checklist", "purgein", "purgeout",
|
||||
"checklistapprove", "checklistdeny", "checklistadd", "checklistremove")
|
||||
trap_list_checklist = ("checkin", "checkout", "checklist", "approvecl", "denycl",)
|
||||
|
||||
def initialize_checklist_database():
|
||||
try:
|
||||
conn = sqlite3.connect(checklist_db)
|
||||
c = conn.cursor()
|
||||
# Check if the checkin table exists, and create it if it doesn't
|
||||
logger.debug("System: Checklist: Initializing database...")
|
||||
c.execute('''CREATE TABLE IF NOT EXISTS checkin
|
||||
(checkin_id INTEGER PRIMARY KEY, checkin_name TEXT, checkin_date TEXT,
|
||||
checkin_time TEXT, location TEXT, checkin_notes TEXT,
|
||||
approved INTEGER DEFAULT 1, expected_checkin_interval INTEGER DEFAULT 0)''')
|
||||
# Check if the checkout table exists, and create it if it doesn't
|
||||
approved INTEGER DEFAULT 1, expected_checkin_interval INTEGER DEFAULT 0,
|
||||
removed INTEGER DEFAULT 0)''')
|
||||
c.execute('''CREATE TABLE IF NOT EXISTS checkout
|
||||
(checkout_id INTEGER PRIMARY KEY, checkout_name TEXT, checkout_date TEXT,
|
||||
checkout_time TEXT, location TEXT, checkout_notes TEXT)''')
|
||||
|
||||
# Add new columns if they don't exist (for migration)
|
||||
try:
|
||||
c.execute("ALTER TABLE checkin ADD COLUMN approved INTEGER DEFAULT 1")
|
||||
except sqlite3.OperationalError:
|
||||
pass # Column already exists
|
||||
|
||||
try:
|
||||
c.execute("ALTER TABLE checkin ADD COLUMN expected_checkin_interval INTEGER DEFAULT 0")
|
||||
except sqlite3.OperationalError:
|
||||
pass # Column already exists
|
||||
|
||||
try:
|
||||
c.execute("ALTER TABLE checkin ADD COLUMN removed INTEGER DEFAULT 0")
|
||||
except sqlite3.OperationalError:
|
||||
pass # Column already exists
|
||||
|
||||
# Add this to your DB init (if not already present)
|
||||
try:
|
||||
c.execute("ALTER TABLE checkout ADD COLUMN removed INTEGER DEFAULT 0")
|
||||
except sqlite3.OperationalError:
|
||||
pass # Column already exists
|
||||
|
||||
checkout_time TEXT, location TEXT, checkout_notes TEXT,
|
||||
checkin_id INTEGER, removed INTEGER DEFAULT 0)''')
|
||||
conn.commit()
|
||||
conn.close()
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.error(f"Checklist: Failed to initialize database: {e}")
|
||||
logger.error(f"Checklist: Failed to initialize database: {e} Please delete old checklist database file. rm data/checklist.db")
|
||||
return False
|
||||
|
||||
def checkin(name, date, time, location, notes):
|
||||
location = ", ".join(map(str, location))
|
||||
# checkin a user
|
||||
# Auto-approve if setting is enabled
|
||||
approved_value = 1 if checklist_auto_approve else 0
|
||||
conn = sqlite3.connect(checklist_db)
|
||||
c = conn.cursor()
|
||||
try:
|
||||
c.execute("INSERT INTO checkin (checkin_name, checkin_date, checkin_time, location, checkin_notes) VALUES (?, ?, ?, ?, ?)", (name, date, time, location, notes))
|
||||
# # remove any checkouts that are older than the checkin
|
||||
# c.execute("DELETE FROM checkout WHERE checkout_date < ? OR (checkout_date = ? AND checkout_time < ?)", (date, date, time))
|
||||
c.execute(
|
||||
"INSERT INTO checkin (checkin_name, checkin_date, checkin_time, location, checkin_notes, removed, approved) VALUES (?, ?, ?, ?, ?, 0, ?)",
|
||||
(name, date, time, location, notes, approved_value)
|
||||
)
|
||||
except sqlite3.OperationalError as e:
|
||||
if "no such table" in str(e):
|
||||
initialize_checklist_database()
|
||||
c.execute("INSERT INTO checkin (checkin_name, checkin_date, checkin_time, location, checkin_notes) VALUES (?, ?, ?, ?, ?)", (name, date, time, location, notes))
|
||||
c.execute(
|
||||
"INSERT INTO checkin (checkin_name, checkin_date, checkin_time, location, checkin_notes, removed, approved) VALUES (?, ?, ?, ?, ?, 0, ?)",
|
||||
(name, date, time, location, notes, approved_value)
|
||||
)
|
||||
else:
|
||||
raise
|
||||
conn.commit()
|
||||
@@ -75,71 +56,90 @@ def checkin(name, date, time, location, notes):
|
||||
else:
|
||||
return "Checked✅In: " + str(name)
|
||||
|
||||
def delete_checkin(checkin_id):
|
||||
# delete a checkin
|
||||
conn = sqlite3.connect(checklist_db)
|
||||
c = conn.cursor()
|
||||
c.execute("DELETE FROM checkin WHERE checkin_id = ?", (checkin_id,))
|
||||
conn.commit()
|
||||
conn.close()
|
||||
return "Checkin deleted." + str(checkin_id)
|
||||
|
||||
def checkout(name, date, time_str, location, notes):
|
||||
def checkout(name, date, time_str, location, notes, all=False, checkin_id=None):
|
||||
location = ", ".join(map(str, location))
|
||||
checkin_record = None # Ensure variable is always defined
|
||||
conn = sqlite3.connect(checklist_db)
|
||||
c = conn.cursor()
|
||||
checked_out_ids = []
|
||||
durations = []
|
||||
try:
|
||||
# Check if the user has a checkin before checking out
|
||||
c.execute("""
|
||||
SELECT checkin_id FROM checkin
|
||||
WHERE checkin_name = ?
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM checkout
|
||||
WHERE checkout_name = checkin_name
|
||||
AND (checkout_date > checkin_date OR (checkout_date = checkin_date AND checkout_time > checkin_time))
|
||||
)
|
||||
ORDER BY checkin_date DESC, checkin_time DESC
|
||||
LIMIT 1
|
||||
""", (name,))
|
||||
checkin_record = c.fetchone()
|
||||
if checkin_record:
|
||||
c.execute("INSERT INTO checkout (checkout_name, checkout_date, checkout_time, location, checkout_notes) VALUES (?, ?, ?, ?, ?)", (name, date, time_str, location, notes))
|
||||
# calculate length of time checked in
|
||||
c.execute("SELECT checkin_time, checkin_date FROM checkin WHERE checkin_id = ?", (checkin_record[0],))
|
||||
checkin_time, checkin_date = c.fetchone()
|
||||
checkin_datetime = time.strptime(checkin_date + " " + checkin_time, "%Y-%m-%d %H:%M:%S")
|
||||
time_checked_in_seconds = time.time() - time.mktime(checkin_datetime)
|
||||
timeCheckedIn = time.strftime("%H:%M:%S", time.gmtime(time_checked_in_seconds))
|
||||
# # remove the checkin record older than the checkout
|
||||
# c.execute("DELETE FROM checkin WHERE checkin_date < ? OR (checkin_date = ? AND checkin_time < ?)", (date, date, time_str))
|
||||
if checkin_id is not None:
|
||||
# Check out a specific check-in by ID
|
||||
c.execute("""
|
||||
SELECT checkin_id, checkin_time, checkin_date FROM checkin
|
||||
WHERE checkin_id = ? AND checkin_name = ?
|
||||
""", (checkin_id, name))
|
||||
row = c.fetchone()
|
||||
if row:
|
||||
c.execute("INSERT INTO checkout (checkout_name, checkout_date, checkout_time, location, checkout_notes, checkin_id) VALUES (?, ?, ?, ?, ?, ?)",
|
||||
(name, date, time_str, location, notes, row[0]))
|
||||
checkin_time, checkin_date = row[1], row[2]
|
||||
checkin_datetime = time.strptime(checkin_date + " " + checkin_time, "%Y-%m-%d %H:%M:%S")
|
||||
time_checked_in_seconds = time.time() - time.mktime(checkin_datetime)
|
||||
durations.append(time.strftime("%H:%M:%S", time.gmtime(time_checked_in_seconds)))
|
||||
checked_out_ids.append(row[0])
|
||||
elif all:
|
||||
# Check out all active check-ins for this user
|
||||
c.execute("""
|
||||
SELECT checkin_id, checkin_time, checkin_date FROM checkin
|
||||
WHERE checkin_name = ?
|
||||
AND removed = 0
|
||||
AND checkin_id NOT IN (
|
||||
SELECT checkin_id FROM checkout WHERE checkin_id IS NOT NULL
|
||||
)
|
||||
""", (name,))
|
||||
rows = c.fetchall()
|
||||
for row in rows:
|
||||
c.execute("INSERT INTO checkout (checkout_name, checkout_date, checkout_time, location, checkout_notes, checkin_id) VALUES (?, ?, ?, ?, ?, ?)",
|
||||
(name, date, time_str, location, notes, row[0]))
|
||||
checkin_time, checkin_date = row[1], row[2]
|
||||
checkin_datetime = time.strptime(checkin_date + " " + checkin_time, "%Y-%m-%d %H:%M:%S")
|
||||
time_checked_in_seconds = time.time() - time.mktime(checkin_datetime)
|
||||
durations.append(time.strftime("%H:%M:%S", time.gmtime(time_checked_in_seconds)))
|
||||
checked_out_ids.append(row[0])
|
||||
else:
|
||||
# Default: check out the most recent active check-in
|
||||
c.execute("""
|
||||
SELECT checkin_id, checkin_time, checkin_date FROM checkin
|
||||
WHERE checkin_name = ?
|
||||
AND removed = 0
|
||||
AND checkin_id NOT IN (
|
||||
SELECT checkin_id FROM checkout WHERE checkin_id IS NOT NULL
|
||||
)
|
||||
ORDER BY checkin_date DESC, checkin_time DESC
|
||||
LIMIT 1
|
||||
""", (name,))
|
||||
row = c.fetchone()
|
||||
if row:
|
||||
c.execute("INSERT INTO checkout (checkout_name, checkout_date, checkout_time, location, checkout_notes, checkin_id) VALUES (?, ?, ?, ?, ?, ?)",
|
||||
(name, date, time_str, location, notes, row[0]))
|
||||
checkin_time, checkin_date = row[1], row[2]
|
||||
checkin_datetime = time.strptime(checkin_date + " " + checkin_time, "%Y-%m-%d %H:%M:%S")
|
||||
time_checked_in_seconds = time.time() - time.mktime(checkin_datetime)
|
||||
durations.append(time.strftime("%H:%M:%S", time.gmtime(time_checked_in_seconds)))
|
||||
checked_out_ids.append(row[0])
|
||||
except sqlite3.OperationalError as e:
|
||||
if "no such table" in str(e):
|
||||
conn.close()
|
||||
initialize_checklist_database()
|
||||
# Try again after initializing
|
||||
return checkout(name, date, time_str, location, notes)
|
||||
return checkout(name, date, time_str, location, notes, all=all, checkin_id=checkin_id)
|
||||
else:
|
||||
conn.close()
|
||||
raise
|
||||
conn.commit()
|
||||
conn.close()
|
||||
if checkin_record:
|
||||
if reverse_in_out:
|
||||
return "Checked⌛️In: " + str(name) + " duration " + timeCheckedIn
|
||||
if checked_out_ids:
|
||||
if all:
|
||||
return f"Checked out {len(checked_out_ids)} check-ins for {name}. Durations: {', '.join(durations)}"
|
||||
elif checkin_id is not None:
|
||||
return f"Checked out check-in ID {checkin_id} for {name}. Duration: {durations[0]}"
|
||||
else:
|
||||
return "Checked⌛️Out: " + str(name) + " duration " + timeCheckedIn
|
||||
if reverse_in_out:
|
||||
return f"Checked⌛️In: {name} duration {durations[0]}"
|
||||
else:
|
||||
return f"Checked⌛️Out: {name} duration {durations[0]}"
|
||||
else:
|
||||
return "None found for " + str(name)
|
||||
|
||||
def delete_checkout(checkout_id):
|
||||
# delete a checkout
|
||||
conn = sqlite3.connect(checklist_db)
|
||||
c = conn.cursor()
|
||||
c.execute("DELETE FROM checkout WHERE checkout_id = ?", (checkout_id,))
|
||||
conn.commit()
|
||||
conn.close()
|
||||
return "Checkout deleted." + str(checkout_id)
|
||||
return f"None found for {name}"
|
||||
|
||||
def approve_checkin(checkin_id):
|
||||
"""Approve a pending check-in"""
|
||||
@@ -254,25 +254,27 @@ def get_overdue_checkins():
|
||||
return []
|
||||
|
||||
def format_overdue_alert():
|
||||
header = "⚠️ OVERDUE CHECK-INS:\a\n"
|
||||
alert = ""
|
||||
try:
|
||||
"""Format overdue check-ins as an alert message"""
|
||||
overdue = get_overdue_checkins()
|
||||
logger.debug(f"Overdue check-ins: {overdue}") # Add this line
|
||||
if not overdue:
|
||||
return None
|
||||
|
||||
alert = "⚠️ OVERDUE CHECK-INS:\n"
|
||||
for entry in overdue:
|
||||
hours = entry['overdue_minutes'] // 60
|
||||
minutes = entry['overdue_minutes'] % 60
|
||||
alert += f"{entry['name']}: {hours}h {minutes}m overdue"
|
||||
if hours > 0:
|
||||
alert += f"{entry['name']}: {hours}h {minutes}m overdue"
|
||||
else:
|
||||
alert += f"{entry['name']}: {minutes}m overdue"
|
||||
# if entry['location']:
|
||||
# alert += f" @ {entry['location']}"
|
||||
if entry['checkin_notes']:
|
||||
alert += f" 📝{entry['checkin_notes']}"
|
||||
alert += "\n"
|
||||
|
||||
return alert.rstrip()
|
||||
if alert:
|
||||
return header + alert.rstrip()
|
||||
except Exception as e:
|
||||
logger.error(f"Checklist: Error formatting overdue alert: {e}")
|
||||
return None
|
||||
@@ -285,9 +287,9 @@ def list_checkin():
|
||||
c.execute("""
|
||||
SELECT * FROM checkin
|
||||
WHERE removed = 0
|
||||
AND checkin_id NOT IN (
|
||||
SELECT checkin_id FROM checkout
|
||||
WHERE checkout_date > checkin_date OR (checkout_date = checkin_date AND checkout_time > checkin_time)
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM checkout
|
||||
WHERE checkout.checkin_id = checkin.checkin_id
|
||||
)
|
||||
""")
|
||||
rows = c.fetchall()
|
||||
@@ -298,12 +300,16 @@ def list_checkin():
|
||||
return list_checkin()
|
||||
else:
|
||||
conn.close()
|
||||
logger.error(f"Checklist: Error listing checkins: {e}")
|
||||
initialize_checklist_database()
|
||||
return "Error listing checkins."
|
||||
conn.close()
|
||||
timeCheckedIn = ""
|
||||
|
||||
# Get overdue info
|
||||
overdue = {entry['id']: entry for entry in get_overdue_checkins()}
|
||||
|
||||
checkin_list = ""
|
||||
for row in rows:
|
||||
checkin_id = row[0]
|
||||
# Calculate length of time checked in, including days
|
||||
total_seconds = time.time() - time.mktime(time.strptime(row[2] + " " + row[3], "%Y-%m-%d %H:%M:%S"))
|
||||
days = int(total_seconds // 86400)
|
||||
@@ -314,9 +320,31 @@ def list_checkin():
|
||||
timeCheckedIn = f"{days}d {hours:02}:{minutes:02}:{seconds:02}"
|
||||
else:
|
||||
timeCheckedIn = f"{hours:02}:{minutes:02}:{seconds:02}"
|
||||
checkin_list += "ID: " + str(row[0]) + " " + row[1] + " checked-In for " + timeCheckedIn
|
||||
|
||||
# Add ⏰ if routine check-ins are required
|
||||
routine = ""
|
||||
if len(row) > 7 and row[7] and int(row[7]) > 0:
|
||||
routine = f" ⏰({row[7]}m)"
|
||||
|
||||
# Indicate approval status
|
||||
approved_marker = "✅" if row[6] == 1 else "☑️"
|
||||
|
||||
# Check if overdue
|
||||
if checkin_id in overdue:
|
||||
overdue_minutes = overdue[checkin_id]['overdue_minutes']
|
||||
overdue_hours = overdue_minutes // 60
|
||||
overdue_mins = overdue_minutes % 60
|
||||
if overdue_hours > 0:
|
||||
overdue_str = f"overdue by {overdue_hours}h {overdue_mins}m"
|
||||
else:
|
||||
overdue_str = f"overdue by {overdue_mins}m"
|
||||
status = f"{row[1]} {overdue_str}{routine}"
|
||||
else:
|
||||
status = f"{row[1]} checked-In for {timeCheckedIn}{routine}"
|
||||
|
||||
checkin_list += f"ID: {checkin_id} {approved_marker} {status}"
|
||||
if row[5] != "":
|
||||
checkin_list += "📝" + row[5]
|
||||
checkin_list += " 📝" + row[5]
|
||||
if row != rows[-1]:
|
||||
checkin_list += "\n"
|
||||
# if empty list
|
||||
@@ -331,6 +359,9 @@ def process_checklist_command(nodeID, message, name="none", location="none"):
|
||||
if str(nodeID) in bbs_ban_list:
|
||||
logger.warning("System: Checklist attempt from the ban list")
|
||||
return "unable to process command"
|
||||
is_admin = False
|
||||
if str(nodeID) in bbs_admin_list:
|
||||
is_admin = True
|
||||
|
||||
message_lower = message.lower()
|
||||
parts = message.split()
|
||||
@@ -359,22 +390,44 @@ def process_checklist_command(nodeID, message, name="none", location="none"):
|
||||
return result
|
||||
|
||||
elif ("checkout" in message_lower and not reverse_in_out) or ("checkin" in message_lower and reverse_in_out):
|
||||
return checkout(name, current_date, current_time, location, comment)
|
||||
# Support: checkout all, checkout <id>, or checkout [note]
|
||||
all_flag = False
|
||||
checkin_id = None
|
||||
actual_comment = comment
|
||||
|
||||
elif "purgein" in message_lower:
|
||||
return mark_checkin_removed_by_name(name)
|
||||
# Split the command into parts after the keyword
|
||||
checkout_args = parts[1:] if len(parts) > 1 else []
|
||||
|
||||
elif "purgeout" in message_lower:
|
||||
return mark_checkout_removed_by_name(name)
|
||||
if checkout_args:
|
||||
if checkout_args[0].lower() == "all":
|
||||
all_flag = True
|
||||
actual_comment = " ".join(checkout_args[1:]) if len(checkout_args) > 1 else ""
|
||||
elif checkout_args[0].isdigit():
|
||||
checkin_id = int(checkout_args[0])
|
||||
actual_comment = " ".join(checkout_args[1:]) if len(checkout_args) > 1 else ""
|
||||
else:
|
||||
actual_comment = " ".join(checkout_args)
|
||||
|
||||
elif message_lower.startswith("checklistapprove "):
|
||||
return checkout(name, current_date, current_time, location, actual_comment, all=all_flag, checkin_id=checkin_id)
|
||||
|
||||
# elif "purgein" in message_lower:
|
||||
# return mark_checkin_removed_by_name(name)
|
||||
|
||||
# elif "purgeout" in message_lower:
|
||||
# return mark_checkout_removed_by_name(name)
|
||||
|
||||
elif "approvecl " in message_lower:
|
||||
if not is_admin:
|
||||
return "You do not have permission to approve check-ins."
|
||||
try:
|
||||
checkin_id = int(parts[1])
|
||||
return approve_checkin(checkin_id)
|
||||
except (ValueError, IndexError):
|
||||
return "Usage: checklistapprove <checkin_id>"
|
||||
|
||||
elif message_lower.startswith("checklistdeny "):
|
||||
|
||||
elif "denycl " in message_lower:
|
||||
if not is_admin:
|
||||
return "You do not have permission to deny check-ins."
|
||||
try:
|
||||
checkin_id = int(parts[1])
|
||||
return deny_checkin(checkin_id)
|
||||
@@ -385,21 +438,15 @@ def process_checklist_command(nodeID, message, name="none", location="none"):
|
||||
if not reverse_in_out:
|
||||
return ("Command: checklist followed by\n"
|
||||
"checkin [interval] [note]\n"
|
||||
"checkout [note]\n"
|
||||
"purgein - delete your checkin\n"
|
||||
"purgeout - delete your checkout\n"
|
||||
"checklistapprove <id> - approve checkin\n"
|
||||
"checklistdeny <id> - deny checkin\n"
|
||||
"Example: checkin 60 Hunting in tree stand")
|
||||
"checkout [all] [note]\n"
|
||||
"Example: checkin 60 Leaving for a hike")
|
||||
else:
|
||||
return ("Command: checklist followed by\n"
|
||||
"checkout [interval] [note]\n"
|
||||
"checkout [all] [interval] [note]\n"
|
||||
"checkin [note]\n"
|
||||
"purgeout - delete your checkout\n"
|
||||
"purgein - delete your checkin\n"
|
||||
"Example: checkout 60 Leaving park")
|
||||
"Example: checkout 60 Leaving for a hike")
|
||||
|
||||
elif "checklist" in message_lower:
|
||||
elif message_lower.strip() == "checklist":
|
||||
return list_checkin()
|
||||
|
||||
else:
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# Fetches DX spots from Spothole API based on user commands
|
||||
# 2025 K7MHI Kelly Keeton
|
||||
import requests
|
||||
import datetime
|
||||
from datetime import datetime, timedelta
|
||||
from modules.log import logger
|
||||
from modules.settings import latitudeValue, longitudeValue
|
||||
|
||||
@@ -69,7 +69,6 @@ def get_spothole_spots(source=None, band=None, mode=None, date=None, dx_call=Non
|
||||
url = "https://spothole.app/api/v1/spots"
|
||||
params = {}
|
||||
fetched_count = 0
|
||||
|
||||
|
||||
# Add administrative filters if provided
|
||||
qrt = False # Always fetch active spots
|
||||
@@ -83,7 +82,7 @@ def get_spothole_spots(source=None, band=None, mode=None, date=None, dx_call=Non
|
||||
params["needs_sig"] = str(needs_sig).lower()
|
||||
params["needs_sig_ref"] = 'true'
|
||||
# Only get spots from last 9 hours
|
||||
received_since_dt = datetime.datetime.utcnow() - datetime.timedelta(hours=9)
|
||||
received_since_dt = datetime.utcnow() - timedelta(hours=9)
|
||||
received_since = int(received_since_dt.timestamp())
|
||||
params["received_since"] = received_since
|
||||
|
||||
@@ -170,7 +169,7 @@ def get_spothole_spots(source=None, band=None, mode=None, date=None, dx_call=Non
|
||||
return spots
|
||||
|
||||
def handle_post_dxspot():
|
||||
time = int(datetime.datetime.utcnow().timestamp())
|
||||
time = int(datetime.utcnow().timestamp())
|
||||
freq = 14200000 # 14 MHz
|
||||
comment = "Test spot please ignore"
|
||||
de_spot = "N0CALL"
|
||||
|
||||
@@ -6,6 +6,7 @@ from modules.settings import (
|
||||
file_monitor_file_path,
|
||||
news_file_path,
|
||||
news_random_line_only,
|
||||
news_block_mode,
|
||||
allowXcmd,
|
||||
bbs_admin_list,
|
||||
xCmd2factorEnabled,
|
||||
@@ -23,16 +24,38 @@ trap_list_filemon = ("readnews",)
|
||||
NEWS_DATA_DIR = os.path.join(os.path.dirname(__file__), '..', 'data')
|
||||
newsSourcesList = []
|
||||
|
||||
def read_file(file_monitor_file_path, random_line_only=False):
|
||||
def read_file(file_monitor_file_path, random_line_only=False, news_block_mode=False, verse_only=False):
|
||||
try:
|
||||
if not os.path.exists(file_monitor_file_path):
|
||||
if file_monitor_file_path == "bee.txt":
|
||||
return "🐝buzz 💐buzz buzz🍯"
|
||||
if random_line_only:
|
||||
if file_monitor_file_path == 'bible.txt':
|
||||
return "🐝Go, and make disciples of all nations."
|
||||
if verse_only:
|
||||
# process verse/bible file
|
||||
verse = get_verses(file_monitor_file_path)
|
||||
return verse
|
||||
elif news_block_mode:
|
||||
with open(file_monitor_file_path, 'r', encoding='utf-8') as f:
|
||||
content = f.read().replace('\r\n', '\n').replace('\r', '\n')
|
||||
blocks = []
|
||||
block = []
|
||||
for line in content.split('\n'):
|
||||
if line.strip() == '':
|
||||
if block:
|
||||
blocks.append('\n'.join(block).strip())
|
||||
block = []
|
||||
else:
|
||||
block.append(line)
|
||||
if block:
|
||||
blocks.append('\n'.join(block).strip())
|
||||
blocks = [b for b in blocks if b]
|
||||
return random.choice(blocks) if blocks else None
|
||||
elif random_line_only:
|
||||
# read a random line from the file
|
||||
with open(file_monitor_file_path, 'r', encoding='utf-8') as f:
|
||||
lines = f.readlines()
|
||||
return random.choice(lines)
|
||||
lines = [line.strip() for line in f if line.strip()]
|
||||
return random.choice(lines) if lines else None
|
||||
else:
|
||||
# read the whole file
|
||||
with open(file_monitor_file_path, 'r', encoding='utf-8') as f:
|
||||
@@ -42,13 +65,67 @@ def read_file(file_monitor_file_path, random_line_only=False):
|
||||
logger.warning(f"FileMon: Error reading file: {file_monitor_file_path}")
|
||||
return None
|
||||
|
||||
def read_news(source=None):
|
||||
def read_news(source=None, random_line_only=False, news_block_mode=False):
|
||||
# Reads the news file. If a source is provided, reads {source}_news.txt.
|
||||
if source:
|
||||
file_path = os.path.join(NEWS_DATA_DIR, f"{source}_news.txt")
|
||||
else:
|
||||
file_path = os.path.join(NEWS_DATA_DIR, news_file_path)
|
||||
return read_file(file_path, news_random_line_only)
|
||||
# Block mode takes precedence over line mode
|
||||
if news_block_mode:
|
||||
return read_file(file_path, random_line_only=False, news_block_mode=True)
|
||||
elif random_line_only:
|
||||
return read_file(file_path, random_line_only=True, news_block_mode=False)
|
||||
else:
|
||||
return read_file(file_path)
|
||||
|
||||
def read_verse():
|
||||
# Reads a random verse from the file bible.txt in the data/ directory
|
||||
verses = get_verses('bible.txt')
|
||||
if verses:
|
||||
return random.choice(verses)
|
||||
return None
|
||||
|
||||
def get_verses(file_monitor_file_path):
|
||||
# Handles both "4 ..." and "1 Timothy 4:15 ..." style verse starts
|
||||
verses = []
|
||||
current_verse = []
|
||||
with open(file_monitor_file_path, 'r', encoding='utf-8') as f:
|
||||
for line in f:
|
||||
stripped = line.strip()
|
||||
# Check for "number space" OR "Book Chapter:Verse" at start
|
||||
is_numbered = stripped and len(stripped) > 1 and stripped[0].isdigit() and stripped[1] == ' '
|
||||
is_reference = (
|
||||
stripped and
|
||||
':' in stripped and
|
||||
any(stripped.startswith(book + ' ') for book in [
|
||||
"Genesis", "Exodus", "Leviticus", "Numbers", "Deuteronomy", "Joshua", "Judges", "Ruth",
|
||||
"1 Samuel", "2 Samuel", "1 Kings", "2 Kings", "1 Chronicles", "2 Chronicles", "Ezra", "Nehemiah",
|
||||
"Esther", "Job", "Psalms", "Proverbs", "Ecclesiastes", "Song of Solomon", "Isaiah", "Jeremiah",
|
||||
"Lamentations", "Ezekiel", "Daniel", "Hosea", "Joel", "Amos", "Obadiah", "Jonah", "Micah",
|
||||
"Nahum", "Habakkuk", "Zephaniah", "Haggai", "Zechariah", "Malachi", "Matthew", "Mark", "Luke",
|
||||
"John", "Acts", "Romans", "1 Corinthians", "2 Corinthians", "Galatians", "Ephesians", "Philippians",
|
||||
"Colossians", "1 Thessalonians", "2 Thessalonians", "1 Timothy", "2 Timothy", "Titus", "Philemon",
|
||||
"Hebrews", "James", "1 Peter", "2 Peter", "1 John", "2 John", "3 John", "Jude", "Revelation"
|
||||
])
|
||||
)
|
||||
if is_numbered or is_reference:
|
||||
if current_verse:
|
||||
verses.append(' '.join(current_verse).strip())
|
||||
current_verse = []
|
||||
# For numbered, drop the number; for reference, keep the whole line
|
||||
if is_numbered:
|
||||
current_verse.append(stripped.split(' ', 1)[1])
|
||||
else:
|
||||
current_verse.append(stripped)
|
||||
elif stripped and not stripped.lower().startswith('psalm'):
|
||||
current_verse.append(stripped)
|
||||
elif not stripped and current_verse:
|
||||
verses.append(' '.join(current_verse).strip())
|
||||
current_verse = []
|
||||
if current_verse:
|
||||
verses.append(' '.join(current_verse).strip())
|
||||
return verses
|
||||
|
||||
def write_news(content, append=False):
|
||||
# write the news file on demand
|
||||
|
||||
@@ -6,13 +6,16 @@
|
||||
- [DopeWars](#dopewars-game-module)
|
||||
- [GolfSim](#golfsim-game-module)
|
||||
- [Lemonade Stand](#lemonade-stand-game-module)
|
||||
- [Tic-Tac-Toe](#tic-tac-toe-game-module)
|
||||
- [Tic-Tac-Toe (2D/3D)](#tic-tac-toe-game-module)
|
||||
- [MasterMind](#mastermind-game-module)
|
||||
- [Battleship](#battleship-game-module)
|
||||
- [Video Poker](#video-poker-game-module)
|
||||
- [Hangman](#hangman-game-module)
|
||||
- [Quiz](#quiz-game-module)
|
||||
- [Survey](#survey--module-game)
|
||||
- [Word of the Day Game](#word-of-the-day-game--rules--features)
|
||||
- [Game Server](#game-server-configuration-gameini)
|
||||
- [PyGame Help](#pygame-help)
|
||||
---
|
||||
|
||||
|
||||
@@ -305,31 +308,45 @@ Play another week🥤? or (E)nd Game
|
||||
|
||||
A classic Tic-Tac-Toe game for the Meshtastic mesh-bot. Play against the bot, track your stats, and see if you can beat the AI!
|
||||
|
||||

|
||||
|
||||
## How to Play
|
||||
|
||||
- **Start the Game:**
|
||||
Send the command `tictactoe` via DM to the bot to begin a new game.
|
||||
|
||||
- **3D Mode:**
|
||||
You can play in 3D mode by sending `new 3d` during a game session. The board expands to 27 positions (1-27) and supports 3D win lines.
|
||||
|
||||
- **Run as a Game Server (Optional):**
|
||||
For UDP/visual/remote play, you can run the dedicated game server:
|
||||
```sh
|
||||
python3 script/game_serve.py
|
||||
```
|
||||
This enables networked play and visual board updates if supported.
|
||||
[PyGame Help](#pygame-help)
|
||||
|
||||
- **Objective:**
|
||||
Get three of your marks in a row (horizontally, vertically, or diagonally) before the bot does.
|
||||
|
||||
- **Game Flow:**
|
||||
1. **Board Layout:**
|
||||
- The board is numbered 1-9, left to right, top to bottom.
|
||||
- Example:
|
||||
- The board is numbered 1-9 (2D) or 1-27 (3D), left to right, top to bottom.
|
||||
- Example (2D):
|
||||
```
|
||||
1 | 2 | 3
|
||||
4 | 5 | 6
|
||||
7 | 8 | 9
|
||||
```
|
||||
2. **Making Moves:**
|
||||
- On your turn, type the number (1-9) where you want to place your mark.
|
||||
- On your turn, type the number (1-9 or 1-27) where you want to place your mark.
|
||||
- The bot will respond with the updated board and make its move.
|
||||
3. **Commands:**
|
||||
- `n` — Start a new game.
|
||||
- `new 2d` or `new 3d` — Start a new game in 2D or 3D mode.
|
||||
- `e` or `q` — End the current game.
|
||||
- `b` — Show the current board.
|
||||
- Enter a number (1-9) to make a move.
|
||||
- Enter a number (1-9 or 1-27) to make a move.
|
||||
4. **Winning:**
|
||||
- The first to get three in a row wins.
|
||||
- If the board fills with no winner, it’s a tie.
|
||||
@@ -356,12 +373,12 @@ Your turn! Pick 1-9:
|
||||
- Emojis are used for X and O unless disabled in settings.
|
||||
- Your win/loss stats are tracked across games.
|
||||
- The bot will try to win, block you, or pick a random move.
|
||||
- Play via DM for best experience.
|
||||
- Play via DM for best experience, or run the game server for network/visual play.
|
||||
- Only one game session per player at a time.
|
||||
|
||||
## Credits
|
||||
|
||||
- Written for Meshtastic mesh-bot by Martin
|
||||
- Written for Meshtastic mesh-bot by Martin, refactored by K7MHI
|
||||
|
||||
# MasterMind Game Module
|
||||
|
||||
@@ -504,6 +521,77 @@ Place your Bet, or (L)eave Table.
|
||||
- Adapted for Meshtastic mesh-bot by K7MHI Kelly Keeton 2024
|
||||
|
||||
|
||||
# Battleship Game Module
|
||||
|
||||
A classic Battleship game for the Meshtastic mesh-bot. Play solo against the AI or challenge another user in peer-to-peer (P2P) mode!
|
||||
|
||||
## How to Play
|
||||
|
||||
- **Start a New Game (vs AI):**
|
||||
Send `battleship` via DM to the bot to start a new game against the AI.
|
||||
|
||||
- **Start a New P2P Game:**
|
||||
Send `battleship new` to create a game and receive a join code.
|
||||
Share the code with another user.
|
||||
|
||||
- **Join a P2P Game:**
|
||||
Send `battleship join <code>` (replace `<code>` with the provided number) to join a waiting game.
|
||||
|
||||
- **View Open Games:**
|
||||
Send `battleship lobby` to see a list of open P2P games waiting for players.
|
||||
|
||||
- **Gameplay:**
|
||||
- Enter your move using coordinates:
|
||||
- Format: `B4` or `B,4` (row letter, column number)
|
||||
- Example: `C7`
|
||||
- The bot will show your radar, ship status, and results after each move.
|
||||
- In P2P, you and your opponent take turns. The bot will notify you when it’s your turn.
|
||||
|
||||
- **End Game:**
|
||||
Send `end` or `exit` to leave your current game.
|
||||
|
||||
## Rules & Features
|
||||
|
||||
- 10x10 grid, classic ship sizes (Carrier, Battleship, Cruiser, Submarine, Destroyer).
|
||||
- Ships are placed randomly.
|
||||
- In P2P, the joining player goes first.
|
||||
- Radar view shows a 4x4 grid centered on your last move.
|
||||
- Game tracks whose turn it is and notifies the next player in P2P mode.
|
||||
- Game ends when all ships of one player are sunk.
|
||||
|
||||
## Example Session
|
||||
|
||||
```
|
||||
New 🚢Battleship🤖 game started!
|
||||
Enter your move using coordinates: row-letter, column-number.
|
||||
Example: B5 or C,7
|
||||
Type 'exit' or 'end' to quit the game.
|
||||
|
||||
> B4
|
||||
|
||||
Your move: 💥Hit!
|
||||
AI ships: 5/5 afloat
|
||||
Radar:
|
||||
🗺️3 4 5 6
|
||||
B ~ ~ * ~
|
||||
C ~ ~ ~ ~
|
||||
D ~ ~ ~ ~
|
||||
E ~ ~ ~ ~
|
||||
AI move: D7 (missed)
|
||||
Your ships: 5/5 afloat
|
||||
```
|
||||
|
||||
## Notes
|
||||
|
||||
- Only one Battleship session per player at a time.
|
||||
- Play via DM for best experience.
|
||||
- In P2P, share the join code with your opponent.
|
||||
- Coordinates are not case-sensitive.
|
||||
|
||||
## Credits
|
||||
|
||||
- Written for Meshtastic mesh-bot by K7MHI Kelly Keeton 2025
|
||||
|
||||
# Word of the Day Game — Rules & Features
|
||||
|
||||
- **Word of the Day:**
|
||||
@@ -718,4 +806,54 @@ This module implements a survey system for the Meshtastic mesh-bot.
|
||||
|
||||
---
|
||||
|
||||
**Written for Meshtastic mesh-bot by K7MHI Kelly Keeton 2025**
|
||||
**Written for Meshtastic mesh-bot by K7MHI Kelly Keeton 2025**
|
||||
|
||||
___
|
||||
|
||||
# Game Server Configuration (`game.ini`)
|
||||
|
||||
The game server (`script/game_serve.py`) supports configuration via a `game.ini` file placed in the same directory as the script. This allows you to customize network and node settings without modifying the Python code.
|
||||
|
||||
## How to Use
|
||||
|
||||
1. **Create a `game.ini` file** in the `script/` directory (next to `game_serve.py`).
|
||||
|
||||
If `game.ini` is not present, the server will use built-in default values.
|
||||
|
||||
---
|
||||
|
||||
|
||||
# PyGame Help
|
||||
|
||||
'pygame - Community Edition' ('pygame-ce' for short) is a fork of the original 'pygame' library by former 'pygame' core contributors.
|
||||
|
||||
It offers many new features and optimizations, receives much better maintenance and runs under a better governance model, while being highly compatible with code written for upstream pygame (`import pygame` still works).
|
||||
|
||||
**Details**
|
||||
- [Initial announcement on Reddit](<https://www.reddit.com/r/pygame/comments/1112q10/pygame_community_edition_announcement/>) (or https://discord.com/channels/772505616680878080/772506385304649738/1074593440148500540)
|
||||
- [Why the forking happened](<https://www.reddit.com/r/pygame/comments/18xy7nf/what_was_the_disagreement_that_led_to_pygamece/>)
|
||||
|
||||
**Helpful Links**
|
||||
- https://discord.com/channels/772505616680878080/772506385304649738
|
||||
- [Our GitHub releases](<https://github.com/pygame-community/pygame-ce/releases>)
|
||||
- [Our docs](https://pyga.me/docs/)
|
||||
|
||||
**Installation**
|
||||
```sh
|
||||
pip uninstall pygame # Uninstall pygame first since it would conflict with pygame-ce
|
||||
pip install pygame-ce
|
||||
```
|
||||
-# Because 'pygame' installs to the same location as 'pygame-ce', it must first be uninstalled.
|
||||
-# Note that the `import pygame` syntax has not changed with pygame-ce.
|
||||
|
||||
# mUDP Help
|
||||
|
||||
mUDP library provides UDP-based broadcasting of Meshtastic-compatible packets. MeshBot uses this for the game_server_display server.
|
||||
|
||||
**Details**
|
||||
- [pdxlocations/mudp](https://github.com/pdxlocations/mudp)
|
||||
|
||||
**Installation**
|
||||
```sh
|
||||
pip install mudp
|
||||
```
|
||||
510
modules/games/battleship.py
Normal file
510
modules/games/battleship.py
Normal file
@@ -0,0 +1,510 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# Battleship game module Meshing Around
|
||||
# 2025 K7MHI Kelly Keeton
|
||||
import random
|
||||
import copy
|
||||
import uuid
|
||||
import time
|
||||
from modules.settings import battleshipTracker
|
||||
|
||||
OCEAN = "~"
|
||||
FIRE = "x"
|
||||
HIT = "*"
|
||||
SIZE = 10
|
||||
SHIPS = [5, 4, 3, 3, 2]
|
||||
SHIP_NAMES = ["✈️Carrier", "Battleship", "Cruiser", "Submarine", "Destroyer"]
|
||||
|
||||
class Session:
|
||||
def __init__(self, player1_id, player2_id=None, vs_ai=True):
|
||||
self.session_id = str(uuid.uuid4())
|
||||
self.vs_ai = vs_ai
|
||||
self.player1_id = player1_id
|
||||
self.player2_id = player2_id
|
||||
self.game = Battleship(vs_ai=vs_ai)
|
||||
self.next_turn = player1_id
|
||||
self.last_move = None
|
||||
self.shots_fired = 0
|
||||
self.start_time = time.time()
|
||||
|
||||
class Battleship:
|
||||
sessions = {}
|
||||
short_codes = {}
|
||||
|
||||
@classmethod
|
||||
def _generate_short_code(cls):
|
||||
while True:
|
||||
code = str(random.randint(1000, 9999))
|
||||
if code not in cls.short_codes:
|
||||
return code
|
||||
|
||||
@classmethod
|
||||
def new_game(cls, player_id, vs_ai=True, p2p_id=None):
|
||||
session = Session(player1_id=player_id, player2_id=p2p_id, vs_ai=vs_ai)
|
||||
cls.sessions[session.session_id] = session
|
||||
if not vs_ai:
|
||||
code = cls._generate_short_code()
|
||||
cls.short_codes[code] = session.session_id
|
||||
msg = (
|
||||
"New 🚢Battleship🚢 game started!\n"
|
||||
"Joining player goes first, waiting for them to join...\n"
|
||||
f"Share\n'battleship join {code}'"
|
||||
)
|
||||
return msg, code
|
||||
else:
|
||||
msg = (
|
||||
"New 🚢Battleship🤖 game started!\n"
|
||||
"Enter your move using coordinates: row-letter, column-number.\n"
|
||||
"Example: B5 or C,7\n"
|
||||
"Type 'exit' or 'end' to quit the game."
|
||||
)
|
||||
return msg, session.session_id
|
||||
|
||||
@classmethod
|
||||
def end_game(cls, session_id):
|
||||
if session_id in cls.sessions:
|
||||
del cls.sessions[session_id]
|
||||
return "Thanks for playing 🚢Battleship🚢"
|
||||
|
||||
@classmethod
|
||||
def get_session(cls, code_or_session_id):
|
||||
session_id = cls.short_codes.get(code_or_session_id, code_or_session_id)
|
||||
return cls.sessions.get(session_id)
|
||||
|
||||
def __init__(self, vs_ai=True):
|
||||
if vs_ai:
|
||||
self.player_board = self._blank_board()
|
||||
self.ai_board = self._blank_board()
|
||||
self.player_radar = self._blank_board()
|
||||
self.ai_radar = self._blank_board()
|
||||
self.number_board = self._blank_board()
|
||||
self.player_alive = sum(SHIPS)
|
||||
self.ai_alive = sum(SHIPS)
|
||||
self._place_ships(self.player_board, self.number_board)
|
||||
self._place_ships(self.ai_board)
|
||||
self.ai_targets = []
|
||||
self.ai_last_hit = None
|
||||
self.ai_orientation = None
|
||||
else:
|
||||
# P2P: Each player has their own board and radar
|
||||
self.player1_board = self._blank_board()
|
||||
self.player2_board = self._blank_board()
|
||||
self.player1_radar = self._blank_board()
|
||||
self.player2_radar = self._blank_board()
|
||||
self.player1_alive = sum(SHIPS)
|
||||
self.player2_alive = sum(SHIPS)
|
||||
self._place_ships(self.player1_board)
|
||||
self._place_ships(self.player2_board)
|
||||
|
||||
def _blank_board(self):
|
||||
return [[OCEAN for _ in range(SIZE)] for _ in range(SIZE)]
|
||||
|
||||
    def _place_ships(self, board, number_board=None):
        # Randomly place every ship from SHIPS onto `board`, marking each cell
        # with the ship's index as a string. When `number_board` is given, the
        # same index is also written there as an int (used for the human fleet).
        for idx, ship_len in enumerate(SHIPS):
            placed = False
            # Retry random positions/orientations until the ship fits with no overlap.
            while not placed:
                vertical = random.choice([True, False])
                if vertical:
                    # Clamp the start row so the ship stays on the board.
                    row = random.randint(0, SIZE - ship_len)
                    col = random.randint(0, SIZE - 1)
                    if all(board[row + i][col] == OCEAN for i in range(ship_len)):
                        for i in range(ship_len):
                            board[row + i][col] = str(idx)
                            if number_board is not None:
                                number_board[row + i][col] = idx
                        placed = True
                else:
                    row = random.randint(0, SIZE - 1)
                    # Clamp the start column so the ship stays on the board.
                    col = random.randint(0, SIZE - ship_len)
                    if all(board[row][col + i] == OCEAN for i in range(ship_len)):
                        for i in range(ship_len):
                            board[row][col + i] = str(idx)
                            if number_board is not None:
                                number_board[row][col + i] = idx
                        placed = True
|
||||
|
||||
def player_move(self, row, col):
|
||||
"""Player fires at AI's board. Returns 'hit', 'miss', or 'sunk:<ship_idx>'."""
|
||||
if self.player_radar[row][col] != OCEAN:
|
||||
return "repeat"
|
||||
if self.ai_board[row][col] not in (OCEAN, FIRE, HIT):
|
||||
self.player_radar[row][col] = HIT
|
||||
ship_idx = int(self.ai_board[row][col])
|
||||
self.ai_board[row][col] = HIT
|
||||
if self._is_ship_sunk(self.ai_board, ship_idx):
|
||||
self.ai_alive -= SHIPS[ship_idx]
|
||||
return f"sunk:{ship_idx}"
|
||||
return "hit"
|
||||
else:
|
||||
self.player_radar[row][col] = FIRE
|
||||
self.ai_board[row][col] = FIRE
|
||||
return "miss"
|
||||
|
||||
    def ai_move(self):
        """AI fires at player's board. Returns (row, col, result or 'sunk:<ship_idx>')."""
        # Pick a random cell the AI has not fired at yet (radar still ocean).
        while True:
            row = random.randint(0, SIZE - 1)
            col = random.randint(0, SIZE - 1)
            if self.ai_radar[row][col] == OCEAN:
                break
        # Anything that is not ocean or a previous shot marker is a ship cell.
        if self.player_board[row][col] not in (OCEAN, FIRE, HIT):
            self.ai_radar[row][col] = HIT
            ship_idx = int(self.player_board[row][col])
            self.player_board[row][col] = HIT
            if self._is_ship_sunk(self.player_board, ship_idx):
                self.player_alive -= SHIPS[ship_idx]
                return row, col, f"sunk:{ship_idx}"
            return row, col, "hit"
        else:
            # Miss: record it on the AI's radar and the player's board.
            self.ai_radar[row][col] = FIRE
            self.player_board[row][col] = FIRE
            return row, col, "miss"
|
||||
|
||||
def p2p_player_move(self, row, col, attacker, defender, radar, defender_alive_attr):
|
||||
"""P2P: attacker fires at defender's board, updates radar and defender's board."""
|
||||
if radar[row][col] != OCEAN:
|
||||
return "repeat"
|
||||
if defender[row][col] not in (OCEAN, FIRE, HIT):
|
||||
radar[row][col] = HIT
|
||||
ship_idx = int(defender[row][col])
|
||||
defender[row][col] = HIT
|
||||
if self._is_ship_sunk(defender, ship_idx):
|
||||
setattr(self, defender_alive_attr, getattr(self, defender_alive_attr) - SHIPS[ship_idx])
|
||||
return f"sunk:{ship_idx}"
|
||||
return "hit"
|
||||
else:
|
||||
radar[row][col] = FIRE
|
||||
defender[row][col] = FIRE
|
||||
return "miss"
|
||||
|
||||
def _is_ship_sunk(self, board, ship_idx):
|
||||
for row in board:
|
||||
for cell in row:
|
||||
if cell == str(ship_idx):
|
||||
return False
|
||||
return True
|
||||
|
||||
def is_game_over(self, vs_ai=True):
|
||||
if vs_ai:
|
||||
return self.player_alive == 0 or self.ai_alive == 0
|
||||
else:
|
||||
return self.player1_alive == 0 or self.player2_alive == 0
|
||||
|
||||
    def get_player_board(self):
        # Deep copy so callers can inspect the board without mutating game state.
        return copy.deepcopy(self.player_board)
|
||||
|
||||
    def get_player_radar(self):
        # Deep copy so callers can inspect the radar without mutating game state.
        return copy.deepcopy(self.player_radar)
|
||||
|
||||
    def get_ai_board(self):
        # Deep copy so callers can inspect the AI board without mutating game state.
        return copy.deepcopy(self.ai_board)
|
||||
|
||||
    def get_ai_radar(self):
        # Deep copy so callers can inspect the AI radar without mutating game state.
        return copy.deepcopy(self.ai_radar)
|
||||
|
||||
def get_ship_status(self, board):
|
||||
status = {}
|
||||
for idx in range(len(SHIPS)):
|
||||
afloat = any(str(idx) in row for row in board)
|
||||
status[idx] = "Afloat" if afloat else "Sunk"
|
||||
return status
|
||||
|
||||
def display_draw_board(self, board, label="Board"):
|
||||
print(f"{label}")
|
||||
print(" " + " ".join(str(i+1).rjust(2) for i in range(SIZE)))
|
||||
for idx, row in enumerate(board):
|
||||
print(chr(ord('A') + idx) + " " + " ".join(cell.rjust(2) for cell in row))
|
||||
|
||||
def get_short_name(node_id):
    """Look up a node's short name in the battleship tracker; fall back to the id."""
    from mesh_bot import battleshipTracker
    for record in battleshipTracker:
        if record['nodeID'] == node_id:
            if 'short_name' in record:
                return record['short_name']
            break  # matched entry has no short name; use the fallback
    return str(node_id)
|
||||
|
||||
def playBattleship(message, nodeID, deviceID, session_id=None):
    """Drive one turn of a Battleship session for the given node.

    Handles session lookup, the 'p' sonar-ping command, coordinate parsing
    ('B5' or 'C,7'), firing (plus the AI's reply shot in solo games), and
    building the reply text: radar window, ship counts, turn prompt, and the
    game-over summary.

    Returns the reply string for the player — or, when no session exists yet,
    whatever Battleship.new_game returns (a (msg, session_id/code) tuple).
    """
    # No live session for this id: start a fresh solo game.
    if not session_id or session_id not in Battleship.sessions:
        return Battleship.new_game(nodeID, vs_ai=True)

    session = Battleship.get_session(session_id)
    game = session.game

    # Check for game over (P2P games report the final result on any message)
    if not session.vs_ai and game.is_game_over(vs_ai=False):
        winner = None
        if game.player1_alive == 0:
            winner = get_short_name(session.player2_id)
        elif game.player2_alive == 0:
            winner = get_short_name(session.player1_id)
        else:
            winner = "Nobody"
        elapsed = int(time.time() - session.start_time)
        mins, secs = divmod(elapsed, 60)
        time_str = f"{mins}m {secs}s" if mins else f"{secs}s"
        shots = session.shots_fired
        return (
            f"Game over! {winner} wins! 🚢🏆\n"
            f"Game finished in {shots} shots and {time_str}.\n"
        )

    # P2P game still waiting for the second player: re-share the join code.
    if not session.vs_ai and session.player2_id is None:
        code = next((k for k, v in Battleship.short_codes.items() if v == session.session_id), None)
        return (
            f"Waiting for another player to join.\n"
            f"Share this code: {code}\n"
            "Type 'end' to cancel this P2P game."
        )

    if nodeID != session.next_turn:
        return "It's not your turn!"

    # Normalize the command: strip the 'battleship'/'b:' prefixes and spaces.
    msg = message.strip().lower()
    if msg.startswith("battleship"):
        msg = msg[len("battleship"):].strip()
    if msg.startswith("b:"):
        msg = msg[2:].strip()
    msg = msg.replace(" ", "")

    # --- Ping Command ---
    if msg == "p":
        import random
        # 30% chance to fail
        if random.random() < 0.3:
            return "I can hear a couple of 🦞lobsters dukin' it out down there..."
        # Determine center of ping
        if session.vs_ai:
            # Use last move if available, else center of board
            if session.shots_fired > 0:
                # Find last move coordinates from radar (most recent HIT or FIRE)
                # NOTE(review): this scan has no break, so it actually keeps the
                # LAST marked cell in row-major order, not the chronologically
                # most recent shot — confirm intended behavior.
                radar = game.get_player_radar()
                found = False
                for i in range(SIZE):
                    for j in range(SIZE):
                        if radar[i][j] in (HIT, FIRE):
                            center_y, center_x = i, j
                            found = True
                if not found:
                    center_y, center_x = SIZE // 2, SIZE // 2
            else:
                center_y, center_x = SIZE // 2, SIZE // 2
            # Scan 3x3 area on AI board for unsunk ship cells
            board = game.ai_board
        else:
            # For P2P, use player's radar and opponent's board
            if session.last_move:
                coord = session.last_move[1]
                center_y = ord(coord[0]) - ord('A')
                center_x = int(coord[1:]) - 1
            else:
                center_y, center_x = SIZE // 2, SIZE // 2
            # Scan 3x3 area on opponent's board
            if nodeID == session.player1_id:
                board = game.player2_board
            else:
                board = game.player1_board

        # Clamp the 3x3 ping window to the board edges.
        min_y = max(0, center_y - 1)
        max_y = min(SIZE, center_y + 2)
        min_x = max(0, center_x - 1)
        max_x = min(SIZE, center_x + 2)
        # Count DISTINCT ships (digit markers) inside the window.
        ship_cells = set()
        for i in range(min_y, max_y):
            for j in range(min_x, max_x):
                cell = board[i][j]
                if cell.isdigit():
                    ship_cells.add(cell)
        pong_count = len(ship_cells)
        if pong_count == 0:
            return "silence in the deep..."
        elif pong_count == 1:
            return "something lurking nearby."
        else:
            return f"targets in the area!"

    # --- Coordinate parsing: 'a,2' or 'a2' -> zero-based (y=row, x=col) ---
    x = y = None
    if "," in msg:
        parts = msg.split(",")
        if len(parts) == 2 and len(parts[0]) == 1 and parts[0].isalpha() and parts[1].isdigit():
            y = ord(parts[0]) - ord('a')
            x = int(parts[1]) - 1
        else:
            return "Invalid coordinates. Use format A2 or A,2 (row letter, column number)."
    elif len(msg) >= 2 and msg[0].isalpha() and msg[1:].isdigit():
        y = ord(msg[0]) - ord('a')
        x = int(msg[1:]) - 1
    else:
        return "Invalid command. Use format A2 or A,2 (row letter, column number)."

    if x is None or y is None or not (0 <= x < SIZE and 0 <= y < SIZE):
        return "Coordinates out of range."

    ai_row = ai_col = ai_result = None
    over = False

    if session.vs_ai:
        # Solo: player's shot, then the AI replies immediately.
        result = game.player_move(y, x)
        ai_row, ai_col, ai_result = game.ai_move()
        over = game.is_game_over(vs_ai=True)
    else:
        # P2P: determine which player is moving and fire at the other player's board
        if nodeID == session.player1_id:
            attacker = "player1"
            defender = "player2"
            result = game.p2p_player_move(
                y, x,
                game.player1_board, game.player2_board,
                game.player1_radar, "player2_alive"
            )
        else:
            attacker = "player2"
            defender = "player1"
            result = game.p2p_player_move(
                y, x,
                game.player2_board, game.player1_board,
                game.player2_radar, "player1_alive"
            )
        over = game.is_game_over(vs_ai=False)
        # Record this move so the opponent's next turn can show it.
        coord_str = f"{chr(y+65)}{x+1}"
        session.last_move = (nodeID, coord_str, result)

    # --- DEBUG DISPLAY ---
    DEBUG = False
    if DEBUG:
        if session.vs_ai:
            game.display_draw_board(game.player_board, label=f"Player Board ({session.player1_id})")
            game.display_draw_board(game.player_radar, label="Player Radar")
            game.display_draw_board(game.ai_board, label="AI Board")
            game.display_draw_board(game.ai_radar, label="AI Radar")
        else:
            p1_id = session.player1_id
            p2_id = session.player2_id if session.player2_id else "Waiting"
            game.display_draw_board(game.player1_board, label=f"Player 1 Board ({p1_id})")
            game.display_draw_board(game.player1_radar, label="Player 1 Radar")
            game.display_draw_board(game.player2_board, label=f"Player 2 Board ({p2_id})")
            game.display_draw_board(game.player2_radar, label="Player 2 Radar")

    # Format radar as a 4x4 grid centered on the player's move
    if session.vs_ai:
        radar = game.get_player_radar()
    else:
        radar = game.player1_radar if nodeID == session.player1_id else game.player2_radar

    # Clamp the window so it never runs off the board edges.
    window_size = 4
    half_window = window_size // 2
    min_row = max(0, min(y - half_window, SIZE - window_size))
    max_row = min(SIZE, min_row + window_size)
    min_col = max(0, min(x - half_window, SIZE - window_size))
    max_col = min(SIZE, min_col + window_size)

    radar_str = "🗺️" + " ".join(str(i+1) for i in range(min_col, max_col)) + "\n"
    for idx in range(min_row, max_row):
        radar_str += chr(ord('A') + idx) + " :" + " ".join(radar[idx][j] for j in range(min_col, max_col)) + "\n"

    def format_ship_status(status_dict):
        # Compact "<afloat>/<total> afloat" summary of a get_ship_status() dict.
        afloat = 0
        for idx, state in status_dict.items():
            if state == "Afloat":
                afloat += 1
        return f"{afloat}/{len(SHIPS)} afloat"

    if session.vs_ai:
        ai_status_str = format_ship_status(game.get_ship_status(game.ai_board))
        player_status_str = format_ship_status(game.get_ship_status(game.player_board))
    else:
        # NOTE(review): in P2P this always labels player2's fleet as the
        # opponent and player1's as "your ships", even when player2 is the
        # one moving — verify whether this should depend on nodeID.
        ai_status_str = format_ship_status(game.get_ship_status(game.player2_board))
        player_status_str = format_ship_status(game.get_ship_status(game.player1_board))

    def move_result_text(res, is_player=True):
        # Translate an internal result code into user-facing text.
        if res.startswith("sunk:"):
            idx = int(res.split(":")[1])
            name = SHIP_NAMES[idx]
            return f"Sunk🎯 {name}!"
        elif res == "hit":
            return "💥Hit!"
        elif res == "miss":
            return "missed"
        elif res == "repeat":
            return "📋already targeted"
        else:
            return res

    # After a valid move, switch turns
    if session.vs_ai:
        session.next_turn = nodeID
    else:
        session.next_turn = session.player2_id if nodeID == session.player1_id else session.player1_id

    # Increment shots fired
    session.shots_fired += 1

    # Waste of ammo comment
    funny_comment = ""
    if session.shots_fired % 50 == 0:
        funny_comment = f"\n🥵{session.shots_fired} rounds!"
    elif session.shots_fired % 25 == 0:
        funny_comment = f"\n🥔{session.shots_fired} fired!"

    # Output message
    if session.vs_ai:
        msg_out = (
            f"Your move: {move_result_text(result)}\n"
            f"AI ships: {ai_status_str}\n"
            f"Radar:\n{radar_str}"
            f"AI move: {chr(ai_row+65)}{ai_col+1} ({move_result_text(ai_result, False)})\n"
            f"Your ships: {player_status_str}"
            f"{funny_comment}"
        )
    else:
        my_name = get_short_name(nodeID)
        opponent_id = session.player2_id if nodeID == session.player1_id else session.player1_id
        opponent_short_name = get_short_name(opponent_id) if opponent_id else "Waiting"
        opponent_label = f"{opponent_short_name}:"
        my_move_result_str = f"Your move: {move_result_text(result)}\n"
        last_move_str = ""
        # Only show "last move" lines for the OPPONENT's previous shot.
        if session.last_move and session.last_move[0] != nodeID:
            last_player_short_name = get_short_name(session.last_move[0])
            last_coord = session.last_move[1]
            last_result = move_result_text(session.last_move[2])
            last_move_str = f"Last move by {last_player_short_name}: {last_coord} ({last_result})\n"
        if session.next_turn == nodeID:
            turn_prompt = f"Your turn, {my_name}! Enter your move:"
        else:
            turn_prompt = f"Waiting for {opponent_short_name}..."
        msg_out = (
            f"{my_move_result_str}"
            f"{last_move_str}"
            f"{opponent_label} {ai_status_str}\n"
            f"Radar:\n{radar_str}"
            f"Your ships: {player_status_str}\n"
            f"{turn_prompt}"
            f"{funny_comment}"
        )

    if over:
        # Append a game-over summary with shot count and elapsed time.
        elapsed = int(time.time() - session.start_time)
        mins, secs = divmod(elapsed, 60)
        time_str = f"{mins}m {secs}s" if mins else f"{secs}s"
        shots = session.shots_fired
        if session.vs_ai:
            if game.player_alive == 0:
                winner = "AI 🤖"
                msg_out += f"\nGame over! {winner} wins! Better luck next time.\n"
            else:
                winner = get_short_name(nodeID)
                msg_out += (
                    f"\nGame over! {winner} wins! You sank all the AI's ships! 🎉\n"
                    f"Took {shots} shots in {time_str}.\n"
                )
        else:
            # P2P: Announce winner by short name
            if game.player1_alive == 0:
                winner = get_short_name(session.player2_id)
            elif game.player2_alive == 0:
                winner = get_short_name(session.player1_id)
            else:
                winner = "Nobody"
            msg_out += (
                f"\nGame over! {winner} wins! 🚢🏆\n"
                f"Game finished in {shots} shots and {time_str}.\n"
            )
        msg_out += "Type 'battleship' to start a new game."

    return msg_out
|
||||
106
modules/games/battleship_vid.py
Normal file
106
modules/games/battleship_vid.py
Normal file
@@ -0,0 +1,106 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# Battleship Display Module Meshing Around
|
||||
# 2025 K7MHI Kelly Keeton
|
||||
import pygame
|
||||
import sys
|
||||
import time
|
||||
|
||||
from modules.games.battleship import Battleship, SHIP_NAMES, SIZE, OCEAN, FIRE, HIT
|
||||
|
||||
CELL_SIZE = 40      # pixel size of one board cell
BOARD_MARGIN = 50   # pixel margin around each board
STATUS_WIDTH = 320  # width of the right-hand status panel

# Most recently received board/metadata payload — presumably populated by the
# message parser elsewhere; TODO(review) confirm the producer.
latest_battleship_board = None
latest_battleship_meta = None
|
||||
|
||||
def draw_board(screen, board, top_left, cell_size, show_ships=False):
    """Render one Battleship grid onto `screen`.

    Args:
        screen: pygame display surface to draw on.
        board: SIZE x SIZE grid of single-character cell strings.
        top_left: (x, y) pixel origin of the grid.
        cell_size: pixel size of each square cell.
        show_ships: when False, digit cells (ship segments) are drawn as ocean.
    """
    font = pygame.font.Font(None, 28)
    x0, y0 = top_left
    for y in range(SIZE):
        for x in range(SIZE):
            rect = pygame.Rect(x0 + x*cell_size, y0 + y*cell_size, cell_size, cell_size)
            # Thin cell outline.
            pygame.draw.rect(screen, (100, 100, 200), rect, 1)
            val = board[y][x]
            # Show ships if requested, otherwise hide ship numbers
            if not show_ships and val.isdigit():
                val = OCEAN
            # ocean=pale blue, miss(FIRE)=red, hit=green, other markers=white
            color = (200, 200, 255) if val == OCEAN else (255, 0, 0) if val == FIRE else (0, 255, 0) if val == HIT else (255,255,255)
            if val != OCEAN:
                pygame.draw.rect(screen, color, rect)
                text = font.render(val, True, (0,0,0))
                screen.blit(text, rect.move(10, 5))
    # Draw row/col labels
    for i in range(SIZE):
        # Col numbers
        num_surface = font.render(str(i+1), True, (255, 255, 0))
        screen.blit(num_surface, (x0 + i*cell_size + cell_size//2 - 8, y0 - 24))
        # Row letters
        letter_surface = font.render(chr(ord('A') + i), True, (255, 255, 0))
        screen.blit(letter_surface, (x0 - 28, y0 + i*cell_size + cell_size//2 - 10))
|
||||
|
||||
def draw_status_panel(screen, game, top_left, width, height, is_player=True):
    """Draw the 'Game Status' side panel listing each ship as Afloat/Sunk."""
    font = pygame.font.Font(None, 32)
    panel_x, panel_y = top_left
    # Panel background.
    pygame.draw.rect(screen, (30, 30, 60), (panel_x, panel_y, width, height), border_radius=10)
    # Title
    screen.blit(font.render("Game Status", True, (255, 255, 0)), (panel_x + 10, panel_y + 10))
    # Section heading
    screen.blit(font.render("Ships Remaining:", True, (200, 200, 255)), (panel_x + 10, panel_y + 60))
    # Pick whichever fleet this panel describes.
    source_board = game.player_board if is_player else game.ai_board
    status_dict = game.get_ship_status(source_board)
    for i, ship in enumerate(SHIP_NAMES):
        status = status_dict.get(i, "Afloat")
        name_color = (200, 200, 255)
        if status.lower() == "sunk":
            status_color, status_text = (255, 0, 0), "Sunk"
        else:
            status_color, status_text = (0, 255, 0), "Afloat"
        row_y = panel_y + 100 + i * 35
        screen.blit(font.render(f"{ship}:", True, name_color), (panel_x + 20, row_y))
        screen.blit(font.render(f"{status_text}", True, status_color), (panel_x + 180, row_y))
|
||||
|
||||
def battleship_visual_main(game):
    """Run the pygame visualizer loop for a Battleship game until quit/ESC.

    NOTE(review): ends with sys.exit(), so this never returns to the caller —
    confirm that is intended when embedding in the bot.
    """
    pygame.init()
    # Window: radar + player board side by side, plus the status panel.
    screen = pygame.display.set_mode((2*SIZE*CELL_SIZE + STATUS_WIDTH + 3*BOARD_MARGIN, SIZE*CELL_SIZE + 2*BOARD_MARGIN))
    pygame.display.set_caption("Battleship Visualizer")
    clock = pygame.time.Clock()
    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
                running = False
        screen.fill((20, 20, 30))
        # Draw radar (left)
        draw_board(screen, game.get_player_radar(), (BOARD_MARGIN, BOARD_MARGIN+30), CELL_SIZE, show_ships=False)
        radar_label = pygame.font.Font(None, 36).render("Your Radar", True, (0,255,255))
        screen.blit(radar_label, (BOARD_MARGIN, BOARD_MARGIN))
        # Draw player board (right)
        draw_board(screen, game.get_player_board(), (SIZE*CELL_SIZE + 2*BOARD_MARGIN, BOARD_MARGIN+30), CELL_SIZE, show_ships=True)
        board_label = pygame.font.Font(None, 36).render("Your Board", True, (0,255,255))
        screen.blit(board_label, (SIZE*CELL_SIZE + 2*BOARD_MARGIN, BOARD_MARGIN))
        # Draw status panel (far right)
        draw_status_panel(screen, game, (2*SIZE*CELL_SIZE + 2*BOARD_MARGIN, BOARD_MARGIN), STATUS_WIDTH, SIZE*CELL_SIZE)
        pygame.display.flip()
        clock.tick(30)  # cap the loop at 30 FPS
    pygame.quit()
    sys.exit()
|
||||
|
||||
def parse_battleship_message(msg):
    """Parse a raw MBSP display payload.

    Expected payload:
    MBSP|label|timestamp|nodeID|deviceID|sessionID|status|shotsFired|boardType|shipsStatus|boardString

    TODO(review): parsing is not implemented yet — this stub only logs the
    incoming message.
    """
    print("Parsing Battleship message:", msg)
|
||||
|
||||
# if __name__ == "__main__":
|
||||
# # Example: create a new game and show the boards
|
||||
# game = Battleship(vs_ai=True)
|
||||
# battleship_visual_main(game)
|
||||
@@ -5,6 +5,7 @@ import random
|
||||
import time
|
||||
import pickle
|
||||
from modules.log import logger
|
||||
from modules.settings import dwPlayerTracker
|
||||
|
||||
# Global variables
|
||||
total_days = 7 # number of days or rotations the player has to play
|
||||
@@ -391,6 +392,13 @@ def endGameDw(nodeID):
|
||||
return msg
|
||||
if cash < starting_cash:
|
||||
msg = "You lost money, better go get a real job.💸"
|
||||
|
||||
# remove player from all trackers and databases
|
||||
dwPlayerTracker[:] = [p for p in dwPlayerTracker if p.get('userID') != nodeID]
|
||||
dwCashDb[:] = [p for p in dwCashDb if p.get('userID') != nodeID]
|
||||
dwInventoryDb[:] = [p for p in dwInventoryDb if p.get('userID') != nodeID]
|
||||
dwLocationDb[:] = [p for p in dwLocationDb if p.get('userID') != nodeID]
|
||||
dwGameDayDb[:] = [p for p in dwGameDayDb if p.get('userID') != nodeID]
|
||||
|
||||
return msg
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ import random
|
||||
import time
|
||||
import pickle
|
||||
from modules.log import logger
|
||||
from modules.settings import golfTracker
|
||||
|
||||
# Clubs setup
|
||||
driver_distances = list(range(230, 280, 5))
|
||||
|
||||
@@ -10,6 +10,7 @@ import json
|
||||
import random
|
||||
import os
|
||||
from modules.log import logger
|
||||
from modules.settings import hamtestTracker
|
||||
|
||||
class HamTest:
|
||||
def __init__(self):
|
||||
|
||||
@@ -3,6 +3,7 @@ from modules.log import logger, getPrettyTime
|
||||
import os
|
||||
import json
|
||||
import random
|
||||
from modules.settings import hangmanTracker
|
||||
|
||||
class Hangman:
|
||||
WORDS = [
|
||||
|
||||
@@ -211,7 +211,7 @@ def compareCodeMMind(secret_code, user_guess, nodeID):
|
||||
def playGameMMind(diff, secret_code, turn_count, nodeID, message):
|
||||
msg = ''
|
||||
won = False
|
||||
if turn_count <= 10:
|
||||
if turn_count < 11:
|
||||
user_guess = getGuessMMind(diff, message, nodeID)
|
||||
if user_guess == "XXXX":
|
||||
msg += f"⛔️Invalid guess. Please enter 4 valid colors letters.\n🔴🟢🔵🔴 is RGBR"
|
||||
@@ -240,7 +240,7 @@ def playGameMMind(diff, secret_code, turn_count, nodeID, message):
|
||||
# reset turn count in tracker
|
||||
for i in range(len(mindTracker)):
|
||||
if mindTracker[i]['nodeID'] == nodeID:
|
||||
mindTracker[i]['turns'] = 0
|
||||
mindTracker[i]['turns'] = 1
|
||||
mindTracker[i]['secret_code'] = ''
|
||||
mindTracker[i]['cmd'] = 'new'
|
||||
|
||||
@@ -277,6 +277,7 @@ def start_mMind(nodeID, message):
|
||||
if mindTracker[i]['nodeID'] == nodeID:
|
||||
mindTracker[i]['cmd'] = 'makeCode'
|
||||
mindTracker[i]['diff'] = diff
|
||||
mindTracker[i]['turns'] = 1
|
||||
# Return color message to player
|
||||
msg += chooseDifficultyMMind(message.lower()[0])
|
||||
return msg
|
||||
|
||||
@@ -4,273 +4,298 @@
|
||||
import random
|
||||
import time
|
||||
import modules.settings as my_settings
|
||||
from modules.settings import tictactoeTracker
|
||||
|
||||
useSynchCompression = True
|
||||
if useSynchCompression:
|
||||
import zlib
|
||||
|
||||
# to (max), molly and jake, I miss you both so much.
|
||||
|
||||
if my_settings.disable_emojis_in_games:
|
||||
X = "X"
|
||||
O = "O"
|
||||
else:
|
||||
X = "❌"
|
||||
O = "⭕️"
|
||||
|
||||
class TicTacToe:
|
||||
def __init__(self):
|
||||
def __init__(self, display_module):
|
||||
if getattr(my_settings, "disable_emojis_in_games", False):
|
||||
self.X = "X"
|
||||
self.O = "O"
|
||||
self.digit_emojis = None
|
||||
else:
|
||||
self.X = "❌"
|
||||
self.O = "⭕️"
|
||||
# Unicode emoji digits 1️⃣-9️⃣
|
||||
self.digit_emojis = [
|
||||
"1️⃣", "2️⃣", "3️⃣", "4️⃣", "5️⃣", "6️⃣", "7️⃣", "8️⃣", "9️⃣"
|
||||
]
|
||||
self.display_module = display_module
|
||||
self.game = {}
|
||||
self.win_lines_3d = self.generate_3d_win_lines()
|
||||
|
||||
def new_game(self, id):
|
||||
positiveThoughts = ["🚀I need to call NATO",
|
||||
"🏅Going for the gold!",
|
||||
"Mastering ❌TTT⭕️",]
|
||||
sorryNotGoinWell = ["😭Not your day, huh?",
|
||||
"📉Results here dont define you.",
|
||||
"🤖WOPR would be proud."]
|
||||
"""Start a new game"""
|
||||
games = won = 0
|
||||
ret = ""
|
||||
if id in self.game:
|
||||
games = self.game[id]["games"]
|
||||
won = self.game[id]["won"]
|
||||
if games > 3:
|
||||
if won / games >= 3.14159265358979323846: # win rate > pi
|
||||
ret += random.choice(positiveThoughts) + "\n"
|
||||
else:
|
||||
ret += random.choice(sorryNotGoinWell) + "\n"
|
||||
# Retain stats
|
||||
ret += f"Games:{games} 🥇❌:{won}\n"
|
||||
|
||||
self.game[id] = {
|
||||
"board": [" "] * 9, # 3x3 board as flat list
|
||||
"player": X, # Human is X, bot is O
|
||||
"games": games + 1,
|
||||
"won": won,
|
||||
"turn": "human" # whose turn it is
|
||||
def new_game(self, nodeID, mode="2D", channel=None, deviceID=None):
|
||||
board_size = 9 if mode == "2D" else 27
|
||||
self.game[nodeID] = {
|
||||
"board": [" "] * board_size,
|
||||
"mode": mode,
|
||||
"channel": channel,
|
||||
"nodeID": nodeID,
|
||||
"deviceID": deviceID,
|
||||
"player": self.X,
|
||||
"games": 1,
|
||||
"won": 0,
|
||||
"turn": "human"
|
||||
}
|
||||
ret += self.show_board(id)
|
||||
ret += "Pick 1-9:"
|
||||
return ret
|
||||
|
||||
def rndTeaPrice(self, tea=42):
|
||||
"""Return a random tea between 0 and tea."""
|
||||
return random.uniform(0, tea)
|
||||
self.update_display(nodeID, status="new")
|
||||
msg = f"{mode} game started!\n"
|
||||
if mode == "2D":
|
||||
msg += self.show_board(nodeID)
|
||||
msg += "Pick 1-9:"
|
||||
else:
|
||||
msg += "Play on the MeshBot Display!\n"
|
||||
msg += "Pick 1-27:"
|
||||
return msg
|
||||
|
||||
def show_board(self, id):
|
||||
"""Display compact board with move numbers"""
|
||||
g = self.game[id]
|
||||
b = g["board"]
|
||||
|
||||
# Show board with positions
|
||||
board_str = ""
|
||||
for i in range(3):
|
||||
row = ""
|
||||
for j in range(3):
|
||||
pos = i * 3 + j
|
||||
if my_settings.disable_emojis_in_games:
|
||||
cell = b[pos] if b[pos] != " " else str(pos + 1)
|
||||
else:
|
||||
cell = b[pos] if b[pos] != " " else f" {str(pos + 1)} "
|
||||
row += cell
|
||||
if j < 2:
|
||||
row += " | "
|
||||
board_str += row
|
||||
if i < 2:
|
||||
board_str += "\n"
|
||||
|
||||
return board_str + "\n"
|
||||
def update_display(self, nodeID, status=None):
|
||||
from modules.system import send_raw_bytes
|
||||
g = self.game[nodeID]
|
||||
mapping = {" ": "0", "X": "1", "O": "2", "❌": "1", "⭕️": "2"}
|
||||
board_str = "".join(mapping.get(cell, "0") for cell in g["board"])
|
||||
msg = f"MTTT:{board_str}|{g['nodeID']}|{g['channel']}|{g['deviceID']}"
|
||||
if status:
|
||||
msg += f"|status={status}"
|
||||
if useSynchCompression:
|
||||
payload = zlib.compress(msg.encode("utf-8"))
|
||||
else:
|
||||
payload = msg.encode("utf-8")
|
||||
send_raw_bytes(nodeID, payload, portnum=256)
|
||||
if self.display_module:
|
||||
self.display_module.update_board(
|
||||
g["board"], g["channel"], g["nodeID"], g["deviceID"]
|
||||
)
|
||||
|
||||
def make_move(self, id, position):
|
||||
"""Make a move for the current player"""
|
||||
g = self.game[id]
|
||||
|
||||
# Validate position
|
||||
if position < 1 or position > 9:
|
||||
return False
|
||||
|
||||
pos = position - 1
|
||||
if g["board"][pos] != " ":
|
||||
return False
|
||||
|
||||
# Make human move
|
||||
g["board"][pos] = X
|
||||
return True
|
||||
def show_board(self, nodeID):
|
||||
g = self.game[nodeID]
|
||||
if g["mode"] == "2D":
|
||||
b = g["board"]
|
||||
s = ""
|
||||
for i in range(3):
|
||||
row = []
|
||||
for j in range(3):
|
||||
cell = b[i*3+j]
|
||||
if cell != " ":
|
||||
row.append(cell)
|
||||
else:
|
||||
if self.digit_emojis:
|
||||
row.append(self.digit_emojis[i*3+j])
|
||||
else:
|
||||
row.append(str(i*3+j+1))
|
||||
s += " | ".join(row) + "\n"
|
||||
return s
|
||||
return ""
|
||||
|
||||
def bot_move(self, id):
|
||||
"""AI makes a move: tries to win, block, or pick random"""
|
||||
g = self.game[id]
|
||||
def make_move(self, nodeID, position):
|
||||
g = self.game[nodeID]
|
||||
board = g["board"]
|
||||
|
||||
# Try to win
|
||||
move = self.find_winning_move(id, O)
|
||||
if move != -1:
|
||||
board[move] = O
|
||||
return move
|
||||
|
||||
# Try to block player
|
||||
move = self.find_winning_move(id, X)
|
||||
if move != -1:
|
||||
board[move] = O
|
||||
return move
|
||||
|
||||
# Pick random move
|
||||
move = self.find_random_move(id)
|
||||
if move != -1:
|
||||
board[move] = O
|
||||
return move
|
||||
|
||||
# No moves possible
|
||||
return -1
|
||||
max_pos = 9 if g["mode"] == "2D" else 27
|
||||
if 1 <= position <= max_pos and board[position-1] == " ":
|
||||
board[position-1] = g["player"]
|
||||
return True
|
||||
return False
|
||||
|
||||
def find_winning_move(self, id, player):
|
||||
"""Find a winning move for the given player"""
|
||||
g = self.game[id]
|
||||
board = g["board"][:]
|
||||
|
||||
# Check all empty positions
|
||||
for i in range(9):
|
||||
if board[i] == " ":
|
||||
board[i] = player
|
||||
if self.check_winner_on_board(board) == player:
|
||||
return i
|
||||
board[i] = " "
|
||||
return -1
|
||||
|
||||
def find_random_move(self, id: str, tea_price: float = 42.0) -> int:
|
||||
"""Find a random empty position, using time and tea_price for extra randomness."""
|
||||
board = self.game[id]["board"]
|
||||
def bot_move(self, nodeID):
|
||||
g = self.game[nodeID]
|
||||
board = g["board"]
|
||||
max_pos = 9 if g["mode"] == "2D" else 27
|
||||
# Try to win or block
|
||||
for player in (self.O, self.X):
|
||||
move = self.find_winning_move(nodeID, player)
|
||||
if move != -1:
|
||||
board[move] = self.O
|
||||
return move+1
|
||||
# Otherwise random move
|
||||
empty = [i for i, cell in enumerate(board) if cell == " "]
|
||||
current_time = time.time()
|
||||
from_china = self.rndTeaPrice(time.time() % 7) # Correct usage
|
||||
tea_price = from_china
|
||||
tea_price = (42 * 7) - (13 / 2) + (tea_price % 5)
|
||||
if not empty:
|
||||
return -1
|
||||
# Combine time and tea_price for a seed
|
||||
seed = int(current_time * 1000) ^ int(tea_price * 1000)
|
||||
local_random = random.Random(seed)
|
||||
local_random.shuffle(empty)
|
||||
return empty[0]
|
||||
if empty:
|
||||
move = random.choice(empty)
|
||||
board[move] = self.O
|
||||
return move+1
|
||||
return -1
|
||||
|
||||
def check_winner_on_board(self, board):
|
||||
"""Check winner on given board state"""
|
||||
# Winning combinations
|
||||
wins = [
|
||||
[0,1,2], [3,4,5], [6,7,8], # Rows
|
||||
[0,3,6], [1,4,7], [2,5,8], # Columns
|
||||
[0,4,8], [2,4,6] # Diagonals
|
||||
]
|
||||
|
||||
for combo in wins:
|
||||
if board[combo[0]] == board[combo[1]] == board[combo[2]] != " ":
|
||||
return board[combo[0]]
|
||||
def find_winning_move(self, nodeID, player):
|
||||
g = self.game[nodeID]
|
||||
board = g["board"]
|
||||
lines = self.get_win_lines(g["mode"])
|
||||
for line in lines:
|
||||
cells = [board[i] for i in line]
|
||||
if cells.count(player) == 2 and cells.count(" ") == 1:
|
||||
return line[cells.index(" ")]
|
||||
return -1
|
||||
|
||||
def play(self, nodeID, input_msg):
    """Process one turn of input for the 2D/3D game tracked under nodeID.

    Handles the command words (end/quit, refresh/board, new [2d|3d]),
    emoji-digit or integer position input, the human move, win/tie
    detection, the bot's reply move, and display updates. Returns the
    text to send back to the player.
    """
    try:
        # Lazily start a game on first contact.
        if nodeID not in self.game:
            return self.new_game(nodeID)
        g = self.game[nodeID]
        mode = g["mode"]
        # 2D boards have 9 cells, 3D boards 27.
        max_pos = 9 if mode == "2D" else 27

        input_str = input_msg.strip().lower()
        if input_str in ("end", "e", "quit", "q"):
            # NOTE(review): this only refreshes the display; the game
            # entry is NOT removed from self.game — confirm intended.
            msg = "Game ended."
            self.update_display(nodeID)
            return msg

        # Add refresh/draw command
        if input_str in ("refresh", "board", "b"):
            self.update_display(nodeID, status="refresh")
            if mode == "2D":
                return self.show_board(nodeID) + f"Pick 1-{max_pos}:"
            else:
                return "Display refreshed."

        # Allow 'new', 'new 2d', 'new 3d'
        if input_str.startswith("new"):
            parts = input_str.split()
            if len(parts) > 1 and parts[1] in ("2d", "3d"):
                new_mode = "2D" if parts[1] == "2d" else "3D"
            else:
                # No explicit mode: keep playing in the current mode.
                new_mode = mode
            msg = self.new_game(nodeID, new_mode, g["channel"], g["deviceID"])
            return msg

        # Accept emoji digits as input
        pos = None
        # Try to match emoji digits if enabled
        if self.digit_emojis:
            try:
                # Remove variation selectors (U+FE0F) for matching
                normalized_input = input_msg.replace("\ufe0f", "")
                for idx, emoji in enumerate(self.digit_emojis[:max_pos]):
                    if normalized_input == emoji.replace("\ufe0f", ""):
                        # Emoji list is 0-based; positions are 1-based.
                        pos = idx + 1
                        break
            except Exception:
                # Best-effort: fall through to plain integer parsing.
                pass
        if pos is None:
            try:
                pos = int(input_msg)
            except Exception:
                return f"Enter a number or emoji between 1 and {max_pos}."

        if not self.make_move(nodeID, pos):
            return f"Invalid move! Pick 1-{max_pos}:"

        # The bot has not moved yet, so any winner here is the human.
        winner = self.check_winner(nodeID)
        if winner:
            # Add positive/sorry messages and stats
            positiveThoughts = [
                "🚀I need to call NATO",
                "🏅Going for the gold!",
                "Mastering ❌TTT⭕️",
            ]
            sorryNotGoinWell = [
                "😭Not your day, huh?",
                "📉Results here dont define you.",
                "🤖WOPR would be proud."
            ]
            games = won = 0
            ret = ""
            if nodeID in self.game:
                self.game[nodeID]["won"] += 1
                games = self.game[nodeID]["games"]
                won = self.game[nodeID]["won"]
                if games > 3:
                    # NOTE(review): won/games is a ratio in [0, ~1+] and can
                    # never reach pi, so positiveThoughts is unreachable and
                    # sorryNotGoinWell is always chosen — confirm intended
                    # threshold (e.g. 0.5?).
                    if won / games >= 3.14159265358979323846: # win rate > pi
                        ret += random.choice(positiveThoughts) + "\n"
                    else:
                        ret += random.choice(sorryNotGoinWell) + "\n"
                # Retain stats
                ret += f"Games:{games} 🥇❌:{won}\n"
            msg = f"You ({g['player']}) win!\n" + ret
            msg += "Type 'new' to play again or 'end' to quit."
            self.update_display(nodeID, status="win")
            return msg

        # Board full with no winner after the human's move: tie.
        if " " not in g["board"]:
            msg = "Tie game!"
            msg += "\nType 'new' to play again or 'end' to quit."
            self.update_display(nodeID, status="tie")
            return msg

        # Bot's turn
        g["player"] = self.O
        bot_pos = self.bot_move(nodeID)
        winner = self.check_winner(nodeID)
        if winner:
            self.update_display(nodeID, status="loss")
            msg = f"Bot ({g['player']}) wins!\n"
            msg += "Type 'new' to play again or 'end' to quit."
            return msg

        # Tie check again after the bot's move.
        if " " not in g["board"]:
            msg = "Tie game!"
            msg += "\nType 'new' to play again or 'end' to quit."
            self.update_display(nodeID, status="tie")
            return msg

        # Hand the turn back to the human and prompt for the next move.
        g["player"] = self.X
        prompt = f"Pick 1-{max_pos}:"
        if mode == "2D":
            prompt = self.show_board(nodeID) + prompt
        self.update_display(nodeID)
        return prompt

    except Exception as e:
        # Catch-all so a bug never crashes the mesh bot loop.
        return f"An unexpected error occurred: {e}"
|
||||
|
||||
def check_winner(self, nodeID):
    """Return the winning mark for this game (2D or 3D), or None."""
    state = self.game[nodeID]
    cells = state["board"]
    for line in self.get_win_lines(state["mode"]):
        first = cells[line[0]]
        # A line wins when every cell carries the same non-empty mark.
        if first != " " and all(cells[i] == first for i in line):
            return first
    return None
|
||||
|
||||
def check_winner(self, id):
    """Check if there's a winner.

    NOTE(review): this redefinition shadows the earlier
    ``check_winner(self, nodeID)`` above (last definition wins in
    Python). It delegates to ``check_winner_on_board``, which only
    knows the eight 2D lines — a 3D (27-cell) game would not have its
    cross-layer lines checked here. Looks like merge residue; confirm
    which implementation is intended.
    """
    g = self.game[id]
    return self.check_winner_on_board(g["board"])
|
||||
def get_win_lines(self, mode):
    """Return every winning index triple for the given board mode."""
    if mode != "2D":
        # 3D games use the precomputed 27-cell line table.
        return self.win_lines_3d
    rows = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
    columns = [[0, 3, 6], [1, 4, 7], [2, 5, 8]]
    diagonals = [[0, 4, 8], [2, 4, 6]]
    return rows + columns + diagonals
|
||||
|
||||
def is_board_full(self, id):
    """Return True when the game's board has no empty cells left."""
    cells = self.game[id]["board"]
    return all(cell != " " for cell in cells)
|
||||
def generate_3d_win_lines(self):
    """Build all 49 winning lines for a 3x3x3 board.

    Cell index for coordinate (layer z, row y, col x) is z*9 + y*3 + x.
    Line order matches the original implementation: layer rows, layer
    columns, pillars, per-layer diagonals, vertical diagonals along x
    then y, and finally the four space diagonals.
    """
    def cell(z, y, x):
        # Flatten a (layer, row, col) coordinate to a board index.
        return z * 9 + y * 3 + x

    lines = []
    # Rows within each layer.
    for layer in range(3):
        for row in range(3):
            lines.append([cell(layer, row, col) for col in range(3)])
    # Columns within each layer.
    for layer in range(3):
        for col in range(3):
            lines.append([cell(layer, row, col) for row in range(3)])
    # Pillars: same (row, col) straight through the three layers.
    for row in range(3):
        for col in range(3):
            lines.append([cell(layer, row, col) for layer in range(3)])
    # Diagonals within each layer: TL-BR, then TR-BL.
    for layer in range(3):
        lines.append([cell(layer, d, d) for d in range(3)])
        lines.append([cell(layer, d, 2 - d) for d in range(3)])
    # Vertical diagonals in each column plane (fixed x).
    for col in range(3):
        lines.append([cell(d, d, col) for d in range(3)])
        lines.append([cell(d, 2 - d, col) for d in range(3)])
    # Vertical diagonals in each row plane (fixed y).
    for row in range(3):
        lines.append([cell(d, row, d) for d in range(3)])
        lines.append([cell(d, row, 2 - d) for d in range(3)])
    # The four corner-to-corner space diagonals.
    lines.append([cell(0, 0, 0), cell(1, 1, 1), cell(2, 2, 2)])  # 0, 13, 26
    lines.append([cell(0, 0, 2), cell(1, 1, 1), cell(2, 2, 0)])  # 2, 13, 24
    lines.append([cell(0, 2, 0), cell(1, 1, 1), cell(2, 0, 2)])  # 6, 13, 20
    lines.append([cell(0, 2, 2), cell(1, 1, 1), cell(2, 0, 0)])  # 8, 13, 18
    return lines
|
||||
|
||||
def game_over_msg(self, id):
    """Generate game over message.

    Increments the player's win counter on a human win and returns the
    end-of-game text shown to the player.

    NOTE(review): this compares against bare ``X`` and ``O`` (not
    ``self.X``/``self.O`` as elsewhere) — presumably module-level
    constants exist outside this view; confirm, otherwise this raises
    NameError at runtime.
    """
    g = self.game[id]
    winner = self.check_winner(id)

    if winner == X:
        # Human win: bump the running win counter kept across games.
        g["won"] += 1
        return "🎉You won! (n)ew (e)nd"
    elif winner == O:
        return "🤖Bot wins! (n)ew (e)nd"
    else:
        return "🤝Tie, The only winning move! (n)ew (e)nd"
|
||||
|
||||
def play(self, id, input_msg):
    """Main game play function.

    Parses the player's input (a position 1-9 or a letter command),
    applies the human move, lets the bot reply, and returns the text
    to send back. Starts a new game automatically for unknown ids.

    Args:
        id: game/node identifier used as the key into self.game.
        input_msg: raw text from the player.

    Returns:
        The response string (board render, prompt, or game-over text).
    """
    if id not in self.game:
        return self.new_game(id)

    # If input is just the game name, show current board.
    # Fix: the original compared against ("tictactoe" or "tic-tac-toe"),
    # which evaluates to just "tictactoe" (or short-circuits on the
    # first truthy operand), so "tic-tac-toe" was never matched.
    # Use a membership test instead.
    if input_msg.lower().strip() in ("tictactoe", "tic-tac-toe"):
        return self.show_board(id) + "Your turn! Pick 1-9:"

    # Parse player move
    try:
        # Extract just the digits from the input.
        numbers = [char for char in input_msg if char.isdigit()]
        if not numbers:
            # No digits: interpret as a letter command.
            if input_msg.lower().startswith('q'):
                self.end_game(id)
                return "Game ended. To start a new game, type 'tictactoe'."
            elif input_msg.lower().startswith('n'):
                return self.new_game(id)
            elif input_msg.lower().startswith('b'):
                return self.show_board(id) + "Your turn! Pick 1-9:"
        # IndexError here (no digits, no matching command) is caught below.
        position = int(numbers[0])
    except (ValueError, IndexError):
        return "Enter 1-9, or (e)nd (n)ew game, send (b)oard to see board🧩"

    # Make player move
    if not self.make_move(id, position):
        return "Invalid move! Pick 1-9:"

    # Check if player won
    if self.check_winner(id):
        result = self.game_over_msg(id) + "\n" + self.show_board(id)
        self.end_game(id)
        return result

    # Check for tie
    if self.is_board_full(id):
        result = self.game_over_msg(id) + "\n" + self.show_board(id)
        self.end_game(id)
        return result

    # Bot's turn
    bot_pos = self.bot_move(id)

    # Check if bot won
    if self.check_winner(id):
        result = self.game_over_msg(id) + "\n" + self.show_board(id)
        self.end_game(id)
        return result

    # Check for tie after bot move
    if self.is_board_full(id):
        result = self.game_over_msg(id) + "\n" + self.show_board(id)
        self.end_game(id)
        return result

    # Continue game
    return self.show_board(id) + "Your turn! Pick 1-9:"
|
||||
|
||||
def end_game(self, id):
    """Clean up finished game but keep stats."""
    if id not in self.game:
        return
    stats = self.game[id]
    games_played = stats["games"]
    games_won = stats["won"]
    # Replace the finished game with a fresh board, carrying the
    # games/won counters over so stats survive across games.
    self.game[id] = {
        "board": [" "] * 9,
        "player": X,
        "games": games_played,
        "won": games_won,
        "turn": "human"
    }
|
||||
|
||||
|
||||
def end(self, id):
    """End game completely (called by 'end' command)."""
    # Drop the tracked game state entirely; no-op for unknown ids.
    self.game.pop(id, None)
|
||||
|
||||
|
||||
# Global instances for the bot system
# NOTE(review): tictactoeTracker appears unused in this file — presumably
# per-node activity tracking consumed elsewhere; confirm against callers.
tictactoeTracker = []
# Shared singleton game manager used by the bot command dispatcher.
tictactoe = TicTacToe()
|
||||
def end(self, nodeID):
    """End and remove the game for the given nodeID.

    NOTE(review): this duplicates the class's ``end`` method but sits
    after the module-level instances (indentation was lost in
    extraction, so its scope is ambiguous). If it really is at module
    level, ``self`` must be passed explicitly — looks like merge
    residue; confirm against upstream.
    """
    if nodeID in self.game:
        del self.game[nodeID]
|
||||
199
modules/games/tictactoe_vid.py
Normal file
199
modules/games/tictactoe_vid.py
Normal file
@@ -0,0 +1,199 @@
|
||||
# Tic-Tac-Toe Video Display Module for Meshtastic mesh-bot
|
||||
# Uses Pygame to render the game board visually
|
||||
# 2025 K7MHI Kelly Keeton
|
||||
|
||||
try:
|
||||
import pygame
|
||||
except ImportError:
|
||||
print("Pygame is not installed. Please install it with 'pip install pygame-ce' to use the Tic-Tac-Toe display module.")
|
||||
exit(1)
|
||||
|
||||
latest_board = [" "] * 9 # or 27 for 3D
|
||||
latest_meta = {} # To store metadata like status
|
||||
|
||||
def handle_tictactoe_payload(payload, from_id=None):
    """Parse an incoming board payload and cache it for the display loop.

    Updates the module-level latest_board/latest_meta that the pygame
    render loop reads; ignores payloads that fail to parse.
    """
    global latest_board, latest_meta
    parsed_board, parsed_meta = parse_tictactoe_message(payload)
    if parsed_board:
        latest_board = parsed_board
        latest_meta = parsed_meta or {}
|
||||
|
||||
def parse_tictactoe_message(msg):
    """Decode a 'MTTT:'-stripped payload into (board, meta).

    Payload layout: "<board digits>|<nodeID>|<channel>|<deviceID>|status=...".
    Board digits map 0 -> empty, 1 -> X emoji, 2 -> O emoji; any other
    character becomes an empty cell. Returns (None, None) when the first
    field is too short to hold a 9-cell board.
    """
    fields = msg.split("|")
    if not fields or len(fields[0]) < 9:
        return None, None  # Not enough data for a board

    meta = {}
    if len(fields) > 1:
        meta["nodeID"] = fields[1] if len(fields) > 1 else ""
        meta["channel"] = fields[2] if len(fields) > 2 else ""
        meta["deviceID"] = fields[3] if len(fields) > 3 else ""
        # Any remaining field may carry a status flag.
        for extra in fields[4:]:
            if extra.startswith("status="):
                meta["status"] = extra.split("=", 1)[1]

    marks = {"0": " ", "1": "❌", "2": "⭕️"}
    decoded = [marks.get(ch, " ") for ch in fields[0]]
    return decoded, meta
|
||||
|
||||
def draw_board(screen, board, meta=None):
    """Render the tic-tac-toe board (2D or 3D) onto the pygame screen.

    Args:
        screen: pygame display surface to draw on.
        board: list of 9 (2D) or 27 (3D) cell strings (" ", emoji X/O,
               or plain "X"/"O").
        meta: optional dict; "status" controls the banner message
              ("win", "tie", "loss", "new", "refresh", or other).
    """
    screen.fill((30, 30, 30))
    width, height = screen.get_size()
    margin = int(min(width, height) * 0.05)
    font_size = int(height * 0.12)
    font = pygame.font.Font(None, font_size)

    # Draw the title at the top center, scaled
    title_font = pygame.font.Font(None, int(height * 0.08))
    title_text = title_font.render("MeshBot Tic-Tac-Toe", True, (220, 220, 255))
    title_rect = title_text.get_rect(center=(width // 2, margin // 2 + 10))
    screen.blit(title_text, title_rect)

    # Add a buffer below the title
    title_buffer = int(height * 0.06)

    # --- Show win/draw message if present ---
    if meta and "status" in meta:
        status = meta["status"]
        msg_font = pygame.font.Font(None, int(height * 0.06)) # Smaller font
        msg_y = title_rect.bottom + int(height * 0.04) # Just under the title
        if status == "win":
            msg = "Game Won!"
            text = msg_font.render(msg, True, (100, 255, 100))
            text_rect = text.get_rect(center=(width // 2, msg_y))
            screen.blit(text, text_rect)
        elif status == "tie":
            msg = "Tie Game!"
            text = msg_font.render(msg, True, (255, 220, 120))
            text_rect = text.get_rect(center=(width // 2, msg_y))
            screen.blit(text, text_rect)
        elif status == "loss":
            msg = "You Lost!"
            text = msg_font.render(msg, True, (255, 100, 100))
            text_rect = text.get_rect(center=(width // 2, msg_y))
            screen.blit(text, text_rect)
        elif status == "new":
            msg = "Welcome! New Game"
            text = msg_font.render(msg, True, (200, 255, 200))
            text_rect = text.get_rect(center=(width // 2, msg_y))
            screen.blit(text, text_rect)
            # Do NOT return here—let the board draw as normal
        elif status != "refresh":
            # Unknown status: show it verbatim as a banner.
            msg = status.capitalize()
            text = msg_font.render(msg, True, (255, 220, 120))
            text_rect = text.get_rect(center=(width // 2, msg_y))
            screen.blit(text, text_rect)
            # Don't return here—let the board draw as normal

    # Show waiting message if board is empty, unless status is "new"
    if all(cell.strip() == "" or cell.strip() == " " for cell in board):
        if not (meta and meta.get("status") == "new"):
            msg_font = pygame.font.Font(None, int(height * 0.09))
            msg = "Waiting for player..."
            text = msg_font.render(msg, True, (200, 200, 200))
            text_rect = text.get_rect(center=(width // 2, height // 2))
            screen.blit(text, text_rect)
            pygame.display.flip()
            return

    # Closures over `screen`: draw an X / O inside a cell rect.
    def draw_x(rect):
        thickness = max(4, rect.width // 12)
        pygame.draw.line(screen, (255, 80, 80), rect.topleft, rect.bottomright, thickness)
        pygame.draw.line(screen, (255, 80, 80), rect.topright, rect.bottomleft, thickness)

    def draw_o(rect):
        center = rect.center
        radius = rect.width // 2 - max(6, rect.width // 16)
        thickness = max(4, rect.width // 12)
        pygame.draw.circle(screen, (80, 180, 255), center, radius, thickness)

    if len(board) == 9:
        # 2D: Center a single 3x3 grid, scale to fit
        size = min((width - 2*margin)//3, (height - 2*margin - title_buffer)//3)
        offset_x = (width - size*3) // 2
        offset_y = (height - size*3) // 2 + title_buffer // 2
        offset_y = max(offset_y, title_rect.bottom + title_buffer)
        # Index number font and buffer
        small_index_font = pygame.font.Font(None, int(size * 0.38))
        index_buffer_x = int(size * 0.16)
        index_buffer_y = int(size * 0.10)
        for i in range(3):
            for j in range(3):
                rect = pygame.Rect(offset_x + j*size, offset_y + i*size, size, size)
                pygame.draw.rect(screen, (200, 200, 200), rect, 2)
                idx = i*3 + j
                # Draw index number in top-left, start at 1
                idx_text = small_index_font.render(str(idx + 1), True, (120, 120, 160))
                idx_rect = idx_text.get_rect(topleft=(rect.x + index_buffer_x, rect.y + index_buffer_y))
                screen.blit(idx_text, idx_rect)
                val = board[idx].strip()
                if val == "❌" or val == "X":
                    draw_x(rect)
                elif val == "⭕️" or val == "O":
                    draw_o(rect)
                elif val:
                    # Any other non-empty mark: render the raw text.
                    text = font.render(val, True, (255, 255, 255))
                    text_rect = text.get_rect(center=rect.center)
                    screen.blit(text, text_rect)
    elif len(board) == 27:
        # 3D: Stack three 3x3 grids vertically, with horizontal offsets for 3D effect, scale to fit
        size = min((width - 2*margin)//7, (height - 4*margin - title_buffer)//9)
        base_offset_x = (width - (size * 3)) // 2
        offset_y = (height - (size*9 + margin*2)) // 2 + title_buffer // 2
        offset_y = max(offset_y, title_rect.bottom + title_buffer)
        small_font = pygame.font.Font(None, int(height * 0.045))
        small_index_font = pygame.font.Font(None, int(size * 0.38))
        index_buffer_x = int(size * 0.16)
        index_buffer_y = int(size * 0.10)
        # Draw layer 3 on top, layer 1 at the bottom (reversed order).
        for display_idx, layer in enumerate(reversed(range(3))):
            layer_offset_x = base_offset_x + (layer - 1) * 2 * size
            layer_y = offset_y + display_idx * (size*3 + margin)
            label_text = f"Layer {layer+1}"
            label = small_font.render(label_text, True, (180, 180, 220))
            label_rect = label.get_rect(center=(layer_offset_x + size*3//2, layer_y + size*3 + int(size*0.2)))
            screen.blit(label, label_rect)
            for i in range(3):
                for j in range(3):
                    rect = pygame.Rect(layer_offset_x + j*size, layer_y + i*size, size, size)
                    pygame.draw.rect(screen, (200, 200, 200), rect, 2)
                    idx = layer*9 + i*3 + j
                    idx_text = small_index_font.render(str(idx + 1), True, (120, 120, 160))
                    idx_rect = idx_text.get_rect(topleft=(rect.x + index_buffer_x, rect.y + index_buffer_y))
                    screen.blit(idx_text, idx_rect)
                    val = board[idx].strip()
                    if val == "❌" or val == "X":
                        draw_x(rect)
                    elif val == "⭕️" or val == "O":
                        draw_o(rect)
                    elif val:
                        text = font.render(val, True, (255, 255, 255))
                        text_rect = text.get_rect(center=rect.center)
                        screen.blit(text, text_rect)
    pygame.display.flip()
|
||||
|
||||
def ttt_main(fullscreen=True):
    """Run the pygame display loop for the tic-tac-toe board.

    Opens a fullscreen (or 900x700 windowed) display and repeatedly
    renders the module-level latest_board/latest_meta, which are
    updated out-of-band by handle_tictactoe_payload. Exits on window
    close or the Escape key.

    Args:
        fullscreen: when True, use the full display resolution.
    """
    global latest_board, latest_meta
    pygame.init()
    if fullscreen:
        screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
    else:
        # Use a reasonable windowed size if not fullscreen
        screen = pygame.display.set_mode((900, 700))
    pygame.display.set_caption("Tic-Tac-Toe 3D Display")
    info = pygame.display.Info()
    mode = "fullscreen" if fullscreen else "windowed"
    print(f"[MeshBot TTT Display] Pygame version: {pygame.version.ver}")
    print(f"[MeshBot TTT Display] Resolution: {info.current_w}x{info.current_h} ({mode})")
    print(f"[MeshBot TTT Display] Display driver: {pygame.display.get_driver()}")
    running = True
    while running:
        for event in pygame.event.get():
            # Quit on window close or Escape.
            if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
                running = False
        draw_board(screen, latest_board, latest_meta)
        pygame.display.flip()
        pygame.time.wait(75) # or 50-100 for lower CPU
    pygame.quit()
|
||||
@@ -4,6 +4,7 @@ import random
|
||||
import time
|
||||
import pickle
|
||||
from modules.log import logger, getPrettyTime
|
||||
from modules.settings import vpTracker
|
||||
|
||||
vpStartingCash = 20
|
||||
# Define the Card class
|
||||
@@ -260,6 +261,7 @@ class PlayerVP:
|
||||
|
||||
|
||||
def getLastCmdVp(nodeID):
|
||||
global vpTracker
|
||||
last_cmd = ""
|
||||
for i in range(len(vpTracker)):
|
||||
if vpTracker[i]['nodeID'] == nodeID:
|
||||
@@ -267,6 +269,7 @@ def getLastCmdVp(nodeID):
|
||||
return last_cmd
|
||||
|
||||
def setLastCmdVp(nodeID, cmd):
|
||||
global vpTracker
|
||||
for i in range(len(vpTracker)):
|
||||
if vpTracker[i]['nodeID'] == nodeID:
|
||||
vpTracker[i]['cmd'] = cmd
|
||||
|
||||
@@ -175,7 +175,6 @@ def getArtSciRepeaters(lat=0, lon=0):
|
||||
return msg
|
||||
|
||||
def get_NOAAtide(lat=0, lon=0):
|
||||
# get tide data from NOAA for lat/lon
|
||||
station_id = ""
|
||||
location = lat,lon
|
||||
if float(lat) == 0 and float(lon) == 0:
|
||||
@@ -262,7 +261,7 @@ def get_NOAAweather(lat=0, lon=0, unit=0, report_days=None):
|
||||
logger.warning("Location:Error fetching weather data from NOAA for location")
|
||||
return my_settings.ERROR_FETCHING_DATA
|
||||
except Exception:
|
||||
logger.warning(f"Location:Error fetching weather data error: {Exception}")
|
||||
logger.warning(f"Location:Error fetching weather data malformed: {Exception}")
|
||||
return my_settings.ERROR_FETCHING_DATA
|
||||
# get the forecast URL from the JSON response
|
||||
weather_json = weather_data.json()
|
||||
@@ -273,7 +272,7 @@ def get_NOAAweather(lat=0, lon=0, unit=0, report_days=None):
|
||||
logger.warning("Location:Error fetching weather forecast from NOAA")
|
||||
return my_settings.ERROR_FETCHING_DATA
|
||||
except Exception:
|
||||
logger.warning(f"Location:Error fetching weather data error: {Exception}")
|
||||
logger.warning(f"Location:Error fetching weather data missing: {Exception}")
|
||||
return my_settings.ERROR_FETCHING_DATA
|
||||
|
||||
# from periods, get the detailedForecast from number of days in NOAAforecastDuration
|
||||
@@ -410,21 +409,52 @@ def getWeatherAlertsNOAA(lat=0, lon=0, useDefaultLatLon=False):
|
||||
try:
|
||||
alert_data = requests.get(alert_url, timeout=my_settings.urlTimeoutSeconds)
|
||||
if not alert_data.ok:
|
||||
logger.warning("Location:Error fetching weather alerts from NOAA")
|
||||
logger.warning("Location:Error fetching weather alerts from NOAA bad data")
|
||||
return my_settings.ERROR_FETCHING_DATA
|
||||
except Exception:
|
||||
logger.warning(f"Location:Error fetching weather data error: {Exception}")
|
||||
except Exception as e:
|
||||
logger.warning(f"Location:Error fetching weather alert request error: {type(e).__name__}: {e}")
|
||||
return my_settings.ERROR_FETCHING_DATA
|
||||
|
||||
alerts = ""
|
||||
alertxml = xml.dom.minidom.parseString(alert_data.text)
|
||||
for i in alertxml.getElementsByTagName("entry"):
|
||||
title = i.getElementsByTagName("title")[0].childNodes[0].nodeValue
|
||||
area_desc = i.getElementsByTagName("cap:areaDesc")[0].childNodes[0].nodeValue
|
||||
if my_settings.enableExtraLocationWx:
|
||||
alerts += f"{title}. {area_desc.replace(' ', '')}\n"
|
||||
area_desc_nodes = i.getElementsByTagName("cap:areaDesc")
|
||||
if area_desc_nodes and area_desc_nodes[0].childNodes:
|
||||
area_desc = area_desc_nodes[0].childNodes[0].nodeValue
|
||||
else:
|
||||
alerts += f"{title}\n"
|
||||
area_desc = ""
|
||||
|
||||
# Extract NWSheadline from cap:parameter if present
|
||||
nws_headline = ""
|
||||
for param in i.getElementsByTagName("cap:parameter"):
|
||||
try:
|
||||
value_name = param.getElementsByTagName("valueName")[0].childNodes[0].nodeValue
|
||||
if value_name == "NWSheadline":
|
||||
nws_headline = param.getElementsByTagName("value")[0].childNodes[0].nodeValue
|
||||
break
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
# If title is "Special Weather Statement" and headline exists, use headline only
|
||||
if "special" in title.lower() and nws_headline:
|
||||
main_alert = nws_headline
|
||||
else:
|
||||
main_alert = title
|
||||
|
||||
if my_settings.enableExtraLocationWx:
|
||||
# adds location data which is too much data?
|
||||
alerts += f"{main_alert}. {area_desc.replace(' ', '')}"
|
||||
# Only add headline if not already used as main_alert
|
||||
# if nws_headline and main_alert != nws_headline:
|
||||
# alerts += f" ALERT: {nws_headline}"
|
||||
alerts += "\n"
|
||||
else:
|
||||
alerts += f"{main_alert}"
|
||||
# Only add headline if not already used as main_alert
|
||||
# if nws_headline and main_alert != nws_headline:
|
||||
# alerts += f" ALERT: {nws_headline}"
|
||||
alerts += "\n"
|
||||
|
||||
if alerts == "" or alerts == None:
|
||||
return my_settings.NO_ALERTS
|
||||
@@ -463,7 +493,6 @@ def alertBrodcastNOAA():
|
||||
# broadcast the alerts send to wxBrodcastCh
|
||||
elif currentAlert[0] not in wxAlertCacheNOAA:
|
||||
# Check if the current alert is not in the weather alert cache
|
||||
logger.debug("Location:Broadcasting weather alerts")
|
||||
wxAlertCacheNOAA = currentAlert[0]
|
||||
return currentAlert
|
||||
|
||||
@@ -484,10 +513,10 @@ def getActiveWeatherAlertsDetailNOAA(lat=0, lon=0):
|
||||
try:
|
||||
alert_data = requests.get(alert_url, timeout=my_settings.urlTimeoutSeconds)
|
||||
if not alert_data.ok:
|
||||
logger.warning("Location:Error fetching weather alerts from NOAA")
|
||||
logger.warning("Location:Error fetching weather alerts from NOAA bad data")
|
||||
return my_settings.ERROR_FETCHING_DATA
|
||||
except Exception:
|
||||
logger.warning(f"Location:Error fetching weather data error: {Exception}")
|
||||
except Exception as e:
|
||||
logger.warning(f"Location:Error fetching active weather alert request error: {type(e).__name__}: {e}")
|
||||
return my_settings.ERROR_FETCHING_DATA
|
||||
|
||||
alerts = ""
|
||||
@@ -1006,60 +1035,133 @@ def distance(lat=0,lon=0,nodeID=0, reset=False):
|
||||
|
||||
return msg
|
||||
|
||||
def get_openskynetwork(lat=0, lon=0):
|
||||
# get the latest aircraft data from OpenSky Network in the area
|
||||
if lat == 0 and lon == 0:
|
||||
return my_settings.NO_ALERTS
|
||||
# setup a bounding box of 50km around the lat/lon
|
||||
box_size = 0.45 # approx 50km
|
||||
# return limits for aircraft search
|
||||
search_limit = 3
|
||||
lamin = lat - box_size
|
||||
lamax = lat + box_size
|
||||
lomin = lon - box_size
|
||||
lomax = lon + box_size
|
||||
|
||||
# fetch the aircraft data from OpenSky Network
|
||||
opensky_url = f"https://opensky-network.org/api/states/all?lamin={lamin}&lomin={lomin}&lamax={lamax}&lomax={lomax}"
|
||||
try:
|
||||
aircraft_data = requests.get(opensky_url, timeout=my_settings.urlTimeoutSeconds)
|
||||
if not aircraft_data.ok:
|
||||
logger.warning("Location:Error fetching aircraft data from OpenSky Network")
|
||||
return my_settings.ERROR_FETCHING_DATA
|
||||
except (requests.exceptions.RequestException):
|
||||
logger.warning("Location:Error fetching aircraft data from OpenSky Network")
|
||||
return my_settings.ERROR_FETCHING_DATA
|
||||
aircraft_json = aircraft_data.json()
|
||||
if 'states' not in aircraft_json or not aircraft_json['states']:
|
||||
return my_settings.NO_ALERTS
|
||||
aircraft_list = aircraft_json['states']
|
||||
aircraft_report = ""
|
||||
logger.debug(f"Location: OpenSky Network: Found {len(aircraft_list)} possible aircraft in area")
|
||||
for aircraft in aircraft_list:
|
||||
if len(aircraft_report.split("\n")) >= search_limit:
|
||||
break
|
||||
# extract values from JSON
|
||||
def get_openskynetwork(lat=0, lon=0, altitude=0, node_altitude=0, altitude_window=900):
|
||||
"""
|
||||
Returns the aircraft dict from OpenSky Network closest in altitude (within altitude_window meters)
|
||||
to the given node_altitude. If no aircraft found, returns my_settings.NO_ALERTS.
|
||||
"""
|
||||
def _to_float(v):
|
||||
try:
|
||||
callsign = aircraft[1].strip() if aircraft[1] else "N/A"
|
||||
origin_country = aircraft[2]
|
||||
velocity = aircraft[9]
|
||||
true_track = aircraft[10]
|
||||
vertical_rate = aircraft[11]
|
||||
sensors = aircraft[12]
|
||||
geo_altitude = aircraft[13]
|
||||
squawk = aircraft[14] if len(aircraft) > 14 else "N/A"
|
||||
except Exception as e:
|
||||
logger.debug("Location:Error extracting aircraft data from OpenSky Network")
|
||||
continue
|
||||
|
||||
# format the aircraft data
|
||||
aircraft_report += f"{callsign} Alt:{int(geo_altitude) if geo_altitude else 'N/A'}m Vel:{int(velocity) if velocity else 'N/A'}m/s Heading:{int(true_track) if true_track else 'N/A'}°\n"
|
||||
|
||||
# remove last newline
|
||||
if aircraft_report.endswith("\n"):
|
||||
aircraft_report = aircraft_report[:-1]
|
||||
aircraft_report = abbreviate_noaa(aircraft_report)
|
||||
return aircraft_report if aircraft_report else my_settings.NO_ALERTS
|
||||
# handle numeric and numeric-strings, treat empty/'N/A' as None
|
||||
if v is None:
|
||||
return None
|
||||
if isinstance(v, (int, float)):
|
||||
return float(v)
|
||||
s = str(v).strip()
|
||||
if s == "" or s.upper() == "N/A":
|
||||
return None
|
||||
return float(s)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
try:
|
||||
# basic input validation/coercion
|
||||
try:
|
||||
lat = float(lat)
|
||||
lon = float(lon)
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
try:
|
||||
node_altitude = _to_float(node_altitude) or 0.0
|
||||
except Exception:
|
||||
node_altitude = 0.0
|
||||
|
||||
if lat == 0 and lon == 0:
|
||||
return False
|
||||
|
||||
box_size = 0.45 # approx 50km
|
||||
lamin = lat - box_size
|
||||
lamax = lat + box_size
|
||||
lomin = lon - box_size
|
||||
lomax = lon + box_size
|
||||
|
||||
opensky_url = (
|
||||
f"https://opensky-network.org/api/states/all?lamin={lamin}&lomin={lomin}"
|
||||
f"&lamax={lamax}&lomax={lomax}"
|
||||
)
|
||||
try:
|
||||
aircraft_data = requests.get(opensky_url, timeout=my_settings.urlTimeoutSeconds)
|
||||
if not aircraft_data.ok:
|
||||
logger.warning("Location:Error fetching aircraft data from OpenSky Network")
|
||||
return False
|
||||
except (requests.exceptions.RequestException):
|
||||
logger.warning("Location:Error fetching aircraft data from OpenSky Network")
|
||||
return False
|
||||
|
||||
aircraft_json = aircraft_data.json()
|
||||
if 'states' not in aircraft_json or not aircraft_json['states']:
|
||||
return False
|
||||
|
||||
aircraft_list = aircraft_json['states']
|
||||
logger.debug(f"Location: OpenSky Network: Found {len(aircraft_list)} possible aircraft in area")
|
||||
closest = None
|
||||
min_diff = float('inf')
|
||||
|
||||
if len(aircraft_list) == 1:
|
||||
# Only one aircraft found; return normalized values (altitudes coerced to numbers or None)
|
||||
aircraft = aircraft_list[0]
|
||||
baro_alt = _to_float(aircraft[7]) # barometric altitude
|
||||
geo_alt = _to_float(aircraft[13]) # geometric altitude
|
||||
return {
|
||||
"callsign": aircraft[1].strip() if aircraft[1] else "N/A",
|
||||
"origin_country": aircraft[2] if aircraft[2] is not None else "N/A",
|
||||
"velocity": aircraft[9] if aircraft[9] is not None else "N/A",
|
||||
"true_track": aircraft[10] if aircraft[10] is not None else "N/A",
|
||||
"vertical_rate": aircraft[11] if aircraft[11] is not None else "N/A",
|
||||
"sensors": aircraft[12] if aircraft[12] is not None else "N/A",
|
||||
"altitude": baro_alt,
|
||||
"geo_altitude": geo_alt,
|
||||
"squawk": aircraft[14] if len(aircraft) > 14 and aircraft[14] is not None else "N/A",
|
||||
}
|
||||
|
||||
for aircraft in aircraft_list:
|
||||
try:
|
||||
callsign = aircraft[1].strip() if aircraft[1] else "N/A"
|
||||
origin_country = aircraft[2] if aircraft[2] is not None else "N/A"
|
||||
velocity = aircraft[9] if aircraft[9] is not None else "N/A"
|
||||
true_track = aircraft[10] if aircraft[10] is not None else "N/A"
|
||||
vertical_rate = aircraft[11] if aircraft[11] is not None else "N/A"
|
||||
sensors = aircraft[12] if aircraft[12] is not None else "N/A"
|
||||
baro_altitude = _to_float(aircraft[7])
|
||||
geo_altitude = _to_float(aircraft[13])
|
||||
squawk = aircraft[14] if len(aircraft) > 14 and aircraft[14] is not None else "N/A"
|
||||
except Exception:
|
||||
logger.debug("Location:Error extracting aircraft data from OpenSky Network")
|
||||
continue
|
||||
|
||||
# Prefer geo_altitude, fallback to baro_altitude
|
||||
plane_alt = geo_altitude if geo_altitude is not None else baro_altitude
|
||||
# skip if we can't get a numeric plane altitude or node_altitude is zero/unset
|
||||
if plane_alt is None or (node_altitude == 0 or node_altitude is None):
|
||||
continue
|
||||
|
||||
# safe numeric diff
|
||||
try:
|
||||
diff = abs(float(plane_alt) - float(node_altitude))
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
if diff <= altitude_window and diff < min_diff:
|
||||
min_diff = diff
|
||||
closest = {
|
||||
"callsign": callsign,
|
||||
"origin_country": origin_country,
|
||||
"velocity": velocity,
|
||||
"true_track": true_track,
|
||||
"vertical_rate": vertical_rate,
|
||||
"sensors": sensors,
|
||||
"altitude": baro_altitude,
|
||||
"geo_altitude": geo_altitude,
|
||||
"squawk": squawk,
|
||||
}
|
||||
if closest:
|
||||
return closest
|
||||
else:
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.debug(f"SYSTEM: Location HighFly: Error processing OpenSky Network data: {e}")
|
||||
return False
|
||||
|
||||
def log_locationData_toMap(userID, location, message):
|
||||
"""
|
||||
|
||||
55
modules/radio.md
Normal file
55
modules/radio.md
Normal file
@@ -0,0 +1,55 @@
|
||||
# Radio Module: Meshages TTS (Text-to-Speech) Setup
|
||||
|
||||
The radio module supports audible mesh messages using the [KittenTTS](https://github.com/KittenML/KittenTTS) engine. This allows the bot to generate and play speech from text, making mesh alerts and messages audible on your device.
|
||||
|
||||
## Features
|
||||
|
||||
- Converts mesh messages to speech using KittenTTS.
|
||||
|
||||
## Installation
|
||||
|
||||
1. **Install Python dependencies:**
|
||||
|
||||
- `kittentts` is the TTS engine.
|
||||
|
||||
`pip install https://github.com/KittenML/KittenTTS/releases/download/0.1/kittentts-0.1.0-py3-none-any.whl`
|
||||
|
||||
2. **Install PortAudio (required for sounddevice):**
|
||||
|
||||
- **macOS:**
|
||||
```sh
|
||||
brew install portaudio
|
||||
```
|
||||
- **Linux (Debian/Ubuntu):**
|
||||
```sh
|
||||
sudo apt-get install portaudio19-dev
|
||||
```
|
||||
- **Windows:**
|
||||
No extra step needed; `sounddevice` will use the default audio driver.
|
||||
|
||||
## Configuration
|
||||
|
||||
- Enable TTS in your `config.ini`:
|
||||
```ini
|
||||
[radioMon]
|
||||
meshagesTTS = True
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
When enabled, the bot will generate and play speech for mesh messages using the selected voice.
|
||||
No additional user action is required.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- If you see errors about missing `sounddevice` or `portaudio`, ensure you have installed the dependencies above.
|
||||
- On macOS, you may need to allow microphone/audio access for your terminal.
|
||||
- If you have audio issues, check your system’s default output device.
|
||||
|
||||
## References
|
||||
|
||||
- [KittenTTS GitHub](https://github.com/KittenML/KittenTTS)
|
||||
- [KittenTTS Model on HuggingFace](https://huggingface.co/KittenML/kitten-tts-nano-0.2)
|
||||
- [sounddevice documentation](https://python-sounddevice.readthedocs.io/)
|
||||
|
||||
---
|
||||
195
modules/radio.py
195
modules/radio.py
@@ -16,6 +16,9 @@ import struct
|
||||
import json
|
||||
from modules.log import logger
|
||||
|
||||
# verbose debug logging for trap words function
|
||||
debugVoxTmsg = False
|
||||
|
||||
from modules.settings import (
|
||||
radio_detection_enabled,
|
||||
rigControlServerAddress,
|
||||
@@ -31,14 +34,52 @@ from modules.settings import (
|
||||
voxTrapList,
|
||||
voxOnTrapList,
|
||||
voxEnableCmd,
|
||||
ERROR_FETCHING_DATA
|
||||
ERROR_FETCHING_DATA,
|
||||
meshagesTTS,
|
||||
)
|
||||
|
||||
# module global variables
|
||||
previousStrength = -40
|
||||
signalCycle = 0
|
||||
|
||||
|
||||
# verbose debug logging for trap words function
|
||||
debugVoxTmsg = False
|
||||
FREQ_NAME_MAP = {
|
||||
462562500: "GRMS CH1",
|
||||
462587500: "GRMS CH2",
|
||||
462612500: "GRMS CH3",
|
||||
462637500: "GRMS CH4",
|
||||
462662500: "GRMS CH5",
|
||||
462687500: "GRMS CH6",
|
||||
462712500: "GRMS CH7",
|
||||
467562500: "GRMS CH8",
|
||||
467587500: "GRMS CH9",
|
||||
467612500: "GRMS CH10",
|
||||
467637500: "GRMS CH11",
|
||||
467662500: "GRMS CH12",
|
||||
467687500: "GRMS CH13",
|
||||
467712500: "GRMS CH14",
|
||||
467737500: "GRMS CH15",
|
||||
462550000: "GRMS CH16",
|
||||
462575000: "GMRS CH17",
|
||||
462600000: "GMRS CH18",
|
||||
462625000: "GMRS CH19",
|
||||
462675000: "GMRS CH20",
|
||||
462670000: "GMRS CH21",
|
||||
462725000: "GMRS CH22",
|
||||
462725500: "GMRS CH23",
|
||||
467575000: "GMRS CH24",
|
||||
467600000: "GMRS CH25",
|
||||
467625000: "GMRS CH26",
|
||||
467650000: "GMRS CH27",
|
||||
467675000: "GMRS CH28",
|
||||
467700000: "FRS CH1",
|
||||
462650000: "FRS CH5",
|
||||
462700000: "FRS CH7",
|
||||
462737500: "FRS CH16",
|
||||
146520000: "2M Simplex Calling",
|
||||
446000000: "70cm Simplex Calling",
|
||||
156800000: "Marine CH16",
|
||||
# Add more as needed
|
||||
}
|
||||
|
||||
# --- WSJT-X and JS8Call Settings Initialization ---
|
||||
wsjtxMsgQueue = [] # Queue for WSJT-X detected messages
|
||||
@@ -100,9 +141,9 @@ try:
|
||||
watched_callsigns = list({cs.upper() for cs in callsigns})
|
||||
|
||||
except ImportError:
|
||||
logger.debug("RadioMon: WSJT-X/JS8Call settings not configured")
|
||||
logger.debug("System: RadioMon: WSJT-X/JS8Call settings not configured")
|
||||
except Exception as e:
|
||||
logger.warning(f"RadioMon: Error loading WSJT-X/JS8Call settings: {e}")
|
||||
logger.warning(f"System: RadioMon: Error loading WSJT-X/JS8Call settings: {e}")
|
||||
|
||||
|
||||
if radio_detection_enabled:
|
||||
@@ -136,51 +177,43 @@ if voxDetectionEnabled:
|
||||
voxModel = Model(lang=voxLanguage) # use built in model for specified language
|
||||
|
||||
except Exception as e:
|
||||
print(f"RadioMon: Error importing VOX dependencies: {e}")
|
||||
print(f"System: RadioMon: Error importing VOX dependencies: {e}")
|
||||
print(f"To use VOX detection please install the vosk and sounddevice python modules")
|
||||
print(f"pip install vosk sounddevice")
|
||||
print(f"sounddevice needs pulseaudio, apt-get install portaudio19-dev")
|
||||
voxDetectionEnabled = False
|
||||
logger.error(f"RadioMon: VOX detection disabled due to import error")
|
||||
logger.error(f"System: RadioMon: VOX detection disabled due to import error")
|
||||
|
||||
FREQ_NAME_MAP = {
|
||||
462562500: "GRMS CH1",
|
||||
462587500: "GRMS CH2",
|
||||
462612500: "GRMS CH3",
|
||||
462637500: "GRMS CH4",
|
||||
462662500: "GRMS CH5",
|
||||
462687500: "GRMS CH6",
|
||||
462712500: "GRMS CH7",
|
||||
467562500: "GRMS CH8",
|
||||
467587500: "GRMS CH9",
|
||||
467612500: "GRMS CH10",
|
||||
467637500: "GRMS CH11",
|
||||
467662500: "GRMS CH12",
|
||||
467687500: "GRMS CH13",
|
||||
467712500: "GRMS CH14",
|
||||
467737500: "GRMS CH15",
|
||||
462550000: "GRMS CH16",
|
||||
462575000: "GMRS CH17",
|
||||
462600000: "GMRS CH18",
|
||||
462625000: "GMRS CH19",
|
||||
462675000: "GMRS CH20",
|
||||
462670000: "GMRS CH21",
|
||||
462725000: "GMRS CH22",
|
||||
462725500: "GMRS CH23",
|
||||
467575000: "GMRS CH24",
|
||||
467600000: "GMRS CH25",
|
||||
467625000: "GMRS CH26",
|
||||
467650000: "GMRS CH27",
|
||||
467675000: "GMRS CH28",
|
||||
467700000: "FRS CH1",
|
||||
462650000: "FRS CH5",
|
||||
462700000: "FRS CH7",
|
||||
462737500: "FRS CH16",
|
||||
146520000: "2M Simplex Calling",
|
||||
446000000: "70cm Simplex Calling",
|
||||
156800000: "Marine CH16",
|
||||
# Add more as needed
|
||||
}
|
||||
if meshagesTTS:
|
||||
try:
|
||||
# TTS for meshages imports
|
||||
logger.debug("System: RadioMon: Initializing TTS model for audible meshages")
|
||||
import sounddevice as sd
|
||||
from kittentts import KittenTTS
|
||||
ttsModel = KittenTTS("KittenML/kitten-tts-nano-0.2")
|
||||
available_voices = [
|
||||
'expr-voice-2-m', 'expr-voice-2-f', 'expr-voice-3-m', 'expr-voice-3-f',
|
||||
'expr-voice-4-m', 'expr-voice-4-f', 'expr-voice-5-m', 'expr-voice-5-f'
|
||||
]
|
||||
except Exception as e:
|
||||
logger.error(f"To use Meshages TTS please review the radio.md documentation for setup instructions.")
|
||||
meshagesTTS = False
|
||||
|
||||
async def generate_and_play_tts(text, voice, samplerate=24000):
|
||||
"""Async: Generate speech and play audio."""
|
||||
text = text.strip()
|
||||
if not text:
|
||||
return
|
||||
try:
|
||||
logger.debug(f"System: RadioMon: Generating TTS for text: {text} with voice: {voice}")
|
||||
audio = await asyncio.to_thread(ttsModel.generate, text, voice=voice)
|
||||
if audio is None or len(audio) == 0:
|
||||
return
|
||||
await asyncio.to_thread(sd.play, audio, samplerate)
|
||||
await asyncio.to_thread(sd.wait)
|
||||
del audio
|
||||
except Exception as e:
|
||||
logger.warning(f"System: RadioMon: Error in generate_and_play_tts: {e}")
|
||||
|
||||
def get_freq_common_name(freq):
|
||||
freq = int(freq)
|
||||
@@ -194,14 +227,14 @@ def get_freq_common_name(freq):
|
||||
def get_hamlib(msg="f"):
|
||||
# get data from rigctld server
|
||||
if "socket" not in globals():
|
||||
logger.warning("RadioMon: 'socket' module not imported. Hamlib disabled.")
|
||||
logger.warning("System: RadioMon: 'socket' module not imported. Hamlib disabled.")
|
||||
return ERROR_FETCHING_DATA
|
||||
try:
|
||||
rigControlSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
rigControlSocket.settimeout(2)
|
||||
rigControlSocket.connect((rigControlServerAddress.split(":")[0],int(rigControlServerAddress.split(":")[1])))
|
||||
except Exception as e:
|
||||
logger.error(f"RadioMon: Error connecting to rigctld: {e}")
|
||||
logger.error(f"System: RadioMon: Error connecting to rigctld: {e}")
|
||||
return ERROR_FETCHING_DATA
|
||||
|
||||
try:
|
||||
@@ -215,7 +248,7 @@ def get_hamlib(msg="f"):
|
||||
data = data.replace(b'\n',b'')
|
||||
return data.decode("utf-8").rstrip()
|
||||
except Exception as e:
|
||||
logger.error(f"RadioMon: Error fetching data from rigctld: {e}")
|
||||
logger.error(f"System: RadioMon: Error fetching data from rigctld: {e}")
|
||||
return ERROR_FETCHING_DATA
|
||||
|
||||
def get_sig_strength():
|
||||
@@ -225,7 +258,7 @@ def get_sig_strength():
|
||||
def checkVoxTrapWords(text):
|
||||
try:
|
||||
if not voxOnTrapList:
|
||||
logger.debug(f"RadioMon: VOX detected: {text}")
|
||||
logger.debug(f"System: RadioMon: VOX detected: {text}")
|
||||
return text
|
||||
if text:
|
||||
traps = [voxTrapList] if isinstance(voxTrapList, str) else voxTrapList
|
||||
@@ -235,27 +268,27 @@ def checkVoxTrapWords(text):
|
||||
trap_lower = trap_clean.lower()
|
||||
idx = text_lower.find(trap_lower)
|
||||
if debugVoxTmsg:
|
||||
logger.debug(f"RadioMon: VOX checking for trap word '{trap_lower}' in: '{text}' (index: {idx})")
|
||||
logger.debug(f"System: RadioMon: VOX checking for trap word '{trap_lower}' in: '{text}' (index: {idx})")
|
||||
if idx != -1:
|
||||
new_text = text[idx + len(trap_clean):].strip()
|
||||
if debugVoxTmsg:
|
||||
logger.debug(f"RadioMon: VOX detected trap word '{trap_lower}' in: '{text}' (remaining: '{new_text}')")
|
||||
logger.debug(f"System: RadioMon: VOX detected trap word '{trap_lower}' in: '{text}' (remaining: '{new_text}')")
|
||||
new_words = new_text.split()
|
||||
if voxEnableCmd:
|
||||
for word in new_words:
|
||||
if word in botMethods:
|
||||
logger.info(f"RadioMon: VOX action '{word}' with '{new_text}'")
|
||||
logger.info(f"System: RadioMon: VOX action '{word}' with '{new_text}'")
|
||||
if word == "joke":
|
||||
return botMethods[word](vox=True)
|
||||
else:
|
||||
return botMethods[word](None, None, None, vox=True)
|
||||
logger.debug(f"RadioMon: VOX returning text after trap word '{trap_lower}': '{new_text}'")
|
||||
logger.debug(f"System: RadioMon: VOX returning text after trap word '{trap_lower}': '{new_text}'")
|
||||
return new_text
|
||||
if debugVoxTmsg:
|
||||
logger.debug(f"RadioMon: VOX no trap word found in: '{text}'")
|
||||
logger.debug(f"System: RadioMon: VOX no trap word found in: '{text}'")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.debug(f"RadioMon: Error in checkVoxTrapWords: {e}")
|
||||
logger.debug(f"System: RadioMon: Error in checkVoxTrapWords: {e}")
|
||||
return None
|
||||
|
||||
async def signalWatcher():
|
||||
@@ -265,7 +298,7 @@ async def signalWatcher():
|
||||
signalStrength = int(get_sig_strength())
|
||||
if signalStrength >= previousStrength and signalStrength > signalDetectionThreshold:
|
||||
message = f"Detected {get_freq_common_name(get_hamlib('f'))} active. S-Meter:{signalStrength}dBm"
|
||||
logger.debug(f"RadioMon: {message}. Waiting for {signalHoldTime} seconds")
|
||||
logger.debug(f"System: RadioMon: {message}. Waiting for {signalHoldTime} seconds")
|
||||
previousStrength = signalStrength
|
||||
signalCycle = 0
|
||||
await asyncio.sleep(signalHoldTime)
|
||||
@@ -285,7 +318,7 @@ async def signalWatcher():
|
||||
async def make_vox_callback(loop, q):
|
||||
def vox_callback(indata, frames, time, status):
|
||||
if status:
|
||||
logger.warning(f"RadioMon: VOX input status: {status}")
|
||||
logger.warning(f"System: RadioMon: VOX input status: {status}")
|
||||
try:
|
||||
loop.call_soon_threadsafe(q.put_nowait, bytes(indata))
|
||||
except asyncio.QueueFull:
|
||||
@@ -298,7 +331,7 @@ async def make_vox_callback(loop, q):
|
||||
loop.call_soon_threadsafe(q.put_nowait, bytes(indata))
|
||||
except asyncio.QueueFull:
|
||||
# If still full, just drop this frame
|
||||
logger.debug("RadioMon: VOX queue full, dropping audio frame")
|
||||
logger.debug("System: RadioMon: VOX queue full, dropping audio frame")
|
||||
except RuntimeError:
|
||||
# Loop may be closed
|
||||
pass
|
||||
@@ -310,7 +343,7 @@ async def voxMonitor():
|
||||
model = voxModel
|
||||
device_info = sd.query_devices(voxInputDevice, 'input')
|
||||
samplerate = 16000
|
||||
logger.debug(f"RadioMon: VOX monitor started on device {device_info['name']} with samplerate {samplerate} using trap words: {voxTrapList if voxOnTrapList else 'none'}")
|
||||
logger.debug(f"System: RadioMon: VOX monitor started on device {device_info['name']} with samplerate {samplerate} using trap words: {voxTrapList if voxOnTrapList else 'none'}")
|
||||
rec = KaldiRecognizer(model, samplerate)
|
||||
loop = asyncio.get_running_loop()
|
||||
callback = await make_vox_callback(loop, q)
|
||||
@@ -337,7 +370,7 @@ async def voxMonitor():
|
||||
|
||||
await asyncio.sleep(0.1)
|
||||
except Exception as e:
|
||||
logger.error(f"RadioMon: Error in VOX monitor: {e}")
|
||||
logger.warning(f"System: RadioMon: Error in VOX monitor: {e}")
|
||||
|
||||
def decode_wsjtx_packet(data):
|
||||
"""Decode WSJT-X UDP packet according to the protocol specification"""
|
||||
@@ -439,7 +472,7 @@ def decode_wsjtx_packet(data):
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
logger.debug(f"RadioMon: Error decoding WSJT-X packet: {e}")
|
||||
logger.debug(f"System: RadioMon: Error decoding WSJT-X packet: {e}")
|
||||
return None
|
||||
|
||||
def check_callsign_match(message, callsigns):
|
||||
@@ -481,7 +514,7 @@ def check_callsign_match(message, callsigns):
|
||||
async def wsjtxMonitor():
|
||||
"""Monitor WSJT-X UDP broadcasts for decode messages"""
|
||||
if not wsjtx_enabled:
|
||||
logger.warning("RadioMon: WSJT-X monitoring called but not enabled")
|
||||
logger.warning("System: RadioMon: WSJT-X monitoring called but not enabled")
|
||||
return
|
||||
|
||||
try:
|
||||
@@ -490,9 +523,9 @@ async def wsjtxMonitor():
|
||||
sock.bind((wsjtx_udp_address, wsjtx_udp_port))
|
||||
sock.setblocking(False)
|
||||
|
||||
logger.info(f"RadioMon: WSJT-X UDP listener started on {wsjtx_udp_address}:{wsjtx_udp_port}")
|
||||
logger.info(f"System: RadioMon: WSJT-X UDP listener started on {wsjtx_udp_address}:{wsjtx_udp_port}")
|
||||
if watched_callsigns:
|
||||
logger.info(f"RadioMon: Watching for callsigns: {', '.join(watched_callsigns)}")
|
||||
logger.info(f"System: RadioMon: Watching for callsigns: {', '.join(watched_callsigns)}")
|
||||
|
||||
while True:
|
||||
try:
|
||||
@@ -507,29 +540,29 @@ async def wsjtxMonitor():
|
||||
# Check if message contains watched callsigns
|
||||
if check_callsign_match(message, watched_callsigns):
|
||||
msg_text = f"WSJT-X {mode}: {message} (SNR: {snr:+d}dB)"
|
||||
logger.info(f"RadioMon: {msg_text}")
|
||||
logger.info(f"System: RadioMon: {msg_text}")
|
||||
wsjtxMsgQueue.append(msg_text)
|
||||
|
||||
except BlockingIOError:
|
||||
# No data available
|
||||
await asyncio.sleep(0.1)
|
||||
except Exception as e:
|
||||
logger.debug(f"RadioMon: Error in WSJT-X monitor loop: {e}")
|
||||
logger.debug(f"System: RadioMon: Error in WSJT-X monitor loop: {e}")
|
||||
await asyncio.sleep(1)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"RadioMon: Error starting WSJT-X monitor: {e}")
|
||||
logger.warning(f"System: RadioMon: Error starting WSJT-X monitor: {e}")
|
||||
|
||||
async def js8callMonitor():
|
||||
"""Monitor JS8Call TCP API for messages"""
|
||||
if not js8call_enabled:
|
||||
logger.warning("RadioMon: JS8Call monitoring called but not enabled")
|
||||
logger.warning("System: RadioMon: JS8Call monitoring called but not enabled")
|
||||
return
|
||||
|
||||
try:
|
||||
logger.info(f"RadioMon: JS8Call TCP listener connecting to {js8call_tcp_address}:{js8call_tcp_port}")
|
||||
logger.info(f"System: RadioMon: JS8Call TCP listener connecting to {js8call_tcp_address}:{js8call_tcp_port}")
|
||||
if watched_callsigns:
|
||||
logger.info(f"RadioMon: Watching for callsigns: {', '.join(watched_callsigns)}")
|
||||
logger.info(f"System: RadioMon: Watching for callsigns: {', '.join(watched_callsigns)}")
|
||||
|
||||
while True:
|
||||
try:
|
||||
@@ -539,14 +572,14 @@ async def js8callMonitor():
|
||||
sock.connect((js8call_tcp_address, js8call_tcp_port))
|
||||
sock.setblocking(False)
|
||||
|
||||
logger.info("RadioMon: Connected to JS8Call API")
|
||||
logger.info("System: RadioMon: Connected to JS8Call API")
|
||||
|
||||
buffer = ""
|
||||
while True:
|
||||
try:
|
||||
data = sock.recv(4096)
|
||||
if not data:
|
||||
logger.warning("RadioMon: JS8Call connection closed")
|
||||
logger.warning("System: RadioMon: JS8Call connection closed")
|
||||
break
|
||||
|
||||
buffer += data.decode('utf-8', errors='ignore')
|
||||
@@ -570,34 +603,34 @@ async def js8callMonitor():
|
||||
|
||||
if text and check_callsign_match(text, watched_callsigns):
|
||||
msg_text = f"JS8Call from {from_call}: {text} (SNR: {snr:+d}dB)"
|
||||
logger.info(f"RadioMon: {msg_text}")
|
||||
logger.info(f"System: RadioMon: {msg_text}")
|
||||
js8callMsgQueue.append(msg_text)
|
||||
|
||||
except json.JSONDecodeError:
|
||||
logger.debug(f"RadioMon: Invalid JSON from JS8Call: {line[:100]}")
|
||||
logger.debug(f"System: RadioMon: Invalid JSON from JS8Call: {line[:100]}")
|
||||
except Exception as e:
|
||||
logger.debug(f"RadioMon: Error processing JS8Call message: {e}")
|
||||
logger.debug(f"System: RadioMon: Error processing JS8Call message: {e}")
|
||||
|
||||
except BlockingIOError:
|
||||
await asyncio.sleep(0.1)
|
||||
except socket.timeout:
|
||||
await asyncio.sleep(0.1)
|
||||
except Exception as e:
|
||||
logger.debug(f"RadioMon: Error in JS8Call receive loop: {e}")
|
||||
logger.debug(f"System: RadioMon: Error in JS8Call receive loop: {e}")
|
||||
break
|
||||
|
||||
sock.close()
|
||||
logger.warning("RadioMon: JS8Call connection lost, reconnecting in 5s...")
|
||||
logger.warning("System: RadioMon: JS8Call connection lost, reconnecting in 5s...")
|
||||
await asyncio.sleep(5)
|
||||
|
||||
except socket.timeout:
|
||||
logger.warning("RadioMon: JS8Call connection timeout, retrying in 5s...")
|
||||
logger.warning("System: RadioMon: JS8Call connection timeout, retrying in 5s...")
|
||||
await asyncio.sleep(5)
|
||||
except Exception as e:
|
||||
logger.warning(f"RadioMon: Error connecting to JS8Call: {e}")
|
||||
logger.warning(f"System: RadioMon: Error connecting to JS8Call: {e}")
|
||||
await asyncio.sleep(10)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"RadioMon: Error starting JS8Call monitor: {e}")
|
||||
logger.warning(f"System: RadioMon: Error starting JS8Call monitor: {e}")
|
||||
|
||||
# end of file
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
# rss feed module for meshing-around 2025
|
||||
from modules.log import logger
|
||||
from modules.settings import rssFeedURL, rssFeedNames, rssMaxItems, rssTruncate, urlTimeoutSeconds, ERROR_FETCHING_DATA
|
||||
from modules.settings import rssFeedURL, rssFeedNames, rssMaxItems, rssTruncate, urlTimeoutSeconds, ERROR_FETCHING_DATA, newsAPI_KEY, newsAPIsort
|
||||
import urllib.request
|
||||
import xml.etree.ElementTree as ET
|
||||
import html
|
||||
from html.parser import HTMLParser
|
||||
import bs4 as bs
|
||||
import requests
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
# Common User-Agent for all RSS requests
|
||||
COMMON_USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
|
||||
@@ -136,3 +138,47 @@ def get_rss_feed(msg):
|
||||
logger.error(f"Error fetching RSS feed from {feed_url}: {e}")
|
||||
return ERROR_FETCHING_DATA
|
||||
|
||||
def get_newsAPI(user_search="meshtastic", message_from_id=None, deviceID=None, isDM=False):
|
||||
# Fetch news from NewsAPI.org
|
||||
user_search = user_search.strip()
|
||||
# check api_throttle
|
||||
from modules.system import api_throttle
|
||||
check_throttle = api_throttle(message_from_id, deviceID, apiName="NewsAPI")
|
||||
if check_throttle:
|
||||
return check_throttle # Return throttle message if applicable
|
||||
|
||||
if user_search.lower().startswith("latest"):
|
||||
user_search = user_search[6:].strip()
|
||||
if not user_search:
|
||||
user_search = "meshtastic"
|
||||
try:
|
||||
last_week = datetime.now() - timedelta(days=7)
|
||||
newsAPIurl = (
|
||||
f"https://newsapi.org/v2/everything?"
|
||||
f"q={user_search}&language=en&from={last_week.strftime('%Y-%m-%d')}&sortBy={newsAPIsort}shedAt&pageSize=5&apiKey={newsAPI_KEY}"
|
||||
)
|
||||
|
||||
response = requests.get(newsAPIurl, headers={"User-Agent": COMMON_USER_AGENT}, timeout=urlTimeoutSeconds)
|
||||
news_data = response.json()
|
||||
|
||||
if news_data.get("status") != "ok":
|
||||
error_message = news_data.get("message", "Unknown error")
|
||||
logger.error(f"NewsAPI error: {error_message}")
|
||||
return ERROR_FETCHING_DATA
|
||||
logger.debug(f"System: NewsAPI Searching for '{user_search}' got {news_data.get('totalResults', 0)} results")
|
||||
articles = news_data.get("articles", [])[:3]
|
||||
news_list = []
|
||||
for article in articles:
|
||||
title = article.get("title", "No Title")
|
||||
url = article.get("url", "")
|
||||
description = article.get("description", '')
|
||||
news_list.append(f"📰{title}\n{description}")
|
||||
|
||||
# Make a nice newspaper style output
|
||||
msg = f"🗞️:"
|
||||
for item in news_list:
|
||||
msg += item + "\n\n"
|
||||
return msg.strip()
|
||||
except Exception as e:
|
||||
logger.error(f"System: NewsAPI fetching news: {e}")
|
||||
return ERROR_FETCHING_DATA
|
||||
@@ -174,6 +174,12 @@ def setup_scheduler(
|
||||
lambda: send_message(handle_sun(0, schedulerInterface, schedulerChannel), schedulerChannel, 0, schedulerInterface)
|
||||
)
|
||||
logger.debug(f"System: Starting the scheduler to send solar information at {schedulerTime} on Device:{schedulerInterface} Channel:{schedulerChannel}")
|
||||
elif 'verse' in schedulerValue:
|
||||
from modules.filemon import read_verse
|
||||
schedule.every().day.at(schedulerTime).do(
|
||||
lambda: send_message(read_verse(), schedulerChannel, 0, schedulerInterface)
|
||||
)
|
||||
logger.debug(f"System: Starting the verse scheduler to send a verse at {schedulerTime} on Device:{schedulerInterface} Channel:{schedulerChannel}")
|
||||
elif 'custom' in schedulerValue:
|
||||
try:
|
||||
from modules.custom_scheduler import setup_custom_schedules # type: ignore
|
||||
|
||||
@@ -32,8 +32,11 @@ cmdHistory = [] # list to hold the command history for lheard and history comman
|
||||
msg_history = [] # list to hold the message history for the messages command
|
||||
max_bytes = 200 # Meshtastic has ~237 byte limit, use conservative 200 bytes for message content
|
||||
voxMsgQueue = [] # queue for VOX detected messages
|
||||
tts_read_queue = [] # queue for TTS messages
|
||||
wsjtxMsgQueue = [] # queue for WSJT-X detected messages
|
||||
js8callMsgQueue = [] # queue for JS8Call detected messages
|
||||
autoBanlist = [] # list of nodes to autoban for repeated offenses
|
||||
apiThrottleList = [] # list of nodes to throttle API requests for repeated offenses
|
||||
# Game trackers
|
||||
surveyTracker = [] # Survey game tracker
|
||||
tictactoeTracker = [] # TicTacToe game tracker
|
||||
@@ -47,6 +50,7 @@ lemonadeTracker = [] # Lemonade Stand game tracker
|
||||
dwPlayerTracker = [] # DopeWars player tracker
|
||||
jackTracker = [] # Jack game tracker
|
||||
mindTracker = [] # Mastermind (mmind) game tracker
|
||||
battleshipTracker = [] # Battleship game tracker
|
||||
|
||||
# Memory Management Constants
|
||||
MAX_MSG_HISTORY = 250
|
||||
@@ -80,7 +84,7 @@ if 'sentry' not in config:
|
||||
config.write(open(config_file, 'w'))
|
||||
|
||||
if 'location' not in config:
|
||||
config['location'] = {'enabled': 'True', 'lat': '48.50', 'lon': '-123.0', 'UseMeteoWxAPI': 'False', 'useMetric': 'False', 'NOAAforecastDuration': '4', 'NOAAalertCount': '2', 'NOAAalertsEnabled': 'True', 'wxAlertBroadcastEnabled': 'False', 'wxAlertBroadcastChannel': '2', 'repeaterLookup': 'rbook'}
|
||||
config['location'] = {'enabled': 'True', 'lat': '48.50', 'lon': '-123.0', 'fuzzConfigLocation': 'True',}
|
||||
config.write(open(config_file, 'w'))
|
||||
|
||||
if 'bbs' not in config:
|
||||
@@ -252,6 +256,7 @@ try:
|
||||
dad_jokes_enabled = config['general'].getboolean('DadJokes', False)
|
||||
dad_jokes_emojiJokes = config['general'].getboolean('DadJokesEmoji', False)
|
||||
bee_enabled = config['general'].getboolean('bee', False) # 🐝 off by default undocumented
|
||||
bible_enabled = config['general'].getboolean('verse', False) # verse command
|
||||
solar_conditions_enabled = config['general'].getboolean('spaceWeather', True)
|
||||
wikipedia_enabled = config['general'].getboolean('wikipedia', False)
|
||||
use_kiwix_server = config['general'].getboolean('useKiwixServer', False)
|
||||
@@ -275,12 +280,10 @@ try:
|
||||
rssMaxItems = config['general'].getint('rssMaxItems', 3) # default 3 items
|
||||
rssTruncate = config['general'].getint('rssTruncate', 100) # default 100 characters
|
||||
rssFeedNames = config['general'].get('rssFeedNames', 'default,arrl').split(',')
|
||||
|
||||
# emergency response
|
||||
emergency_responder_enabled = config['emergencyHandler'].getboolean('enabled', False)
|
||||
emergency_responder_alert_channel = config['emergencyHandler'].getint('alert_channel', 2) # default 2
|
||||
emergency_responder_alert_interface = config['emergencyHandler'].getint('alert_interface', 1) # default 1
|
||||
emergency_responder_email = config['emergencyHandler'].get('email', '').split(',')
|
||||
newsAPI_KEY = config['general'].get('newsAPI_KEY', '') # default empty
|
||||
newsAPIregion = config['general'].get('newsAPIregion', 'us') # default us
|
||||
enable_headlines = config['general'].getboolean('enableNewsAPI', False) # default False
|
||||
newsAPIsort = config['general'].get('sort_by', 'relevancy') # default publishedAt
|
||||
|
||||
# sentry
|
||||
sentry_enabled = config['sentry'].getboolean('SentryEnabled', False) # default False
|
||||
@@ -315,34 +318,55 @@ try:
|
||||
n2yoAPIKey = config['location'].get('n2yoAPIKey', '') # default empty
|
||||
satListConfig = config['location'].get('satList', '25544').split(',') # default 25544 ISS
|
||||
riverListDefault = config['location'].get('riverList', '').split(',') # default None
|
||||
useTidePredict = config['location'].getboolean('useTidePredict', False) # default False use NOAA
|
||||
coastalEnabled = config['location'].getboolean('coastalEnabled', False) # default False
|
||||
myCoastalZone = config['location'].get('myCoastalZone', None) # default None
|
||||
coastalForecastDays = config['location'].getint('coastalForecastDays', 3) # default 3 days
|
||||
|
||||
# location alerts
|
||||
emergencyAlertBrodcastEnabled = config['location'].getboolean('eAlertBroadcastEnabled', False) # default False
|
||||
alert_duration = config['location'].getint('alertDuration', 20) # default 20 minutes
|
||||
if alert_duration < 10: # the API calls need throttle time
|
||||
alert_duration = 10
|
||||
eAlertBroadcastEnabled = config['location'].getboolean('eAlertBroadcastEnabled', False) # old deprecated name
|
||||
ipawsAlertEnabled = config['location'].getboolean('ipawsAlertEnabled', False) # default False new ^
|
||||
# Keep both in sync for backward compatibility
|
||||
if eAlertBroadcastEnabled or ipawsAlertEnabled:
|
||||
eAlertBroadcastEnabled = True
|
||||
ipawsAlertEnabled = True
|
||||
wxAlertBroadcastEnabled = config['location'].getboolean('wxAlertBroadcastEnabled', False) # default False
|
||||
volcanoAlertBroadcastEnabled = config['location'].getboolean('volcanoAlertBroadcastEnabled', False) # default False
|
||||
enableGBalerts = config['location'].getboolean('enableGBalerts', False) # default False
|
||||
enableDEalerts = config['location'].getboolean('enableDEalerts', False) # default False
|
||||
wxAlertsEnabled = config['location'].getboolean('NOAAalertsEnabled', True) # default True
|
||||
|
||||
ignoreEASenable = config['location'].getboolean('ignoreEASenable', False) # default False
|
||||
ignoreEASwords = config['location'].get('ignoreEASwords', 'test,advisory').split(',') # default test,advisory
|
||||
myRegionalKeysDE = config['location'].get('myRegionalKeysDE', '110000000000').split(',') # default city Berlin
|
||||
ignoreFEMAenable = config['location'].getboolean('ignoreFEMAenable', True) # default True
|
||||
ignoreFEMAwords = config['location'].get('ignoreFEMAwords', 'test,exercise').split(',') # default test,exercise
|
||||
ignoreUSGSEnable = config['location'].getboolean('ignoreVolcanoEnable', False) # default False
|
||||
ignoreUSGSWords = config['location'].get('ignoreVolcanoWords', 'test,advisory').split(',') # default test,advisory
|
||||
|
||||
forecastDuration = config['location'].getint('NOAAforecastDuration', 4) # NOAA forcast days
|
||||
numWxAlerts = config['location'].getint('NOAAalertCount', 2) # default 2 alerts
|
||||
enableExtraLocationWx = config['location'].getboolean('enableExtraLocationWx', False) # default False
|
||||
myStateFIPSList = config['location'].get('myFIPSList', '').split(',') # default empty
|
||||
mySAMEList = config['location'].get('mySAMEList', '').split(',') # default empty
|
||||
ignoreFEMAenable = config['location'].getboolean('ignoreFEMAenable', True) # default True
|
||||
ignoreFEMAwords = config['location'].get('ignoreFEMAwords', 'test,exercise').split(',') # default test,exercise
|
||||
wxAlertBroadcastChannel = config['location'].get('wxAlertBroadcastCh', '2').split(',') # default Channel 2
|
||||
emergencyAlertBroadcastCh = config['location'].get('eAlertBroadcastCh', '2').split(',') # default Channel 2
|
||||
volcanoAlertBroadcastEnabled = config['location'].getboolean('volcanoAlertBroadcastEnabled', False) # default False
|
||||
volcanoAlertBroadcastChannel = config['location'].get('volcanoAlertBroadcastCh', '2').split(',') # default Channel 2
|
||||
ignoreUSGSEnable = config['location'].getboolean('ignoreVolcanoEnable', False) # default False
|
||||
ignoreUSGSWords = config['location'].get('ignoreVolcanoWords', 'test,advisory').split(',') # default test,advisory
|
||||
myRegionalKeysDE = config['location'].get('myRegionalKeysDE', '110000000000').split(',') # default city Berlin
|
||||
eAlertBroadcastChannel = config['location'].get('eAlertBroadcastCh', '').split(',') # default empty
|
||||
|
||||
# any US alerts enabled
|
||||
usAlerts = (
|
||||
ipawsAlertEnabled or
|
||||
wxAlertBroadcastEnabled or
|
||||
volcanoAlertBroadcastEnabled or
|
||||
eAlertBroadcastEnabled
|
||||
)
|
||||
|
||||
# emergency response
|
||||
emergency_responder_enabled = config['emergencyHandler'].getboolean('enabled', False)
|
||||
emergency_responder_alert_channel = config['emergencyHandler'].getint('alert_channel', 2) # default 2
|
||||
emergency_responder_alert_interface = config['emergencyHandler'].getint('alert_interface', 1) # default 1
|
||||
emergency_responder_email = config['emergencyHandler'].get('email', '').split(',')
|
||||
|
||||
|
||||
# bbs
|
||||
bbs_enabled = config['bbs'].getboolean('enabled', False)
|
||||
bbsdb = config['bbs'].get('bbsdb', 'data/bbsdb.pkl')
|
||||
@@ -356,6 +380,7 @@ try:
|
||||
checklist_enabled = config['checklist'].getboolean('enabled', False)
|
||||
checklist_db = config['checklist'].get('checklist_db', 'data/checklist.db')
|
||||
reverse_in_out = config['checklist'].getboolean('reverse_in_out', False)
|
||||
checklist_auto_approve = config['checklist'].getboolean('auto_approve', True) # default True
|
||||
|
||||
# qrz hello
|
||||
qrz_hello_enabled = config['qrz'].getboolean('enabled', False)
|
||||
@@ -418,6 +443,9 @@ try:
|
||||
voxOnTrapList = config['radioMon'].getboolean('voxOnTrapList', False) # default False
|
||||
voxTrapList = config['radioMon'].get('voxTrapList', 'chirpy').split(',') # default chirpy
|
||||
voxEnableCmd = config['radioMon'].getboolean('voxEnableCmd', True) # default True
|
||||
meshagesTTS = config['radioMon'].getboolean('meshagesTTS', False) # default False
|
||||
ttsChannels = config['radioMon'].get('ttsChannels', '2').split(',') # default Channel 2
|
||||
ttsnoWelcome = config['radioMon'].getboolean('ttsnoWelcome', False) # default False
|
||||
|
||||
# WSJT-X and JS8Call monitoring
|
||||
wsjtx_detection_enabled = config['radioMon'].getboolean('wsjtxDetectionEnabled', False) # default WSJT-X detection disabled
|
||||
@@ -434,10 +462,13 @@ try:
|
||||
read_news_enabled = config['fileMon'].getboolean('enable_read_news', False) # default disabled
|
||||
news_file_path = config['fileMon'].get('news_file_path', '../data/news.txt') # default ../data/news.txt
|
||||
news_random_line_only = config['fileMon'].getboolean('news_random_line', False) # default False
|
||||
news_block_mode = config['fileMon'].getboolean('news_block_mode', False) # default False
|
||||
if news_random_line_only and news_block_mode:
|
||||
news_random_line_only = False
|
||||
enable_runShellCmd = config['fileMon'].getboolean('enable_runShellCmd', False) # default False
|
||||
allowXcmd = config['fileMon'].getboolean('allowXcmd', False) # default False
|
||||
xCmd2factorEnabled = config['fileMon'].getboolean('2factor_enabled', True) # default True
|
||||
xCmd2factor_timeout = config['fileMon'].getint('2factor_timeout', 100) # default 100 seconds
|
||||
xCmd2factorEnabled = config['fileMon'].getboolean('twoFactor_enabled', True) # default True
|
||||
xCmd2factor_timeout = config['fileMon'].getint('twoFactor_timeout', 100) # default 100 seconds
|
||||
|
||||
# games
|
||||
game_hop_limit = config['games'].getint('game_hop_limit', 5) # default 5 hops
|
||||
@@ -457,6 +488,7 @@ try:
|
||||
surveyRecordID = config['games'].getboolean('surveyRecordID', True)
|
||||
surveyRecordLocation = config['games'].getboolean('surveyRecordLocation', True)
|
||||
wordOfTheDay = config['games'].getboolean('wordOfTheDay', True)
|
||||
battleship_enabled = config['games'].getboolean('battleShip', True)
|
||||
|
||||
# messaging settings
|
||||
responseDelay = config['messagingSettings'].getfloat('responseDelay', 0.7) # default 0.7
|
||||
@@ -471,6 +503,14 @@ try:
|
||||
noisyNodeLogging = config['messagingSettings'].getboolean('noisyNodeLogging', False) # default False
|
||||
logMetaStats = config['messagingSettings'].getboolean('logMetaStats', True) # default True
|
||||
noisyTelemetryLimit = config['messagingSettings'].getint('noisyTelemetryLimit', 5) # default 5 packets
|
||||
autoBanEnabled = config['messagingSettings'].getboolean('autoBanEnabled', False) # default False
|
||||
autoBanThreshold = config['messagingSettings'].getint('autoBanThreshold', 5) # default 5 offenses
|
||||
autoBanTimeframe = config['messagingSettings'].getint('autoBanTimeframe', 3600) # default 1 hour in seconds
|
||||
apiThrottleValue = config['messagingSettings'].getint('apiThrottleValue', 20) # default 20 requests
|
||||
|
||||
# data persistence settings
|
||||
dataPersistence_enabled = config.getboolean('dataPersistence', 'enabled', fallback=True) # default True
|
||||
dataPersistence_interval = config.getint('dataPersistence', 'interval', fallback=300) # default 300 seconds (5 minutes)
|
||||
except Exception as e:
|
||||
print(f"System: Error reading config file: {e}")
|
||||
print("System: Check the config.ini against config.template file for missing sections or values.")
|
||||
|
||||
105
modules/space.py
105
modules/space.py
@@ -37,19 +37,27 @@ def hf_band_conditions():
|
||||
def solar_conditions():
|
||||
# radio related solar conditions from hamsql.com
|
||||
solar_cond = ""
|
||||
solar_cond = requests.get("https://www.hamqsl.com/solarxml.php", timeout=urlTimeoutSeconds)
|
||||
if(solar_cond.ok):
|
||||
solar_xml = xml.dom.minidom.parseString(solar_cond.text)
|
||||
for i in solar_xml.getElementsByTagName("solardata"):
|
||||
solar_a_index = i.getElementsByTagName("aindex")[0].childNodes[0].data
|
||||
solar_k_index = i.getElementsByTagName("kindex")[0].childNodes[0].data
|
||||
solar_xray = i.getElementsByTagName("xray")[0].childNodes[0].data
|
||||
solar_flux = i.getElementsByTagName("solarflux")[0].childNodes[0].data
|
||||
sunspots = i.getElementsByTagName("sunspots")[0].childNodes[0].data
|
||||
signalnoise = i.getElementsByTagName("signalnoise")[0].childNodes[0].data
|
||||
solar_cond = "A-Index: " + solar_a_index + "\nK-Index: " + solar_k_index + "\nSunspots: " + sunspots + "\nX-Ray Flux: " + solar_xray + "\nSolar Flux: " + solar_flux + "\nSignal Noise: " + signalnoise
|
||||
else:
|
||||
logger.error("Solar: Error fetching solar conditions")
|
||||
try:
|
||||
solar_cond = requests.get("https://www.hamqsl.com/solarxml.php", timeout=urlTimeoutSeconds)
|
||||
if solar_cond.ok:
|
||||
try:
|
||||
solar_xml = xml.dom.minidom.parseString(solar_cond.text)
|
||||
except Exception as e:
|
||||
logger.error(f"Solar: XML parse error: {e}")
|
||||
return ERROR_FETCHING_DATA
|
||||
for i in solar_xml.getElementsByTagName("solardata"):
|
||||
solar_a_index = i.getElementsByTagName("aindex")[0].childNodes[0].data
|
||||
solar_k_index = i.getElementsByTagName("kindex")[0].childNodes[0].data
|
||||
solar_xray = i.getElementsByTagName("xray")[0].childNodes[0].data
|
||||
solar_flux = i.getElementsByTagName("solarflux")[0].childNodes[0].data
|
||||
sunspots = i.getElementsByTagName("sunspots")[0].childNodes[0].data
|
||||
signalnoise = i.getElementsByTagName("signalnoise")[0].childNodes[0].data
|
||||
solar_cond = "A-Index: " + solar_a_index + "\nK-Index: " + solar_k_index + "\nSunspots: " + sunspots + "\nX-Ray Flux: " + solar_xray + "\nSolar Flux: " + solar_flux + "\nSignal Noise: " + signalnoise
|
||||
else:
|
||||
logger.error("Solar: Error fetching solar conditions")
|
||||
solar_cond = ERROR_FETCHING_DATA
|
||||
except Exception as e:
|
||||
logger.error(f"Solar: Exception fetching or parsing: {e}")
|
||||
solar_cond = ERROR_FETCHING_DATA
|
||||
return solar_cond
|
||||
|
||||
@@ -68,6 +76,77 @@ def drap_xray_conditions():
|
||||
xray_flux = ERROR_FETCHING_DATA
|
||||
return xray_flux
|
||||
|
||||
def get_noaa_scales_summary():
|
||||
"""
|
||||
Show latest observed, 24-hour max, and predicted geomagnetic, storm, and blackout data.
|
||||
"""
|
||||
try:
|
||||
response = requests.get("https://services.swpc.noaa.gov/products/noaa-scales.json", timeout=urlTimeoutSeconds)
|
||||
if response.ok:
|
||||
data = response.json()
|
||||
today = datetime.utcnow().date()
|
||||
latest_entry = None
|
||||
latest_dt = None
|
||||
max_g_today = None
|
||||
max_g_scale = -1
|
||||
predicted_g = None
|
||||
predicted_g_scale = -1
|
||||
|
||||
# Find latest observed and 24-hour max for today
|
||||
for entry in data.values():
|
||||
date_str = entry.get("DateStamp")
|
||||
time_str = entry.get("TimeStamp")
|
||||
if date_str and time_str:
|
||||
try:
|
||||
dt = datetime.strptime(f"{date_str} {time_str}", "%Y-%m-%d %H:%M:%S")
|
||||
g = entry.get("G", {})
|
||||
g_scale = int(g.get("Scale", -1)) if g.get("Scale") else -1
|
||||
# Latest observed for today
|
||||
if dt.date() == today:
|
||||
if latest_dt is None or dt > latest_dt:
|
||||
latest_dt = dt
|
||||
latest_entry = entry
|
||||
# 24-hour max for today
|
||||
if g_scale > max_g_scale:
|
||||
max_g_scale = g_scale
|
||||
max_g_today = entry
|
||||
# Predicted (future)
|
||||
elif dt.date() > today:
|
||||
if g_scale > predicted_g_scale:
|
||||
predicted_g_scale = g_scale
|
||||
predicted_g = entry
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
def format_entry(label, entry):
|
||||
if not entry:
|
||||
return f"{label}: No data"
|
||||
g = entry.get("G", {})
|
||||
s = entry.get("S", {})
|
||||
r = entry.get("R", {})
|
||||
parts = [f"{label} {g.get('Text', 'N/A')} (G:{g.get('Scale', 'N/A')})"]
|
||||
|
||||
# Only show storm if it's happening
|
||||
if s.get("Text") and s.get("Text") != "none":
|
||||
parts.append(f"Currently:{s.get('Text')} (S:{s.get('Scale', 'N/A')})")
|
||||
|
||||
# Only show blackout if it's not "none" or scale is not 0
|
||||
if r.get("Text") and r.get("Text") != "none" and r.get("Scale") not in [None, "0", 0]:
|
||||
parts.append(f"RF Blackout:{r.get('Text')} (R:{r.get('Scale', 'N/A')})")
|
||||
|
||||
return "\n".join(parts)
|
||||
|
||||
output = []
|
||||
#output.append(format_entry("Latest Observed", latest_entry))
|
||||
output.append(format_entry("24hrMax:", max_g_today))
|
||||
output.append(format_entry("Predicted:", predicted_g))
|
||||
return "\n".join(output)
|
||||
else:
|
||||
return NO_ALERTS
|
||||
except Exception as e:
|
||||
logger.warning(f"Error fetching services.swpc.noaa.gov: {e}")
|
||||
return ERROR_FETCHING_DATA
|
||||
|
||||
def get_sun(lat=0, lon=0):
|
||||
# get sunrise and sunset times using callers location or default
|
||||
obs = ephem.Observer()
|
||||
|
||||
@@ -114,7 +114,7 @@ if location_enabled:
|
||||
help_message = help_message + ", howtall"
|
||||
|
||||
# NOAA alerts needs location module
|
||||
if wxAlertBroadcastEnabled or emergencyAlertBrodcastEnabled or volcanoAlertBroadcastEnabled:
|
||||
if wxAlertBroadcastEnabled or ipawsAlertEnabled or volcanoAlertBroadcastEnabled or eAlertBroadcastEnabled: #eAlertBroadcastEnabled depricated
|
||||
from modules.locationdata import * # from the spudgunman/meshing-around repo
|
||||
# limited subset, this should be done better but eh..
|
||||
trap_list = trap_list + ("wx", "wxa", "wxalert", "ea", "ealert", "valert")
|
||||
@@ -125,10 +125,6 @@ if coastalEnabled:
|
||||
from modules.locationdata import * # from the spudgunman/meshing-around repo
|
||||
trap_list = trap_list + ("mwx","tide",)
|
||||
help_message = help_message + ", mwx, tide"
|
||||
if useTidePredict:
|
||||
from modules import xtide
|
||||
trap_list = trap_list + ("tide",)
|
||||
help_message = help_message + ", tide"
|
||||
|
||||
# BBS Configuration
|
||||
if bbs_enabled:
|
||||
@@ -157,10 +153,15 @@ if wikipedia_enabled or use_kiwix_server:
|
||||
help_message = help_message + ", wiki"
|
||||
|
||||
# RSS Feed Configuration
|
||||
if rssEnable:
|
||||
from modules.rss import * # from the spudgunman/meshing-around repo
|
||||
trap_list = trap_list + ("readrss",)
|
||||
help_message = help_message + ", readrss"
|
||||
if rssEnable or enable_headlines:
|
||||
if rssEnable:
|
||||
from modules.rss import get_rss_feed
|
||||
trap_list = trap_list + ("readrss",)
|
||||
help_message = help_message + ", readrss"
|
||||
if enable_headlines:
|
||||
from modules.rss import get_newsAPI
|
||||
trap_list = trap_list + ("latest",)
|
||||
help_message = help_message + ", latest"
|
||||
|
||||
# LLM Configuration
|
||||
if llm_enabled:
|
||||
@@ -213,7 +214,8 @@ if hamtest_enabled:
|
||||
games_enabled = True
|
||||
|
||||
if tictactoe_enabled:
|
||||
from modules.games.tictactoe import * # from the spudgunman/meshing-around repo
|
||||
from modules.games.tictactoe import TicTacToe # from the spudgunman/meshing-around repo
|
||||
tictactoe = TicTacToe(display_module=None)
|
||||
trap_list = trap_list + ("tictactoe","tic-tac-toe",)
|
||||
|
||||
if quiz_enabled:
|
||||
@@ -233,6 +235,11 @@ if wordOfTheDay:
|
||||
theWordOfTheDay = WordOfTheDayGame()
|
||||
# this runs in background and wont enable other games
|
||||
|
||||
if battleship_enabled:
|
||||
from modules.games.battleship import playBattleship # from the spudgunman/meshing-around repo
|
||||
trap_list = trap_list + ("battleship",)
|
||||
games_enabled = True
|
||||
|
||||
# Games Configuration
|
||||
if games_enabled is True:
|
||||
help_message = help_message + ", games"
|
||||
@@ -260,6 +267,8 @@ if games_enabled is True:
|
||||
gamesCmdList += "hamTest, "
|
||||
if tictactoe_enabled:
|
||||
gamesCmdList += "ticTacToe, "
|
||||
if battleship_enabled:
|
||||
gamesCmdList += "battleship, "
|
||||
gamesCmdList = gamesCmdList[:-2] # remove the last comma
|
||||
else:
|
||||
gamesCmdList = ""
|
||||
@@ -292,13 +301,6 @@ if inventory_enabled:
|
||||
trap_list = trap_list + trap_list_inventory # items item, itemlist, itemsell, etc.
|
||||
help_message = help_message + ", item, cart"
|
||||
|
||||
# Radio Monitor Configuration
|
||||
if radio_detection_enabled:
|
||||
from modules.radio import * # from the spudgunman/meshing-around repo
|
||||
|
||||
if voxDetectionEnabled:
|
||||
from modules.radio import * # from the spudgunman/meshing-around repo
|
||||
|
||||
# File Monitor Configuration
|
||||
if file_monitor_enabled or read_news_enabled or bee_enabled or enable_runShellCmd or cmdShellSentryAlerts:
|
||||
from modules.filemon import * # from the spudgunman/meshing-around repo
|
||||
@@ -308,6 +310,9 @@ if file_monitor_enabled or read_news_enabled or bee_enabled or enable_runShellCm
|
||||
# Bee Configuration uses file monitor module
|
||||
if bee_enabled:
|
||||
trap_list = trap_list + ("🐝",)
|
||||
if bible_enabled:
|
||||
trap_list = trap_list + ("verse",)
|
||||
help_message = help_message + ", verse"
|
||||
# x: command for shell access
|
||||
if enable_runShellCmd and allowXcmd:
|
||||
trap_list = trap_list + ("x:",)
|
||||
@@ -326,24 +331,6 @@ if ble_count > 1:
|
||||
logger.critical(f"System: Multiple BLE interfaces detected. Only one BLE interface is allowed. Exiting")
|
||||
exit()
|
||||
|
||||
def xor_hash(data: bytes) -> int:
|
||||
"""Compute an XOR hash from bytes."""
|
||||
result = 0
|
||||
for char in data:
|
||||
result ^= char
|
||||
return result
|
||||
|
||||
def generate_hash(name: str, key: str) -> int:
|
||||
"""generate the channel number by hashing the channel name and psk"""
|
||||
if key == "AQ==":
|
||||
key = "1PG7OiApB1nwvP+rz05pAQ=="
|
||||
replaced_key = key.replace("-", "+").replace("_", "/")
|
||||
key_bytes = base64.b64decode(replaced_key.encode("utf-8"))
|
||||
h_name = xor_hash(bytes(name, "utf-8"))
|
||||
h_key = xor_hash(key_bytes)
|
||||
result: int = h_name ^ h_key
|
||||
return result
|
||||
|
||||
# Initialize interfaces
|
||||
logger.debug(f"System: Initializing Interfaces")
|
||||
interface1 = interface2 = interface3 = interface4 = interface5 = interface6 = interface7 = interface8 = interface9 = None
|
||||
@@ -383,6 +370,9 @@ for i in range(1, 10):
|
||||
logger.critical(f"System: abort. Initializing Interface{i} {e}")
|
||||
exit()
|
||||
|
||||
# Get my node numbers for global use
|
||||
my_node_ids = [globals().get(f'myNodeNum{i}') for i in range(1, 10)]
|
||||
|
||||
# Get the node number of the devices, check if the devices are connected meshtastic devices
|
||||
for i in range(1, 10):
|
||||
if globals().get(f'interface{i}') and globals().get(f'interface{i}_enabled'):
|
||||
@@ -395,44 +385,90 @@ for i in range(1, 10):
|
||||
globals()[f'myNodeNum{i}'] = 777
|
||||
|
||||
# Fetch channel list from each device
|
||||
channel_list = []
|
||||
for i in range(1, 10):
|
||||
if globals().get(f'interface{i}') and globals().get(f'interface{i}_enabled'):
|
||||
_channel_cache = None
|
||||
|
||||
def build_channel_cache(force_refresh: bool = False):
|
||||
"""
|
||||
Build and cache channel_list from interfaces once (or when forced).
|
||||
"""
|
||||
global _channel_cache
|
||||
if _channel_cache is not None and not force_refresh:
|
||||
return _channel_cache
|
||||
|
||||
cache = []
|
||||
for i in range(1, 10):
|
||||
if not globals().get(f'interface{i}') or not globals().get(f'interface{i}_enabled'):
|
||||
continue
|
||||
try:
|
||||
node = globals()[f'interface{i}'].getNode('^local')
|
||||
channels = node.channels
|
||||
channel_dict = {}
|
||||
for channel in channels:
|
||||
if hasattr(channel, 'role') and channel.role:
|
||||
channel_name = getattr(channel.settings, 'name', '').strip()
|
||||
channel_number = getattr(channel, 'index', 0)
|
||||
# Only add channels with a non-empty name
|
||||
if channel_name:
|
||||
channel_dict[channel_name] = channel_number
|
||||
channel_list.append({
|
||||
"interface_id": i,
|
||||
"channels": channel_dict
|
||||
})
|
||||
logger.debug(f"System: Fetched Channel List from Device{i}")
|
||||
except Exception as e:
|
||||
logger.error(f"System: Error fetching channel list from Device{i}: {e}")
|
||||
# Try to use the node-provided channel/hash table if available
|
||||
try:
|
||||
ch_hash_table_raw = node.get_channels_with_hash()
|
||||
#print(f"System: Device{i} Channel Hash Table: {ch_hash_table_raw}")
|
||||
except Exception:
|
||||
logger.warning(f"System: API version error update API `pip3 install --upgrade meshtastic[cli]`")
|
||||
ch_hash_table_raw = []
|
||||
|
||||
# add channel hash to channel_list
|
||||
for device in channel_list:
|
||||
interface_id = device["interface_id"]
|
||||
interface = globals().get(f'interface{interface_id}')
|
||||
for channel_name, channel_number in device["channels"].items():
|
||||
psk_base64 = "AQ==" # default PSK
|
||||
channel_hash = generate_hash(channel_name, psk_base64)
|
||||
# add hash to the channel entry in channel_list under key 'hash'
|
||||
for entry in channel_list:
|
||||
if entry["interface_id"] == interface_id:
|
||||
entry["channels"][channel_name] = {
|
||||
"number": channel_number,
|
||||
"hash": channel_hash
|
||||
}
|
||||
channel_dict = {}
|
||||
# Use the hash table as the source of truth for channels
|
||||
if isinstance(ch_hash_table_raw, list):
|
||||
for entry in ch_hash_table_raw:
|
||||
channel_name = entry.get("name", "").strip()
|
||||
channel_number = entry.get("index")
|
||||
ch_hash = entry.get("hash")
|
||||
role = entry.get("role", "")
|
||||
# Always add PRIMARY/SECONDARY channels, even if name is empty
|
||||
if role in ("PRIMARY", "SECONDARY"):
|
||||
channel_dict[channel_name if channel_name else f"Channel{channel_number}"] = {
|
||||
"number": channel_number,
|
||||
"hash": ch_hash
|
||||
}
|
||||
elif isinstance(ch_hash_table_raw, dict):
|
||||
for channel_name, ch_hash in ch_hash_table_raw.items():
|
||||
channel_dict[channel_name] = {"number": None, "hash": ch_hash}
|
||||
# Always add the interface, even if no named channels
|
||||
cache.append({"interface_id": i, "channels": channel_dict})
|
||||
logger.debug(f"System: Fetched Channel List from Device{i} (cached)")
|
||||
except Exception as e:
|
||||
logger.debug(f"System: Error fetching channel list from Device{i}: {e}")
|
||||
|
||||
_channel_cache = cache
|
||||
return _channel_cache
|
||||
|
||||
def refresh_channel_cache():
|
||||
"""Force rebuild of channel cache (call only when channel config changes)."""
|
||||
return build_channel_cache(force_refresh=True)
|
||||
|
||||
channel_list = build_channel_cache()
|
||||
#print(f"System: Channel Cache Built: {channel_list}")
|
||||
|
||||
#### FUN-ctions ####
|
||||
def resolve_channel_name(channel_number, rxNode=1, interface_obj=None):
|
||||
"""
|
||||
Resolve a channel number/hash to its name using cached channel list.
|
||||
"""
|
||||
try:
|
||||
# ensure cache exists (cheap)
|
||||
cached = build_channel_cache()
|
||||
# quick search in cache first (no node calls)
|
||||
for device in cached:
|
||||
if device.get("interface_id") == rxNode:
|
||||
device_channels = device.get("channels", {}) or {}
|
||||
# info is dict: {name: {'number': X, 'hash': Y}}
|
||||
for chan_name, info in device_channels.items():
|
||||
try:
|
||||
if isinstance(info, dict):
|
||||
if str(info.get('number')) == str(channel_number) or str(info.get('hash')) == str(channel_number):
|
||||
return (chan_name, info.get('number') or info.get('hash'))
|
||||
else:
|
||||
if str(info) == str(channel_number):
|
||||
return (chan_name, info)
|
||||
except Exception:
|
||||
continue
|
||||
break # stop searching other devices
|
||||
except Exception as e:
|
||||
logger.debug(f"System: Error resolving channel name from cache: {e}")
|
||||
|
||||
|
||||
def cleanup_memory():
|
||||
"""Clean up memory by limiting list sizes and removing stale entries"""
|
||||
@@ -482,7 +518,7 @@ def cleanup_game_trackers(current_time):
|
||||
tracker_names = [
|
||||
'dwPlayerTracker', 'lemonadeTracker', 'jackTracker',
|
||||
'vpTracker', 'mindTracker', 'golfTracker',
|
||||
'hangmanTracker', 'hamtestTracker', 'tictactoeTracker', 'surveyTracker'
|
||||
'hangmanTracker', 'hamtestTracker', 'tictactoeTracker', 'surveyTracker', 'battleshipTracker'
|
||||
]
|
||||
|
||||
for tracker_name in tracker_names:
|
||||
@@ -666,7 +702,7 @@ async def get_closest_nodes(nodeInt=1,returnCount=3, channel=publicChannel):
|
||||
distance = round(geopy.distance.geodesic((latitudeValue, longitudeValue), (latitude, longitude)).m, 2)
|
||||
|
||||
if (distance < sentry_radius):
|
||||
if (nodeID not in [globals().get(f'myNodeNum{i}') for i in range(1, 10)]) and str(nodeID) not in sentryIgnoreList:
|
||||
if (nodeID not in my_node_ids) and str(nodeID) not in sentryIgnoreList:
|
||||
node_list.append({'id': nodeID, 'latitude': latitude, 'longitude': longitude, 'distance': distance})
|
||||
|
||||
except Exception as e:
|
||||
@@ -678,7 +714,7 @@ async def get_closest_nodes(nodeInt=1,returnCount=3, channel=publicChannel):
|
||||
try:
|
||||
logger.debug(f"System: Requesting location data for {node['id']}, lastHeard: {node.get('lastHeard', 'N/A')}")
|
||||
# if not a interface node
|
||||
if node['num'] in [globals().get(f'myNodeNum{i}') for i in range(1, 10)]:
|
||||
if node['num'] in my_node_ids:
|
||||
ignore = True
|
||||
else:
|
||||
# one idea is to send a ping to the node to request location data for if or when, ask again later
|
||||
@@ -955,21 +991,143 @@ def messageTrap(msg):
|
||||
return True
|
||||
return False
|
||||
|
||||
def stringSafeCheck(s):
|
||||
def stringSafeCheck(s, fromID=0):
|
||||
# Check if a string is safe to use, no control characters or non-printable characters
|
||||
soFarSoGood = True
|
||||
if not all(c.isprintable() or c.isspace() for c in s):
|
||||
return False
|
||||
ban_hammer(fromID, reason="Non-printable character in message")
|
||||
return False # non-printable characters found
|
||||
if any(ord(c) < 32 and c not in '\n\r\t' for c in s):
|
||||
return False
|
||||
ban_hammer(fromID, reason="Control character in message")
|
||||
return False # control characters found
|
||||
if any(c in s for c in ['\x0b', '\x0c', '\x1b']):
|
||||
return False
|
||||
return False # vertical tab, form feed, escape characters found
|
||||
if len(s) > 1000:
|
||||
return False
|
||||
injection_chars = [';', '|', '../']
|
||||
if any(char in s for char in injection_chars):
|
||||
# Check for single-character injections
|
||||
single_injection_chars = [';', '|', '}', '>']
|
||||
if any(c in s for c in single_injection_chars):
|
||||
return False # injection character found
|
||||
# Check for multi-character patterns
|
||||
multi_injection_patterns = ['../', '||']
|
||||
if any(pattern in s for pattern in multi_injection_patterns):
|
||||
return False
|
||||
return soFarSoGood
|
||||
return True
|
||||
|
||||
def api_throttle(node_id, rxInterface=None, channel=None, apiName=""):
|
||||
"""
|
||||
Throttle API requests from nodes to prevent abuse.
|
||||
Returns False if not throttled, or a string message if throttled.
|
||||
"""
|
||||
global apiThrottleList
|
||||
|
||||
current_time = time.time()
|
||||
node_id_str = str(node_id)
|
||||
|
||||
if isNodeAdmin(node_id_str):
|
||||
return False # Do not throttle admin nodes
|
||||
|
||||
# Find or create the apiThrottleList entry
|
||||
node_entry = next((entry for entry in apiThrottleList if entry['node_id'] == node_id_str), None)
|
||||
if node_entry:
|
||||
# Update interface and channel if provided
|
||||
if rxInterface is not None:
|
||||
node_entry['rxInterface'] = rxInterface
|
||||
if channel is not None:
|
||||
node_entry['channel'] = channel
|
||||
# Check if the timeframe has expired
|
||||
if (current_time - node_entry['lastSeen']) > autoBanTimeframe:
|
||||
node_entry['api_throttle_count'] = 1
|
||||
node_entry['lastSeen'] = current_time
|
||||
else:
|
||||
node_entry['api_throttle_count'] += 1
|
||||
node_entry['lastSeen'] = current_time
|
||||
if node_entry['api_throttle_count'] > apiThrottleValue:
|
||||
logger.warning(f"System: Node {node_id_str} throttled on API {apiName} count: {node_entry['api_throttle_count']}")
|
||||
if autoBanEnabled:
|
||||
ban_hammer(node_id_str, reason="API Throttle Exceeded")
|
||||
return "🚦 System busy, try again later."
|
||||
else:
|
||||
# node not found, create a new entry
|
||||
entry = {
|
||||
'node_id': node_id_str,
|
||||
'first_seen': current_time,
|
||||
'lastSeen': current_time,
|
||||
'api_throttle_count': 1,
|
||||
'rxInterface': rxInterface,
|
||||
'channel': channel
|
||||
}
|
||||
apiThrottleList.append(entry)
|
||||
node_entry = entry
|
||||
|
||||
logger.debug(f"System: API Throttle check for Node {node_id} on API {apiName} count: {node_entry['api_throttle_count']}")
|
||||
return False # Not throttled
|
||||
|
||||
def ban_hammer(node_id, rxInterface=None, channel=None, reason=""):
|
||||
"""
|
||||
Auto-ban nodes that exceed the message threshold within the timeframe.
|
||||
Returns True if the node is (or becomes) banned, False otherwise.
|
||||
"""
|
||||
global autoBanlist, seenNodes, bbs_ban_list
|
||||
|
||||
current_time = time.time()
|
||||
node_id_str = str(node_id)
|
||||
|
||||
if isNodeAdmin(node_id_str):
|
||||
return False # Do not ban admin nodes
|
||||
|
||||
# Check if the node is already banned
|
||||
if node_id_str in bbs_ban_list or node_id_str in autoBanlist:
|
||||
return True # Node is already banned
|
||||
|
||||
# if no reason provided, dont ban just run that last check
|
||||
if reason == "":
|
||||
return False
|
||||
|
||||
# Find or create the seenNodes entry (patched for missing 'node_id')
|
||||
node_entry = next((entry for entry in seenNodes if entry.get('node_id') == node_id_str), None)
|
||||
if node_entry:
|
||||
# Update interface and channel if provided
|
||||
if rxInterface is not None:
|
||||
node_entry['rxInterface'] = rxInterface
|
||||
if channel is not None:
|
||||
node_entry['channel'] = channel
|
||||
# Check if the timeframe has expired
|
||||
if (current_time - node_entry['lastSeen']) > autoBanTimeframe:
|
||||
node_entry['auto_ban_count'] = 1
|
||||
node_entry['lastSeen'] = current_time
|
||||
else:
|
||||
node_entry['auto_ban_count'] += 1
|
||||
node_entry['lastSeen'] = current_time
|
||||
else:
|
||||
# node not found, create a new entry
|
||||
entry = {
|
||||
'node_id': node_id_str,
|
||||
'first_seen': current_time,
|
||||
'lastSeen': current_time,
|
||||
'auto_ban_count': 3, # start at 3 to trigger ban faster
|
||||
'rxInterface': rxInterface,
|
||||
'channel': channel,
|
||||
'welcome': False
|
||||
}
|
||||
seenNodes.append(entry)
|
||||
node_entry = entry
|
||||
|
||||
# Check if the node has exceeded the ban threshold
|
||||
if node_entry['auto_ban_count'] < autoBanThreshold:
|
||||
logger.debug(f"System: Node {node_id_str} auto-ban count: {node_entry['auto_ban_count']}")
|
||||
return False # No ban applied
|
||||
|
||||
# If the node has exceeded the ban threshold within the time window
|
||||
autoBanlist.append(node_id_str)
|
||||
logger.info(f"System: Node {node_id_str} exceeded auto-ban threshold with {node_entry['auto_ban_count']} messages")
|
||||
if autoBanEnabled:
|
||||
logger.warning(f"System: Auto-banned node {node_id_str} Reason: {reason}")
|
||||
if node_id_str not in bbs_ban_list:
|
||||
bbs_ban_list.append(node_id_str)
|
||||
save_bbsBanList()
|
||||
return True # Node is now banned
|
||||
|
||||
return False # No ban applied
|
||||
|
||||
def save_bbsBanList():
|
||||
# save the bbs_ban_list to file
|
||||
@@ -987,7 +1145,7 @@ def load_bbsBanList():
|
||||
try:
|
||||
with open('data/bbs_ban_list.txt', 'r') as f:
|
||||
loaded_list = [line.strip() for line in f if line.strip()]
|
||||
logger.debug("System: BBS ban list loaded from file")
|
||||
logger.debug(f"System: BBS ban list now has {len(loaded_list)} entries loaded from file")
|
||||
except FileNotFoundError:
|
||||
config_val = config['bbs'].get('bbs_ban_list', '')
|
||||
if config_val:
|
||||
@@ -1007,8 +1165,6 @@ def isNodeAdmin(nodeID):
|
||||
for admin in bbs_admin_list:
|
||||
if str(nodeID) == admin:
|
||||
return True
|
||||
else:
|
||||
return True
|
||||
return False
|
||||
|
||||
def isNodeBanned(nodeID):
|
||||
@@ -1019,6 +1175,7 @@ def isNodeBanned(nodeID):
|
||||
return False
|
||||
|
||||
def handle_bbsban(message, message_from_id, isDM):
|
||||
global bbs_ban_list
|
||||
msg = ""
|
||||
if not isDM:
|
||||
return "🤖only available in a Direct Message📵"
|
||||
@@ -1115,143 +1272,83 @@ def handleMultiPing(nodeID=0, deviceID=1):
|
||||
multiPingList.pop(j)
|
||||
break
|
||||
|
||||
priorVolcanoAlert = ""
|
||||
priorEmergencyAlert = ""
|
||||
priorWxAlert = ""
|
||||
# Alert broadcasting initialization
|
||||
last_alerts = {
|
||||
"overdue": {"time": 0, "message": ""},
|
||||
"fema": {"time": 0, "message": ""},
|
||||
"uk": {"time": 0, "message": ""},
|
||||
"de": {"time": 0, "message": ""},
|
||||
"wx": {"time": 0, "message": ""},
|
||||
"volcano": {"time": 0, "message": ""},
|
||||
}
|
||||
def should_send_alert(alert_type, new_message, min_interval=1):
|
||||
now = time.time()
|
||||
last = last_alerts[alert_type]
|
||||
# Only send if enough time has passed AND the message is different
|
||||
if (now - last["time"]) > min_interval and new_message != last["message"]:
|
||||
last_alerts[alert_type]["time"] = now
|
||||
last_alerts[alert_type]["message"] = new_message
|
||||
return True
|
||||
return False
|
||||
|
||||
def handleAlertBroadcast(deviceID=1):
|
||||
try:
|
||||
global priorVolcanoAlert, priorEmergencyAlert, priorWxAlert
|
||||
alertUk = NO_ALERTS
|
||||
alertDe = NO_ALERTS
|
||||
alertFema = NO_ALERTS
|
||||
wxAlert = NO_ALERTS
|
||||
volcanoAlert = NO_ALERTS
|
||||
overdueAlerts = NO_ALERTS
|
||||
alertUk = alertDe = alertFema = wxAlert = volcanoAlert = overdueAlerts = NO_ALERTS
|
||||
alertWx = False
|
||||
# only allow API call every 20 minutes
|
||||
# the watchdog will call this function 3 times, seeing possible throttling on the API
|
||||
clock = datetime.now()
|
||||
if clock.minute % 20 != 0:
|
||||
return False
|
||||
if clock.second > 17:
|
||||
return False
|
||||
|
||||
# check for alerts
|
||||
if wxAlertBroadcastEnabled:
|
||||
alertWx = alertBrodcastNOAA()
|
||||
|
||||
if emergencyAlertBrodcastEnabled:
|
||||
if enableDEalerts:
|
||||
alertDe = get_nina_alerts()
|
||||
if enableGBalerts:
|
||||
alertUk = get_govUK_alerts()
|
||||
else:
|
||||
# default USA alerts
|
||||
alertFema = getIpawsAlert(latitudeValue,longitudeValue, shortAlerts=True)
|
||||
|
||||
# Overdue check-in alert
|
||||
if checklist_enabled:
|
||||
overdueAlerts = format_overdue_alert()
|
||||
|
||||
# format alert
|
||||
if alertWx:
|
||||
wxAlert = f"🚨 {alertWx[1]} EAS-WX ALERT: {alertWx[0]}"
|
||||
else:
|
||||
wxAlert = False
|
||||
if overdueAlerts:
|
||||
if should_send_alert("overdue", overdueAlerts, min_interval=300): # 5 minutes interval for overdue alerts
|
||||
send_message(overdueAlerts, emergency_responder_alert_channel, 0, emergency_responder_alert_interface)
|
||||
|
||||
femaAlert = alertFema
|
||||
ukAlert = alertUk
|
||||
deAlert = alertDe
|
||||
|
||||
if overdueAlerts != NO_ALERTS and overdueAlerts != None:
|
||||
logger.debug("System: Adding overdue checkin to emergency alerts")
|
||||
if femaAlert and NO_ALERTS not in femaAlert and ERROR_FETCHING_DATA not in femaAlert:
|
||||
femaAlert += "\n\n" + overdueAlerts
|
||||
elif ukAlert and NO_ALERTS not in ukAlert and ERROR_FETCHING_DATA not in ukAlert:
|
||||
ukAlert += "\n\n" + overdueAlerts
|
||||
elif deAlert and NO_ALERTS not in deAlert and ERROR_FETCHING_DATA not in deAlert:
|
||||
deAlert += "\n\n" + overdueAlerts
|
||||
else:
|
||||
# only overdue alerts to send
|
||||
if overdueAlerts != "" and overdueAlerts is not None and overdueAlerts != NO_ALERTS:
|
||||
if overdueAlerts != priorEmergencyAlert:
|
||||
priorEmergencyAlert = overdueAlerts
|
||||
else:
|
||||
return False
|
||||
if isinstance(emergencyAlertBroadcastCh, list):
|
||||
for channel in emergencyAlertBroadcastCh:
|
||||
send_message(overdueAlerts, int(channel), 0, deviceID)
|
||||
else:
|
||||
send_message(overdueAlerts, emergencyAlertBroadcastCh, 0, deviceID)
|
||||
return True
|
||||
|
||||
if emergencyAlertBrodcastEnabled:
|
||||
if NO_ALERTS not in femaAlert and ERROR_FETCHING_DATA not in femaAlert:
|
||||
if femaAlert != priorEmergencyAlert:
|
||||
priorEmergencyAlert = femaAlert
|
||||
else:
|
||||
return False
|
||||
if isinstance(emergencyAlertBroadcastCh, list):
|
||||
for channel in emergencyAlertBroadcastCh:
|
||||
send_message(femaAlert, int(channel), 0, deviceID)
|
||||
else:
|
||||
send_message(femaAlert, emergencyAlertBroadcastCh, 0, deviceID)
|
||||
return True
|
||||
if NO_ALERTS not in ukAlert:
|
||||
if ukAlert != priorEmergencyAlert:
|
||||
priorEmergencyAlert = ukAlert
|
||||
else:
|
||||
return False
|
||||
if isinstance(emergencyAlertBroadcastCh, list):
|
||||
for channel in emergencyAlertBroadcastCh:
|
||||
send_message(ukAlert, int(channel), 0, deviceID)
|
||||
else:
|
||||
send_message(ukAlert, emergencyAlertBroadcastCh, 0, deviceID)
|
||||
return True
|
||||
|
||||
if NO_ALERTS not in alertDe:
|
||||
if deAlert != priorEmergencyAlert:
|
||||
priorEmergencyAlert = deAlert
|
||||
else:
|
||||
return False
|
||||
if isinstance(emergencyAlertBroadcastCh, list):
|
||||
for channel in emergencyAlertBroadcastCh:
|
||||
send_message(deAlert, int(channel), 0, deviceID)
|
||||
else:
|
||||
send_message(deAlert, emergencyAlertBroadcastCh, 0, deviceID)
|
||||
return True
|
||||
# Only allow API call every alert_duration minutes at xx:00, xx:20, xx:40
|
||||
if not (clock.minute % alert_duration == 0 and clock.second <= 17):
|
||||
return False
|
||||
|
||||
# Collect alerts
|
||||
if wxAlertBroadcastEnabled:
|
||||
if wxAlert:
|
||||
if wxAlert != priorWxAlert:
|
||||
priorWxAlert = wxAlert
|
||||
else:
|
||||
return False
|
||||
if isinstance(wxAlertBroadcastChannel, list):
|
||||
for channel in wxAlertBroadcastChannel:
|
||||
send_message(wxAlert, int(channel), 0, deviceID)
|
||||
else:
|
||||
send_message(wxAlert, wxAlertBroadcastChannel, 0, deviceID)
|
||||
return True
|
||||
|
||||
alertWx = alertBrodcastNOAA()
|
||||
if alertWx:
|
||||
wxAlert = f"🚨 {alertWx[1]} EAS-WX ALERT: {alertWx[0]}"
|
||||
if eAlertBroadcastEnabled or ipawsAlertEnabled:
|
||||
alertFema = getIpawsAlert(latitudeValue, longitudeValue, shortAlerts=True)
|
||||
if volcanoAlertBroadcastEnabled:
|
||||
volcanoAlert = get_volcano_usgs(latitudeValue, longitudeValue)
|
||||
if volcanoAlert and NO_ALERTS not in volcanoAlert and ERROR_FETCHING_DATA not in volcanoAlert:
|
||||
# check if the alert is different from the last one
|
||||
if volcanoAlert != priorVolcanoAlert:
|
||||
priorVolcanoAlert = volcanoAlert
|
||||
if isinstance(volcanoAlertBroadcastChannel, list):
|
||||
for channel in volcanoAlertBroadcastChannel:
|
||||
send_message(volcanoAlert, int(channel), 0, deviceID)
|
||||
else:
|
||||
send_message(volcanoAlert, volcanoAlertBroadcastChannel, 0, deviceID)
|
||||
return True
|
||||
|
||||
if enableDEalerts:
|
||||
deAlerts = get_nina_alerts()
|
||||
|
||||
if usAlerts:
|
||||
alert_types = [
|
||||
("fema", alertFema, ipawsAlertEnabled),
|
||||
("wx", wxAlert, wxAlertBroadcastEnabled),
|
||||
("volcano", volcanoAlert, volcanoAlertBroadcastEnabled),]
|
||||
|
||||
if enableDEalerts:
|
||||
alert_types = [("de", deAlerts, enableDEalerts)]
|
||||
|
||||
for alert_type, alert_msg, enabled in alert_types:
|
||||
if enabled and alert_msg and NO_ALERTS not in alert_msg and ERROR_FETCHING_DATA not in alert_msg:
|
||||
if should_send_alert(alert_type, alert_msg):
|
||||
logger.debug(f"System: Sending {alert_type} alert to emergency responder channel {emergency_responder_alert_channel}")
|
||||
send_message(alert_msg, emergency_responder_alert_channel, 0, emergency_responder_alert_interface)
|
||||
if eAlertBroadcastChannel:
|
||||
for ch in eAlertBroadcastChannel:
|
||||
ch = ch.strip()
|
||||
if ch:
|
||||
logger.debug(f"System: Sending {alert_type} alert to aux channel {ch}")
|
||||
time.sleep(splitDelay)
|
||||
send_message(alert_msg, int(ch), 0, emergency_responder_alert_interface)
|
||||
except Exception as e:
|
||||
logger.error(f"System: Error in handleAlertBroadcast: {e}")
|
||||
return False
|
||||
|
||||
def onDisconnect(interface):
|
||||
# Handle disconnection of the interface
|
||||
logger.warning(f"System: Abrupt Disconnection of Interface detected")
|
||||
logger.warning(f"System: Abrupt Disconnection of Interface detected, attempting reconnect...")
|
||||
interface.close()
|
||||
|
||||
# Telemetry Functions
|
||||
@@ -1397,6 +1494,7 @@ def initializeMeshLeaderboard():
|
||||
'lowestBattery': {'nodeID': None, 'value': 101, 'timestamp': 0}, # 🪫
|
||||
'longestUptime': {'nodeID': None, 'value': 0, 'timestamp': 0}, # 🕰️
|
||||
'fastestSpeed': {'nodeID': None, 'value': 0, 'timestamp': 0}, # 🚓
|
||||
'fastestAirSpeed': {'nodeID': None, 'value': 0, 'timestamp': 0}, # ✈️
|
||||
'highestAltitude': {'nodeID': None, 'value': 0, 'timestamp': 0}, # 🚀
|
||||
'tallestNode': {'nodeID': None, 'value': 0, 'timestamp': 0}, # 🪜
|
||||
'coldestTemp': {'nodeID': None, 'value': 999, 'timestamp': 0}, # 🥶
|
||||
@@ -1444,11 +1542,13 @@ def consumeMetadata(packet, rxNode=0, channel=-1):
|
||||
|
||||
# Meta for most Messages leaderboard
|
||||
if packet_type == 'TEXT_MESSAGE':
|
||||
message_count = meshLeaderboard.get('nodeMessageCounts', {})
|
||||
message_count[nodeID] = message_count.get(nodeID, 0) + 1
|
||||
meshLeaderboard['nodeMessageCounts'] = message_count
|
||||
if message_count[nodeID] > meshLeaderboard['mostMessages']['value']:
|
||||
meshLeaderboard['mostMessages'] = {'nodeID': nodeID, 'value': message_count[nodeID], 'timestamp': time.time()}
|
||||
# if packet isnt TO a my_node_id count it
|
||||
if packet.get('to') not in my_node_ids:
|
||||
message_count = meshLeaderboard.get('nodeMessageCounts', {})
|
||||
message_count[nodeID] = message_count.get(nodeID, 0) + 1
|
||||
meshLeaderboard['nodeMessageCounts'] = message_count
|
||||
if message_count[nodeID] > meshLeaderboard['mostMessages']['value']:
|
||||
meshLeaderboard['mostMessages'] = {'nodeID': nodeID, 'value': message_count[nodeID], 'timestamp': time.time()}
|
||||
else:
|
||||
tmessage_count = meshLeaderboard.get('nodeTMessageCounts', {})
|
||||
tmessage_count[nodeID] = tmessage_count.get(nodeID, 0) + 1
|
||||
@@ -1541,32 +1641,39 @@ def consumeMetadata(packet, rxNode=0, channel=-1):
|
||||
positionMetadata[nodeID] = {}
|
||||
for key in position_stats_keys:
|
||||
positionMetadata[nodeID][key] = position_data.get(key, 0)
|
||||
# Track fastest speed 🚓
|
||||
if position_data.get('groundSpeed') is not None:
|
||||
if use_metric:
|
||||
speed = position_data['groundSpeed']
|
||||
else:
|
||||
speed = round(position_data['groundSpeed'] * 1.60934, 1) # Convert mph to km/h
|
||||
if speed > meshLeaderboard['fastestSpeed']['value']:
|
||||
meshLeaderboard['fastestSpeed'] = {'nodeID': nodeID, 'value': speed, 'timestamp': time.time()}
|
||||
if logMetaStats:
|
||||
logger.info(f"System: 🚓 New speed record: {speed} km/h from NodeID:{nodeID} ShortName:{get_name_from_number(nodeID, 'short', rxNode)}")
|
||||
# Track highest altitude 🚀 (also log if over highfly_altitude threshold)
|
||||
if position_data.get('altitude') is not None:
|
||||
altitude = position_data['altitude']
|
||||
if altitude > meshLeaderboard['highestAltitude']['value']:
|
||||
meshLeaderboard['highestAltitude'] = {'nodeID': nodeID, 'value': altitude, 'timestamp': time.time()}
|
||||
if logMetaStats:
|
||||
logger.info(f"System: 🚀 New altitude record: {altitude}m from NodeID:{nodeID} ShortName:{get_name_from_number(nodeID, 'short', rxNode)}")
|
||||
# Track tallest node 🪜 (under the highfly_altitude limit by 100m)
|
||||
|
||||
# Track altitude and speed records
|
||||
if position_data.get('altitude') is not None:
|
||||
altitude = position_data['altitude']
|
||||
highflying = altitude > highfly_altitude
|
||||
|
||||
# Tallest node (below highfly_altitude - 100m)
|
||||
if altitude < (highfly_altitude - 100):
|
||||
if altitude > meshLeaderboard['tallestNode']['value']:
|
||||
meshLeaderboard['tallestNode'] = {'nodeID': nodeID, 'value': altitude, 'timestamp': time.time()}
|
||||
if logMetaStats:
|
||||
logger.info(f"System: 🪜 New tallest node record: {altitude}m from NodeID:{nodeID} ShortName:{get_name_from_number(nodeID, 'short', rxNode)}")
|
||||
|
||||
|
||||
# Highest altitude (above highfly_altitude)
|
||||
if highflying:
|
||||
if altitude > meshLeaderboard['highestAltitude']['value']:
|
||||
meshLeaderboard['highestAltitude'] = {'nodeID': nodeID, 'value': altitude, 'timestamp': time.time()}
|
||||
if logMetaStats:
|
||||
logger.info(f"System: 🚀 New altitude record: {altitude}m from NodeID:{nodeID} ShortName:{get_name_from_number(nodeID, 'short', rxNode)}")
|
||||
|
||||
# Track speed records
|
||||
if position_data.get('groundSpeed') is not None:
|
||||
speed = position_data['groundSpeed']
|
||||
# Fastest ground speed (not highflying)
|
||||
if not highflying and speed > meshLeaderboard['fastestSpeed']['value']:
|
||||
meshLeaderboard['fastestSpeed'] = {'nodeID': nodeID, 'value': speed, 'timestamp': time.time()}
|
||||
if logMetaStats:
|
||||
logger.info(f"System: 🚓 New speed record: {speed} km/h from NodeID:{nodeID} ShortName:{get_name_from_number(nodeID, 'short', rxNode)}")
|
||||
# Fastest air speed (highflying)
|
||||
elif highflying and speed > meshLeaderboard['fastestAirSpeed']['value']:
|
||||
meshLeaderboard['fastestAirSpeed'] = {'nodeID': nodeID, 'value': speed, 'timestamp': time.time()}
|
||||
if logMetaStats:
|
||||
logger.info(f"System: ✈️ New air speed record: {speed} km/h from NodeID:{nodeID} ShortName:{get_name_from_number(nodeID, 'short', rxNode)}")
|
||||
# if altitude is over highfly_altitude send a log and message for high-flying nodes and not in highfly_ignoreList
|
||||
if position_data.get('altitude', 0) > highfly_altitude and highfly_enabled and str(nodeID) not in highfly_ignoreList and not isNodeBanned(nodeID):
|
||||
logger.info(f"System: High Altitude {position_data['altitude']}m on Device: {rxNode} Channel: {channel} NodeID:{nodeID} Lat:{position_data.get('latitude', 0)} Lon:{position_data.get('longitude', 0)}")
|
||||
@@ -1579,25 +1686,26 @@ def consumeMetadata(packet, rxNode=0, channel=-1):
|
||||
if current_time - last_alert_time < 1800:
|
||||
return False # less than 30 minutes since last alert
|
||||
positionMetadata[nodeID]['lastHighFlyAlert'] = current_time
|
||||
|
||||
if highfly_check_openskynetwork:
|
||||
# check get_openskynetwork to see if the node is an aircraft
|
||||
if 'latitude' in position_data and 'longitude' in position_data:
|
||||
flight_info = get_openskynetwork(position_data.get('latitude', 0), position_data.get('longitude', 0))
|
||||
# Only show plane if within altitude
|
||||
if (
|
||||
flight_info
|
||||
and NO_ALERTS not in flight_info
|
||||
and ERROR_FETCHING_DATA not in flight_info
|
||||
and isinstance(flight_info, dict)
|
||||
and 'altitude' in flight_info
|
||||
):
|
||||
plane_alt = flight_info['altitude']
|
||||
node_alt = position_data.get('altitude', 0)
|
||||
if abs(node_alt - plane_alt) <= 1000: # within 1000 meters
|
||||
msg += f"\n✈️Detected near:\n{flight_info}"
|
||||
send_message(msg, highfly_channel, 0, highfly_interface)
|
||||
|
||||
try:
|
||||
if highfly_check_openskynetwork:
|
||||
if 'latitude' in position_data and 'longitude' in position_data and 'altitude' in position_data:
|
||||
flight_info = get_openskynetwork(
|
||||
position_data.get('latitude', 0),
|
||||
position_data.get('longitude', 0),
|
||||
node_altitude=position_data.get('altitude', 0)
|
||||
)
|
||||
if flight_info and isinstance(flight_info, dict):
|
||||
msg += (
|
||||
f"\n✈️Detected near:\n"
|
||||
f"{flight_info.get('callsign', 'N/A')} "
|
||||
f"Alt:{int(flight_info.get('geo_altitude', 0)) if flight_info.get('geo_altitude') else 'N/A'}m "
|
||||
f"Vel:{int(flight_info.get('velocity', 0)) if flight_info.get('velocity') else 'N/A'}m/s "
|
||||
f"Heading:{int(flight_info.get('true_track', 0)) if flight_info.get('true_track') else 'N/A'}°\n"
|
||||
f"From:{flight_info.get('origin_country', 'N/A')}"
|
||||
)
|
||||
send_message(msg, highfly_channel, 0, highfly_interface)
|
||||
except Exception as e:
|
||||
logger.debug(f"System: Highfly: error: {e}")
|
||||
# Keep the positionMetadata dictionary at a maximum size
|
||||
if len(positionMetadata) > MAX_SEEN_NODES:
|
||||
# Remove the oldest entry
|
||||
@@ -1865,6 +1973,16 @@ def get_mesh_leaderboard(msg, fromID, deviceID):
|
||||
result += f"🚓 Speed: {value_kmh} km/h {get_name_from_number(nodeID, 'short', 1)}\n"
|
||||
else:
|
||||
result += f"🚓 Speed: {value_mph} mph {get_name_from_number(nodeID, 'short', 1)}\n"
|
||||
|
||||
# Tallest node
|
||||
if meshLeaderboard['tallestNode']['nodeID']:
|
||||
nodeID = meshLeaderboard['tallestNode']['nodeID']
|
||||
value_m = meshLeaderboard['tallestNode']['value']
|
||||
value_ft = round(value_m * 3.28084, 0)
|
||||
if use_metric:
|
||||
result += f"🪜 Tallest: {int(round(value_m, 0))}m {get_name_from_number(nodeID, 'short', 1)}\n"
|
||||
else:
|
||||
result += f"🪜 Tallest: {int(value_ft)}ft {get_name_from_number(nodeID, 'short', 1)}\n"
|
||||
|
||||
# Highest altitude
|
||||
if meshLeaderboard['highestAltitude']['nodeID']:
|
||||
@@ -1876,15 +1994,15 @@ def get_mesh_leaderboard(msg, fromID, deviceID):
|
||||
else:
|
||||
result += f"🚀 Altitude: {int(value_ft)}ft {get_name_from_number(nodeID, 'short', 1)}\n"
|
||||
|
||||
# Tallest node
|
||||
if meshLeaderboard['tallestNode']['nodeID']:
|
||||
nodeID = meshLeaderboard['tallestNode']['nodeID']
|
||||
value_m = meshLeaderboard['tallestNode']['value']
|
||||
value_ft = round(value_m * 3.28084, 0)
|
||||
# Fastest airspeed
|
||||
if meshLeaderboard['fastestAirSpeed']['nodeID']:
|
||||
nodeID = meshLeaderboard['fastestAirSpeed']['nodeID']
|
||||
value_kmh = round(meshLeaderboard['fastestAirSpeed']['value'], 1)
|
||||
value_mph = round(value_kmh / 1.60934, 1)
|
||||
if use_metric:
|
||||
result += f"🪜 Tallest: {int(round(value_m, 0))}m {get_name_from_number(nodeID, 'short', 1)}\n"
|
||||
result += f"✈️ Airspeed: {value_kmh} km/h {get_name_from_number(nodeID, 'short', 1)}\n"
|
||||
else:
|
||||
result += f"🪜 Tallest: {int(value_ft)}ft {get_name_from_number(nodeID, 'short', 1)}\n"
|
||||
result += f"✈️ Airspeed: {value_mph} mph {get_name_from_number(nodeID, 'short', 1)}\n"
|
||||
|
||||
# Coldest temperature
|
||||
if meshLeaderboard['coldestTemp']['nodeID']:
|
||||
@@ -1970,7 +2088,7 @@ def get_mesh_leaderboard(msg, fromID, deviceID):
|
||||
result = result.strip()
|
||||
|
||||
if result == "📊Leaderboard📊\n":
|
||||
result += "No records yet! Keep meshing! 📡"
|
||||
result += "No records yet! Keep meshing! 📡 \n firmware 2.7+ `Broadcast Device Metrics` in Telemetry Config, needs enabled for full use. Ideally not on AQ=="
|
||||
|
||||
return result
|
||||
|
||||
@@ -1986,7 +2104,8 @@ def get_sysinfo(nodeID=0, deviceID=1):
|
||||
return sysinfo
|
||||
|
||||
async def handleSignalWatcher():
|
||||
global lastHamLibAlert
|
||||
from modules.radio import signalWatcher
|
||||
from modules.settings import sigWatchBroadcastCh, sigWatchBroadcastInterface, lastHamLibAlert
|
||||
# monitor rigctld for signal strength and frequency
|
||||
while True:
|
||||
msg = await signalWatcher()
|
||||
@@ -2212,17 +2331,40 @@ async def handleSentinel(deviceID):
|
||||
handleSentinel_loop = 0 # Reset if nothing detected
|
||||
|
||||
async def process_vox_queue():
|
||||
# process the voxMsgQueue
|
||||
global voxMsgQueue
|
||||
items_to_process = voxMsgQueue[:]
|
||||
voxMsgQueue.clear()
|
||||
if len(items_to_process) > 0:
|
||||
logger.debug(f"System: Processing {len(items_to_process)} items in voxMsgQueue")
|
||||
for item in items_to_process:
|
||||
message = item
|
||||
for channel in sigWatchBroadcastCh:
|
||||
if antiSpam and int(channel) != publicChannel:
|
||||
send_message(message, int(channel), 0, sigWatchBroadcastInterface)
|
||||
# process the voxMsgQueue
|
||||
from modules.settings import sigWatchBroadcastCh, sigWatchBroadcastInterface, voxMsgQueue
|
||||
items_to_process = voxMsgQueue[:]
|
||||
voxMsgQueue.clear()
|
||||
if len(items_to_process) > 0:
|
||||
logger.debug(f"System: Processing {len(items_to_process)} items in voxMsgQueue")
|
||||
for item in items_to_process:
|
||||
message = item
|
||||
for channel in sigWatchBroadcastCh:
|
||||
if antiSpam and int(channel) != publicChannel:
|
||||
send_message(message, int(channel), 0, sigWatchBroadcastInterface)
|
||||
|
||||
async def handleTTS():
|
||||
from modules.radio import generate_and_play_tts, available_voices
|
||||
from modules.settings import ttsnoWelcome, tts_read_queue
|
||||
logger.debug("System: Handle TTS started")
|
||||
if not ttsnoWelcome:
|
||||
logger.debug("System: Playing TTS welcome message to disable set 'ttsnoWelcome = True' in settings.ini")
|
||||
await generate_and_play_tts("Hey its Cheerpy! Thanks for using Meshing-Around on Meshtasstic!", available_voices[0])
|
||||
try:
|
||||
while True:
|
||||
if tts_read_queue:
|
||||
tts_read = tts_read_queue.pop(0)
|
||||
voice = available_voices[0]
|
||||
# ensure the tts_read ends with a punctuation mark
|
||||
if not tts_read.endswith(('.', '!', '?')):
|
||||
tts_read += '.'
|
||||
try:
|
||||
await generate_and_play_tts(tts_read, voice)
|
||||
except Exception as e:
|
||||
logger.error(f"System: TTShandler error: {e}")
|
||||
await asyncio.sleep(1)
|
||||
except Exception as e:
|
||||
logger.critical(f"System: handleTTS crashed: {e}")
|
||||
|
||||
async def watchdog():
|
||||
global localTelemetryData, retry_int1, retry_int2, retry_int3, retry_int4, retry_int5, retry_int6, retry_int7, retry_int8, retry_int9
|
||||
@@ -2256,7 +2398,7 @@ async def watchdog():
|
||||
|
||||
handleMultiPing(0, i)
|
||||
|
||||
if wxAlertBroadcastEnabled or emergencyAlertBrodcastEnabled or volcanoAlertBroadcastEnabled or checklist_enabled:
|
||||
if usAlerts or checklist_enabled or enableDEalerts:
|
||||
handleAlertBroadcast(i)
|
||||
|
||||
intData = displayNodeTelemetry(0, i)
|
||||
@@ -2283,8 +2425,36 @@ async def watchdog():
|
||||
load_bbsdm()
|
||||
load_bbsdb()
|
||||
|
||||
def saveAllData():
|
||||
try:
|
||||
# Save BBS data if enabled
|
||||
if bbs_enabled:
|
||||
save_bbsdb()
|
||||
save_bbsdm()
|
||||
logger.debug("Persistence: BBS data saved")
|
||||
|
||||
# Save leaderboard data if enabled
|
||||
if logMetaStats:
|
||||
saveLeaderboard()
|
||||
logger.debug("Persistence: Leaderboard data saved")
|
||||
|
||||
# Save ban list
|
||||
save_bbsBanList()
|
||||
logger.debug("Persistence: Ban list saved")
|
||||
|
||||
logger.info("Persistence: Save completed")
|
||||
except Exception as e:
|
||||
logger.error(f"Persistence: Save error: {e}")
|
||||
|
||||
async def dataPersistenceLoop():
|
||||
"""Data persistence service loop for periodic data saving"""
|
||||
logger.debug("Persistence: Loop started")
|
||||
while True:
|
||||
await asyncio.sleep(dataPersistence_interval)
|
||||
saveAllData()
|
||||
|
||||
def exit_handler():
|
||||
# Close the interface and save the BBS messages
|
||||
# Close the interface and save all data
|
||||
logger.debug(f"System: Closing Autoresponder")
|
||||
try:
|
||||
logger.debug(f"System: Closing Interface1")
|
||||
@@ -2296,12 +2466,9 @@ def exit_handler():
|
||||
globals()[f'interface{i}'].close()
|
||||
except Exception as e:
|
||||
logger.error(f"System: closing: {e}")
|
||||
if bbs_enabled:
|
||||
save_bbsdb()
|
||||
save_bbsdm()
|
||||
logger.debug(f"System: BBS Messages Saved")
|
||||
if logMetaStats:
|
||||
saveLeaderboard()
|
||||
|
||||
saveAllData()
|
||||
|
||||
logger.debug(f"System: Exiting")
|
||||
asyncLoop.stop()
|
||||
asyncLoop.close()
|
||||
|
||||
@@ -28,7 +28,7 @@ if os.path.isfile(checkall_path):
|
||||
|
||||
|
||||
# List of module names to exclude
|
||||
exclude = ['test_bot','udp', 'system', 'log', 'gpio', 'web','test_xtide',]
|
||||
exclude = ['test_bot','udp', 'system', 'log', 'gpio', 'web',]
|
||||
available_modules = [
|
||||
m.name for m in pkgutil.iter_modules([modules_path])
|
||||
if m.name not in exclude]
|
||||
@@ -77,6 +77,13 @@ class TestBot(unittest.TestCase):
|
||||
self.assertTrue(result)
|
||||
self.assertIsInstance(result1, str)
|
||||
|
||||
def test_initialize_inventory_database(self):
|
||||
from inventory import initialize_inventory_database, process_inventory_command
|
||||
result = initialize_inventory_database()
|
||||
result1 = process_inventory_command(0, 'inventory', name="none")
|
||||
self.assertTrue(result)
|
||||
self.assertIsInstance(result1, str)
|
||||
|
||||
def test_init_news_sources(self):
|
||||
from filemon import initNewsSources
|
||||
result = initNewsSources()
|
||||
@@ -87,11 +94,6 @@ class TestBot(unittest.TestCase):
|
||||
alerts = get_nina_alerts()
|
||||
self.assertIsInstance(alerts, str)
|
||||
|
||||
def test_llmTool_get_google(self):
|
||||
from llm import llmTool_get_google
|
||||
result = llmTool_get_google("What is 2+2?", 1)
|
||||
self.assertIsInstance(result, list)
|
||||
|
||||
def test_send_ollama_query(self):
|
||||
from llm import send_ollama_query
|
||||
response = send_ollama_query("Hello, Ollama!")
|
||||
@@ -150,10 +152,13 @@ class TestBot(unittest.TestCase):
|
||||
result = initalize_qrz_database()
|
||||
self.assertTrue(result)
|
||||
|
||||
def test_get_hamlib(self):
|
||||
from radio import get_hamlib
|
||||
frequency = get_hamlib('f')
|
||||
self.assertIsInstance(frequency, str)
|
||||
def test_import_radio_module(self):
|
||||
try:
|
||||
import radio
|
||||
#frequency = get_hamlib('f')
|
||||
#self.assertIsInstance(frequency, str)
|
||||
except Exception as e:
|
||||
self.fail(f"Importing radio module failed: {e}")
|
||||
|
||||
def test_get_rss_feed(self):
|
||||
from rss import get_rss_feed
|
||||
@@ -169,7 +174,9 @@ class TestBot(unittest.TestCase):
|
||||
self.assertIsInstance(haha, str)
|
||||
|
||||
def test_tictactoe_initial_and_move(self):
|
||||
from games.tictactoe import tictactoe
|
||||
from games.tictactoe import TicTacToe
|
||||
# Create an instance (no display module required for tests)
|
||||
tictactoe = TicTacToe(display_module=None)
|
||||
user_id = "testuser"
|
||||
# Start a new game (no move yet)
|
||||
initial = tictactoe.play(user_id, "")
|
||||
|
||||
@@ -1,135 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test script for xtide module
|
||||
Tests both NOAA (disabled) and tidepredict (when available) tide predictions
|
||||
"""
|
||||
import sys
|
||||
import os
|
||||
|
||||
# Add parent directory to path
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
def test_xtide_import():
|
||||
"""Test that xtide module can be imported"""
|
||||
print("Testing xtide module import...")
|
||||
try:
|
||||
from modules import xtide
|
||||
print(f"✓ xtide module imported successfully")
|
||||
print(f" - tidepredict available: {xtide.TIDEPREDICT_AVAILABLE}")
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"✗ Failed to import xtide: {e}")
|
||||
return False
|
||||
|
||||
def test_locationdata_import():
|
||||
"""Test that modified locationdata can be imported"""
|
||||
print("\nTesting locationdata module import...")
|
||||
try:
|
||||
from modules import locationdata
|
||||
print(f"✓ locationdata module imported successfully")
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"✗ Failed to import locationdata: {e}")
|
||||
return False
|
||||
|
||||
def test_settings():
|
||||
"""Test that settings has useTidePredict option"""
|
||||
print("\nTesting settings configuration...")
|
||||
try:
|
||||
from modules import settings as my_settings
|
||||
has_setting = hasattr(my_settings, 'useTidePredict')
|
||||
print(f"✓ settings module loaded")
|
||||
print(f" - useTidePredict setting available: {has_setting}")
|
||||
if has_setting:
|
||||
print(f" - useTidePredict value: {my_settings.useTidePredict}")
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"✗ Failed to load settings: {e}")
|
||||
return False
|
||||
|
||||
def test_noaa_fallback():
|
||||
"""Test NOAA API fallback (without enabling tidepredict)"""
|
||||
print("\nTesting NOAA API (default mode)...")
|
||||
try:
|
||||
from modules import locationdata
|
||||
from modules import settings as my_settings
|
||||
|
||||
# Test with Seattle coordinates (should use NOAA)
|
||||
lat = 47.6062
|
||||
lon = -122.3321
|
||||
|
||||
print(f" Testing with Seattle coordinates: {lat}, {lon}")
|
||||
print(f" useTidePredict = {my_settings.useTidePredict}")
|
||||
|
||||
# Note: This will fail if we can't reach NOAA, but that's expected
|
||||
result = locationdata.get_NOAAtide(str(lat), str(lon))
|
||||
if result and "Error" not in result:
|
||||
print(f"✓ NOAA API returned data")
|
||||
print(f" First 100 chars: {result[:100]}")
|
||||
return True
|
||||
else:
|
||||
print(f"⚠ NOAA API returned: {result[:100]}")
|
||||
return True # Still pass as network might not be available
|
||||
except Exception as e:
|
||||
print(f"⚠ NOAA test encountered expected issue: {e}")
|
||||
return True # Expected in test environment
|
||||
|
||||
def test_parse_coords():
|
||||
"""Test coordinate parsing function"""
|
||||
print("\nTesting coordinate parsing...")
|
||||
try:
|
||||
from modules.xtide import parse_station_coords
|
||||
|
||||
test_cases = [
|
||||
(("43-36S", "172-43E"), (-43.6, 172.71666666666667)),
|
||||
(("02-45N", "072-21E"), (2.75, 72.35)),
|
||||
(("02-45S", "072-21W"), (-2.75, -72.35)),
|
||||
]
|
||||
|
||||
all_passed = True
|
||||
for (lat_str, lon_str), (expected_lat, expected_lon) in test_cases:
|
||||
result_lat, result_lon = parse_station_coords(lat_str, lon_str)
|
||||
if abs(result_lat - expected_lat) < 0.01 and abs(result_lon - expected_lon) < 0.01:
|
||||
print(f" ✓ {lat_str}, {lon_str} -> {result_lat:.2f}, {result_lon:.2f}")
|
||||
else:
|
||||
print(f" ✗ {lat_str}, {lon_str} -> expected {expected_lat}, {expected_lon}, got {result_lat}, {result_lon}")
|
||||
all_passed = False
|
||||
|
||||
return all_passed
|
||||
except Exception as e:
|
||||
print(f"✗ Coordinate parsing test failed: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return False
|
||||
|
||||
def main():
|
||||
"""Run all tests"""
|
||||
print("=" * 60)
|
||||
print("xtide Module Test Suite")
|
||||
print("=" * 60)
|
||||
|
||||
results = []
|
||||
results.append(("Import xtide", test_xtide_import()))
|
||||
results.append(("Import locationdata", test_locationdata_import()))
|
||||
results.append(("Settings configuration", test_settings()))
|
||||
results.append(("Parse coordinates", test_parse_coords()))
|
||||
results.append(("NOAA fallback", test_noaa_fallback()))
|
||||
|
||||
print("\n" + "=" * 60)
|
||||
print("Test Results Summary")
|
||||
print("=" * 60)
|
||||
|
||||
passed = sum(1 for _, result in results if result)
|
||||
total = len(results)
|
||||
|
||||
for test_name, result in results:
|
||||
status = "✓ PASS" if result else "✗ FAIL"
|
||||
print(f"{status}: {test_name}")
|
||||
|
||||
print(f"\n{passed}/{total} tests passed")
|
||||
|
||||
return passed == total
|
||||
|
||||
if __name__ == "__main__":
|
||||
success = main()
|
||||
sys.exit(0 if success else 1)
|
||||
129
modules/xtide.md
129
modules/xtide.md
@@ -1,129 +0,0 @@
|
||||
# xtide Module - Global Tide Predictions
|
||||
|
||||
This module provides global tide prediction capabilities using the [tidepredict](https://github.com/windcrusader/tidepredict) library, which uses the University of Hawaii's Research Quality Dataset for worldwide tide station coverage.
|
||||
|
||||
## Features
|
||||
|
||||
- Global tide predictions (not limited to US locations like NOAA)
|
||||
- Offline predictions once station data is initialized
|
||||
- Automatic selection of nearest tide station
|
||||
- Compatible with existing tide command interface
|
||||
|
||||
## Installation
|
||||
|
||||
1. Install tidepredict library:
|
||||
this takes about 3-500MB of disk
|
||||
|
||||
```bash
|
||||
pip install tidepredict
|
||||
```
|
||||
note: if you see warning about system packages the override for debian OS to install it anyway is..
|
||||
|
||||
```bash
|
||||
pip install tidepredict --break-system-packages
|
||||
```
|
||||
|
||||
2. Enable in `config.ini`:
|
||||
```ini
|
||||
[location]
|
||||
useTidePredict = True
|
||||
```
|
||||
|
||||
## First-Time Setup
|
||||
|
||||
On first use, tidepredict needs to download station data from the University of Hawaii FTP server. This requires internet access and happens automatically when you:
|
||||
|
||||
1. Run the tide command for the first time with `useTidePredict = True`
|
||||
2. Or manually initialize with:
|
||||
```bash
|
||||
python3 -m tidepredict -l <location> -genharm
|
||||
```
|
||||
|
||||
The station data is cached locally in `~/.tidepredict/` for offline use afterward.
|
||||
|
||||
No other downloads will happen automatically, its offline
|
||||
|
||||
## Usage
|
||||
|
||||
Once enabled, the existing `tide` command will automatically use tidepredict for global locations:
|
||||
|
||||
```
|
||||
tide
|
||||
```
|
||||
|
||||
The module will:
|
||||
1. Find the nearest tide station to your GPS coordinates
|
||||
2. Load harmonic constituents for that station
|
||||
3. Calculate tide predictions for today
|
||||
4. Format output compatible with mesh display
|
||||
|
||||
## Configuration
|
||||
|
||||
### config.ini Options
|
||||
|
||||
```ini
|
||||
[location]
|
||||
# Enable global tide predictions using tidepredict
|
||||
useTidePredict = True
|
||||
|
||||
# Standard location settings still apply
|
||||
lat = 48.50
|
||||
lon = -123.0
|
||||
useMetric = False
|
||||
```
|
||||
|
||||
## Fallback Behavior
|
||||
|
||||
If tidepredict is not available or encounters errors, the module will automatically fall back to the NOAA API for US locations.
|
||||
|
||||
## Limitations
|
||||
|
||||
- First-time setup requires internet access to download station database
|
||||
- Station coverage depends on University of Hawaii's dataset
|
||||
- Predictions may be less accurate for locations far from tide stations
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Station database not initialized" error
|
||||
|
||||
This means the station data hasn't been downloaded yet. Ensure internet access and:
|
||||
|
||||
```bash
|
||||
# Test station download
|
||||
python3 -m tidepredict -l Sydney
|
||||
|
||||
# Or manually run initialization
|
||||
python3 -c "from tidepredict import process_station_list; process_station_list.create_station_dataframe()"
|
||||
```
|
||||
|
||||
### "No tide station found nearby"
|
||||
|
||||
The module couldn't find a nearby station. This may happen if:
|
||||
- You're in a location without nearby tide monitoring stations
|
||||
- The station database hasn't been initialized
|
||||
- Network issues prevented loading the station list
|
||||
|
||||
Tide Station Map
|
||||
[https://uhslc.soest.hawaii.edu/network/](https://uhslc.soest.hawaii.edu/network/)
|
||||
- click on Tide Guages
|
||||
- Find yourself on the map
|
||||
- Locate the closest Gauge and its name (typically the city name)
|
||||
|
||||
To manually download data for the station first location the needed station id
|
||||
- `python -m tidepredict -l "Port Angeles"` finds a station
|
||||
- `python -m tidepredict -l "Port Angeles" -genharm` downloads that datafile
|
||||
|
||||
|
||||
|
||||
## Data Source
|
||||
|
||||
Tide predictions are based on harmonic analysis of historical tide data from:
|
||||
- University of Hawaii Sea Level Center (UHSLC)
|
||||
- Research Quality Dataset
|
||||
- Global coverage with 600+ stations
|
||||
|
||||
## References
|
||||
|
||||
- [tidepredict GitHub](https://github.com/windcrusader/tidepredict)
|
||||
- [UHSLC Data](https://uhslc.soest.hawaii.edu/)
|
||||
- [pytides](https://github.com/sam-cox/pytides) - Underlying tide calculation library
|
||||
202
modules/xtide.py
202
modules/xtide.py
@@ -1,202 +0,0 @@
|
||||
# xtide.py - Global tide prediction using tidepredict library
|
||||
# K7MHI Kelly Keeton 2025
|
||||
|
||||
import json
|
||||
from datetime import datetime, timedelta
|
||||
from modules.log import logger
|
||||
import modules.settings as my_settings
|
||||
|
||||
try:
|
||||
from tidepredict import processdata, process_station_list, constants, timefunc
|
||||
from tidepredict.tide import Tide
|
||||
import pandas as pd
|
||||
TIDEPREDICT_AVAILABLE = True
|
||||
except ImportError:
|
||||
TIDEPREDICT_AVAILABLE = False
|
||||
logger.error("xtide: tidepredict module not installed. Install with: pip install tidepredict")
|
||||
|
||||
def get_nearest_station(lat, lon):
|
||||
"""
|
||||
Find the nearest tide station to the given lat/lon coordinates.
|
||||
Returns station code (e.g., 'h001a') or None if not found.
|
||||
"""
|
||||
if not TIDEPREDICT_AVAILABLE:
|
||||
return None
|
||||
|
||||
try:
|
||||
# Read the station list
|
||||
try:
|
||||
stations = pd.read_csv(constants.STATIONFILE)
|
||||
except FileNotFoundError:
|
||||
# If station file doesn't exist, create it (requires network)
|
||||
logger.info("xtide: Creating station database from online source (requires network)")
|
||||
try:
|
||||
stations = process_station_list.create_station_dataframe()
|
||||
except Exception as net_error:
|
||||
logger.error(f"xtide: Failed to download station database: {net_error}")
|
||||
return None
|
||||
|
||||
if stations.empty:
|
||||
logger.error("xtide: No stations found in database")
|
||||
return None
|
||||
|
||||
# Calculate distance to each station
|
||||
# Using simple haversine-like calculation
|
||||
def calc_distance(row):
|
||||
try:
|
||||
# Parse lat/lon from the format like "43-36S", "172-43E"
|
||||
station_lat, station_lon = parse_station_coords(row['Lat'], row['Lon'])
|
||||
|
||||
# Simple distance calculation (not precise but good enough)
|
||||
dlat = lat - station_lat
|
||||
dlon = lon - station_lon
|
||||
return (dlat**2 + dlon**2)**0.5
|
||||
except:
|
||||
return float('inf')
|
||||
|
||||
stations['distance'] = stations.apply(calc_distance, axis=1)
|
||||
|
||||
# Find the nearest station
|
||||
nearest = stations.loc[stations['distance'].idxmin()]
|
||||
|
||||
if nearest['distance'] > 10: # More than ~10 degrees away, might be too far
|
||||
logger.warning(f"xtide: Nearest station is {nearest['distance']:.1f}° away at {nearest['loc_name']}")
|
||||
|
||||
station_code = "h" + nearest['stat_idx'].lower()
|
||||
logger.debug(f"xtide: Found nearest station: {nearest['loc_name']} ({station_code}) at {nearest['distance']:.2f}° away")
|
||||
|
||||
return station_code, nearest['loc_name'], nearest['country']
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"xtide: Error finding nearest station: {e}")
|
||||
return None
|
||||
|
||||
def parse_station_coords(lat_str, lon_str):
|
||||
"""
|
||||
Parse station coordinates from format like "43-36S", "172-43E"
|
||||
Returns tuple of (latitude, longitude) as floats
|
||||
"""
|
||||
try:
|
||||
# Parse latitude
|
||||
lat_parts = lat_str.split('-')
|
||||
lat_deg = float(lat_parts[0])
|
||||
lat_min = float(lat_parts[1][:-1]) # Remove N/S
|
||||
lat_dir = lat_parts[1][-1] # Get N/S
|
||||
lat_val = lat_deg + lat_min/60.0
|
||||
if lat_dir == 'S':
|
||||
lat_val = -lat_val
|
||||
|
||||
# Parse longitude
|
||||
lon_parts = lon_str.split('-')
|
||||
lon_deg = float(lon_parts[0])
|
||||
lon_min = float(lon_parts[1][:-1]) # Remove E/W
|
||||
lon_dir = lon_parts[1][-1] # Get E/W
|
||||
lon_val = lon_deg + lon_min/60.0
|
||||
if lon_dir == 'W':
|
||||
lon_val = -lon_val
|
||||
|
||||
return lat_val, lon_val
|
||||
except Exception as e:
|
||||
logger.debug(f"xtide: Error parsing coordinates {lat_str}, {lon_str}: {e}")
|
||||
return 0.0, 0.0
|
||||
|
||||
def get_tide_predictions(lat=0, lon=0, days=1):
|
||||
"""
|
||||
Get tide predictions for the given location using tidepredict library.
|
||||
Returns formatted string with tide predictions.
|
||||
|
||||
Parameters:
|
||||
- lat: Latitude
|
||||
- lon: Longitude
|
||||
- days: Number of days to predict (default: 1)
|
||||
|
||||
Returns:
|
||||
- Formatted string with tide predictions or error message
|
||||
"""
|
||||
if not TIDEPREDICT_AVAILABLE:
|
||||
return "module not installed, see logs for more ⚓️"
|
||||
|
||||
if float(lat) == 0 and float(lon) == 0:
|
||||
return "No GPS data for tide prediction"
|
||||
|
||||
try:
|
||||
# Find nearest station
|
||||
station_info = get_nearest_station(float(lat), float(lon))
|
||||
if not station_info:
|
||||
return "No tide station found nearby. Network may be required to download station data."
|
||||
|
||||
station_code, station_name, station_country = station_info
|
||||
|
||||
# Load station data
|
||||
station_dict, harmfileloc = process_station_list.read_station_info_file()
|
||||
|
||||
# Check if harmonic data exists for this station
|
||||
if station_code not in station_dict:
|
||||
logger.warning(f"xtide: No harmonic data. python -m tidepredict -l \"{station_name}\" -genharm")
|
||||
return f"Tide data not available for {station_name}. Station database may need initialization."
|
||||
|
||||
# Reconstruct tide model
|
||||
tide = processdata.reconstruct_tide_model(station_dict, station_code)
|
||||
if tide is None:
|
||||
return f"Tide model unavailable for {station_name}"
|
||||
|
||||
# Set up time range (today only)
|
||||
now = datetime.now()
|
||||
start_time = now.strftime("%Y-%m-%d 00:00")
|
||||
end_time = (now + timedelta(days=days)).strftime("%Y-%m-%d 00:00")
|
||||
|
||||
# Create time object
|
||||
timeobj = timefunc.Tidetime(
|
||||
st_time=start_time,
|
||||
en_time=end_time,
|
||||
station_tz=station_dict[station_code].get('tzone', 'UTC')
|
||||
)
|
||||
|
||||
# Get predictions
|
||||
predictions = processdata.predict_plain(tide, station_dict[station_code], 't', timeobj)
|
||||
|
||||
# Format output for mesh
|
||||
lines = predictions.strip().split('\n')
|
||||
if len(lines) > 2:
|
||||
# Skip the header lines and format for mesh display
|
||||
result = f"Tide: {station_name}\n"
|
||||
tide_lines = lines[2:] # Skip first 2 header lines
|
||||
|
||||
# Format each tide prediction
|
||||
for line in tide_lines[:8]: # Limit to 8 entries
|
||||
parts = line.split()
|
||||
if len(parts) >= 4:
|
||||
date_str = parts[0]
|
||||
time_str = parts[1]
|
||||
height = parts[3]
|
||||
tide_type = ' '.join(parts[4:])
|
||||
|
||||
# Convert to 12-hour format if not using zulu time
|
||||
if not my_settings.zuluTime:
|
||||
try:
|
||||
time_obj = datetime.strptime(time_str, "%H%M")
|
||||
hour = time_obj.hour
|
||||
minute = time_obj.minute
|
||||
if hour >= 12:
|
||||
time_str = f"{hour-12 if hour > 12 else 12}:{minute:02d} PM"
|
||||
else:
|
||||
time_str = f"{hour if hour > 0 else 12}:{minute:02d} AM"
|
||||
except:
|
||||
pass
|
||||
|
||||
result += f"{tide_type} {time_str}, {height}\n"
|
||||
|
||||
return result.strip()
|
||||
else:
|
||||
return predictions
|
||||
|
||||
except FileNotFoundError as e:
|
||||
logger.error(f"xtide: Station data file not found: {e}")
|
||||
return "Tide station database not initialized. Network access required for first-time setup."
|
||||
except Exception as e:
|
||||
logger.error(f"xtide: Error getting tide predictions: {e}")
|
||||
return f"Error getting tide data: {str(e)}"
|
||||
|
||||
def is_enabled():
|
||||
"""Check if xtide/tidepredict is enabled in config"""
|
||||
return getattr(my_settings, 'useTidePredict', False) and TIDEPREDICT_AVAILABLE
|
||||
115
pong_bot.py
115
pong_bot.py
@@ -65,7 +65,11 @@ def handle_cmd(message, message_from_id, deviceID):
|
||||
def handle_ping(message_from_id, deviceID, message, hop, snr, rssi, isDM, channel_number):
|
||||
global multiPing
|
||||
if "?" in message and isDM:
|
||||
return message.split("?")[0].title() + " command returns SNR and RSSI, or hopcount from your message. Try adding e.g. @place or #tag"
|
||||
pingHelp = "🤖Ping Command Help:\n" \
|
||||
"🏓 Send 'ping' or 'ack' or 'test' to get a response.\n" \
|
||||
"🏓 Send 'ping <number>' to get multiple pings in DM"
|
||||
"🏓 ping @USERID to send a Joke from the bot"
|
||||
return pingHelp
|
||||
|
||||
msg = ""
|
||||
type = ''
|
||||
@@ -100,12 +104,12 @@ def handle_ping(message_from_id, deviceID, message, hop, snr, rssi, isDM, chann
|
||||
#flood
|
||||
msg += " [F]"
|
||||
|
||||
if (float(snr) != 0 or float(rssi) != 0) and "Hops" not in hop:
|
||||
if (float(snr) != 0 or float(rssi) != 0) and "Hop" not in hop:
|
||||
msg += f"\nSNR:{snr} RSSI:{rssi}"
|
||||
elif "Hops" in hop:
|
||||
msg += f"\n{hop}🐇 "
|
||||
else:
|
||||
msg += "\nflood route"
|
||||
elif "Hop" in hop:
|
||||
# janky, remove the words Gateway or MQTT if present
|
||||
hop = hop.replace("Gateway", "").replace("Direct", "").replace("MQTT", "").strip()
|
||||
msg += f"\n{hop} "
|
||||
|
||||
if "@" in message:
|
||||
msg = msg + " @" + message.split("@")[1]
|
||||
@@ -275,24 +279,38 @@ def onReceive(packet, interface):
|
||||
# check if the packet has a channel flag use it ## FIXME needs to be channel hash lookup
|
||||
if packet.get('channel'):
|
||||
channel_number = packet.get('channel')
|
||||
# get channel name from channel number from connected devices
|
||||
for device in channel_list:
|
||||
if device["interface_id"] == rxNode:
|
||||
device_channels = device['channels']
|
||||
for chan_name, info in device_channels.items():
|
||||
if info['number'] == channel_number:
|
||||
channel_name = chan_name
|
||||
channel_name = "unknown"
|
||||
try:
|
||||
res = resolve_channel_name(channel_number, rxNode, interface)
|
||||
if res:
|
||||
try:
|
||||
channel_name, _ = res
|
||||
except Exception:
|
||||
channel_name = "unknown"
|
||||
else:
|
||||
# Search all interfaces for this channel
|
||||
cache = build_channel_cache()
|
||||
found_on_other = None
|
||||
for device in cache:
|
||||
for chan_name, info in device.get("channels", {}).items():
|
||||
if str(info.get('number')) == str(channel_number) or str(info.get('hash')) == str(channel_number):
|
||||
found_on_other = device.get("interface_id")
|
||||
found_chan_name = chan_name
|
||||
break
|
||||
if found_on_other:
|
||||
break
|
||||
if found_on_other and found_on_other != rxNode:
|
||||
logger.debug(
|
||||
f"System: Received Packet on Channel:{channel_number} ({found_chan_name}) on Interface:{rxNode}, but this channel is configured on Interface:{found_on_other}"
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug(f"System: channel resolution error: {e}")
|
||||
|
||||
# get channel hashes for the interface
|
||||
device = next((d for d in channel_list if d["interface_id"] == rxNode), None)
|
||||
if device:
|
||||
# Find the channel name whose hash matches channel_number
|
||||
for chan_name, info in device['channels'].items():
|
||||
if info['hash'] == channel_number:
|
||||
print(f"Matched channel hash {info['hash']} to channel name {chan_name}")
|
||||
channel_name = chan_name
|
||||
break
|
||||
#debug channel info
|
||||
# if "unknown" in str(channel_name):
|
||||
# logger.debug(f"System: Received Packet on Channel:{channel_number} on Interface:{rxNode}")
|
||||
# else:
|
||||
# logger.debug(f"System: Received Packet on Channel:{channel_number} Name:{channel_name} on Interface:{rxNode}")
|
||||
|
||||
# check if the packet has a simulator flag
|
||||
simulator_flag = packet.get('decoded', {}).get('simulator', False)
|
||||
@@ -303,10 +321,21 @@ def onReceive(packet, interface):
|
||||
# set the message_from_id
|
||||
message_from_id = packet['from']
|
||||
|
||||
# check if the packet has a channel flag use it
|
||||
if packet.get('channel'):
|
||||
channel_number = packet.get('channel', 0)
|
||||
# if message_from_id is not in the seenNodes list add it
|
||||
if not any(node.get('nodeID') == message_from_id for node in seenNodes):
|
||||
seenNodes.append({'nodeID': message_from_id, 'rxInterface': rxNode, 'channel': channel_number, 'welcome': False, 'first_seen': time.time(), 'lastSeen': time.time()})
|
||||
else:
|
||||
# update lastSeen time
|
||||
for node in seenNodes:
|
||||
if node.get('nodeID') == message_from_id:
|
||||
node['lastSeen'] = time.time()
|
||||
break
|
||||
|
||||
# CHECK with ban_hammer() if the node is banned
|
||||
if str(message_from_id) in my_settings.bbs_ban_list or str(message_from_id) in my_settings.autoBanlist:
|
||||
logger.warning(f"System: Banned Node {message_from_id} tried to send a message. Ignored. Try adding to node firmware-blocklist")
|
||||
return
|
||||
|
||||
# handle TEXT_MESSAGE_APP
|
||||
try:
|
||||
if 'decoded' in packet and packet['decoded']['portnum'] == 'TEXT_MESSAGE_APP':
|
||||
@@ -355,31 +384,38 @@ def onReceive(packet, interface):
|
||||
else:
|
||||
hop_count = hop_away
|
||||
|
||||
if hop == "" and hop_count > 0:
|
||||
if hop_count > 0:
|
||||
# set hop string from calculated hop count
|
||||
hop = f"{hop_count} Hop" if hop_count == 1 else f"{hop_count} Hops"
|
||||
|
||||
if hop_start == hop_limit and "lora" in str(transport_mechanism).lower() and (snr != 0 or rssi != 0):
|
||||
if hop_start == hop_limit and "lora" in str(transport_mechanism).lower() and (snr != 0 or rssi != 0) and hop_count == 0:
|
||||
# 2.7+ firmware direct hop over LoRa
|
||||
hop = "Direct"
|
||||
|
||||
if ((hop_start == 0 and hop_limit >= 0) or via_mqtt or ("mqtt" in str(transport_mechanism).lower())):
|
||||
if via_mqtt or "mqtt" in str(transport_mechanism).lower():
|
||||
hop = "MQTT"
|
||||
elif hop == "" and hop_count == 0 and (snr != 0 or rssi != 0):
|
||||
# this came from a UDP but we had signal info so gateway is used
|
||||
hop = "Gateway"
|
||||
elif "unknown" in str(transport_mechanism).lower() and (snr == 0 and rssi == 0):
|
||||
# we for sure detected this sourced from a UDP like host
|
||||
via_mqtt = True
|
||||
elif "udp" in str(transport_mechanism).lower():
|
||||
hop = "Gateway"
|
||||
|
||||
if hop in ("MQTT", "Gateway") and hop_count > 0:
|
||||
hop = f"{hop_count} Hops"
|
||||
hop = f" {hop_count} Hops"
|
||||
|
||||
# Add relay node info if present
|
||||
if packet.get('relayNode') is not None:
|
||||
relay_val = packet['relayNode']
|
||||
last_byte = relay_val & 0xFF
|
||||
if last_byte == 0x00:
|
||||
hex_val = 'FF'
|
||||
else:
|
||||
hex_val = f"{last_byte:02X}"
|
||||
hop += f" (Relay:{hex_val})"
|
||||
|
||||
if my_settings.enableHopLogs:
|
||||
logger.debug(f"System: Packet HopDebugger: hop_away:{hop_away} hop_limit:{hop_limit} hop_start:{hop_start} calculated_hop_count:{hop_count} final_hop_value:{hop} via_mqtt:{via_mqtt} transport_mechanism:{transport_mechanism} Hostname:{rxNodeHostName}")
|
||||
|
||||
# check with stringSafeChecker if the message is safe
|
||||
if stringSafeCheck(message_string) is False:
|
||||
if stringSafeCheck(message_string, message_from_id) is False:
|
||||
logger.warning(f"System: Possibly Unsafe Message from {get_name_from_number(message_from_id, 'long', rxNode)}")
|
||||
|
||||
if help_message in message_string or welcome_message in message_string or "CMD?:" in message_string:
|
||||
@@ -574,6 +610,10 @@ def handle_boot(mesh=True):
|
||||
if my_settings.useDMForResponse:
|
||||
logger.debug("System: Respond by DM only")
|
||||
|
||||
if my_settings.autoBanEnabled:
|
||||
logger.debug(f"System: Auto-Ban Enabled for {my_settings.autoBanThreshold} messages in {my_settings.autoBanTimeframe} seconds")
|
||||
load_bbsBanList()
|
||||
|
||||
if my_settings.log_messages_to_file:
|
||||
logger.debug("System: Logging Messages to disk")
|
||||
if my_settings.syslog_to_file:
|
||||
@@ -631,8 +671,11 @@ async def main():
|
||||
# Create core tasks
|
||||
tasks.append(asyncio.create_task(start_rx(), name="mesh_rx"))
|
||||
tasks.append(asyncio.create_task(watchdog(), name="watchdog"))
|
||||
|
||||
|
||||
# Add optional tasks
|
||||
if my_settings.dataPersistence_enabled:
|
||||
tasks.append(asyncio.create_task(dataPersistenceLoop(), name="data_persistence"))
|
||||
|
||||
if my_settings.file_monitor_enabled:
|
||||
tasks.append(asyncio.create_task(handleFileWatcher(), name="file_monitor"))
|
||||
|
||||
|
||||
@@ -1,22 +1,4 @@
|
||||
## script/runShell.sh
|
||||
|
||||
**Purpose:**
|
||||
`runShell.sh` is a simple demo shell script for the Mesh Bot project. It demonstrates how to execute shell commands within the project’s scripting environment.
|
||||
|
||||
**Usage:**
|
||||
Run this script from the terminal to see a basic example of shell scripting in the project context.
|
||||
|
||||
```sh
|
||||
bash script/runShell.sh
|
||||
```
|
||||
|
||||
**What it does:**
|
||||
- Changes the working directory to the script’s location.
|
||||
- Prints the current directory path and a message indicating the script is running.
|
||||
- Serves as a template for creating additional shell scripts or automating tasks related to the project.
|
||||
|
||||
**Note:**
|
||||
You can modify this script to add more shell commands or automation steps as needed for your workflow.
|
||||
|
||||
## script/runShell.sh
|
||||
|
||||
@@ -57,4 +39,64 @@ bash script/sysEnv.sh
|
||||
- Designed to work on Linux systems, with special handling for Raspberry Pi hardware.
|
||||
|
||||
**Note:**
|
||||
You can expand or modify this script to include additional telemetry or environment checks as needed for your deployment.
|
||||
You can expand or modify this script to include additional telemetry or environment checks as needed for your deployment.
|
||||
|
||||
## script/configMerge.py
|
||||
|
||||
**Purpose:**
|
||||
`configMerge.py` is a Python script that merges your user configuration (`config.ini`) with the default template (`config.template`). This helps you keep your settings up to date when the default configuration changes, while preserving your customizations.
|
||||
|
||||
**Usage:**
|
||||
Run this script from the project root or the `script/` directory:
|
||||
|
||||
```sh
|
||||
python3 script/configMerge.py
|
||||
```
|
||||
|
||||
**What it does:**
|
||||
- Backs up your current `config.ini` to `config.bak`.
|
||||
- Merges new or updated settings from `config.template` into your `config.ini`.
|
||||
- Saves the merged result as `config_new.ini`.
|
||||
- Shows a summary of changes between your config and the merged version.
|
||||
|
||||
**Note:**
|
||||
After reviewing the changes, you can replace your `config.ini` with the merged version:
|
||||
|
||||
```sh
|
||||
cp config_new.ini config.ini
|
||||
```
|
||||
|
||||
This script is useful for safely updating your configuration when new options are added upstream.
|
||||
|
||||
## script/addFav.py
|
||||
|
||||
**Purpose:**
|
||||
`addFav.py` is a Python script to help manage and add favorite nodes to all interfaces using data from `config.ini`. It supports both bot and roof (client_base) node workflows, making it easier to retain DM keys and manage node lists across devices.
|
||||
|
||||
**Usage:**
|
||||
Run this script from the main repo directory:
|
||||
|
||||
```sh
|
||||
python3 script/addFav.py
|
||||
```
|
||||
|
||||
- To print the contents of `roofNodeList.pkl` and exit, use:
|
||||
```sh
|
||||
# note it is not production ready
|
||||
python3 script/addFav.py -p
|
||||
```
|
||||
|
||||
**What it does:**
|
||||
- Interactively asks if you are running on a roof (client_base) node or a bot.
|
||||
- On the bot:
|
||||
- Compiles a list of favorite nodes and saves it to `roofNodeList.pkl` for later use on the roof node.
|
||||
- On the roof node:
|
||||
- Loads the node list from `roofNodeList.pkl`.
|
||||
- Shows which favorite nodes will be added and asks for confirmation.
|
||||
- Adds favorite nodes to the appropriate devices, handling API rate limits.
|
||||
- Logs actions and errors for troubleshooting.
|
||||
|
||||
**Note:**
|
||||
- Always run this script from the main repo directory to ensure module imports work.
|
||||
- After running on the bot, copy `roofNodeList.pkl` to the roof node and rerun the script there to complete the process.
|
||||
|
||||
|
||||
@@ -9,9 +9,13 @@ This is not a full turnkey setup for Docker yet?
|
||||
|
||||
`docker compose run meshing-around`
|
||||
|
||||
`docker compose run debug-console`
|
||||
|
||||
`docker compose run ollama`
|
||||
|
||||
`docker run -d -p 3000:8080 -e OLLAMA_BASE_URL=http://127.0.0.1:11434 -v open-webui:/app/backend/data --name open-webui --restart always ghcr.io/open-webui/open-webui:main`
|
||||
|
||||
|
||||
|
||||
`docker compose run debug-console`
|
||||
### Other Stuff
|
||||
A cool tool to use with RAG creation with open-webui
|
||||
- https://github.com/microsoft/markitdown
|
||||
|
||||
15
script/game.ini
Normal file
15
script/game.ini
Normal file
@@ -0,0 +1,15 @@
|
||||
[network]
|
||||
MCAST_GRP = 224.0.0.69
|
||||
MCAST_PORT = 4403
|
||||
CHANNEL_ID = LongFast
|
||||
KEY = 1PG7OiApB1nwvP+rz05pAQ==
|
||||
PUBLIC_CHANNEL_IDS = LongFast,ShortSlow,Medium,LongSlow,ShortFast,ShortTurbo
|
||||
|
||||
[node]
|
||||
NODE_ID = !meshbotg
|
||||
LONG_NAME = Mesh Bot Game Server
|
||||
SHORT_NAME = MBGS
|
||||
|
||||
[game]
|
||||
SEEN_MESSAGES_MAX = 1000
|
||||
FULLSCREEN = True
|
||||
207
script/game_serve.py
Normal file
207
script/game_serve.py
Normal file
@@ -0,0 +1,207 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
# UDP Interface game server for Meshtastic Meshing-Around Mesh Bot
|
||||
# depends on: pip install meshtastic protobuf mudp
|
||||
# 2025 Kelly Keeton K7MHI
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from collections import OrderedDict
|
||||
import configparser
|
||||
|
||||
useSynchCompression = True
|
||||
|
||||
if useSynchCompression:
|
||||
import zlib
|
||||
|
||||
try:
|
||||
from pubsub import pub
|
||||
from meshtastic.protobuf import mesh_pb2, portnums_pb2
|
||||
except ImportError:
|
||||
print("meshtastic API not found. pip install -U meshtastic")
|
||||
exit(1)
|
||||
|
||||
try:
|
||||
from mudp import UDPPacketStream, node, conn
|
||||
from mudp.encryption import generate_hash
|
||||
except ImportError:
|
||||
print("mUDP module not found. pip install -U mudp")
|
||||
exit(1)
|
||||
try:
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
|
||||
from modules.games.tictactoe_vid import handle_tictactoe_payload, ttt_main
|
||||
from modules.games.battleship_vid import parse_battleship_message
|
||||
except Exception as e:
|
||||
print(f"Error importing modules: {e}\nRun this program from the main project directory, e.g. 'python3 script/game_serve.py'")
|
||||
exit(1)
|
||||
|
||||
# import logging
|
||||
|
||||
# logger = logging.getLogger("MeshBot Game Server")
|
||||
# logger.setLevel(logging.DEBUG)
|
||||
# logger.propagate = False
|
||||
|
||||
# # Remove any existing handlers
|
||||
# if logger.hasHandlers():
|
||||
# logger.handlers.clear()
|
||||
|
||||
# handler = logging.StreamHandler(sys.stdout)
|
||||
# logger.addHandler(handler)
|
||||
# logger.debug("Mesh Bot Game Server Logger initialized")
|
||||
|
||||
# Load config from game.ini if it exists
|
||||
config = configparser.ConfigParser()
|
||||
config_path = os.path.join(os.path.dirname(__file__), "game.ini")
|
||||
if os.path.exists(config_path):
|
||||
config.read(config_path)
|
||||
MCAST_GRP = config.get("network", "MCAST_GRP", fallback="224.0.0.69")
|
||||
MCAST_PORT = config.getint("network", "MCAST_PORT", fallback=4403)
|
||||
CHANNEL_ID = config.get("network", "CHANNEL_ID", fallback="LongFast")
|
||||
KEY = config.get("network", "KEY", fallback="1PG7OiApB1nwvP+rz05pAQ==")
|
||||
PUBLIC_CHANNEL_IDS = [x.strip() for x in config.get("network", "PUBLIC_CHANNEL_IDS", fallback="LongFast,ShortSlow,Medium,LongSlow,ShortFast,ShortTurbo").split(",")]
|
||||
NODE_ID = config.get("node", "NODE_ID", fallback="!meshbotg")
|
||||
LONG_NAME = config.get("node", "LONG_NAME", fallback="Mesh Bot Game Server")
|
||||
SHORT_NAME = config.get("node", "SHORT_NAME", fallback="MBGS")
|
||||
SEEN_MESSAGES_MAX = config.getint("game", "SEEN_MESSAGES_MAX", fallback=1000)
|
||||
FULLSCREEN = config.getboolean("game", "FULLSCREEN", fallback=True)
|
||||
else:
|
||||
MCAST_GRP, MCAST_PORT, CHANNEL_ID, KEY = "224.0.0.69", 4403, "LongFast", "1PG7OiApB1nwvP+rz05pAQ=="
|
||||
PUBLIC_CHANNEL_IDS = ["LongFast", "ShortSlow", "Medium", "LongSlow", "ShortFast", "ShortTurbo"]
|
||||
NODE_ID, LONG_NAME, SHORT_NAME = "!meshbotg", "Mesh Bot Game Server", "MBGS"
|
||||
SEEN_MESSAGES_MAX = 1000 # Adjust as needed
|
||||
FULLSCREEN = True
|
||||
|
||||
CHANNEL_HASHES = {generate_hash(name, KEY): name for name in PUBLIC_CHANNEL_IDS}
|
||||
mudpEnabled, mudpInterface = True, None
|
||||
seen_messages = OrderedDict() # Track seen (from, to, payload) tuples
|
||||
is_running = False
|
||||
|
||||
def initalize_mudp():
|
||||
global mudpInterface
|
||||
if mudpEnabled and mudpInterface is None:
|
||||
mudpInterface = UDPPacketStream(MCAST_GRP, MCAST_PORT, key=KEY)
|
||||
node.node_id, node.long_name, node.short_name = NODE_ID, LONG_NAME, SHORT_NAME
|
||||
node.channel, node.key = CHANNEL_ID, KEY
|
||||
conn.setup_multicast(MCAST_GRP, MCAST_PORT)
|
||||
print(f"mUDP Interface initialized on {MCAST_GRP}:{MCAST_PORT} with Channel ID '{CHANNEL_ID}'")
|
||||
print(f"Node ID: {NODE_ID}, Long Name: {LONG_NAME}, Short Name: {SHORT_NAME}")
|
||||
print("Public Channel IDs:", PUBLIC_CHANNEL_IDS)
|
||||
|
||||
def get_channel_name(channel_hash):
|
||||
return CHANNEL_HASHES.get(channel_hash, '')
|
||||
|
||||
def add_seen_message(msg_tuple):
|
||||
if msg_tuple not in seen_messages:
|
||||
if len(seen_messages) >= SEEN_MESSAGES_MAX:
|
||||
seen_messages.popitem(last=False) # Remove oldest
|
||||
seen_messages[msg_tuple] = None
|
||||
|
||||
def compress_payload(data: str) -> bytes:
|
||||
"""Compress a string to bytes using zlib if enabled."""
|
||||
if useSynchCompression:
|
||||
return zlib.compress(data.encode("utf-8"))
|
||||
else:
|
||||
return data.encode("utf-8")
|
||||
|
||||
def decompress_payload(data: bytes) -> str:
|
||||
"""Decompress bytes to string using zlib if enabled, fallback to utf-8 if not compressed."""
|
||||
if useSynchCompression:
|
||||
try:
|
||||
return zlib.decompress(data).decode("utf-8")
|
||||
except Exception:
|
||||
return data.decode("utf-8", "ignore")
|
||||
else:
|
||||
return data.decode("utf-8", "ignore")
|
||||
|
||||
def on_private_app(packet: mesh_pb2.MeshPacket, addr=None):
    """Handle PRIVATE_APP (port 256) packets, routing game payloads by prefix.

    Payloads may be zlib-compressed (see decompress_payload). A "MTTT:" prefix
    marks Tic-Tac-Toe traffic, "MBSP:" marks Battleship traffic; anything else
    is logged as a generic private-app payload. Duplicates are suppressed via
    the (from, to, payload) tuple recorded in seen_messages.

    Args:
        packet: decoded MeshPacket from the pubsub bus.
        addr: sender address supplied by the bus (unused here).
    """
    global seen_messages
    packet_payload = ""
    packet_from_id = None
    if packet.HasField("decoded"):
        try:
            # Try to decompress, fallback to decode if not compressed
            packet_payload = decompress_payload(packet.decoded.payload)
            # 'from' is a Python keyword, so protobuf field access needs getattr.
            packet_from_id = getattr(packet, 'from', None)
            port_name = portnums_pb2.PortNum.Name(packet.decoded.portnum) if packet.decoded.portnum else "N/A"
            rx_channel = get_channel_name(packet.channel)
            if packet_payload.startswith("MTTT:"):
                packet_payload = packet_payload[5:]  # remove 'MTTT:'
                msg_tuple = (packet_from_id, packet.to, packet_payload)
                if msg_tuple not in seen_messages:
                    add_seen_message(msg_tuple)
                    handle_tictactoe_payload(packet_payload, from_id=packet_from_id)
                    print(f"[Channel: {rx_channel}] [Port: {port_name}] Tic-Tac-Toe Message payload:", packet_payload)
            elif packet_payload.startswith("MBSP:"):
                packet_payload = packet_payload[5:]  # remove 'MBSP:'
                msg_tuple = (packet_from_id, packet.to, packet_payload)
                if msg_tuple not in seen_messages:
                    add_seen_message(msg_tuple)
                    #parse_battleship_message(packet_payload, from_id=packet_from_id)
                    print(f"[Channel: {rx_channel}] [Port: {port_name}] Battleship Message payload:", packet_payload)
            else:
                msg_tuple = (packet_from_id, packet.to, packet_payload)
                if msg_tuple not in seen_messages:
                    add_seen_message(msg_tuple)
                    print(f"[Channel: {rx_channel}] [Port: {port_name}] Private App payload:", packet_payload)
        except Exception as e:
            # Surface the actual error instead of silently swallowing it.
            print(" Private App extraction error payload (raw bytes):", packet.decoded.payload)
            print(" Private App extraction error:", e)
|
||||
|
||||
def on_text_message(packet: mesh_pb2.MeshPacket, addr=None):
    """Handle TEXT_MESSAGE (port 1) packets: decode the payload and record it
    in seen_messages for de-duplication. Display of the text is currently
    disabled (the print below is commented out).

    Args:
        packet: decoded MeshPacket from the pubsub bus.
        addr: sender address supplied by the bus (unused here).
    """
    global seen_messages
    try:
        packet_payload = ""
        if packet.HasField("decoded"):
            rx_channel = get_channel_name(packet.channel)
            port_name = portnums_pb2.PortNum.Name(packet.decoded.portnum) if packet.decoded.portnum else "N/A"
            try:
                # Try to decompress, fallback to decode if not compressed
                packet_payload = decompress_payload(packet.decoded.payload)
                # 'from' is a Python keyword, so protobuf field access needs getattr.
                msg_tuple = (getattr(packet, 'from', None), packet.to, packet_payload)
                if msg_tuple not in seen_messages:
                    add_seen_message(msg_tuple)
                    #print(f"[Channel: {rx_channel}] [Port: {port_name}] TEXT Message payload:", packet_payload)
            except Exception as e:
                # Include the exception so decode failures are diagnosable.
                print(" extraction error payload (raw bytes):", packet.decoded.payload)
                print(" extraction error:", e)
    except Exception as e:
        print("Error processing received packet:", e)
|
||||
|
||||
# def on_recieve(packet: mesh_pb2.MeshPacket, addr=None):
#     print(f"\n[RECV] Packet received from {addr}")
#     print(packet)
#pub.subscribe(on_recieve, "mesh.rx.packet")

# Register packet handlers on the pubsub bus, one topic per port number.
for _handler, _topic in (
    (on_text_message, "mesh.rx.port.1"),   # TEXT_MESSAGE
    (on_private_app, "mesh.rx.port.256"),  # PRIVATE_APP DEFAULT_PORTNUM
):
    pub.subscribe(_handler, _topic)
|
||||
|
||||
def main():
    """Entry point: start the mUDP interface, run the Tic-Tac-Toe display UI,
    then shut everything down cleanly (including on Ctrl-C).

    Reads/writes globals: mudpInterface (set by initalize_mudp), is_running.
    """
    global mudpInterface, is_running
    # NOTE(review): banner whitespace reconstructed from a mangled source — verify art.
    print(r"""
    ___
   /   \
  | HOT |   Mesh Bot Display Server v0.9.5b
  | TOT |   (aka tot-bot)
   \___/

""")
    print("Press escape (ESC) key to exit")
    initalize_mudp()  # initialize MUDP interface (sets global mudpInterface)
    mudpInterface.start()
    is_running = True
    try:
        # ttt_main blocks until the UI exits; the flag then ends the loop,
        # so this body effectively runs once per server lifetime.
        while is_running:
            ttt_main(fullscreen=FULLSCREEN)
            is_running = False
            time.sleep(0.1)
    except KeyboardInterrupt:
        print("\n[INFO] KeyboardInterrupt received. Shutting down Mesh Bot Game Server...")
        is_running = False
    except Exception as e:
        print(f"[ERROR] Exception during main loop: {e}")
    finally:
        # Only announce/perform interface shutdown when one actually exists;
        # previously "Stopping..." was printed even with no interface to stop.
        if mudpInterface:
            print("[INFO] Stopping mUDP interface...")
            mudpInterface.stop()
            print("[INFO] mUDP interface stopped.")
        print("[INFO] Mesh Bot Game Server shutdown complete.")
|
||||
|
||||
# Run the display server only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
||||
121
update.sh
Normal file → Executable file
121
update.sh
Normal file → Executable file
@@ -2,37 +2,38 @@
|
||||
# MeshBot Update Script
|
||||
# Usage: bash update.sh or ./update.sh after making it executable with chmod +x update.sh
|
||||
|
||||
# Check if the mesh_bot.service or pong_bot.service
|
||||
service_stopped=false
|
||||
if systemctl is-active --quiet mesh_bot.service; then
|
||||
echo "Stopping mesh_bot.service..."
|
||||
systemctl stop mesh_bot.service
|
||||
service_stopped=true
|
||||
fi
|
||||
if systemctl is-active --quiet pong_bot.service; then
|
||||
echo "Stopping pong_bot.service..."
|
||||
systemctl stop pong_bot.service
|
||||
service_stopped=true
|
||||
fi
|
||||
if systemctl is-active --quiet mesh_bot_reporting.service; then
|
||||
echo "Stopping mesh_bot_reporting.service..."
|
||||
systemctl stop mesh_bot_reporting.service
|
||||
service_stopped=true
|
||||
fi
|
||||
if systemctl is-active --quiet mesh_bot_w3.service; then
|
||||
echo "Stopping mesh_bot_w3.service..."
|
||||
systemctl stop mesh_bot_w3.service
|
||||
service_stopped=true
|
||||
fi
|
||||
echo "=============================================="
|
||||
echo " MeshBot Automated Update & Backup Tool "
|
||||
echo "=============================================="
|
||||
echo
|
||||
|
||||
# Fetch latest changes from GitHub
|
||||
# --- Service Management ---
|
||||
service_stopped=false
|
||||
for svc in mesh_bot.service pong_bot.service mesh_bot_reporting.service mesh_bot_w3.service; do
|
||||
if systemctl is-active --quiet "$svc"; then
|
||||
echo ">> Stopping $svc ..."
|
||||
systemctl stop "$svc"
|
||||
service_stopped=true
|
||||
fi
|
||||
done
|
||||
|
||||
# --- Git Operations ---
|
||||
echo
|
||||
echo "----------------------------------------------"
|
||||
echo "Fetching latest changes from GitHub..."
|
||||
echo "----------------------------------------------"
|
||||
if ! git fetch origin; then
|
||||
echo "Error: Failed to fetch from GitHub, check your network connection."
|
||||
echo "ERROR: Failed to fetch from GitHub. Check your network connection. Script expects to be run inside a git repository."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# git pull with rebase to avoid unnecessary merge commits
|
||||
if [[ $(git symbolic-ref --short -q HEAD) == "" ]]; then
|
||||
echo "WARNING: You are in a detached HEAD state."
|
||||
echo "You may not be on a branch. To return to the main branch, run:"
|
||||
echo " git checkout main"
|
||||
echo "Proceed with caution; changes may not be saved to a branch."
|
||||
fi
|
||||
|
||||
echo "Pulling latest changes from GitHub..."
|
||||
if ! git pull origin main --rebase; then
|
||||
read -p "Git pull resulted in conflicts. Do you want to reset hard to origin/main? This will discard local changes. (y/n): " choice
|
||||
@@ -45,51 +46,59 @@ if ! git pull origin main --rebase; then
|
||||
fi
|
||||
fi
|
||||
|
||||
# copy modules/custom_scheduler.py template if it does not exist
|
||||
|
||||
if [[ ! -f modules/custom_scheduler.py ]]; then
|
||||
# --- Scheduler Template ---
|
||||
echo
|
||||
echo "----------------------------------------------"
|
||||
echo "Checking custom scheduler template..."
|
||||
echo "----------------------------------------------"
|
||||
cp -n etc/custom_scheduler.py modules/
|
||||
printf "\nCustom scheduler template copied to modules/custom_scheduler.py\n"
|
||||
printf "Custom scheduler template copied to modules/custom_scheduler.py\n"
|
||||
elif ! cmp -s modules/custom_scheduler.template etc/custom_scheduler.py; then
|
||||
echo "custom_scheduler.py is set. To check changes run: diff etc/custom_scheduler.py modules/custom_scheduler.py"
|
||||
fi
|
||||
|
||||
# Backup the data/ directory
|
||||
# --- Data Templates ---
|
||||
if [[ -d data ]]; then
|
||||
mkdir -p data
|
||||
for f in etc/data/*; do
|
||||
base=$(basename "$f")
|
||||
if [[ ! -e "data/$base" ]]; then
|
||||
if [[ -d "$f" ]]; then
|
||||
cp -r "$f" "data/"
|
||||
echo "Copied new data/directory $base"
|
||||
else
|
||||
cp "$f" "data/"
|
||||
echo "Copied new data/$base"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# --- Backup ---
|
||||
echo
|
||||
echo "----------------------------------------------"
|
||||
echo "Backing up data/ directory..."
|
||||
#backup_file="backup_$(date +%Y%m%d_%H%M%S).tar.gz"
|
||||
echo "----------------------------------------------"
|
||||
backup_file="data_backup.tar.gz"
|
||||
path2backup="data/"
|
||||
#copy custom_scheduler.py if it exists
|
||||
if [[ -f "modules/custom_scheduler.py" ]]; then
|
||||
echo "Including custom_scheduler.py in backup..."
|
||||
cp modules/custom_scheduler.py data/
|
||||
fi
|
||||
# Check config.ini ownership and permissions
|
||||
if [[ -f "config.ini" ]]; then
|
||||
owner=$(stat -f "%Su" config.ini)
|
||||
perms=$(stat -f "%A" config.ini)
|
||||
echo "config.ini is owned by: $owner"
|
||||
echo "config.ini permissions: $perms"
|
||||
if [[ "$owner" == "root" ]]; then
|
||||
echo "Warning: config.ini is owned by root check out the etc/set-permissions.sh script"
|
||||
fi
|
||||
if [[ $(stat -f "%Lp" config.ini) =~ .*[7,6,2]$ ]]; then
|
||||
echo "Warning: config.ini is world-writable or world-readable! check out the etc/set-permissions.sh script"
|
||||
fi
|
||||
|
||||
echo "Including config.ini in backup..."
|
||||
|
||||
cp config.ini data/config.backup
|
||||
fi
|
||||
#create the tar.gz backup
|
||||
tar -czf "$backup_file" "$path2backup"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Backup failed."
|
||||
echo "ERROR: Backup failed."
|
||||
else
|
||||
echo "Backup of ${path2backup} completed: ${backup_file}"
|
||||
fi
|
||||
|
||||
# Build a config_new.ini file merging user config with new defaults
|
||||
# --- Config Merge ---
|
||||
echo
|
||||
echo "----------------------------------------------"
|
||||
echo "Merging configuration files..."
|
||||
echo "----------------------------------------------"
|
||||
python3 script/configMerge.py > ini_merge_log.txt 2>&1
|
||||
if [[ -f ini_merge_log.txt ]]; then
|
||||
if grep -q "Error during configuration merge" ini_merge_log.txt; then
|
||||
@@ -98,11 +107,15 @@ if [[ -f ini_merge_log.txt ]]; then
|
||||
echo "Configuration merge completed. Please review config_new.ini and ini_merge_log.txt."
|
||||
fi
|
||||
else
|
||||
echo "Configuration merge log (ini_merge_log.txt) not found. check out the script/configMerge.py tool!"
|
||||
echo "Configuration merge log (ini_merge_log.txt) not found. Check out the script/configMerge.py tool!"
|
||||
fi
|
||||
|
||||
# --- Service Restart ---
|
||||
if [[ "$service_stopped" = true ]]; then
|
||||
echo
|
||||
echo "----------------------------------------------"
|
||||
echo "Restarting services..."
|
||||
echo "----------------------------------------------"
|
||||
for svc in mesh_bot.service pong_bot.service mesh_bot_reporting.service mesh_bot_w3.service; do
|
||||
if systemctl list-unit-files | grep -q "^$svc"; then
|
||||
systemctl start "$svc"
|
||||
@@ -111,7 +124,9 @@ if [[ "$service_stopped" = true ]]; then
|
||||
done
|
||||
fi
|
||||
|
||||
# Print completion message
|
||||
echo "Update completed successfully?"
|
||||
echo
|
||||
echo "=============================================="
|
||||
echo " MeshBot Update Completed Successfully! "
|
||||
echo "=============================================="
|
||||
exit 0
|
||||
# End of script
|
||||
Reference in New Issue
Block a user