mirror of
https://github.com/l5yth/potato-mesh.git
synced 2026-05-09 14:55:08 +02:00
Compare commits
1 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 858e9fa189 |
+2
-2
@@ -16,5 +16,5 @@ coverage:
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
target: 100%
|
||||
threshold: 10%
|
||||
target: 99%
|
||||
threshold: 1%
|
||||
|
||||
+1
-11
@@ -1,6 +1,3 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
# Licensed under the Apache License, Version 2.0 (see LICENSE)
|
||||
#
|
||||
# PotatoMesh Environment Configuration
|
||||
# Copy this file to .env and customize for your setup
|
||||
|
||||
@@ -17,7 +14,7 @@ INSTANCE_DOMAIN="mesh.example.org"
|
||||
# Generate a secure token: openssl rand -hex 32
|
||||
API_TOKEN="your-secure-api-token-here"
|
||||
|
||||
# Mesh radio connection target (required for ingestor)
|
||||
# Meshtastic connection target (required for ingestor)
|
||||
# Common serial paths:
|
||||
# - Linux: /dev/ttyACM0, /dev/ttyUSB0
|
||||
# - macOS: /dev/cu.usbserial-*
|
||||
@@ -26,10 +23,6 @@ API_TOKEN="your-secure-api-token-here"
|
||||
# Bluetooth address (e.g. ED:4D:9E:95:CF:60).
|
||||
CONNECTION="/dev/ttyACM0"
|
||||
|
||||
# Mesh protocol to use (meshtastic or meshcore)
|
||||
# Default: meshtastic
|
||||
PROTOCOL="meshtastic"
|
||||
|
||||
# =============================================================================
|
||||
# SITE CUSTOMIZATION
|
||||
# =============================================================================
|
||||
@@ -75,9 +68,6 @@ PRIVATE=0
|
||||
# Debug mode (0=off, 1=on)
|
||||
DEBUG=0
|
||||
|
||||
# Energy saving mode — sleep between ingestion cycles (0=off, 1=on)
|
||||
ENERGY_SAVING=0
|
||||
|
||||
# Default map zoom override
|
||||
# MAP_ZOOM=15
|
||||
|
||||
|
||||
@@ -19,22 +19,6 @@ updates:
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
- package-ecosystem: "python"
|
||||
directory: "/data"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
- package-ecosystem: "cargo"
|
||||
directory: "/matrix"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
- package-ecosystem: "npm"
|
||||
directory: "/web"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
- package-ecosystem: "pub"
|
||||
directory: "/app"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
<!-- Copyright © 2025-26 l5yth & contributors -->
|
||||
<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
|
||||
|
||||
# GitHub Actions Workflows
|
||||
|
||||
## Workflows
|
||||
@@ -13,3 +10,12 @@
|
||||
- **`mobile.yml`** - Flutter mobile tests with coverage reporting
|
||||
- **`release.yml`** - Tag-triggered Flutter release builds for Android and iOS
|
||||
|
||||
## Usage
|
||||
|
||||
```bash
|
||||
# Build locally
|
||||
docker-compose build
|
||||
|
||||
# Deploy
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
@@ -23,7 +23,7 @@ on:
|
||||
jobs:
|
||||
analyze:
|
||||
name: Analyze (${{ matrix.language }})
|
||||
runs-on: ubuntu-latest
|
||||
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
|
||||
permissions:
|
||||
security-events: write
|
||||
packages: read
|
||||
|
||||
@@ -188,7 +188,7 @@ jobs:
|
||||
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:${{ steps.version.outputs.version }}
|
||||
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:${{ steps.version.outputs.version_with_v }}
|
||||
docker run --rm --name ingestor-test \
|
||||
-e INSTANCE_DOMAIN=http://localhost:41447 \
|
||||
-e POTATOMESH_INSTANCE=http://localhost:41447 \
|
||||
-e API_TOKEN=test-token \
|
||||
-e CONNECTION=mock \
|
||||
-e DEBUG=1 \
|
||||
|
||||
@@ -20,7 +20,6 @@ on:
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
paths:
|
||||
- '.github/**'
|
||||
- 'web/**'
|
||||
- 'tests/**'
|
||||
|
||||
|
||||
@@ -20,7 +20,6 @@ on:
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
paths:
|
||||
- '.github/**'
|
||||
- 'app/**'
|
||||
- 'tests/**'
|
||||
|
||||
|
||||
@@ -20,7 +20,6 @@ on:
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
paths:
|
||||
- '.github/**'
|
||||
- 'data/**'
|
||||
- 'tests/**'
|
||||
|
||||
@@ -39,7 +38,7 @@ jobs:
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install black pytest pytest-cov meshtastic meshcore
|
||||
pip install black pytest pytest-cov meshtastic
|
||||
- name: Test with pytest and coverage
|
||||
run: |
|
||||
mkdir -p reports
|
||||
|
||||
@@ -20,7 +20,6 @@ on:
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
paths:
|
||||
- '.github/**'
|
||||
- 'web/**'
|
||||
- 'tests/**'
|
||||
|
||||
@@ -35,7 +34,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
ruby-version: ['3.4', '4.0']
|
||||
ruby-version: ['3.3', '3.4']
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
|
||||
@@ -74,9 +74,5 @@ web/.config
|
||||
node_modules/
|
||||
web/node_modules/
|
||||
|
||||
# Operator-customised static pages (keep only the shipped default)
|
||||
web/pages/*.md
|
||||
|
||||
# Debug symbols
|
||||
ignored.txt
|
||||
ignored-*.txt
|
||||
|
||||
@@ -0,0 +1,48 @@
|
||||
# Repository Guidelines
|
||||
|
||||
Keep code well structured, modular, and not monolithic. If modules get to big, consider submodules structure.
|
||||
|
||||
Make sure all tests pass for Python (`pytest`), Ruby (`rspec`), and JavaScript (`npm test`).
|
||||
|
||||
Make sure all code is properly inline documented (PDoc, RDoc, JSDoc, et.c). We do not want any undocumented code.
|
||||
|
||||
Make sure all code is 100% unit tested. We want all lines, units, and branches to be thouroughly covered by tests.
|
||||
|
||||
New source files should have Apache v2 license headers using the exact string `Copyright © 2025-26 l5yth & contributors`.
|
||||
|
||||
Run linters for Python (`black`) and Ruby (`rufo`) to ensure consistent code formatting.
|
||||
|
||||
## Project Structure & Module Organization
|
||||
The repository splits runtime and ingestion logic. `web/` holds the Sinatra dashboard (Ruby code in `lib/potato_mesh`, views in `views/`, static bundles in `public/`).
|
||||
|
||||
`data/` hosts the Python Meshtastic ingestor plus migrations and CLI scripts. API fixtures and end-to-end harnesses live in `tests/`. Dockerfiles and compose files support containerized workflows.
|
||||
|
||||
`matrix/` contains the Rust Matrix bridge; build with `cargo build --release` or `docker build -f matrix/Dockerfile .`, and keep bridge config under `matrix/Config.toml` when running locally.
|
||||
|
||||
## Build, Test, and Development Commands
|
||||
Run dependency installs inside `web/`: `bundle install` for gems and `npm ci` for JavaScript tooling. Start the app with `cd web && API_TOKEN=dev ./app.sh` for local work or `bundle exec rackup -p 41447` when integrating elsewhere.
|
||||
|
||||
Prep ingestion with `python -m venv .venv && pip install -r data/requirements.txt`; `./data/mesh.sh` streams from live radios. `docker-compose -f docker-compose.dev.yml up` brings up the full stack.
|
||||
|
||||
Container images publish via `.github/workflows/docker.yml` as `potato-mesh-{service}-linux-$arch` (`web`, `ingestor`, `matrix-bridge`), using the Dockerfiles in `web/`, `data/`, and `matrix/`.
|
||||
|
||||
## Coding Style & Naming Conventions
|
||||
Use two-space indentation for Ruby and keep `# frozen_string_literal: true` at the top of new files. Keep Ruby classes/modules in `CamelCase`, filenames in `snake_case.rb`, and feature specs in `*_spec.rb`.
|
||||
|
||||
JavaScript follows ES modules under `public/assets/js`; co-locate components with `__tests__` folders and use kebab-case filenames. Format Ruby via `bundle exec rufo .` and Python via `black`. Skip committing generated coverage artifacts.
|
||||
|
||||
## Flutter Mobile App (`app/`)
|
||||
The Flutter client lives in `app/`. Keep only the mobile targets (`android/`, `ios/`) under version control unless you explicitly support other platforms. Do not commit Flutter build outputs or editor cruft (`.dart_tool/`, `.flutter-plugins-dependencies`, `.idea/`, `.metadata`, `*.iml`, `.fvmrc` if unused).
|
||||
|
||||
Install dependencies with `cd app && flutter pub get`; format with `dart format .` and lint via `flutter analyze`. Run tests with `cd app && flutter test` and keep widget/unit coverage high—no new code without tests. Commit `pubspec.lock` and analysis options so toolchains stay consistent.
|
||||
|
||||
## Testing Guidelines
|
||||
Ruby specs run with `cd web && bundle exec rspec`, producing SimpleCov output in `coverage/`. Front-end behaviour is verified through Node’s test runner: `cd web && npm test` writes V8 coverage and JUnit XML under `reports/`.
|
||||
|
||||
The ingestion layer is guarded by `pytest -q tests/test_mesh.py`; leave fixtures in `tests/` untouched so CI can replay them. New features should ship with matching specs and updated integration checks.
|
||||
|
||||
## Commit & Pull Request Guidelines
|
||||
Commits should stay imperative and reference issues the way history does (`Add chat log entries... (#408)`). Squash noisy work-in-progress commits before pushing. Pull requests need a concise summary, screenshots or curl traces for UI/API tweaks, and links to tracked issues. Paste the command output for the test suites you ran and mention configuration toggles (`API_TOKEN`, `PRIVATE`) reviewers must set.
|
||||
|
||||
## Security & Configuration Tips
|
||||
Never commit real API tokens or `.sqlite` dumps; use `.env.local` files ignored by Git. Confirm env defaults (`API_TOKEN`, `INSTANCE_DOMAIN`, `PRIVATE`) before deploying, and set `FEDERATION=0` when staging private nodes. Review `PROMETHEUS.md` when exposing metrics so scrape endpoints stay internal.
|
||||
-182
@@ -1,187 +1,5 @@
|
||||
<!-- Copyright © 2025-26 l5yth & contributors -->
|
||||
<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
|
||||
|
||||
# CHANGELOG
|
||||
|
||||
## v0.6.2
|
||||
|
||||
This is a service release of the radio mesh app-suite `potato-mesh` v0.6.2, focused on Meshcore-related fixes, federation accuracy, and bridge coverage. The Matrix bridge now understands Meshcore traffic, and several duplication and classification issues in the web app and ingestor have been tightened up.
|
||||
|
||||
Demo: <https://potatomesh.net/>
|
||||
|
||||
### Features
|
||||
* Matrix: enable meshcore by @l5yth in <https://github.com/l5yth/potato-mesh/pull/761>
|
||||
* Web: show colocated nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/753>
|
||||
|
||||
### Fixes
|
||||
* Web: fix emoji pattern render in short names by @l5yth in <https://github.com/l5yth/potato-mesh/pull/760>
|
||||
* Data: catch packet handler errors by @l5yth in <https://github.com/l5yth/potato-mesh/pull/759>
|
||||
* Web: fix meshcore message duplication with 120s dupe protection by @l5yth in <https://github.com/l5yth/potato-mesh/pull/758>
|
||||
* Web: fix node duplication through message synthetization by @l5yth in <https://github.com/l5yth/potato-mesh/pull/757>
|
||||
* Ingestor: deduplicate meshcore messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/752>
|
||||
* Fix reaction handling and classification by @l5yth in <https://github.com/l5yth/potato-mesh/pull/750>
|
||||
* Web: fix federation node counts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/749>
|
||||
|
||||
## v0.6.1
|
||||
|
||||
This is a service release of the radio mesh app-suite `potato-mesh` v0.6.1, focused on Meshcore polish, federation resilience, and ingestor stability in the wake of the v0.6.0 multi-protocol release.
|
||||
|
||||
Demo: <https://potatomesh.net/>
|
||||
|
||||
### Features
|
||||
* Web: per protocol active node counts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/735>
|
||||
* Web: optimize caching by @l5yth in <https://github.com/l5yth/potato-mesh/pull/744>
|
||||
* Data: better lora frequency handling for meshtastic by @l5yth in <https://github.com/l5yth/potato-mesh/pull/733>
|
||||
|
||||
### Fixes
|
||||
* Web: fix meshcore node misclassification by @l5yth in <https://github.com/l5yth/potato-mesh/pull/748>
|
||||
* Web: fix federation resolver issue with multi addresses by @l5yth in <https://github.com/l5yth/potato-mesh/pull/743>
|
||||
* Web: restore refresh and protocol buttons by @l5yth in <https://github.com/l5yth/potato-mesh/pull/742>
|
||||
* Ingestor: fix serial connection failures by @l5yth in <https://github.com/l5yth/potato-mesh/pull/736>
|
||||
|
||||
### Chores
|
||||
* Chore: bump version to 0.6.1 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/726>
|
||||
* Build(deps): bump rand from 0.9.2 to 0.9.4 in /matrix by @dependabot in <https://github.com/l5yth/potato-mesh/pull/741>
|
||||
|
||||
## v0.6.0
|
||||
|
||||
This is a service release of the radio mesh app-suite `potato-mesh` v0.6.0 which introduces new features and overhauls the user interface. The primary notable change is added support for multi-protocol along with an implementation of **Meshcore** in ingestor, web app, and frontend.
|
||||
|
||||
Demo: <https://potatomesh.net/>
|
||||
|
||||
### Meshcore
|
||||
|
||||
To start ingesting Meshcore data to an upgraded potato-mesh web app, simply tell your ingestor to use the `PROTOCOL="meshcore"`.
|
||||
|
||||
### About Pages
|
||||
|
||||
The other notable feature is the removal of the "darkmode" and "info" buttons in favor of customizable markdown pages that allow for more flexibility with regard to custom content (info about presets, contact information, etc.) - see `/pages/*.md` in the web app ([#723](https://github.com/l5yth/potato-mesh/pull/723)).
|
||||
|
||||
### Breaking Variable Changes
|
||||
|
||||
The following deprecated environmental variables have been removed in this release finally ([#704](https://github.com/l5yth/potato-mesh/pull/704)):
|
||||
* ~~POTATOMESH_INSTANCE~~ - please use `INSTANCE_DOMAIN`
|
||||
* ~~MESH_SERIAL~~ and ~~PORT~~ - please use `CONNECTION`
|
||||
|
||||
### Features
|
||||
* Web: add markdown static pages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/723>
|
||||
* Data: trace analysus multi ingestor support by @l5yth in <https://github.com/l5yth/potato-mesh/pull/721>
|
||||
* Web: facelift by @l5yth in <https://github.com/l5yth/potato-mesh/pull/716>
|
||||
* Web: sort channels by activity not index by @l5yth in <https://github.com/l5yth/potato-mesh/pull/711>
|
||||
* Data: derive meshcore channel probe bound from device max_channels by @l5yth in <https://github.com/l5yth/potato-mesh/pull/701>
|
||||
* Web: define meshcore modem presets by @l5yth in <https://github.com/l5yth/potato-mesh/pull/696>
|
||||
* Data: register meshcore channel mappings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/695>
|
||||
* Data: provide frequency and modem preset for meshcore by @l5yth in <https://github.com/l5yth/potato-mesh/pull/694>
|
||||
* Web: distinguish meshcore from meshtastic in frontend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/688>
|
||||
* [Meshcore] fix: get meshcore protocol icon displaying correctly by @benallfree in <https://github.com/l5yth/potato-mesh/pull/681>
|
||||
|
||||
### Fixes
|
||||
* Web: fix federation for multi protocol by @l5yth in <https://github.com/l5yth/potato-mesh/pull/722>
|
||||
* Data: fix position time updates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/715>
|
||||
* Data: fix meshcore ingestor self reporting by @l5yth in <https://github.com/l5yth/potato-mesh/pull/713>
|
||||
* Web: reference meshcore nodes in chat by @l5yth in <https://github.com/l5yth/potato-mesh/pull/709>
|
||||
* Web: fix node disappearance role reset by @l5yth in <https://github.com/l5yth/potato-mesh/pull/707>
|
||||
* Web: protect real node names from fallback by @l5yth in <https://github.com/l5yth/potato-mesh/pull/702>
|
||||
* Web: add proper short names for meshcore companions by @l5yth in <https://github.com/l5yth/potato-mesh/pull/693>
|
||||
* Fix: address review comments from PRs #676 and #681 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/689>
|
||||
* [Meshcore] fix: race condition by @benallfree in <https://github.com/l5yth/potato-mesh/pull/676>
|
||||
|
||||
### Chores
|
||||
* Release: v0.6.0 — remove deprecated env var aliases by @l5yth in <https://github.com/l5yth/potato-mesh/pull/704>
|
||||
* Chore: prepare codebase for breaking release by @l5yth in <https://github.com/l5yth/potato-mesh/pull/718>
|
||||
|
||||
## v0.5.12
|
||||
|
||||
This is a service release of the app potato-mesh v0.5.12 which improves performance and stability.
|
||||
|
||||
Notably, the frontend went through some graphical tweaks to prepare for an upcoming multi-protocol release (meshcore, reticulum, etc.).
|
||||
|
||||
* Enh: surface meshcore role types (#680) by @l5yth in https://github.com/l5yth/potato-mesh/pull/685
|
||||
* Chore: refactor codebase before meshcore release by @l5yth in https://github.com/l5yth/potato-mesh/pull/682
|
||||
* [Meshcore] enh: short name should be 1st 4 hex digits of public key by @benallfree in https://github.com/l5yth/potato-mesh/pull/679
|
||||
* Chore: update xcode deps by @benallfree in https://github.com/l5yth/potato-mesh/pull/674
|
||||
* Chore: update mesh.sh to use requirements file by @benallfree in https://github.com/l5yth/potato-mesh/pull/675
|
||||
* Data/meshcore: fix ble and enable tcp by @l5yth in https://github.com/l5yth/potato-mesh/pull/669
|
||||
* Data: handle store_forward and router_heartbeat portnum by @l5yth in https://github.com/l5yth/potato-mesh/pull/667
|
||||
* Feat: implement meshcore provider by @l5yth in https://github.com/l5yth/potato-mesh/pull/663
|
||||
* Ci: update dependabot and codecov settings by @l5yth in https://github.com/l5yth/potato-mesh/pull/666
|
||||
* Web: prepare release by @l5yth in https://github.com/l5yth/potato-mesh/pull/665
|
||||
* App: only query meshtastic provider by @l5yth in https://github.com/l5yth/potato-mesh/pull/664
|
||||
* Data: prepare ingestor for meshcore by @l5yth in https://github.com/l5yth/potato-mesh/pull/658
|
||||
* Web: fix css issues by @l5yth in https://github.com/l5yth/potato-mesh/pull/659
|
||||
* Web: prepare frontend for multi protocol by @l5yth in https://github.com/l5yth/potato-mesh/pull/657
|
||||
* Feat: split device and power-sensor telemetry charts (#643) by @l5yth in https://github.com/l5yth/potato-mesh/pull/656
|
||||
* Web: implement a 'protocol' field across systems by @l5yth in https://github.com/l5yth/potato-mesh/pull/655
|
||||
* Fix upsert clearing node coordinates bug by @l5yth in https://github.com/l5yth/potato-mesh/pull/654
|
||||
* Data: resolve circular dependency of deamon.py by @l5yth in https://github.com/l5yth/potato-mesh/pull/653
|
||||
* Proposal: mesh provider pattern refactor by @benallfree in https://github.com/l5yth/potato-mesh/pull/651
|
||||
* Build(deps): bump rustls-webpki from 0.103.8 to 0.103.10 in /matrix by @dependabot[bot] in https://github.com/l5yth/potato-mesh/pull/649
|
||||
* Build(deps): bump quinn-proto from 0.11.13 to 0.11.14 in /matrix by @dependabot[bot] in https://github.com/l5yth/potato-mesh/pull/646
|
||||
|
||||
## v0.5.11
|
||||
|
||||
* Chore: bump version to 0.5.11 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/645>
|
||||
* Web: limit horizontal size of dropdown by @l5yth in <https://github.com/l5yth/potato-mesh/pull/644>
|
||||
|
||||
## v0.5.10
|
||||
|
||||
* Web: expose node stats in distinct api by @l5yth in <https://github.com/l5yth/potato-mesh/pull/641>
|
||||
* Web: do merge channels by name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/640>
|
||||
* Web: do not merge channels by ID in frontend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/637>
|
||||
* Web: do not touch neighbor last seen on neighbor info by @l5yth in <https://github.com/l5yth/potato-mesh/pull/636>
|
||||
* Ingestor: report self id per packet by @l5yth in <https://github.com/l5yth/potato-mesh/pull/635>
|
||||
* Ci: fix docker compose and docs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/634>
|
||||
* Web: supress encrypted text messages in frontend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/633>
|
||||
* Federation: ensure requests timeout properly and can be terminated by @l5yth in <https://github.com/l5yth/potato-mesh/pull/631>
|
||||
* Build(deps): bump bytes from 1.11.0 to 1.11.1 in /matrix by @dependabot[bot]< in https://github.com/l5yth/potato-mesh/pull/627>
|
||||
* Matrix: config loading now merges optional TOML with CLI/env/secret inputs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/617>
|
||||
* Matrix: logs only non-sensitive config fields by @l5yth in <https://github.com/l5yth/potato-mesh/pull/616>
|
||||
* Web: decrypted takes precedence by @l5yth in <https://github.com/l5yth/potato-mesh/pull/614>
|
||||
* Add Apache 2.0 license headers to missing sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/615>
|
||||
* Web: decrypt PSK-1 unencrypted messages on arrival by @l5yth in <https://github.com/l5yth/potato-mesh/pull/611>
|
||||
* Web: daemonize federation worker pool to avoid deadlocks on stuck announcments by @l5yth in <https://github.com/l5yth/potato-mesh/pull/610>
|
||||
* Web: add announcement banner by @l5yth in <https://github.com/l5yth/potato-mesh/pull/609>
|
||||
* L5Y chore version 0510 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/608>
|
||||
|
||||
## v0.5.9
|
||||
|
||||
* Matrix: listen for synapse on port 41448 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/607>
|
||||
* Web: collapse federation map ledgend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/604>
|
||||
* Web: fix stale node queries by @l5yth in <https://github.com/l5yth/potato-mesh/pull/603>
|
||||
* Matrix: move short name to display name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/602>
|
||||
* Ci: update ruby to 4 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/601>
|
||||
* Web: display traces of last 28 days if available by @l5yth in <https://github.com/l5yth/potato-mesh/pull/599>
|
||||
* Web: establish menu structure by @l5yth in <https://github.com/l5yth/potato-mesh/pull/597>
|
||||
* Matrix: fixed the text-message checkpoint regression by @l5yth in <https://github.com/l5yth/potato-mesh/pull/595>
|
||||
* Matrix: cache seen messages by rx_time not id by @l5yth in <https://github.com/l5yth/potato-mesh/pull/594>
|
||||
* Web: hide the default '0' tab when not active by @l5yth in <https://github.com/l5yth/potato-mesh/pull/593>
|
||||
* Matrix: fix empty bridge state json by @l5yth in <https://github.com/l5yth/potato-mesh/pull/592>
|
||||
* Web: allow certain charts to overflow upper bounds by @l5yth in <https://github.com/l5yth/potato-mesh/pull/585>
|
||||
* Ingestor: support ROUTING_APP messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/584>
|
||||
* Ci: run nix flake check on ci by @l5yth in <https://github.com/l5yth/potato-mesh/pull/583>
|
||||
* Web: hide legend by default by @l5yth in <https://github.com/l5yth/potato-mesh/pull/582>
|
||||
* Nix flake by @benjajaja in <https://github.com/l5yth/potato-mesh/pull/577>
|
||||
* Support BLE UUID format for macOS Bluetooth devices by @apo-mak in <https://github.com/l5yth/potato-mesh/pull/575>
|
||||
* Web: add mesh.qrp.ro as seed node by @l5yth in <https://github.com/l5yth/potato-mesh/pull/573>
|
||||
* Web: ensure unknown nodes for messages and traces by @l5yth in <https://github.com/l5yth/potato-mesh/pull/572>
|
||||
* Chore: bump version to 0.5.9 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/569>
|
||||
|
||||
## v0.5.8
|
||||
|
||||
* Web: add secondary seed node jmrp.io by @l5yth in <https://github.com/l5yth/potato-mesh/pull/568>
|
||||
* Data: implement whitelist for ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/567>
|
||||
* Web: add ?since= parameter to all apis by @l5yth in <https://github.com/l5yth/potato-mesh/pull/566>
|
||||
* Matrix: fix docker build by @l5yth in <https://github.com/l5yth/potato-mesh/pull/565>
|
||||
* Matrix: fix docker build by @l5yth in <https://github.com/l5yth/potato-mesh/pull/564>
|
||||
* Web: fix federation signature validation and create fallback by @l5yth in <https://github.com/l5yth/potato-mesh/pull/563>
|
||||
* Chore: update readme by @l5yth in <https://github.com/l5yth/potato-mesh/pull/561>
|
||||
* Matrix: add docker file for bridge by @l5yth in <https://github.com/l5yth/potato-mesh/pull/556>
|
||||
* Matrix: add health checks to startup by @l5yth in <https://github.com/l5yth/potato-mesh/pull/555>
|
||||
* Matrix: omit the api part in base url by @l5yth in <https://github.com/l5yth/potato-mesh/pull/554>
|
||||
* App: add utility coverage tests for main.dart by @l5yth in <https://github.com/l5yth/potato-mesh/pull/552>
|
||||
* Data: add thorough daemon unit tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/553>
|
||||
* Chore: bump version to 0.5.8 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/551>
|
||||
|
||||
## v0.5.7
|
||||
|
||||
* Data: track ingestors heartbeat by @l5yth in <https://github.com/l5yth/potato-mesh/pull/549>
|
||||
|
||||
@@ -1,68 +0,0 @@
|
||||
<!-- Copyright © 2025-26 l5yth & contributors -->
|
||||
<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
|
||||
|
||||
# Repository Guidelines
|
||||
|
||||
Keep code as modular as possible to reduce duplication and improve reusability and readability — this applies to tests as well as production code. If a module grows large, split it into a submodule structure. Prefer composing small, single-purpose units over monolithic files.
|
||||
|
||||
Make sure all tests pass for Python (`pytest`), Ruby (`rspec`), and JavaScript (`npm test`).
|
||||
|
||||
All code must be 100% unit tested — every line, branch, and code path must have a unit test. "100%" is the floor, not the ceiling: smoke tests, integration tests, and end-to-end tests come on top of that. No new code ships without matching unit tests.
|
||||
|
||||
All code must be 100% documented according to the language's API-doc standard (PDoc for Python, RDoc for Ruby, JSDoc for JavaScript, rustdoc for Rust, dartdoc for Dart). Documentation must be sufficient to generate complete API docs from source. In addition to API-level docs, add inline comments wherever the logic is not immediately self-evident.
|
||||
|
||||
Every file in the repository must carry an Apache v2 license notice using the exact string `Copyright © 2025-26 l5yth & contributors`. **Source-code files** (`.rb`, `.py`, `.js`, `.rs`, `.dart`, etc.) must include the full Apache v2 license header block. **Non-source files** (docs, configs, YAML, TOML, Dockerfiles, etc.) must include a short 2-line Apache v2 notice (copyright line + license reference).
|
||||
|
||||
Run linters for Python (`black`) and Ruby (`rufo`) to ensure consistent code formatting.
|
||||
|
||||
## Project Structure & Module Organization
|
||||
The repository splits runtime and ingestion logic. `web/` holds the Sinatra dashboard (Ruby code in `lib/potato_mesh`, views in `views/`, static bundles in `public/`).
|
||||
|
||||
`data/` hosts the Python Meshtastic ingestor plus migrations and CLI scripts. The ingestor is structured as the `data/mesh_ingestor/` package with the following key modules: `daemon.py` (main loop), `handlers.py` (packet processing), `interfaces.py` (interface helpers), `config.py` (env-driven config), `events.py` (TypedDict event schemas), `mesh_protocol.py` (MeshProtocol base), `node_identity.py` (canonical node ID utilities), `decode_payload.py` (CLI protobuf decoder), and the `protocols/` subpackage (currently `meshtastic.py`). API contracts for all POST ingest routes are documented in `data/mesh_ingestor/CONTRACTS.md`. API fixtures and end-to-end harnesses live in `tests/`. Dockerfiles and compose files support containerized workflows.
|
||||
|
||||
`matrix/` contains the Rust Matrix bridge; build with `cargo build --release` or `docker build -f matrix/Dockerfile .`, and keep bridge config under `matrix/Config.toml` when running locally.
|
||||
|
||||
## Build, Test, and Development Commands
|
||||
Run dependency installs inside `web/`: `bundle install` for gems and `npm ci` for JavaScript tooling. Start the app with `cd web && API_TOKEN=dev ./app.sh` for local work or `bundle exec rackup -p 41447` when integrating elsewhere.
|
||||
|
||||
Prep ingestion with `python -m venv .venv && pip install -r data/requirements.txt`; `./data/mesh.sh` streams from live radios. `docker-compose -f docker-compose.dev.yml up` brings up the full stack.
|
||||
|
||||
Container images publish via `.github/workflows/docker.yml` as `potato-mesh-{service}-linux-$arch` (`web`, `ingestor`, `matrix-bridge`), using the Dockerfiles in `web/`, `data/`, and `matrix/`.
|
||||
|
||||
## Coding Style & Naming Conventions
|
||||
Use two-space indentation for Ruby and keep `# frozen_string_literal: true` at the top of new files. Keep Ruby classes/modules in `CamelCase`, filenames in `snake_case.rb`, and feature specs in `*_spec.rb`.
|
||||
|
||||
JavaScript follows ES modules under `public/assets/js`; co-locate components with `__tests__` folders and use kebab-case filenames. Format Ruby via `bundle exec rufo .` and Python via `black`. Skip committing generated coverage artifacts.
|
||||
|
||||
## Flutter Mobile App (`app/`)
|
||||
The Flutter client lives in `app/`. Keep only the mobile targets (`android/`, `ios/`) under version control unless you explicitly support other platforms. Do not commit Flutter build outputs or editor cruft (`.dart_tool/`, `.flutter-plugins-dependencies`, `.idea/`, `.metadata`, `*.iml`, `.fvmrc` if unused).
|
||||
|
||||
Install dependencies with `cd app && flutter pub get`; format with `dart format .` and lint via `flutter analyze`. Run tests with `cd app && flutter test` and keep widget/unit coverage high—no new code without tests. Commit `pubspec.lock` and analysis options so toolchains stay consistent.
|
||||
|
||||
## Testing Guidelines
|
||||
Ruby specs run with `cd web && bundle exec rspec`, producing SimpleCov output in `coverage/`. Front-end behaviour is verified through Node’s test runner: `cd web && npm test` writes V8 coverage and JUnit XML under `reports/`.
|
||||
|
||||
The ingestion layer is tested with `pytest -q tests/`; leave fixtures in `tests/` untouched so CI can replay them. The suite includes both integration tests (`test_mesh.py`) and focused unit tests — `test_events_unit.py` (TypedDict schemas), `test_provider_unit.py` (Provider protocol conformance and `MeshtasticProvider`), `test_node_identity_unit.py` (canonical ID helpers), `test_daemon_unit.py`, `test_serialization_unit.py`, and `test_decode_payload.py`. New features should ship with matching specs and updated integration checks.
|
||||
|
||||
## Adding a New Ingestor Protocol
|
||||
The `data/mesh_ingestor/mesh_protocol.py` module defines a `@runtime_checkable` `MeshProtocol` class with five members: `name` (str), `subscribe()`, `connect(*, active_candidate)`, `extract_host_node_id(iface)`, and `node_snapshot_items(iface)`. To add a new backend (e.g. Reticulum):
|
||||
|
||||
1. Create `data/mesh_ingestor/protocols/<name>.py` with a class satisfying the `MeshProtocol` interface.
|
||||
2. Register it in `data/mesh_ingestor/protocols/__init__.py`.
|
||||
3. Pass an instance via `daemon.main(provider=...)` or make it the default in `main()`.
|
||||
4. Cover the protocol with unit tests in `tests/test_provider_unit.py` — at minimum an `isinstance(..., MeshProtocol)` conformance check and any retry/error-handling paths.
|
||||
|
||||
Consult `data/mesh_ingestor/CONTRACTS.md` for the canonical event shapes all protocols must emit.
|
||||
|
||||
## GitHub Configuration Standards
|
||||
Every language used in the repository must have a Dependabot entry checking for dependency updates on a **weekly** schedule. Keep the Dependabot config up to date as new languages or package ecosystems are added.
|
||||
|
||||
Codecov must be configured with a **100% coverage target** and a **10% threshold** (i.e. a drop of more than 10 percentage points fails the check). The `codecov.yml` should enforce this on both patch and project coverage.
|
||||
|
||||
Every service/component must have at least one GitHub Actions workflow that **builds and runs tests on pull requests against `main` and on direct pushes to `main`**. Workflows should cover all relevant test suites (Python, Ruby, JS, Rust, Flutter) for the components they touch.
|
||||
|
||||
## Commit & Pull Request Guidelines
|
||||
Commits should stay imperative and reference issues the way history does (`Add chat log entries... (#408)`). Squash noisy work-in-progress commits before pushing. Pull requests need a concise summary, screenshots or curl traces for UI/API tweaks, and links to tracked issues. Paste the command output for the test suites you ran and mention configuration toggles (`API_TOKEN`, `PRIVATE`) reviewers must set.
|
||||
|
||||
## Security & Configuration Tips
|
||||
Never commit real API tokens or `.sqlite` dumps; use `.env.local` files ignored by Git. Confirm env defaults (`API_TOKEN`, `INSTANCE_DOMAIN`, `PRIVATE`) before deploying, and set `FEDERATION=0` when staging private nodes. Review `PROMETHEUS.md` when exposing metrics so scrape endpoints stay internal.
|
||||
@@ -1,6 +1,3 @@
|
||||
<!-- Copyright © 2025-26 l5yth & contributors -->
|
||||
<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
|
||||
|
||||
# PotatoMesh Docker Guide
|
||||
|
||||
PotatoMesh publishes ready-to-run container images to the GitHub Packages container
|
||||
@@ -16,16 +13,16 @@ will pull the latest release images for you.
|
||||
|
||||
## Images on GHCR
|
||||
|
||||
| Service | Image |
|
||||
|----------|----------------------------------------------------------------------------------------------------------------|
|
||||
| Web UI | `ghcr.io/l5yth/potato-mesh-web-linux-amd64:<tag>` (e.g. `latest`, `0.6.0`, `v0.6.0`, or `0.7.0-rc1`) |
|
||||
| Ingestor | `ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:<tag>` (e.g. `latest`, `0.6.0`, `v0.6.0`, or `0.7.0-rc1`) |
|
||||
| Service | Image |
|
||||
|----------|---------------------------------------------------------------------------------------------------------------|
|
||||
| Web UI | `ghcr.io/l5yth/potato-mesh-web-linux-amd64:<tag>` (e.g. `latest`, `3.0`, `v3.0`, or `3.1.0-rc1`) |
|
||||
| Ingestor | `ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:<tag>` (e.g. `latest`, `3.0`, `v3.0`, or `3.1.0-rc1`) |
|
||||
|
||||
Images are published for every tagged release. Stable builds receive both
|
||||
semantic version tags (for example `0.6.0`) and a matching `v`-prefixed tag (for
|
||||
example `v0.6.0`), plus a `latest` tag that tracks the newest stable release.
|
||||
semantic version tags (for example `3.0`) and a matching `v`-prefixed tag (for
|
||||
example `v3.0`), plus a `latest` tag that tracks the newest stable release.
|
||||
Pre-release tags (for example `-rc`, `-beta`, `-alpha`, or `-dev` suffixes) are
|
||||
published only with their explicit version strings (`0.7.0-rc1` and `v0.7.0-rc1`
|
||||
published only with their explicit version strings (`3.1.0-rc1` and `v3.1.0-rc1`
|
||||
in this example) and do **not** advance `latest`. Pin the versioned tags when
|
||||
you need a specific build.
|
||||
|
||||
@@ -63,8 +60,9 @@ Additional environment variables are optional:
|
||||
| `CONNECTION` | `/dev/ttyACM0` | Serial device, TCP endpoint, or Bluetooth target used by the ingestor to reach the radio. |
|
||||
|
||||
The ingestor posts to the URL configured via `INSTANCE_DOMAIN` (defaulting to
|
||||
`http://web:41447` in the provided compose file). Use `CHANNEL_INDEX` to select
|
||||
a LoRa channel on serial or Bluetooth connections.
|
||||
`http://web:41447` in the provided compose file) and still accepts
|
||||
`POTATOMESH_INSTANCE` as a legacy alias when the primary variable is unset. Use
|
||||
`CHANNEL_INDEX` to select a LoRa channel on serial or Bluetooth connections.
|
||||
|
||||
## Docker Compose file
|
||||
|
||||
@@ -81,18 +79,6 @@ the container. This path stores the instance private key and staged
|
||||
of container lifecycle events, generated credentials are not replaced on reboot
|
||||
or re-deploy.
|
||||
|
||||
The `potatomesh_pages` volume mounts to `/app/pages` and holds operator-managed
|
||||
Markdown files that are rendered as static content pages in the web UI. On first
|
||||
start the default `1-about.md` page is copied from the image into the volume.
|
||||
You can add, edit, or remove `.md` files in this volume to customise your
|
||||
instance's navigation. To use a host directory instead of a named volume, replace
|
||||
the volume entry with a bind mount:
|
||||
|
||||
```yaml
|
||||
volumes:
|
||||
- ./my-pages:/app/pages
|
||||
```
|
||||
|
||||
## Start the stack
|
||||
|
||||
From the directory containing the Compose file:
|
||||
|
||||
+9
-30
@@ -1,4 +1,3 @@
|
||||
# syntax=docker/dockerfile:1.6
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -26,9 +25,6 @@ ENV BUNDLE_FORCE_RUBY_PLATFORM=true
|
||||
# Install build dependencies and SQLite3
|
||||
RUN apk add --no-cache \
|
||||
build-base \
|
||||
python3 \
|
||||
py3-pip \
|
||||
py3-virtualenv \
|
||||
sqlite-dev \
|
||||
linux-headers \
|
||||
pkgconfig
|
||||
@@ -44,16 +40,11 @@ RUN bundle config set --local force_ruby_platform true && \
|
||||
bundle config set --local without 'development test' && \
|
||||
bundle install --jobs=4 --retry=3
|
||||
|
||||
# Install Meshtastic decoder dependencies in a dedicated venv
|
||||
RUN python3 -m venv /opt/meshtastic-venv && \
|
||||
/opt/meshtastic-venv/bin/pip install --no-cache-dir meshtastic protobuf
|
||||
|
||||
# Production stage
|
||||
FROM ruby:3.3-alpine AS production
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apk add --no-cache \
|
||||
python3 \
|
||||
sqlite \
|
||||
tzdata \
|
||||
curl
|
||||
@@ -67,27 +58,18 @@ WORKDIR /app
|
||||
|
||||
# Copy installed gems from builder stage
|
||||
COPY --from=builder /usr/local/bundle /usr/local/bundle
|
||||
COPY --from=builder /opt/meshtastic-venv /opt/meshtastic-venv
|
||||
|
||||
# Copy application code (excluding the Dockerfile which is not required at runtime)
|
||||
COPY --chown=potatomesh:potatomesh web/app.rb ./
|
||||
COPY --chown=potatomesh:potatomesh web/app.sh ./
|
||||
COPY --chown=potatomesh:potatomesh web/Gemfile ./
|
||||
COPY --chown=potatomesh:potatomesh web/Gemfile.lock* ./
|
||||
COPY --chown=potatomesh:potatomesh web/lib ./lib
|
||||
COPY --chown=potatomesh:potatomesh web/spec ./spec
|
||||
# Copy application code (exclude Dockerfile from web directory)
|
||||
COPY --chown=potatomesh:potatomesh web/app.rb web/app.sh web/Gemfile web/Gemfile.lock* web/spec/ ./
|
||||
COPY --chown=potatomesh:potatomesh web/public ./public
|
||||
COPY --chown=potatomesh:potatomesh web/views ./views
|
||||
COPY --chown=potatomesh:potatomesh web/scripts ./scripts
|
||||
COPY --chown=potatomesh:potatomesh web/views/ ./views/
|
||||
|
||||
# Copy SQL schema files from data directory
|
||||
COPY --chown=potatomesh:potatomesh data/*.sql /data/
|
||||
COPY --chown=potatomesh:potatomesh data/mesh_ingestor/decode_payload.py /app/data/mesh_ingestor/decode_payload.py
|
||||
|
||||
# Create data and configuration directories with correct ownership
|
||||
RUN mkdir -p /app/.local/share/potato-mesh \
|
||||
&& mkdir -p /app/.config/potato-mesh/well-known \
|
||||
&& chown -R potatomesh:potatomesh /app/.local/share /app/.config
|
||||
# Create data directory for SQLite database
|
||||
RUN mkdir -p /app/data /app/.local/share/potato-mesh && \
|
||||
chown -R potatomesh:potatomesh /app/data /app/.local
|
||||
|
||||
# Switch to non-root user
|
||||
USER potatomesh
|
||||
@@ -96,16 +78,13 @@ USER potatomesh
|
||||
EXPOSE 41447
|
||||
|
||||
# Default environment variables (can be overridden by host)
|
||||
ENV RACK_ENV=production \
|
||||
APP_ENV=production \
|
||||
MESHTASTIC_PYTHON=/opt/meshtastic-venv/bin/python \
|
||||
XDG_DATA_HOME=/app/.local/share \
|
||||
XDG_CONFIG_HOME=/app/.config \
|
||||
ENV APP_ENV=production \
|
||||
RACK_ENV=production \
|
||||
SITE_NAME="PotatoMesh Demo" \
|
||||
INSTANCE_DOMAIN="potato.example.com" \
|
||||
CHANNEL="#LongFast" \
|
||||
FREQUENCY="915MHz" \
|
||||
MAP_CENTER="38.761944,-27.090833" \
|
||||
MAP_ZOOM="" \
|
||||
MAX_DISTANCE=42 \
|
||||
CONTACT_LINK="#potatomesh:dod.ngo" \
|
||||
DEBUG=0
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
<!-- Copyright © 2025-26 l5yth & contributors -->
|
||||
<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
|
||||
|
||||
# Prometheus Monitoring for PotatoMesh
|
||||
|
||||
PotatoMesh exposes runtime telemetry through a dedicated Prometheus endpoint so you can
|
||||
|
||||
@@ -1,6 +1,3 @@
|
||||
<!-- Copyright © 2025-26 l5yth & contributors -->
|
||||
<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
|
||||
|
||||
# 🥔 PotatoMesh
|
||||
|
||||
[](https://github.com/l5yth/potato-mesh/actions)
|
||||
@@ -10,10 +7,7 @@
|
||||
[](https://github.com/l5yth/potato-mesh/issues)
|
||||
[](https://matrix.to/#/#potatomesh:dod.ngo)
|
||||
|
||||
[](https://meshtastic.org)
|
||||
[](https://meshcore.co.uk)
|
||||
|
||||
A federated, Meshtastic & Meshcore node dashboard for your local community.
|
||||
A federated, Meshtastic-powered node dashboard for your local community.
|
||||
_No MQTT clutter, just local LoRa aether._
|
||||
|
||||
* Web dashboard with chat window and map view showing nodes, positions, neighbors,
|
||||
@@ -23,17 +17,15 @@ _No MQTT clutter, just local LoRa aether._
|
||||
* Allows searching and filtering for nodes in map and table view.
|
||||
* Federated: _automatically_ froms a federation with other communities running
|
||||
Potato Mesh!
|
||||
* Supports Meshtastic and Meshcore
|
||||
* Supplemental Python ingestor to feed the POST APIs of the Web app with data remotely.
|
||||
* Supports multiple ingestors per instance.
|
||||
* Supports Meshtastic and Meshcore
|
||||
* Matrix bridge that posts Meshtastic messages to a defined matrix channel (no
|
||||
radio required).
|
||||
* Mobile app to _read_ messages on your local aether (no radio required).
|
||||
|
||||
Live demo for Berlin: [potatomesh.net](https://potatomesh.net)
|
||||
Live demo for Berlin #MediumFast: [potatomesh.net](https://potatomesh.net)
|
||||
|
||||

|
||||

|
||||
|
||||
## Web App
|
||||
|
||||
@@ -96,7 +88,6 @@ The web app can be configured with environment variables (defaults shown):
|
||||
| `CHANNEL` | `"#LongFast"` | Default channel name displayed in the UI. |
|
||||
| `FREQUENCY` | `"915MHz"` | Default frequency description displayed in the UI. |
|
||||
| `CONTACT_LINK` | `"#potatomesh:dod.ngo"` | Chat link or Matrix alias rendered in the footer and overlays. |
|
||||
| `ANNOUNCEMENT` | _unset_ | Optional announcement banner text rendered above the header on every page. |
|
||||
| `MAP_CENTER` | `38.761944,-27.090833` | Latitude and longitude that centre the map on load. |
|
||||
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom applied on first load; disables auto-fit when provided. |
|
||||
| `MAX_DISTANCE` | `42` | Maximum distance (km) before node relationships are hidden on the map. |
|
||||
@@ -105,32 +96,10 @@ The web app can be configured with environment variables (defaults shown):
|
||||
| `HIDDEN_CHANNELS` | _unset_ | Comma-separated channel names the ingestor will ignore when forwarding packets. |
|
||||
| `FEDERATION` | `1` | Set to `1` to announce your instance and crawl peers, or `0` to disable federation. Private mode overrides this. |
|
||||
| `PRIVATE` | `0` | Set to `1` to hide the chat UI, disable message APIs, and exclude hidden clients from public listings. |
|
||||
| `OG_IMAGE_URL` | _unset_ | Optional absolute URL for the social preview image. Must use an `http://` or `https://` scheme; values with other schemes are ignored. Most social platforms (Facebook, LinkedIn, Slack, iMessage) require **HTTPS** to render the card. When set, replaces the runtime-generated `/og-image.png` so deployments without Chromium (or with size-conscious images) can point at a CDN. |
|
||||
| `OG_IMAGE_TTL_SECONDS` | `3600` | Cache lifetime for the runtime-generated dashboard screenshot served at `/og-image.png`. |
|
||||
| `FERRUM_BROWSER_PATH` | `/usr/bin/chromium` (Docker) | Path to the headless Chromium binary used by the Open Graph preview generator. |
|
||||
|
||||
The application derives SEO-friendly document titles, descriptions, and social
|
||||
preview tags from these existing configuration values. `/robots.txt` and
|
||||
`/sitemap.xml` are generated automatically and respect `PRIVATE`/`FEDERATION`
|
||||
toggles; markdown files in `pages/` may declare optional YAML frontmatter
|
||||
(`title`, `description`, `image`, `noindex`) for per-page overrides. The
|
||||
`image:` frontmatter must be an absolute `http(s)://` URL; other schemes are
|
||||
silently dropped to keep operators from accidentally leaking `data:` or
|
||||
`javascript:` URIs into Open Graph tags.
|
||||
|
||||
If `INSTANCE_DOMAIN` is unset in production the app emits a one-time `WARN`
|
||||
at startup; canonical URLs and sitemap entries fall back to the inbound
|
||||
`Host` header, which can be cache-poisoned by a misconfigured proxy. Set
|
||||
`INSTANCE_DOMAIN` to your public hostname to silence the warning.
|
||||
|
||||
#### Open Graph preview image
|
||||
|
||||
The web container ships with Chromium so `/og-image.png` returns a fresh
|
||||
screenshot of the live dashboard, cached on disk for `OG_IMAGE_TTL_SECONDS`.
|
||||
Operators on size-constrained hosts can build a slim image by passing
|
||||
`--build-arg WITH_OG_IMAGE=0` to `docker build`; the route then falls back to
|
||||
the bundled `public/og-image-default.png`. Set `OG_IMAGE_URL` to an external
|
||||
PNG/JPG (e.g. on a CDN) to avoid runtime capture entirely.
|
||||
preview tags from these existing configuration values and reuses the bundled
|
||||
logo for Open Graph and Twitter cards.
|
||||
|
||||
Example:
|
||||
|
||||
@@ -150,28 +119,6 @@ well-known document is staged in
|
||||
|
||||
The database can be found in `$XDG_DATA_HOME/potato-mesh`.
|
||||
|
||||
### Custom Pages
|
||||
|
||||
Instance operators can publish static content pages (contact details, mesh
|
||||
protocol information, legal notices, etc.) by placing Markdown files in the
|
||||
`pages/` directory inside `web/`. Each `.md` file automatically becomes a nav
|
||||
entry and a route under `/pages/<slug>`.
|
||||
|
||||
Files are named `<sort-prefix>-<slug>.md` — the numeric prefix controls
|
||||
navigation order and the slug becomes the URL path and nav label:
|
||||
|
||||
| Filename | Nav Label | URL |
|
||||
| ---------------------- | -------------- | ----------------------- |
|
||||
| `1-about.md` | About | `/pages/about` |
|
||||
| `5-rules.md` | Rules | `/pages/rules` |
|
||||
| `9-contact.md` | Contact | `/pages/contact` |
|
||||
| `20-impressum.md` | Impressum | `/pages/impressum` |
|
||||
|
||||
A default `1-about.md` ships with the app. In Docker deployments the directory
|
||||
is exposed as the `potatomesh_pages` volume (mounted at `/app/pages`) so you can
|
||||
add or edit pages without rebuilding the image. The pages directory can also be
|
||||
overridden with the `PAGES_DIR` environment variable.
|
||||
|
||||
### Federation
|
||||
|
||||
PotatoMesh instances can optionally federate by publishing signed metadata and
|
||||
@@ -304,36 +251,15 @@ services.potato-mesh = {
|
||||
|
||||
## Docker
|
||||
|
||||
Docker images are published on GitHub Container Registry for each release.
|
||||
Image names and tags follow the workflow format:
|
||||
`${IMAGE_PREFIX}-${service}-${architecture}:${tag}` (see `.github/workflows/docker.yml`).
|
||||
Docker images are published on Github for each release:
|
||||
|
||||
```bash
|
||||
docker pull ghcr.io/l5yth/potato-mesh-web-linux-amd64:latest
|
||||
docker pull ghcr.io/l5yth/potato-mesh-web-linux-arm64:latest
|
||||
docker pull ghcr.io/l5yth/potato-mesh-web-linux-armv7:latest
|
||||
|
||||
docker pull ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:latest
|
||||
docker pull ghcr.io/l5yth/potato-mesh-ingestor-linux-arm64:latest
|
||||
docker pull ghcr.io/l5yth/potato-mesh-ingestor-linux-armv7:latest
|
||||
|
||||
docker pull ghcr.io/l5yth/potato-mesh-matrix-bridge-linux-amd64:latest
|
||||
docker pull ghcr.io/l5yth/potato-mesh-matrix-bridge-linux-arm64:latest
|
||||
docker pull ghcr.io/l5yth/potato-mesh-matrix-bridge-linux-armv7:latest
|
||||
|
||||
# version-pinned examples
|
||||
docker pull ghcr.io/l5yth/potato-mesh-web-linux-amd64:v0.6.3
|
||||
docker pull ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:v0.6.3
|
||||
docker pull ghcr.io/l5yth/potato-mesh-matrix-bridge-linux-amd64:v0.6.3
|
||||
docker pull ghcr.io/l5yth/potato-mesh/web:latest # newest release
|
||||
docker pull ghcr.io/l5yth/potato-mesh/web:v0.5.5 # pinned historical release
|
||||
docker pull ghcr.io/l5yth/potato-mesh/ingestor:latest
|
||||
docker pull ghcr.io/l5yth/potato-mesh/matrix-bridge:latest
|
||||
```
|
||||
|
||||
Note: `latest` is only published for non-prerelease versions. Pre-release tags
|
||||
such as `-rc`, `-beta`, `-alpha`, or `-dev` are version-tagged only.
|
||||
|
||||
When using Compose, set `POTATOMESH_IMAGE_ARCH` in `docker-compose.yml` (or via
|
||||
environment) so service images resolve to the correct architecture variant and
|
||||
you avoid manual tag mistakes.
|
||||
|
||||
Feel free to run the [configure.sh](./configure.sh) script to set up your
|
||||
environment. See the [Docker guide](DOCKER.md) for more details and custom
|
||||
deployment instructions.
|
||||
@@ -344,8 +270,6 @@ A matrix bridge is currently being worked on. It requests messages from a config
|
||||
potato-mesh instance and forwards it to a specified matrix channel; see
|
||||
[matrix/README.md](./matrix/README.md).
|
||||
|
||||

|
||||
|
||||
## Mobile App
|
||||
|
||||
A mobile _reader_ app is currently being worked on. Stay tuned for releases and updates.
|
||||
|
||||
+2
-6
@@ -1,10 +1,6 @@
|
||||
<!-- Copyright © 2025-26 l5yth & contributors -->
|
||||
<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
|
||||
# Meshtastic Reader
|
||||
|
||||
# PotatoMesh Mobile
|
||||
|
||||
PotatoMesh Mobile — read-only mesh chat client for Android and iOS.
|
||||
Supports Meshtastic and MeshCore networks.
|
||||
Meshtastic Reader – read-only PotatoMesh chat client for Android and iOS.
|
||||
|
||||
## Setup
|
||||
|
||||
|
||||
@@ -1,18 +1,3 @@
|
||||
/*
|
||||
* Copyright © 2025-26 l5yth & contributors
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
plugins {
|
||||
id("com.android.application")
|
||||
id("kotlin-android")
|
||||
|
||||
@@ -1,16 +1,3 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package net.potatomesh.reader
|
||||
|
||||
import io.flutter.embedding.android.FlutterActivity
|
||||
|
||||
@@ -1,18 +1,3 @@
|
||||
/*
|
||||
* Copyright © 2025-26 l5yth & contributors
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
allprojects {
|
||||
repositories {
|
||||
google()
|
||||
|
||||
@@ -1,18 +1,3 @@
|
||||
/*
|
||||
* Copyright © 2025-26 l5yth & contributors
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
pluginManagement {
|
||||
val flutterSdkPath =
|
||||
run {
|
||||
|
||||
+1
-13
@@ -1,18 +1,5 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
export GIT_TAG="$(git describe --tags --abbrev=0)"
|
||||
export GIT_COMMITS="$(git rev-list --count ${GIT_TAG}..HEAD)"
|
||||
export GIT_SHA="$(git rev-parse --short=9 HEAD)"
|
||||
@@ -25,3 +12,4 @@ flutter run \
|
||||
--dart-define=GIT_SHA="${GIT_SHA}" \
|
||||
--dart-define=GIT_DIRTY="${GIT_DIRTY}" \
|
||||
--device-id 38151FDJH00D4C
|
||||
|
||||
|
||||
@@ -15,11 +15,11 @@
|
||||
<key>CFBundlePackageType</key>
|
||||
<string>FMWK</string>
|
||||
<key>CFBundleShortVersionString</key>
|
||||
<string>0.6.3</string>
|
||||
<string>0.5.9</string>
|
||||
<key>CFBundleSignature</key>
|
||||
<string>????</string>
|
||||
<key>CFBundleVersion</key>
|
||||
<string>0.6.3</string>
|
||||
<string>0.5.9</string>
|
||||
<key>MinimumOSVersion</key>
|
||||
<string>14.0</string>
|
||||
</dict>
|
||||
|
||||
@@ -1,2 +1 @@
|
||||
#include? "Pods/Target Support Files/Pods-Runner/Pods-Runner.debug.xcconfig"
|
||||
#include "Generated.xcconfig"
|
||||
|
||||
@@ -1,2 +1 @@
|
||||
#include? "Pods/Target Support Files/Pods-Runner/Pods-Runner.release.xcconfig"
|
||||
#include "Generated.xcconfig"
|
||||
|
||||
@@ -1,16 +1,3 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import Flutter
|
||||
import UIKit
|
||||
|
||||
|
||||
@@ -1,14 +1 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#import "GeneratedPluginRegistrant.h"
|
||||
|
||||
@@ -1,16 +1,3 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import Flutter
|
||||
import UIKit
|
||||
import XCTest
|
||||
|
||||
+1
-5
@@ -2944,9 +2944,6 @@ class MeshNode {
|
||||
}
|
||||
}
|
||||
|
||||
/// The protocol identifier sent to the API to filter results to Meshtastic only.
|
||||
const String _kProtocolFilter = 'meshtastic';
|
||||
|
||||
/// Build a messages API URI for a given domain or absolute URL.
|
||||
Uri _buildMessagesUri(String domain, {int since = 0, int limit = 1000}) {
|
||||
final trimmed = domain.trim();
|
||||
@@ -2954,7 +2951,6 @@ Uri _buildMessagesUri(String domain, {int since = 0, int limit = 1000}) {
|
||||
'limit': limit.toString(),
|
||||
'encrypted': 'false',
|
||||
'since': since.toString(),
|
||||
'protocol': _kProtocolFilter,
|
||||
};
|
||||
if (trimmed.isEmpty) {
|
||||
return Uri.https('potatomesh.net', '/api/messages', params);
|
||||
@@ -2992,7 +2988,7 @@ Uri _buildNodeUri(String domain, String nodeId) {
|
||||
/// Build the bulk nodes API URI for fetching recent nodes.
|
||||
Uri _buildNodesUri(String domain, {int limit = 1000}) {
|
||||
final trimmedDomain = domain.trim();
|
||||
final params = {'limit': limit.toString(), 'protocol': _kProtocolFilter};
|
||||
final params = {'limit': limit.toString()};
|
||||
|
||||
if (trimmedDomain.isEmpty) {
|
||||
return Uri.https('potatomesh.net', '/api/nodes', params);
|
||||
|
||||
+8
-8
@@ -45,10 +45,10 @@ packages:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: characters
|
||||
sha256: faf38497bda5ead2a8c7615f4f7939df04333478bf32e4173fcb06d428b5716b
|
||||
sha256: f71061c654a3380576a52b451dd5532377954cf9dbd272a78fc8479606670803
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.4.1"
|
||||
version: "1.4.0"
|
||||
checked_yaml:
|
||||
dependency: transitive
|
||||
description:
|
||||
@@ -284,18 +284,18 @@ packages:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: matcher
|
||||
sha256: "12956d0ad8390bbcc63ca2e1469c0619946ccb52809807067a7020d57e647aa6"
|
||||
sha256: dc58c723c3c24bf8d3e2d3ad3f2f9d7bd9cf43ec6feaa64181775e60190153f2
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "0.12.18"
|
||||
version: "0.12.17"
|
||||
material_color_utilities:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: material_color_utilities
|
||||
sha256: "9c337007e82b1889149c82ed242ed1cb24a66044e30979c44912381e9be4c48b"
|
||||
sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "0.13.0"
|
||||
version: "0.11.1"
|
||||
meta:
|
||||
dependency: transitive
|
||||
description:
|
||||
@@ -497,10 +497,10 @@ packages:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: test_api
|
||||
sha256: "93167629bfc610f71560ab9312acdda4959de4df6fac7492c89ff0d3886f6636"
|
||||
sha256: ab2726c1a94d3176a45960b6234466ec367179b87dd74f1611adb1f3b5fb9d55
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "0.7.9"
|
||||
version: "0.7.7"
|
||||
timezone:
|
||||
dependency: transitive
|
||||
description:
|
||||
|
||||
+1
-1
@@ -1,7 +1,7 @@
|
||||
name: potato_mesh_reader
|
||||
description: Meshtastic Reader — read-only view for PotatoMesh messages.
|
||||
publish_to: "none"
|
||||
version: 0.6.3
|
||||
version: 0.5.9
|
||||
|
||||
environment:
|
||||
sdk: ">=3.4.0 <4.0.0"
|
||||
|
||||
+1
-13
@@ -1,18 +1,5 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
export GIT_TAG="$(git describe --tags --abbrev=0)"
|
||||
@@ -40,3 +27,4 @@ fi
|
||||
export APK_DIR="build/app/outputs/flutter-apk"
|
||||
mv -v "${APK_DIR}/app-release.apk" "${APK_DIR}/potatomesh-reader-android-${TAG_NAME}.apk"
|
||||
(cd "${APK_DIR}" && sha256sum "potatomesh-reader-android-${TAG_NAME}.apk" > "potatomesh-reader-android-${TAG_NAME}.apk.sha256sum")
|
||||
|
||||
|
||||
@@ -206,10 +206,8 @@ void main() {
|
||||
|
||||
expect(calls[0].host, 'mesh.example.org');
|
||||
expect(calls[0].path, '/api/messages');
|
||||
expect(calls[0].queryParameters['protocol'], 'meshtastic');
|
||||
expect(calls[1].scheme, 'https');
|
||||
expect(calls[1].path, '/api/messages');
|
||||
expect(calls[1].queryParameters['protocol'], 'meshtastic');
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
@@ -145,7 +145,6 @@ void main() {
|
||||
if (request.url.path == '/api/messages') {
|
||||
sinces.add(request.url.queryParameters['since'] ?? '');
|
||||
expect(request.url.queryParameters['limit'], '1000');
|
||||
expect(request.url.queryParameters['protocol'], 'meshtastic');
|
||||
if (sinces.length == 1) {
|
||||
return http.Response(
|
||||
jsonEncode([
|
||||
|
||||
@@ -1,16 +1,3 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This is a basic Flutter widget test.
|
||||
//
|
||||
// To perform an interaction with a widget in your test, use the WidgetTester
|
||||
|
||||
@@ -219,6 +219,16 @@ else
|
||||
sed -i.bak '/^INSTANCE_DOMAIN=.*/d' .env
|
||||
fi
|
||||
|
||||
# Migrate legacy connection settings and ensure defaults exist
|
||||
if grep -q "^MESH_SERIAL=" .env; then
|
||||
legacy_connection=$(grep "^MESH_SERIAL=" .env | head -n1 | cut -d'=' -f2-)
|
||||
if [ -n "$legacy_connection" ] && ! grep -q "^CONNECTION=" .env; then
|
||||
echo "♻️ Migrating legacy MESH_SERIAL value to CONNECTION"
|
||||
update_env "CONNECTION" "$legacy_connection"
|
||||
fi
|
||||
sed -i.bak '/^MESH_SERIAL=.*/d' .env
|
||||
fi
|
||||
|
||||
if ! grep -q "^CONNECTION=" .env; then
|
||||
echo "CONNECTION=/dev/ttyACM0" >> .env
|
||||
fi
|
||||
|
||||
@@ -50,7 +50,6 @@ USER potatomesh
|
||||
ENV CONNECTION=/dev/ttyACM0 \
|
||||
CHANNEL_INDEX=0 \
|
||||
DEBUG=0 \
|
||||
PROTOCOL=meshtastic \
|
||||
ALLOWED_CHANNELS="" \
|
||||
HIDDEN_CHANNELS="" \
|
||||
INSTANCE_DOMAIN="" \
|
||||
@@ -78,7 +77,6 @@ USER ContainerUser
|
||||
ENV CONNECTION=/dev/ttyACM0 \
|
||||
CHANNEL_INDEX=0 \
|
||||
DEBUG=0 \
|
||||
PROTOCOL=meshtastic \
|
||||
ALLOWED_CHANNELS="" \
|
||||
HIDDEN_CHANNELS="" \
|
||||
INSTANCE_DOMAIN="" \
|
||||
|
||||
+1
-1
@@ -18,7 +18,7 @@ The ``data.mesh`` module exposes helpers for reading Meshtastic node and
|
||||
message information before forwarding it to the accompanying web application.
|
||||
"""
|
||||
|
||||
VERSION = "0.6.3"
|
||||
VERSION = "0.5.9"
|
||||
"""Semantic version identifier shared with the dashboard and front-end."""
|
||||
|
||||
__version__ = VERSION
|
||||
|
||||
+1
-2
@@ -20,8 +20,7 @@ CREATE TABLE IF NOT EXISTS ingestors (
|
||||
last_seen_time INTEGER NOT NULL,
|
||||
version TEXT,
|
||||
lora_freq INTEGER,
|
||||
modem_preset TEXT,
|
||||
protocol TEXT NOT NULL DEFAULT 'meshtastic'
|
||||
modem_preset TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ingestors_last_seen ON ingestors(last_seen_time);
|
||||
|
||||
@@ -27,8 +27,6 @@ CREATE TABLE IF NOT EXISTS instances (
|
||||
last_update_time INTEGER,
|
||||
is_private BOOLEAN NOT NULL DEFAULT 0,
|
||||
nodes_count INTEGER,
|
||||
meshcore_nodes_count INTEGER,
|
||||
meshtastic_nodes_count INTEGER,
|
||||
contact_link TEXT,
|
||||
signature TEXT
|
||||
);
|
||||
|
||||
+4
-11
@@ -15,14 +15,7 @@
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
# Recreate the venv only when its embedded Python is missing or points to the
|
||||
# wrong prefix (e.g. a stale shebang from a sibling project's venv). Avoid
|
||||
# --clear on every run: it wipes installed packages before each start, so any
|
||||
# restart during a PyPI outage turns a transient network failure into hard
|
||||
# ingestor downtime.
|
||||
if ! .venv/bin/python -c "import sys; exit(0 if '.venv' in sys.prefix else 1)" 2>/dev/null; then
|
||||
python -m venv --clear .venv
|
||||
fi
|
||||
.venv/bin/pip install -U pip
|
||||
.venv/bin/pip install -r "$(dirname "$0")/requirements.txt"
|
||||
exec .venv/bin/python mesh.py
|
||||
python -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install -U meshtastic black pytest
|
||||
exec python mesh.py
|
||||
|
||||
@@ -1,147 +0,0 @@
|
||||
<!-- Copyright © 2025-26 l5yth & contributors -->
|
||||
<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
|
||||
|
||||
## Mesh ingestor contracts (stable interfaces)
|
||||
|
||||
This repo’s ingestion pipeline is split into:
|
||||
|
||||
- **Python collector** (`data/mesh_ingestor/*`) which normalizes packets/events and POSTs JSON to the web app.
|
||||
- **Sinatra web app** (`web/`) which accepts those payloads on `POST /api/*` ingest routes and persists them into SQLite tables defined under `data/*.sql`.
|
||||
|
||||
This document records the **contracts that future protocols must preserve**. The intent is to enable adding new protocols (MeshCore, Reticulum, …) without changing the Ruby/DB/UI read-side.
|
||||
|
||||
### Canonical node identity
|
||||
|
||||
- **Canonical node id**: `nodes.node_id` is a `TEXT` primary key and is treated as canonical across the system.
|
||||
- **Format**: `!%08x` (lowercase hex, 8 chars), for example `!abcdef01`.
|
||||
- **Normalization**:
|
||||
- Python currently normalizes via `data/mesh_ingestor/serialization.py:_canonical_node_id`.
|
||||
- Ruby normalizes via `web/lib/potato_mesh/application/data_processing.rb:canonical_node_parts`.
|
||||
- **Dual addressing**: Ruby routes and queries accept either a canonical `!xxxxxxxx` string or a numeric node id; they normalize to `node_id`.
|
||||
|
||||
Note: non-Meshtastic protocols will need a strategy to map their native node identifiers into this `!%08x` space. That mapping is intentionally not standardized in code yet.
|
||||
|
||||
### Ingest HTTP routes and payload shapes
|
||||
|
||||
Future providers should emit payloads that match these shapes (keys + types), which are validated by existing tests (notably `tests/test_mesh.py`).
|
||||
|
||||
#### `POST /api/nodes`
|
||||
|
||||
Payload is a mapping keyed by canonical node id, with an optional top-level `”ingestor”` key:
|
||||
|
||||
- `{ “!abcdef01”: { ... node fields ... }, “ingestor”: “!ingestornodeid” }`
|
||||
|
||||
When `”ingestor”` is present the protocol is inherited from the registered ingestor (see `POST /api/ingestors`); omitting it defaults to `”meshtastic”`.
|
||||
|
||||
Node entry fields are “Meshtastic-ish” (camelCase) and may include:
|
||||
|
||||
- `num` (int node number)
|
||||
- `lastHeard` (int unix seconds)
|
||||
- `snr` (float)
|
||||
- `hopsAway` (int)
|
||||
- `isFavorite` (bool)
|
||||
- `user` (mapping; e.g. `shortName`, `longName`, `macaddr`, `hwModel`, `publicKey`, `isUnmessagable`)
|
||||
- `role` (optional string) — omit when unknown; known values include Meshtastic role names (e.g. `CLIENT`, `ROUTER`) and MeshCore role names (`COMPANION`, `REPEATER`, `ROOM_SERVER`, `SENSOR`)
|
||||
- `deviceMetrics` (mapping; e.g. `batteryLevel`, `voltage`, `channelUtilization`, `airUtilTx`, `uptimeSeconds`)
|
||||
- `position` (mapping; `latitude`, `longitude`, `altitude`, `time`, `locationSource`, `precisionBits`, optional nested `raw`)
|
||||
- Optional radio metadata: `lora_freq`, `modem_preset`
|
||||
|
||||
#### `POST /api/messages`
|
||||
|
||||
Single message payload:
|
||||
|
||||
- Required: `id` (int), `rx_time` (int), `rx_iso` (string)
|
||||
- Identity: `from_id` (string/int), `to_id` (string/int), `channel` (int), `portnum` (string|nil)
|
||||
- Payload: `text` (string|nil), `encrypted` (string|nil), `reply_id` (int|nil), `emoji` (string|nil)
|
||||
- RF: `snr` (float|nil), `rssi` (int|nil), `hop_limit` (int|nil)
|
||||
- Meta: `channel_name` (string; only when not encrypted and known), `ingestor` (canonical host id), `lora_freq`, `modem_preset`
|
||||
|
||||
**Cross-ingestor deduplication.** The `id` field is the sole dedup key — the server collapses repeat POSTs on the `messages.id` PRIMARY KEY. Protocols that lack a firmware-assigned packet ID MUST derive a stable, sender-side fingerprint so that the same physical transmission heard by multiple ingestors produces the same `id`. The id MUST fit in 53 bits (`0 <= id <= (1 << 53) - 1`) to round-trip through the JavaScript frontend without precision loss.
|
||||
|
||||
For MeshCore the canonical fingerprint is:
|
||||
|
||||
```
|
||||
v1:<sender_identity>:<sender_timestamp>:<discriminator>:<text>
|
||||
```
|
||||
|
||||
hashed with SHA-256 and truncated to 53 bits (first 7 bytes, masked). Components:
|
||||
|
||||
- `sender_identity` — for channel messages, the lowercased+stripped sender name parsed from a leading `SenderName:` prefix in the message text (split on the first colon, surrounding whitespace stripped); for direct messages, the sender's `pubkey_prefix` from the MeshCore event payload. Empty string when unavailable — when the channel-message text lacks any `SenderName:` prefix the dedup degrades and two distinct senders sharing timestamp + channel + text collide. In practice MeshCore clients always prefix the name; the residual risk is anonymous/malformed transmissions.
|
||||
- `sender_timestamp` — Unix seconds from the sender's clock (identical across receivers).
|
||||
- `discriminator` — `c<N>` for channel messages on channel `N`, `dm` for direct messages.
|
||||
- `text` — the message text exactly as transmitted.
|
||||
|
||||
The `v1:` prefix lets the format evolve (e.g. add a channel-secret hash) without colliding with previously-written ids.
|
||||
|
||||
**Known limitations of the v1 fingerprint:**
|
||||
|
||||
- *Format-string ambiguity around `:`.* Components are joined with literal colons and not length-prefixed, so a colon embedded in `sender_identity` or `text` shifts the boundary between fields. In theory two distinct triples (e.g. `sender_identity="a:b"` vs `sender_identity="a"` with a leading `b:` in `text`) can produce the same fingerprint. In practice this is vanishingly rare — MeshCore sender names rarely contain colons and even then both senders would have to land on the same timestamp/channel — but a `v2` revision should switch to a delimiter that cannot appear in any component (e.g. `\x00`) or length-prefix each field.
|
||||
- *meshcore_py text-decoding inconsistency.* The upstream `meshcore_py` reader strips trailing `\0` bytes on the real-time `CHANNEL_MSG_RECV` path but not on the sync-replay path. If the same physical message is heard once in real-time and once via sync-replay, the byte sequences differ → different fingerprints → duplicate row. Out of scope for the ingestor; track upstream.
|
||||
- *Sender-side clock reset.* MeshCore nodes without an RTC start `sender_timestamp` from `0` after reboot. Two messages from the same sender containing the same text within one second of power-on collapse into a single row. Acceptable trade-off given the alternative (no dedup at all).
|
||||
- *Relay-rewritten `sender_timestamp` (#756).* MeshCore has been observed delivering the same physical packet twice with a rewritten `sender_timestamp` (≈10 s later, same `from_id`/`channel`/`text`), which flips the v1 fingerprint and bypasses the `messages.id` PK collapse. To cover this, the web app runs an additional content-level dedup on insert: for `protocol = "meshcore"` with non-empty `text` and a known `from_id`, a second row matching `(from_id, to_id, channel, text)` within ±30 s of `rx_time` is dropped (window lives in `MESHCORE_CONTENT_DEDUP_WINDOW_SECONDS`). The window is ~3× the observed relay delta; legitimate rapid re-sends of identical short text (e.g. `hi`, `ack`, `ok`, `test`) from the same sender on the same channel **within 30 s** will be silently collapsed into one row. Ingestors MUST still produce deterministic v1 ids — this content-level layer is additive, not a replacement. Pre-existing duplicates are cleared once by a `PRAGMA user_version`-gated one-shot backfill on startup.
|
||||
- *Concurrent-insert race (#756).* The content-dedup SELECT and the downstream INSERT are not currently wrapped in a shared transaction, so two concurrent Puma threads carrying the same content with different ids can both pass the pre-check and both insert. Duplicates produced this way are narrow (single-node multi-threaded ingest) and are not cleaned up on subsequent boots because the backfill is one-shot. If the race is ever observed in production, tighten `insert_message` to wrap the meshcore pre-check + id-PK path in `db.transaction(:immediate)`.
|
||||
- *Upstream `meshcore` reader crash on truncated advertisements (#754).* `meshcore-py` 2.3.6 (latest at the time of writing) raises `IndexError` from `MessageReader.handle_rx` at `reader.py:365` when a `DEVICE_INFO`/advertisement frame declares `fw_ver >= 10` but omits the trailing `path_hash_mode` byte. Because the frame is parsed inside a detached `asyncio.create_task(...)`, the exception surfaces as `Task exception was never retrieved` on stderr and the event for that frame is lost. The ingestor installs a runtime patch (`data/mesh_ingestor/protocols/_meshcore_patches.py`) that wraps `handle_rx`, logs one line with the first 32 bytes of the offending frame under `context=meshcore.reader.patch`, and lets the task exit cleanly; a loop-level handler (`context=asyncio.unhandled`) catches anything the targeted patch misses. Both shims are additive and will be removed once upstream ships a defensive length check.
|
||||
|
||||
#### `POST /api/positions`
|
||||
|
||||
Single position payload:
|
||||
|
||||
- Required: `id` (int), `rx_time` (int), `rx_iso` (string)
|
||||
- Node: `node_id` (canonical string), `node_num` (int|nil), `num` (int|nil), `from_id` (canonical string), `to_id` (string|nil)
|
||||
- Position: `latitude`, `longitude`, `altitude` (floats|nil)
|
||||
- Position time: `position_time` (int|nil)
|
||||
- Quality: `location_source` (string|nil), `precision_bits` (int|nil), `sats_in_view` (int|nil), `pdop` (float|nil)
|
||||
- Motion: `ground_speed` (float|nil), `ground_track` (float|nil)
|
||||
- RF/meta: `snr`, `rssi`, `hop_limit`, `bitfield`, `payload_b64` (string|nil), `raw` (mapping|nil), `ingestor`, `lora_freq`, `modem_preset`
|
||||
|
||||
#### `POST /api/telemetry`
|
||||
|
||||
Single telemetry payload:
|
||||
|
||||
- Required: `id` (int), `rx_time` (int), `rx_iso` (string)
|
||||
- Node: `node_id` (canonical string|nil), `node_num` (int|nil), `from_id`, `to_id`
|
||||
- Time: `telemetry_time` (int|nil)
|
||||
- Packet: `channel` (int), `portnum` (string|nil), `bitfield` (int|nil), `hop_limit` (int|nil)
|
||||
- RF: `snr` (float|nil), `rssi` (int|nil)
|
||||
- Raw: `payload_b64` (string; may be empty string when unknown)
|
||||
- Metrics: many optional snake_case keys (`battery_level`, `voltage`, `temperature`, etc.)
|
||||
- Subtype: `telemetry_type` (string|nil) — optional discriminator identifying which Meshtastic protobuf oneof was set; one of `"device"`, `"environment"`, `"power"`, or `"air_quality"`. Ingestors that detect the subtype SHOULD include this field; omit rather than send `null` when unknown. The web app infers the type from metric-field presence when absent, so old ingestors remain compatible.
|
||||
- Meta: `ingestor`, `lora_freq`, `modem_preset`
|
||||
|
||||
#### `POST /api/neighbors`
|
||||
|
||||
Neighbors snapshot payload:
|
||||
|
||||
- Node: `node_id` (canonical string), `node_num` (int|nil)
|
||||
- `neighbors`: list of entries with `neighbor_id` (canonical string), `neighbor_num` (int|nil), `snr` (float|nil), `rx_time` (int), `rx_iso` (string)
|
||||
- Snapshot time: `rx_time`, `rx_iso`
|
||||
- Optional: `node_broadcast_interval_secs` (int|nil), `last_sent_by_id` (canonical string|nil)
|
||||
- Meta: `ingestor`, `lora_freq`, `modem_preset`
|
||||
|
||||
#### `POST /api/traces`
|
||||
|
||||
Single trace payload:
|
||||
|
||||
- Identity: `id` (int|nil), `request_id` (int|nil)
|
||||
- Endpoints: `src` (int|nil), `dest` (int|nil)
|
||||
- Path: `hops` (list[int])
|
||||
- Time: `rx_time` (int), `rx_iso` (string)
|
||||
- Metrics: `rssi` (int|nil), `snr` (float|nil), `elapsed_ms` (int|nil)
|
||||
- Meta: `ingestor`, `lora_freq`, `modem_preset`
|
||||
|
||||
#### `POST /api/ingestors`
|
||||
|
||||
Heartbeat payload:
|
||||
|
||||
- `node_id` (canonical string)
|
||||
- `start_time` (int), `last_seen_time` (int)
|
||||
- `version` (string)
|
||||
- Optional: `lora_freq`, `modem_preset`
|
||||
- Optional: `protocol` (string; e.g. `"meshtastic"`, `"meshcore"`) — declares the mesh backend for this ingestor; defaults to `"meshtastic"` when absent
|
||||
|
||||
**Protocol propagation**: all event records (`messages`, `positions`, `telemetry`, `traces`, `neighbors`) that reference this ingestor via their `ingestor` field will inherit its `protocol` value at write time.
|
||||
|
||||
### GET endpoint filtering
|
||||
|
||||
All collection GET endpoints (`/api/nodes`, `/api/messages`, `/api/positions`, `/api/telemetry`, `/api/traces`, `/api/neighbors`, `/api/ingestors`) accept an optional `?protocol=<value>` query parameter. When present, only records whose `protocol` column matches the given value are returned. The `protocol` field is included in all GET responses.
|
||||
|
||||
@@ -25,7 +25,6 @@ from .. import VERSION as _PACKAGE_VERSION
|
||||
from . import (
|
||||
channels,
|
||||
config,
|
||||
connection,
|
||||
daemon,
|
||||
handlers,
|
||||
ingestors,
|
||||
@@ -47,7 +46,7 @@ def _reexport(module) -> None:
|
||||
def _export_constants() -> None:
|
||||
globals()["json"] = queue.json
|
||||
globals()["urllib"] = queue.urllib
|
||||
globals()["glob"] = connection.glob
|
||||
globals()["glob"] = interfaces.glob
|
||||
__all__.extend(["json", "urllib", "glob", "threading", "signal"])
|
||||
|
||||
|
||||
@@ -70,7 +69,6 @@ _CONFIG_ATTRS = {
|
||||
"CHANNEL_INDEX",
|
||||
"DEBUG",
|
||||
"INSTANCE",
|
||||
"INSTANCES",
|
||||
"API_TOKEN",
|
||||
"ALLOWED_CHANNELS",
|
||||
"HIDDEN_CHANNELS",
|
||||
@@ -83,6 +81,9 @@ _CONFIG_ATTRS = {
|
||||
"_debug_log",
|
||||
}
|
||||
|
||||
# Legacy export maintained for backwards compatibility.
|
||||
_CONFIG_ATTRS.add("PORT")
|
||||
|
||||
_INTERFACE_ATTRS = {"BLEInterface", "SerialInterface", "TCPInterface"}
|
||||
|
||||
_QUEUE_ATTRS = set(queue.__all__)
|
||||
|
||||
@@ -182,9 +182,6 @@ def capture_from_interface(iface: Any) -> None:
|
||||
channels_obj = getattr(local_node, "channels", None) if local_node else None
|
||||
|
||||
channel_entries: list[tuple[int, str]] = []
|
||||
# Use a set for O(1) duplicate-index checks; Meshtastic occasionally
|
||||
# emits the same channel index twice when the channel list is partially
|
||||
# initialised, so we keep only the first valid entry per index.
|
||||
seen_indices: set[int] = set()
|
||||
for candidate in _iter_channel_objects(channels_obj):
|
||||
result = _channel_tuple(candidate)
|
||||
@@ -273,43 +270,6 @@ def is_hidden_channel(channel_name_value: str | None) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
def register_channel(channel_idx: int, channel_name_value: str) -> None:
|
||||
"""Register a single channel index → name mapping.
|
||||
|
||||
Unlike :func:`capture_from_interface`, which scans a complete interface
|
||||
object in one shot, this function registers entries one at a time. It is
|
||||
intended for protocols (e.g. MeshCore) that expose channel metadata via
|
||||
per-index requests rather than a bulk channel list.
|
||||
|
||||
Idempotent: silently skips if *channel_idx* is already cached or
|
||||
*channel_name_value* is blank, matching the first-seen-wins semantics of
|
||||
:func:`capture_from_interface`.
|
||||
|
||||
Parameters:
|
||||
channel_idx: Zero-based channel index.
|
||||
channel_name_value: Human-readable channel name reported by the device.
|
||||
"""
|
||||
|
||||
global _CHANNEL_MAPPINGS, _CHANNEL_LOOKUP
|
||||
|
||||
if not isinstance(channel_name_value, str) or not channel_name_value.strip():
|
||||
return
|
||||
if channel_idx in _CHANNEL_LOOKUP:
|
||||
return
|
||||
|
||||
name = channel_name_value.strip()
|
||||
_CHANNEL_LOOKUP[channel_idx] = name
|
||||
_CHANNEL_MAPPINGS = tuple(sorted(_CHANNEL_LOOKUP.items()))
|
||||
|
||||
config._debug_log(
|
||||
"Registered channel",
|
||||
context="channels.register",
|
||||
severity="info",
|
||||
channel_idx=channel_idx,
|
||||
channel_name=name,
|
||||
)
|
||||
|
||||
|
||||
def _reset_channel_cache() -> None:
|
||||
"""Clear cached channel data. Intended for use in tests only."""
|
||||
|
||||
@@ -322,7 +282,6 @@ __all__ = [
|
||||
"capture_from_interface",
|
||||
"channel_mappings",
|
||||
"channel_name",
|
||||
"register_channel",
|
||||
"allowed_channel_names",
|
||||
"hidden_channel_names",
|
||||
"is_allowed_channel",
|
||||
|
||||
+36
-145
@@ -16,9 +16,10 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from types import ModuleType
|
||||
from typing import Any
|
||||
|
||||
DEFAULT_SNAPSHOT_SECS = 60
|
||||
@@ -48,14 +49,12 @@ DEFAULT_ENERGY_SLEEP_SECS = float(6 * 60 * 60)
|
||||
DEFAULT_INGESTOR_HEARTBEAT_SECS = float(60 * 60)
|
||||
"""Interval between ingestor heartbeat announcements."""
|
||||
|
||||
DEFAULT_SELF_NODE_REPORT_INTERVAL_SECS = float(60 * 60)
|
||||
"""Interval between periodic forced self-node re-reports from the daemon."""
|
||||
|
||||
CONNECTION = os.environ.get("CONNECTION")
|
||||
CONNECTION = os.environ.get("CONNECTION") or os.environ.get("MESH_SERIAL")
|
||||
"""Optional connection target for the mesh interface.
|
||||
|
||||
When unset, platform-specific defaults will be inferred by the interface
|
||||
implementations.
|
||||
implementations. The legacy :envvar:`MESH_SERIAL` environment variable is still
|
||||
accepted for backwards compatibility.
|
||||
"""
|
||||
|
||||
SNAPSHOT_SECS = DEFAULT_SNAPSHOT_SECS
|
||||
@@ -66,52 +65,6 @@ CHANNEL_INDEX = int(os.environ.get("CHANNEL_INDEX", str(DEFAULT_CHANNEL_INDEX)))
|
||||
|
||||
DEBUG = os.environ.get("DEBUG") == "1"
|
||||
|
||||
_KNOWN_PROTOCOLS = ("meshtastic", "meshcore")
|
||||
|
||||
_raw_protocol = os.environ.get("PROTOCOL", "meshtastic").strip().lower()
|
||||
if _raw_protocol not in _KNOWN_PROTOCOLS:
|
||||
raise ValueError(
|
||||
f"Unknown PROTOCOL={_raw_protocol!r}. "
|
||||
f"Valid options: {', '.join(_KNOWN_PROTOCOLS)}"
|
||||
)
|
||||
|
||||
PROTOCOL = _raw_protocol
|
||||
"""Active ingestion protocol, selected via the :envvar:`PROTOCOL` environment variable.
|
||||
|
||||
Accepted values are ``meshtastic`` (default) and ``meshcore``.
|
||||
"""
|
||||
|
||||
|
||||
def _parse_lora_freq_env(raw: str | None) -> float | int | None:
|
||||
"""Parse the ``FREQUENCY`` environment variable into a numeric LoRa frequency.
|
||||
|
||||
Returns an :class:`int` for whole-number strings (e.g. ``"868"``), a
|
||||
:class:`float` for decimal strings (e.g. ``"869.525"``), or ``None`` when
|
||||
*raw* is empty, absent, non-numeric, or non-finite (e.g. ``"inf"``).
|
||||
|
||||
Non-numeric labels such as ``"EU_868"`` intentionally return ``None`` so
|
||||
that :data:`LORA_FREQ` is left unset and :func:`~interfaces._ensure_radio_metadata`
|
||||
can still populate it from the detected radio configuration.
|
||||
|
||||
Parameters:
|
||||
raw: Raw value of the ``FREQUENCY`` environment variable.
|
||||
|
||||
Returns:
|
||||
Numeric frequency value, or ``None``.
|
||||
"""
|
||||
if not raw:
|
||||
return None
|
||||
stripped = raw.strip()
|
||||
if not stripped:
|
||||
return None
|
||||
try:
|
||||
as_float = float(stripped)
|
||||
except ValueError:
|
||||
return None
|
||||
if not math.isfinite(as_float):
|
||||
return None
|
||||
return int(as_float) if as_float == int(as_float) else as_float
|
||||
|
||||
|
||||
def _parse_channel_names(raw_value: str | None) -> tuple[str, ...]:
|
||||
"""Normalise a comma-separated list of channel names.
|
||||
@@ -159,16 +112,16 @@ ALLOWED_CHANNELS = _parse_channel_names(os.environ.get("ALLOWED_CHANNELS"))
|
||||
def _resolve_instance_domain() -> str:
|
||||
"""Resolve the configured instance domain from the environment.
|
||||
|
||||
Reads the :envvar:`INSTANCE_DOMAIN` variable. When the value does not
|
||||
contain a scheme, ``https://`` is prepended automatically.
|
||||
|
||||
.. note::
|
||||
|
||||
Kept for backward compatibility with existing tests and callers.
|
||||
New code should use :func:`_resolve_instance_domains` instead.
|
||||
The ingestor prefers the :envvar:`INSTANCE_DOMAIN` variable for clarity and
|
||||
compatibility with the web application. For deployments that still
|
||||
configure the legacy :envvar:`POTATOMESH_INSTANCE` variable, the resolver
|
||||
falls back to that value when no primary domain is set.
|
||||
"""
|
||||
|
||||
configured_instance = os.environ.get("INSTANCE_DOMAIN", "").rstrip("/")
|
||||
instance_domain = os.environ.get("INSTANCE_DOMAIN", "")
|
||||
legacy_instance = os.environ.get("POTATOMESH_INSTANCE", "")
|
||||
|
||||
configured_instance = (instance_domain or legacy_instance).rstrip("/")
|
||||
|
||||
if configured_instance and "://" not in configured_instance:
|
||||
return f"https://{configured_instance}"
|
||||
@@ -176,91 +129,13 @@ def _resolve_instance_domain() -> str:
|
||||
return configured_instance
|
||||
|
||||
|
||||
def _normalise_domain(raw: str) -> str:
|
||||
"""Strip whitespace and trailing slashes, prepend ``https://`` when needed.
|
||||
|
||||
Parameters:
|
||||
raw: Single domain string to normalise.
|
||||
|
||||
Returns:
|
||||
A URL string with a scheme prefix.
|
||||
"""
|
||||
|
||||
domain = raw.strip().rstrip("/")
|
||||
if domain and "://" not in domain:
|
||||
return f"https://{domain}"
|
||||
return domain
|
||||
|
||||
|
||||
def _resolve_instance_domains() -> tuple[tuple[str, str], ...]:
|
||||
"""Parse :envvar:`INSTANCE_DOMAIN` and :envvar:`API_TOKEN` into paired tuples.
|
||||
|
||||
When ``INSTANCE_DOMAIN`` contains comma-separated values, each entry is
|
||||
treated as an independent target. ``API_TOKEN`` is either broadcast to
|
||||
every target (single value) or positionally paired (comma-separated with
|
||||
a matching count).
|
||||
|
||||
Returns:
|
||||
A tuple of ``(instance_url, api_token)`` pairs, deduplicated by URL.
|
||||
|
||||
Raises:
|
||||
ValueError: When the number of comma-separated tokens exceeds the
|
||||
number of domains.
|
||||
"""
|
||||
|
||||
raw_domain = os.environ.get("INSTANCE_DOMAIN", "")
|
||||
raw_token = os.environ.get("API_TOKEN", "")
|
||||
|
||||
domains: list[str] = []
|
||||
seen: set[str] = set()
|
||||
for part in raw_domain.split(","):
|
||||
normalised = _normalise_domain(part)
|
||||
if not normalised:
|
||||
continue
|
||||
key = normalised.casefold()
|
||||
if key in seen:
|
||||
continue
|
||||
seen.add(key)
|
||||
domains.append(normalised)
|
||||
|
||||
if not domains:
|
||||
return ()
|
||||
|
||||
tokens = [t.strip() for t in raw_token.split(",")]
|
||||
# A single token (including empty string) is broadcast to all domains.
|
||||
if len(tokens) == 1:
|
||||
token = tokens[0]
|
||||
return tuple((d, token) for d in domains)
|
||||
|
||||
if len(tokens) != len(domains):
|
||||
raise ValueError(
|
||||
f"API_TOKEN has {len(tokens)} comma-separated values but "
|
||||
f"INSTANCE_DOMAIN has {len(domains)}; counts must match or "
|
||||
f"API_TOKEN must be a single value"
|
||||
)
|
||||
|
||||
return tuple(zip(domains, tokens))
|
||||
|
||||
|
||||
INSTANCES: tuple[tuple[str, str], ...] = _resolve_instance_domains()
|
||||
"""Paired ``(instance_url, api_token)`` tuples derived from the environment."""
|
||||
|
||||
INSTANCE = INSTANCES[0][0] if INSTANCES else _resolve_instance_domain()
|
||||
"""First configured instance URL, kept for backward compatibility."""
|
||||
|
||||
API_TOKEN = INSTANCES[0][1] if INSTANCES else os.environ.get("API_TOKEN", "")
|
||||
"""API token for the first configured instance, kept for backward compatibility."""
|
||||
INSTANCE = _resolve_instance_domain()
|
||||
API_TOKEN = os.environ.get("API_TOKEN", "")
|
||||
ENERGY_SAVING = os.environ.get("ENERGY_SAVING") == "1"
|
||||
"""When ``True``, enables the ingestor's energy saving mode."""
|
||||
|
||||
LORA_FREQ: float | int | str | None = _parse_lora_freq_env(os.environ.get("FREQUENCY"))
|
||||
"""Frequency of the local node's configured LoRa region in MHz or raw region label.
|
||||
|
||||
Pre-seeded from the ``FREQUENCY`` environment variable when set to a finite
|
||||
numeric value, allowing operators to override auto-detected values.
|
||||
Non-numeric or non-finite values are ignored so that auto-detection from the
|
||||
radio interface can still fill this in.
|
||||
"""
|
||||
LORA_FREQ: float | int | str | None = None
|
||||
"""Frequency of the local node's configured LoRa region in MHz or raw region label."""
|
||||
|
||||
MODEM_PRESET: str | None = None
|
||||
"""CamelCase modem preset name reported by the local node."""
|
||||
@@ -272,7 +147,9 @@ _INACTIVITY_RECONNECT_SECS = DEFAULT_INACTIVITY_RECONNECT_SECS
|
||||
_ENERGY_ONLINE_DURATION_SECS = DEFAULT_ENERGY_ONLINE_DURATION_SECS
|
||||
_ENERGY_SLEEP_SECS = DEFAULT_ENERGY_SLEEP_SECS
|
||||
_INGESTOR_HEARTBEAT_SECS = DEFAULT_INGESTOR_HEARTBEAT_SECS
|
||||
_SELF_NODE_REPORT_INTERVAL_SECS = DEFAULT_SELF_NODE_REPORT_INTERVAL_SECS
|
||||
|
||||
# Backwards compatibility shim for legacy imports.
|
||||
PORT = CONNECTION
|
||||
|
||||
|
||||
def _debug_log(
|
||||
@@ -317,7 +194,6 @@ __all__ = [
|
||||
"HIDDEN_CHANNELS",
|
||||
"ALLOWED_CHANNELS",
|
||||
"INSTANCE",
|
||||
"INSTANCES",
|
||||
"API_TOKEN",
|
||||
"ENERGY_SAVING",
|
||||
"LORA_FREQ",
|
||||
@@ -329,6 +205,21 @@ __all__ = [
|
||||
"_ENERGY_ONLINE_DURATION_SECS",
|
||||
"_ENERGY_SLEEP_SECS",
|
||||
"_INGESTOR_HEARTBEAT_SECS",
|
||||
"_SELF_NODE_REPORT_INTERVAL_SECS",
|
||||
"_debug_log",
|
||||
]
|
||||
|
||||
|
||||
class _ConfigModule(ModuleType):
|
||||
"""Module proxy that keeps connection aliases synchronised."""
|
||||
|
||||
def __setattr__(self, name: str, value: Any) -> None: # type: ignore[override]
|
||||
"""Propagate CONNECTION/PORT assignments to both attributes."""
|
||||
|
||||
if name in {"CONNECTION", "PORT"}:
|
||||
super().__setattr__("CONNECTION", value)
|
||||
super().__setattr__("PORT", value)
|
||||
return
|
||||
super().__setattr__(name, value)
|
||||
|
||||
|
||||
sys.modules[__name__].__class__ = _ConfigModule
|
||||
|
||||
@@ -1,163 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Provider-agnostic connection target helpers.
|
||||
|
||||
This module contains utilities shared by all ingestor providers for
|
||||
parsing and auto-discovering connection targets. It is intentionally
|
||||
free of any provider-specific imports so that Meshtastic, MeshCore,
|
||||
and future providers can all rely on the same logic.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import glob
|
||||
import re
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Constants
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
DEFAULT_TCP_PORT: int = 4403
|
||||
"""Default TCP port used when no port is explicitly supplied."""
|
||||
|
||||
DEFAULT_SERIAL_PATTERNS: tuple[str, ...] = (
|
||||
"/dev/ttyACM*",
|
||||
"/dev/ttyUSB*",
|
||||
"/dev/tty.usbmodem*",
|
||||
"/dev/tty.usbserial*",
|
||||
"/dev/cu.usbmodem*",
|
||||
"/dev/cu.usbserial*",
|
||||
)
|
||||
"""Glob patterns for common serial device paths on Linux and macOS."""
|
||||
|
||||
# Support both MAC addresses (Linux/Windows) and UUIDs (macOS).
|
||||
BLE_ADDRESS_RE = re.compile(
|
||||
r"^(?:"
|
||||
r"(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}|" # MAC address format
|
||||
r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" # UUID format
|
||||
r")$"
|
||||
)
|
||||
"""Compiled regex matching a BLE MAC address or UUID."""
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def parse_ble_target(value: str) -> str | None:
|
||||
"""Return a normalised BLE address (MAC or UUID) when ``value`` matches the format.
|
||||
|
||||
Parameters:
|
||||
value: User-provided target string.
|
||||
|
||||
Returns:
|
||||
The normalised MAC address (upper-cased) or UUID, or ``None`` when
|
||||
the value does not match a recognised BLE address format.
|
||||
"""
|
||||
if not value:
|
||||
return None
|
||||
value = value.strip()
|
||||
if not value:
|
||||
return None
|
||||
if BLE_ADDRESS_RE.fullmatch(value):
|
||||
return value.upper()
|
||||
return None
|
||||
|
||||
|
||||
def parse_tcp_target(value: str) -> tuple[str, int] | None:
|
||||
"""Parse a TCP ``host:port`` target, accepting both IPs and hostnames.
|
||||
|
||||
Unlike the Meshtastic-specific helper in :mod:`interfaces`, hostnames are
|
||||
accepted here because MeshCore companions may be reached over a local
|
||||
network by name (e.g. ``meshcore-node.local:4403``).
|
||||
|
||||
BLE MAC addresses (five colons) and bare serial port paths (no colon) are
|
||||
correctly rejected — they cannot produce a valid ``host:port`` pair.
|
||||
|
||||
Parameters:
|
||||
value: User-provided target string.
|
||||
|
||||
Returns:
|
||||
``(host, port)`` on success, or ``None`` when *value* does not look
|
||||
like a TCP target.
|
||||
"""
|
||||
if not value:
|
||||
return None
|
||||
value = value.strip()
|
||||
if not value:
|
||||
return None
|
||||
|
||||
# Strip URL scheme prefix (e.g. ``tcp://host:4403`` or ``http://host:4403``).
|
||||
if "://" in value:
|
||||
value = value.split("://", 1)[1]
|
||||
|
||||
# Handle bracketed IPv6: ``[::1]:4403``.
|
||||
if value.startswith("["):
|
||||
bracket_end = value.find("]")
|
||||
if bracket_end == -1:
|
||||
return None
|
||||
host = value[1:bracket_end]
|
||||
rest = value[bracket_end + 1 :]
|
||||
if rest.startswith(":"):
|
||||
try:
|
||||
port = int(rest[1:])
|
||||
except ValueError:
|
||||
return None
|
||||
if not (1 <= port <= 65535):
|
||||
return None
|
||||
else:
|
||||
port = DEFAULT_TCP_PORT
|
||||
if not host:
|
||||
return None
|
||||
return host, port
|
||||
|
||||
# For non-bracketed addresses require exactly one colon so that BLE MACs
|
||||
# (five colons) and bare serial paths (no colon) are rejected.
|
||||
colon_count = value.count(":")
|
||||
if colon_count != 1:
|
||||
return None
|
||||
|
||||
host, _, port_str = value.partition(":")
|
||||
if not host:
|
||||
return None
|
||||
try:
|
||||
port = int(port_str)
|
||||
except ValueError:
|
||||
return None
|
||||
if not (1 <= port <= 65535):
|
||||
return None
|
||||
return host, port
|
||||
|
||||
|
||||
def default_serial_targets() -> list[str]:
    """Return candidate serial device paths for auto-discovery.

    Globs for common USB serial device paths on Linux and macOS. Always
    includes ``/dev/ttyACM0`` as a final fallback so callers have at least
    one candidate even on systems without any attached hardware.

    Returns:
        Ordered list of candidate device paths, deduplicated.
    """
    # An insertion-ordered dict doubles as an ordered set for deduplication.
    discovered: dict[str, None] = {}
    for pattern in DEFAULT_SERIAL_PATTERNS:
        for device in sorted(glob.glob(pattern)):
            discovered.setdefault(device, None)
    # Guarantee at least one candidate even with no hardware attached.
    discovered.setdefault("/dev/ttyACM0", None)
    return list(discovered)
|
||||
+300
-465
@@ -16,7 +16,6 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import dataclasses
|
||||
import inspect
|
||||
import signal
|
||||
import threading
|
||||
@@ -24,9 +23,7 @@ import time
|
||||
|
||||
from pubsub import pub
|
||||
|
||||
from . import config, handlers, ingestors, interfaces, queue
|
||||
from .mesh_protocol import MeshProtocol
|
||||
from .utils import _retry_dict_snapshot
|
||||
from . import config, handlers, ingestors, interfaces
|
||||
|
||||
_RECEIVE_TOPICS = (
|
||||
"meshtastic.receive",
|
||||
@@ -83,15 +80,10 @@ def _subscribe_receive_topics() -> list[str]:
|
||||
|
||||
|
||||
def _node_items_snapshot(
|
||||
nodes_obj: object, retries: int = 3
|
||||
nodes_obj, retries: int = 3
|
||||
) -> list[tuple[str, object]] | None:
|
||||
"""Snapshot ``nodes_obj`` to avoid iteration errors during updates.
|
||||
|
||||
Uses :func:`~data.mesh_ingestor.utils._retry_dict_snapshot` to handle
|
||||
both dict-like objects (``items()`` callable) and sequence-like objects
|
||||
(``__iter__`` + ``__getitem__``) that Meshtastic may return depending on
|
||||
firmware version.
|
||||
|
||||
Parameters:
|
||||
nodes_obj: Meshtastic nodes mapping or iterable.
|
||||
retries: Number of attempts when encountering "dictionary changed"
|
||||
@@ -107,15 +99,25 @@ def _node_items_snapshot(
|
||||
|
||||
items_callable = getattr(nodes_obj, "items", None)
|
||||
if callable(items_callable):
|
||||
return _retry_dict_snapshot(lambda: list(items_callable()), retries)
|
||||
for _ in range(max(1, retries)):
|
||||
try:
|
||||
return list(items_callable())
|
||||
except RuntimeError as err:
|
||||
if "dictionary changed size during iteration" not in str(err):
|
||||
raise
|
||||
time.sleep(0)
|
||||
return None
|
||||
|
||||
if hasattr(nodes_obj, "__iter__") and hasattr(nodes_obj, "__getitem__"):
|
||||
|
||||
def _snapshot_via_keys() -> list[tuple[str, object]]:
|
||||
keys = list(nodes_obj)
|
||||
return [(key, nodes_obj[key]) for key in keys]
|
||||
|
||||
return _retry_dict_snapshot(_snapshot_via_keys, retries)
|
||||
for _ in range(max(1, retries)):
|
||||
try:
|
||||
keys = list(nodes_obj)
|
||||
return [(key, nodes_obj[key]) for key in keys]
|
||||
except RuntimeError as err:
|
||||
if "dictionary changed size during iteration" not in str(err):
|
||||
raise
|
||||
time.sleep(0)
|
||||
return None
|
||||
|
||||
return []
|
||||
|
||||
@@ -195,6 +197,11 @@ def _process_ingestor_heartbeat(iface, *, ingestor_announcement_sent: bool) -> b
|
||||
if heartbeat_sent and not ingestor_announcement_sent:
|
||||
return True
|
||||
return ingestor_announcement_sent
|
||||
iface_cls = getattr(iface_obj, "__class__", None)
|
||||
if iface_cls is None:
|
||||
return False
|
||||
module_name = getattr(iface_cls, "__module__", "") or ""
|
||||
return "ble_interface" in module_name
|
||||
|
||||
|
||||
def _connected_state(candidate) -> bool | None:
|
||||
@@ -236,403 +243,10 @@ def _connected_state(candidate) -> bool | None:
|
||||
return None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Loop state container
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@dataclasses.dataclass
class _DaemonState:
    """All mutable state for the :func:`main` daemon loop."""

    # -- Supplied at construction time --------------------------------------
    provider: MeshProtocol          # active protocol provider (meshtastic/meshcore)
    stop: threading.Event           # set to request a clean shutdown
    configured_port: str | None     # explicit CONNECTION target, or None for auto
    inactivity_reconnect_secs: float
    energy_saving_enabled: bool
    energy_online_secs: float       # how long to stay connected per session
    energy_sleep_secs: float        # how long to sleep between sessions
    retry_delay: float              # current exponential-backoff delay
    last_seen_packet_monotonic: float | None
    active_candidate: str | None    # last connection candidate that worked

    # -- Mutated as the loop runs -------------------------------------------
    iface: object = None
    resolved_target: str | None = None
    initial_snapshot_sent: bool = False
    energy_session_deadline: float | None = None
    iface_connected_at: float | None = None
    last_inactivity_reconnect: float | None = None
    ingestor_announcement_sent: bool = False
    announced_target: bool = False
    last_self_node_report: float | None = None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Per-iteration helpers (each returns True when the caller should `continue`)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _advance_retry_delay(current: float) -> float:
    """Return the next exponential-backoff retry delay.

    Parameters:
        current: Delay used for the most recent attempt; ``0`` on the very
            first call (bootstrap).

    Returns:
        Twice ``current`` (or the configured initial delay when ``current``
        is zero), capped at ``config._RECONNECT_MAX_DELAY_SECS``. When the
        cap is disabled (``<= 0``) the delay is returned unchanged.
    """
    cap = config._RECONNECT_MAX_DELAY_SECS
    if cap <= 0:
        return current
    # `current == 0` on the very first call (bootstrap); seed from config.
    if current:
        return min(current * 2, cap)
    return min(config._RECONNECT_INITIAL_DELAY_SECS, cap)
|
||||
|
||||
|
||||
def _energy_sleep(state: _DaemonState, reason: str) -> None:
    """Sleep for the configured energy-saving interval.

    No-op when energy saving is disabled or the sleep interval is zero.
    The wait uses ``state.stop.wait`` so a shutdown request interrupts the
    sleep immediately.

    Parameters:
        state: Current daemon loop state.
        reason: Human-readable reason included in the debug log.
    """
    if not state.energy_saving_enabled or state.energy_sleep_secs <= 0:
        return
    if config.DEBUG:
        config._debug_log(
            f"energy saving: {reason}; sleeping for {state.energy_sleep_secs:g}s"
        )
    state.stop.wait(state.energy_sleep_secs)
|
||||
|
||||
|
||||
def _try_connect(state: _DaemonState) -> bool:
    """Attempt to establish the mesh interface.

    Returns:
        ``True`` when connected and the loop should proceed; ``False`` when
        the connection failed and the caller should ``continue``.

    Raises:
        SystemExit: When no mesh interface is available at all; the daemon
            cannot make progress without one.
    """
    try:
        # The provider resolves the concrete transport and remembers which
        # candidate worked so later reconnects try it first.
        state.iface, state.resolved_target, state.active_candidate = (
            state.provider.connect(active_candidate=state.active_candidate)
        )
        handlers.register_host_node_id(state.provider.extract_host_node_id(state.iface))
        ingestors.set_ingestor_node_id(handlers.host_node_id())
        # A successful connect resets the backoff and the per-session flags.
        state.retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
        state.initial_snapshot_sent = False
        state.last_self_node_report = None
        if not state.announced_target and state.resolved_target:
            config._debug_log(
                "Using mesh interface",
                context="daemon.interface",
                severity="info",
                target=state.resolved_target,
            )
            state.announced_target = True
        # Set an absolute monotonic deadline for this energy-saving session.
        # When the deadline passes, _check_energy_saving() will close the
        # interface and sleep until the next wake interval.
        if state.energy_saving_enabled and state.energy_online_secs > 0:
            state.energy_session_deadline = time.monotonic() + state.energy_online_secs
        else:
            state.energy_session_deadline = None
        state.iface_connected_at = time.monotonic()
        # Seed the inactivity tracking from the connection time so a
        # reconnect is given a full inactivity window even when the
        # handler still reports the previous packet timestamp.
        state.last_seen_packet_monotonic = state.iface_connected_at
        state.last_inactivity_reconnect = None
        return True
    except interfaces.NoAvailableMeshInterface as exc:
        # Hard failure: no transport exists at all, so retrying is pointless.
        config._debug_log(
            "No mesh interface available",
            context="daemon.interface",
            severity="error",
            error_message=str(exc),
        )
        _close_interface(state.iface)
        raise SystemExit(1) from exc
    except Exception as exc:
        config._debug_log(
            "Failed to create mesh interface",
            context="daemon.interface",
            severity="warn",
            candidate=state.active_candidate or "auto",
            error_class=exc.__class__.__name__,
            error_message=str(exc),
        )
        # Auto-discovery mode: forget the failed candidate so the next pass
        # re-scans from scratch.
        if state.configured_port is None:
            state.active_candidate = None
        state.announced_target = False
        state.stop.wait(state.retry_delay)
        state.retry_delay = _advance_retry_delay(state.retry_delay)
        return False
|
||||
|
||||
|
||||
def _check_energy_saving(state: _DaemonState) -> bool:
    """Disconnect and sleep when energy-saving conditions are met.

    Two triggers close the interface: the energy-saving session deadline has
    passed, or the BLE client object has vanished (the radio side dropped
    the link).

    Returns:
        ``True`` when the interface was closed and the caller should
        ``continue``; ``False`` otherwise.
    """
    if not state.energy_saving_enabled or state.iface is None:
        return False

    deadline = state.energy_session_deadline
    if deadline is not None and time.monotonic() >= deadline:
        reason = "disconnected after session"
        log_msg = "Energy saving disconnect"
    elif (
        _is_ble_interface(state.iface)
        and getattr(state.iface, "client", object()) is None
    ):
        reason = "BLE client disconnected"
        log_msg = "Energy saving BLE disconnect"
    else:
        return False

    config._debug_log(log_msg, context="daemon.energy", severity="info")
    _close_interface(state.iface)
    # Reset per-session flags so the next wake performs a full reconnect.
    state.iface = None
    state.announced_target = False
    state.initial_snapshot_sent = False
    state.last_self_node_report = None
    state.energy_session_deadline = None
    _energy_sleep(state, reason)
    return True
|
||||
|
||||
|
||||
def _try_send_snapshot(state: _DaemonState) -> bool:
    """Send the initial node snapshot via the provider.

    Per-node upsert failures are logged and skipped; only a failure to read
    the snapshot itself tears the interface down and triggers a retry.

    Returns:
        ``True`` when the snapshot succeeded (or no nodes exist yet);
        ``False`` when a hard error occurred and the caller should
        ``continue``.
    """
    try:
        node_items = state.provider.node_snapshot_items(state.iface)
        processed_any = False
        for node_id, node in node_items:
            processed_any = True
            try:
                handlers.upsert_node(node_id, node)
            except Exception as exc:
                # A single malformed node must not abort the whole snapshot.
                config._debug_log(
                    "Failed to update node snapshot",
                    context="daemon.snapshot",
                    severity="warn",
                    node_id=node_id,
                    error_class=exc.__class__.__name__,
                    error_message=str(exc),
                )
            if config.DEBUG:
                config._debug_log(
                    "Snapshot node payload",
                    context="daemon.snapshot",
                    node=node,
                )
        if processed_any:
            # Only mark done when at least one node was seen, so an empty
            # node table is retried on the next pass.
            state.initial_snapshot_sent = True
        return True
    except Exception as exc:
        config._debug_log(
            "Snapshot refresh failed",
            context="daemon.snapshot",
            severity="warn",
            error_class=exc.__class__.__name__,
            error_message=str(exc),
        )
        _close_interface(state.iface)
        state.iface = None
        state.stop.wait(state.retry_delay)
        state.retry_delay = _advance_retry_delay(state.retry_delay)
        return False
|
||||
|
||||
|
||||
def _check_inactivity_reconnect(state: _DaemonState) -> bool:
    """Reconnect when the interface has been silent for too long.

    Returns:
        ``True`` when a reconnect was triggered and the caller should
        ``continue``; ``False`` otherwise.
    """
    if state.iface is None or state.inactivity_reconnect_secs <= 0:
        return False

    now = time.monotonic()
    iface_activity = handlers.last_packet_monotonic()

    # Clamp activity timestamps from before this connection so a fresh
    # session is granted a full inactivity window.
    if (
        iface_activity is not None
        and state.iface_connected_at is not None
        and iface_activity < state.iface_connected_at
    ):
        iface_activity = state.iface_connected_at

    # New traffic observed: advance the watermark and clear the throttle.
    if iface_activity is not None and (
        state.last_seen_packet_monotonic is None
        or iface_activity > state.last_seen_packet_monotonic
    ):
        state.last_seen_packet_monotonic = iface_activity
        state.last_inactivity_reconnect = None

    # Fall back to the connection time (or "now") when no packet has ever
    # been recorded for this interface.
    latest_activity = iface_activity
    if latest_activity is None and state.iface_connected_at is not None:
        latest_activity = state.iface_connected_at
    if latest_activity is None:
        latest_activity = now

    inactivity_elapsed = now - latest_activity
    believed_disconnected = (
        _connected_state(getattr(state.iface, "isConnected", None)) is False
    )

    if (
        not believed_disconnected
        and inactivity_elapsed < state.inactivity_reconnect_secs
    ):
        return False

    if state.last_inactivity_reconnect is not None:
        # For explicit disconnects use the shorter max-reconnect-delay window
        # so the daemon reconnects promptly without thrashing. For inactivity-
        # only triggers retain the full inactivity window as the throttle.
        throttle_secs = (
            config._RECONNECT_MAX_DELAY_SECS
            if believed_disconnected
            else state.inactivity_reconnect_secs
        )
        if now - state.last_inactivity_reconnect < throttle_secs:
            return False

    reason = (
        "disconnected"
        if believed_disconnected
        else f"no data for {inactivity_elapsed:.0f}s"
    )
    # Uses the module-level global STATE — acceptable because there is only
    # one queue in production, and in tests this is purely informational.
    queue_depth = len(queue.STATE.queue)
    config._debug_log(
        "Mesh interface inactivity detected",
        context="daemon.interface",
        severity="warn",
        reason=reason,
        queue_depth=queue_depth,
    )
    state.last_inactivity_reconnect = now
    _close_interface(state.iface)
    # Full reset: the next loop pass will reconnect from scratch.
    state.iface = None
    state.announced_target = False
    state.initial_snapshot_sent = False
    state.last_self_node_report = None
    state.energy_session_deadline = None
    state.iface_connected_at = None
    return True
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Periodic self-node report helper
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _try_send_self_node(state: _DaemonState) -> None:
    """Re-upsert the host self-node when the provider supports it.

    Called once immediately after the initial snapshot and then at most once
    per :data:`~data.mesh_ingestor.config._SELF_NODE_REPORT_INTERVAL_SECS`.
    This ensures the self-node's protocol and radio metadata are refreshed
    even when the ingestor heartbeat races ahead of the first SELF_INFO event
    (meshcore) or when the protocol never sends periodic NODEINFO for itself.

    Parameters:
        state: Current daemon loop state.

    Returns:
        ``None``. Errors are logged and suppressed so a single failure does
        not break the main loop.
    """
    self_node_fn = getattr(state.provider, "self_node_item", None)
    if not callable(self_node_fn):
        # Provider does not expose a self-node; nothing to refresh.
        return
    try:
        item = self_node_fn(state.iface)
        if item is None:
            return
        node_id, node = item
        handlers.upsert_node(node_id, node)
        state.last_self_node_report = time.monotonic()
        config._debug_log(
            "Sent periodic self-node report",
            context="daemon.self_node",
            severity="info",
            node_id=node_id,
        )
    except Exception as exc:
        config._debug_log(
            "Self-node re-report failed",
            context="daemon.self_node",
            severity="warn",
            error_class=exc.__class__.__name__,
            error_message=str(exc),
        )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Loop iteration helper
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _loop_iteration(state: _DaemonState) -> bool:
    """Execute one pass of the daemon main loop.

    Encapsulates the per-iteration ``continue`` decisions so that
    :func:`main` stays within the allowed cognitive-complexity budget.

    Returns:
        ``True`` when the loop should start the next iteration immediately
        (equivalent to a ``continue``); ``False`` when the full pass
        completed and the caller should sleep before iterating again.
    """
    if state.iface is None and not _try_connect(state):
        return True
    if _check_energy_saving(state):
        return True
    if not state.initial_snapshot_sent and not _try_send_snapshot(state):
        return True
    if _check_inactivity_reconnect(state):
        return True

    state.ingestor_announcement_sent = _process_ingestor_heartbeat(
        state.iface, ingestor_announcement_sent=state.ingestor_announcement_sent
    )

    # Periodically re-upsert the host self-node so that its protocol and radio
    # metadata are corrected after the ingestor heartbeat is registered, and
    # kept fresh for protocols (e.g. meshcore) that only emit SELF_INFO once.
    now = time.monotonic()
    report_due = (
        state.last_self_node_report is None
        or now - state.last_self_node_report
        >= config._SELF_NODE_REPORT_INTERVAL_SECS
    )
    if state.initial_snapshot_sent and report_due:
        _try_send_self_node(state)

    # A full healthy pass resets the reconnect backoff.
    state.retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
    return False
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Entry point
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def main(*, provider: MeshProtocol | None = None) -> None:
|
||||
def main(existing_interface=None) -> None:
|
||||
"""Run the mesh ingestion daemon until interrupted."""
|
||||
|
||||
if provider is None:
|
||||
if config.PROTOCOL == "meshcore":
|
||||
from .protocols.meshcore import MeshcoreProvider
|
||||
|
||||
provider = MeshcoreProvider()
|
||||
else:
|
||||
from .protocols.meshtastic import MeshtasticProvider
|
||||
|
||||
provider = MeshtasticProvider()
|
||||
|
||||
subscribed = provider.subscribe()
|
||||
subscribed = _subscribe_receive_topics()
|
||||
if subscribed:
|
||||
config._debug_log(
|
||||
"Subscribed to receive topics",
|
||||
@@ -641,92 +255,313 @@ def main(*, provider: MeshProtocol | None = None) -> None:
|
||||
topics=subscribed,
|
||||
)
|
||||
|
||||
if not config.INSTANCES and not config.INSTANCE:
|
||||
config._debug_log(
|
||||
"No INSTANCE_DOMAIN configured — cannot forward data; exiting",
|
||||
context="daemon.main",
|
||||
severity="error",
|
||||
always=True,
|
||||
)
|
||||
return
|
||||
iface = existing_interface
|
||||
resolved_target = None
|
||||
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
|
||||
|
||||
queue._start_queue_drainer(queue.STATE)
|
||||
|
||||
state = _DaemonState(
|
||||
provider=provider,
|
||||
stop=threading.Event(),
|
||||
configured_port=config.CONNECTION,
|
||||
inactivity_reconnect_secs=max(
|
||||
0.0, getattr(config, "_INACTIVITY_RECONNECT_SECS", 0.0)
|
||||
),
|
||||
energy_saving_enabled=config.ENERGY_SAVING,
|
||||
energy_online_secs=max(0.0, config._ENERGY_ONLINE_DURATION_SECS),
|
||||
energy_sleep_secs=max(0.0, config._ENERGY_SLEEP_SECS),
|
||||
retry_delay=max(0.0, config._RECONNECT_INITIAL_DELAY_SECS),
|
||||
last_seen_packet_monotonic=handlers.last_packet_monotonic(),
|
||||
active_candidate=config.CONNECTION,
|
||||
stop = threading.Event()
|
||||
initial_snapshot_sent = False
|
||||
energy_session_deadline = None
|
||||
iface_connected_at: float | None = None
|
||||
last_seen_packet_monotonic = handlers.last_packet_monotonic()
|
||||
last_inactivity_reconnect: float | None = None
|
||||
inactivity_reconnect_secs = max(
|
||||
0.0, getattr(config, "_INACTIVITY_RECONNECT_SECS", 0.0)
|
||||
)
|
||||
ingestor_announcement_sent = False
|
||||
|
||||
energy_saving_enabled = config.ENERGY_SAVING
|
||||
energy_online_secs = max(0.0, config._ENERGY_ONLINE_DURATION_SECS)
|
||||
energy_sleep_secs = max(0.0, config._ENERGY_SLEEP_SECS)
|
||||
|
||||
def _energy_sleep(reason: str) -> None:
|
||||
if not energy_saving_enabled or energy_sleep_secs <= 0:
|
||||
return
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
f"energy saving: {reason}; sleeping for {energy_sleep_secs:g}s"
|
||||
)
|
||||
stop.wait(energy_sleep_secs)
|
||||
|
||||
def handle_sigterm(*_args) -> None:
|
||||
"""Set the stop flag so the daemon loop exits cleanly on SIGTERM."""
|
||||
state.stop.set()
|
||||
stop.set()
|
||||
|
||||
def handle_sigint(signum, frame) -> None:
|
||||
"""Handle SIGINT (Ctrl-C) with graceful-first, hard-exit-second behaviour.
|
||||
|
||||
The first SIGINT sets the stop flag and lets the loop finish its
|
||||
current iteration. A second SIGINT delegates to the default handler,
|
||||
which raises :class:`KeyboardInterrupt` and terminates immediately.
|
||||
"""
|
||||
if state.stop.is_set():
|
||||
if stop.is_set():
|
||||
signal.default_int_handler(signum, frame)
|
||||
return
|
||||
state.stop.set()
|
||||
stop.set()
|
||||
|
||||
if threading.current_thread() == threading.main_thread():
|
||||
signal.signal(signal.SIGINT, handle_sigint)
|
||||
signal.signal(signal.SIGTERM, handle_sigterm)
|
||||
|
||||
instance_label = ", ".join(inst for inst, _ in config.INSTANCES)
|
||||
target = config.INSTANCE or "(no INSTANCE_DOMAIN configured)"
|
||||
configured_port = config.CONNECTION
|
||||
active_candidate = configured_port
|
||||
announced_target = False
|
||||
config._debug_log(
|
||||
"Mesh daemon starting",
|
||||
context="daemon.main",
|
||||
severity="info",
|
||||
target=instance_label,
|
||||
port=config.CONNECTION or "auto",
|
||||
target=target,
|
||||
port=configured_port or "auto",
|
||||
channel=config.CHANNEL_INDEX,
|
||||
)
|
||||
|
||||
try:
|
||||
while not state.stop.is_set():
|
||||
if not _loop_iteration(state):
|
||||
state.stop.wait(config.SNAPSHOT_SECS)
|
||||
while not stop.is_set():
|
||||
if iface is None:
|
||||
try:
|
||||
if active_candidate:
|
||||
iface, resolved_target = interfaces._create_serial_interface(
|
||||
active_candidate
|
||||
)
|
||||
else:
|
||||
iface, resolved_target = interfaces._create_default_interface()
|
||||
active_candidate = resolved_target
|
||||
interfaces._ensure_radio_metadata(iface)
|
||||
interfaces._ensure_channel_metadata(iface)
|
||||
handlers.register_host_node_id(
|
||||
interfaces._extract_host_node_id(iface)
|
||||
)
|
||||
ingestors.set_ingestor_node_id(handlers.host_node_id())
|
||||
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
|
||||
initial_snapshot_sent = False
|
||||
if not announced_target and resolved_target:
|
||||
config._debug_log(
|
||||
"Using mesh interface",
|
||||
context="daemon.interface",
|
||||
severity="info",
|
||||
target=resolved_target,
|
||||
)
|
||||
announced_target = True
|
||||
if energy_saving_enabled and energy_online_secs > 0:
|
||||
energy_session_deadline = time.monotonic() + energy_online_secs
|
||||
else:
|
||||
energy_session_deadline = None
|
||||
iface_connected_at = time.monotonic()
|
||||
# Seed the inactivity tracking from the connection time so a
|
||||
# reconnect is given a full inactivity window even when the
|
||||
# handler still reports the previous packet timestamp.
|
||||
last_seen_packet_monotonic = iface_connected_at
|
||||
last_inactivity_reconnect = None
|
||||
except interfaces.NoAvailableMeshInterface as exc:
|
||||
config._debug_log(
|
||||
"No mesh interface available",
|
||||
context="daemon.interface",
|
||||
severity="error",
|
||||
error_message=str(exc),
|
||||
)
|
||||
_close_interface(iface)
|
||||
raise SystemExit(1) from exc
|
||||
except Exception as exc:
|
||||
candidate_desc = active_candidate or "auto"
|
||||
config._debug_log(
|
||||
"Failed to create mesh interface",
|
||||
context="daemon.interface",
|
||||
severity="warn",
|
||||
candidate=candidate_desc,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
if configured_port is None:
|
||||
active_candidate = None
|
||||
announced_target = False
|
||||
stop.wait(retry_delay)
|
||||
if config._RECONNECT_MAX_DELAY_SECS > 0:
|
||||
retry_delay = min(
|
||||
(
|
||||
retry_delay * 2
|
||||
if retry_delay
|
||||
else config._RECONNECT_INITIAL_DELAY_SECS
|
||||
),
|
||||
config._RECONNECT_MAX_DELAY_SECS,
|
||||
)
|
||||
continue
|
||||
|
||||
if energy_saving_enabled and iface is not None:
|
||||
if (
|
||||
energy_session_deadline is not None
|
||||
and time.monotonic() >= energy_session_deadline
|
||||
):
|
||||
config._debug_log(
|
||||
"Energy saving disconnect",
|
||||
context="daemon.energy",
|
||||
severity="info",
|
||||
)
|
||||
_close_interface(iface)
|
||||
iface = None
|
||||
announced_target = False
|
||||
initial_snapshot_sent = False
|
||||
energy_session_deadline = None
|
||||
_energy_sleep("disconnected after session")
|
||||
continue
|
||||
if (
|
||||
_is_ble_interface(iface)
|
||||
and getattr(iface, "client", object()) is None
|
||||
):
|
||||
config._debug_log(
|
||||
"Energy saving BLE disconnect",
|
||||
context="daemon.energy",
|
||||
severity="info",
|
||||
)
|
||||
_close_interface(iface)
|
||||
iface = None
|
||||
announced_target = False
|
||||
initial_snapshot_sent = False
|
||||
energy_session_deadline = None
|
||||
_energy_sleep("BLE client disconnected")
|
||||
continue
|
||||
|
||||
if not initial_snapshot_sent:
|
||||
try:
|
||||
nodes = getattr(iface, "nodes", {}) or {}
|
||||
node_items = _node_items_snapshot(nodes)
|
||||
if node_items is None:
|
||||
config._debug_log(
|
||||
"Skipping node snapshot due to concurrent modification",
|
||||
context="daemon.snapshot",
|
||||
)
|
||||
else:
|
||||
processed_snapshot_item = False
|
||||
for node_id, node in node_items:
|
||||
processed_snapshot_item = True
|
||||
try:
|
||||
handlers.upsert_node(node_id, node)
|
||||
except Exception as exc:
|
||||
config._debug_log(
|
||||
"Failed to update node snapshot",
|
||||
context="daemon.snapshot",
|
||||
severity="warn",
|
||||
node_id=node_id,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Snapshot node payload",
|
||||
context="daemon.snapshot",
|
||||
node=node,
|
||||
)
|
||||
if processed_snapshot_item:
|
||||
initial_snapshot_sent = True
|
||||
except Exception as exc:
|
||||
config._debug_log(
|
||||
"Snapshot refresh failed",
|
||||
context="daemon.snapshot",
|
||||
severity="warn",
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
_close_interface(iface)
|
||||
iface = None
|
||||
stop.wait(retry_delay)
|
||||
if config._RECONNECT_MAX_DELAY_SECS > 0:
|
||||
retry_delay = min(
|
||||
(
|
||||
retry_delay * 2
|
||||
if retry_delay
|
||||
else config._RECONNECT_INITIAL_DELAY_SECS
|
||||
),
|
||||
config._RECONNECT_MAX_DELAY_SECS,
|
||||
)
|
||||
continue
|
||||
|
||||
if iface is not None and inactivity_reconnect_secs > 0:
|
||||
now_monotonic = time.monotonic()
|
||||
iface_activity = handlers.last_packet_monotonic()
|
||||
if (
|
||||
iface_activity is not None
|
||||
and iface_connected_at is not None
|
||||
and iface_activity < iface_connected_at
|
||||
):
|
||||
iface_activity = iface_connected_at
|
||||
if iface_activity is not None and (
|
||||
last_seen_packet_monotonic is None
|
||||
or iface_activity > last_seen_packet_monotonic
|
||||
):
|
||||
last_seen_packet_monotonic = iface_activity
|
||||
last_inactivity_reconnect = None
|
||||
|
||||
latest_activity = iface_activity
|
||||
if latest_activity is None and iface_connected_at is not None:
|
||||
latest_activity = iface_connected_at
|
||||
if latest_activity is None:
|
||||
latest_activity = now_monotonic
|
||||
|
||||
inactivity_elapsed = now_monotonic - latest_activity
|
||||
|
||||
connected_attr = getattr(iface, "isConnected", None)
|
||||
believed_disconnected = False
|
||||
connected_state = _connected_state(connected_attr)
|
||||
if connected_state is None:
|
||||
if callable(connected_attr):
|
||||
try:
|
||||
believed_disconnected = not bool(connected_attr())
|
||||
except Exception:
|
||||
believed_disconnected = False
|
||||
elif connected_attr is not None:
|
||||
try:
|
||||
believed_disconnected = not bool(connected_attr)
|
||||
except Exception: # pragma: no cover - defensive guard
|
||||
believed_disconnected = False
|
||||
else:
|
||||
believed_disconnected = not connected_state
|
||||
|
||||
should_reconnect = believed_disconnected or (
|
||||
inactivity_elapsed >= inactivity_reconnect_secs
|
||||
)
|
||||
|
||||
if should_reconnect:
|
||||
if (
|
||||
last_inactivity_reconnect is None
|
||||
or now_monotonic - last_inactivity_reconnect
|
||||
>= inactivity_reconnect_secs
|
||||
):
|
||||
reason = (
|
||||
"disconnected"
|
||||
if believed_disconnected
|
||||
else f"no data for {inactivity_elapsed:.0f}s"
|
||||
)
|
||||
config._debug_log(
|
||||
"Mesh interface inactivity detected",
|
||||
context="daemon.interface",
|
||||
severity="warn",
|
||||
reason=reason,
|
||||
)
|
||||
last_inactivity_reconnect = now_monotonic
|
||||
_close_interface(iface)
|
||||
iface = None
|
||||
announced_target = False
|
||||
initial_snapshot_sent = False
|
||||
energy_session_deadline = None
|
||||
iface_connected_at = None
|
||||
continue
|
||||
|
||||
ingestor_announcement_sent = _process_ingestor_heartbeat(
|
||||
iface, ingestor_announcement_sent=ingestor_announcement_sent
|
||||
)
|
||||
|
||||
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
|
||||
stop.wait(config.SNAPSHOT_SECS)
|
||||
except KeyboardInterrupt: # pragma: no cover - interactive only
|
||||
config._debug_log(
|
||||
"Received KeyboardInterrupt; shutting down",
|
||||
context="daemon.main",
|
||||
severity="info",
|
||||
)
|
||||
state.stop.set()
|
||||
stop.set()
|
||||
finally:
|
||||
_close_interface(state.iface)
|
||||
_close_interface(iface)
|
||||
|
||||
|
||||
__all__ = [
|
||||
"_RECEIVE_TOPICS",
|
||||
"_advance_retry_delay",
|
||||
"_loop_iteration",
|
||||
"_check_energy_saving",
|
||||
"_check_inactivity_reconnect",
|
||||
"_connected_state",
|
||||
"_energy_sleep",
|
||||
"_event_wait_allows_default_timeout",
|
||||
"_is_ble_interface",
|
||||
"_node_items_snapshot",
|
||||
"_process_ingestor_heartbeat",
|
||||
"_subscribe_receive_topics",
|
||||
"_try_connect",
|
||||
"_try_send_self_node",
|
||||
"_try_send_snapshot",
|
||||
"_is_ble_interface",
|
||||
"_process_ingestor_heartbeat",
|
||||
"_connected_state",
|
||||
"main",
|
||||
]
|
||||
|
||||
@@ -1,96 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Decode Meshtastic protobuf payloads from stdin JSON."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from typing import Any, Dict, Tuple
|
||||
|
||||
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
if SCRIPT_DIR in sys.path:
|
||||
sys.path.remove(SCRIPT_DIR)
|
||||
|
||||
from google.protobuf.json_format import MessageToDict
|
||||
from meshtastic.protobuf import mesh_pb2, telemetry_pb2
|
||||
|
||||
PORTNUM_MAP: Dict[int, Tuple[str, Any]] = {
|
||||
3: ("POSITION_APP", mesh_pb2.Position),
|
||||
4: ("NODEINFO_APP", mesh_pb2.NodeInfo),
|
||||
5: ("ROUTING_APP", mesh_pb2.Routing),
|
||||
67: ("TELEMETRY_APP", telemetry_pb2.Telemetry),
|
||||
70: ("TRACEROUTE_APP", mesh_pb2.RouteDiscovery),
|
||||
71: ("NEIGHBORINFO_APP", mesh_pb2.NeighborInfo),
|
||||
}
|
||||
|
||||
|
||||
def _decode_payload(portnum: int, payload_b64: str) -> dict[str, Any]:
|
||||
if portnum not in PORTNUM_MAP:
|
||||
return {"error": "unsupported-port", "portnum": portnum}
|
||||
try:
|
||||
payload_bytes = base64.b64decode(payload_b64, validate=True)
|
||||
except Exception as exc:
|
||||
return {"error": f"invalid-payload: {exc}"}
|
||||
|
||||
name, message_cls = PORTNUM_MAP[portnum]
|
||||
msg = message_cls()
|
||||
try:
|
||||
msg.ParseFromString(payload_bytes)
|
||||
except Exception as exc:
|
||||
return {"error": f"decode-failed: {exc}", "portnum": portnum, "type": name}
|
||||
|
||||
decoded = MessageToDict(msg, preserving_proto_field_name=True)
|
||||
return {"portnum": portnum, "type": name, "payload": decoded}
|
||||
|
||||
|
||||
def main() -> int:
|
||||
"""Read a JSON request from stdin and write a decoded protobuf response to stdout.
|
||||
|
||||
Reads a single JSON object containing ``portnum`` (int) and
|
||||
``payload_b64`` (base-64 encoded bytes) from standard input, decodes the
|
||||
protobuf payload via :func:`_decode_payload`, and writes the result as
|
||||
JSON to standard output.
|
||||
|
||||
Returns:
|
||||
``0`` on success, ``1`` when the input is malformed or required fields
|
||||
are absent.
|
||||
"""
|
||||
raw = sys.stdin.read()
|
||||
try:
|
||||
request = json.loads(raw)
|
||||
except json.JSONDecodeError as exc:
|
||||
sys.stdout.write(json.dumps({"error": f"invalid-json: {exc}"}))
|
||||
return 1
|
||||
|
||||
portnum = request.get("portnum")
|
||||
payload_b64 = request.get("payload_b64")
|
||||
|
||||
if not isinstance(portnum, int):
|
||||
sys.stdout.write(json.dumps({"error": "missing-portnum"}))
|
||||
return 1
|
||||
if not isinstance(payload_b64, str):
|
||||
sys.stdout.write(json.dumps({"error": "missing-payload"}))
|
||||
return 1
|
||||
|
||||
result = _decode_payload(portnum, payload_b64)
|
||||
sys.stdout.write(json.dumps(result))
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
@@ -1,240 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Protocol-agnostic event payload types for ingestion.
|
||||
|
||||
The ingestor ultimately POSTs JSON to the web app's ingest routes. These types
|
||||
capture the *shape* of those payloads so multiple providers can emit the same
|
||||
events, regardless of how they source or decode packets.
|
||||
|
||||
These are intentionally defined as ``TypedDict`` so existing code can continue
|
||||
to build plain dictionaries without a runtime dependency on dataclasses.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import NotRequired, TypedDict
|
||||
|
||||
|
||||
class _MessageEventRequired(TypedDict):
|
||||
"""Required fields shared by all :class:`MessageEvent` payloads."""
|
||||
|
||||
id: int
|
||||
rx_time: int
|
||||
rx_iso: str
|
||||
|
||||
|
||||
class MessageEvent(_MessageEventRequired, total=False):
|
||||
"""Payload for the ``/api/messages`` ingest route.
|
||||
|
||||
Maps to the ``MessageEvent`` contract described in ``CONTRACTS.md``.
|
||||
Required fields are inherited from :class:`_MessageEventRequired`;
|
||||
all other fields are optional.
|
||||
"""
|
||||
|
||||
from_id: object
|
||||
to_id: object
|
||||
channel: int
|
||||
portnum: str | None
|
||||
text: str | None
|
||||
encrypted: str | None
|
||||
snr: float | None
|
||||
rssi: int | None
|
||||
hop_limit: int | None
|
||||
reply_id: int | None
|
||||
emoji: str | None
|
||||
channel_name: str
|
||||
ingestor: str | None
|
||||
lora_freq: int
|
||||
modem_preset: str
|
||||
|
||||
|
||||
class _PositionEventRequired(TypedDict):
|
||||
"""Required fields shared by all :class:`PositionEvent` payloads."""
|
||||
|
||||
id: int
|
||||
rx_time: int
|
||||
rx_iso: str
|
||||
|
||||
|
||||
class PositionEvent(_PositionEventRequired, total=False):
|
||||
"""Payload for the ``/api/positions`` ingest route.
|
||||
|
||||
Maps to the ``PositionEvent`` contract described in ``CONTRACTS.md``.
|
||||
Coordinates may be supplied as floating-point degrees or derived from
|
||||
Meshtastic's integer-scaled ``latitudeI``/``longitudeI`` fields.
|
||||
"""
|
||||
|
||||
node_id: str
|
||||
node_num: int | None
|
||||
num: int | None
|
||||
from_id: str | None
|
||||
to_id: object
|
||||
latitude: float | None
|
||||
longitude: float | None
|
||||
altitude: float | None
|
||||
position_time: int | None
|
||||
location_source: str | None
|
||||
precision_bits: int | None
|
||||
sats_in_view: int | None
|
||||
pdop: float | None
|
||||
ground_speed: float | None
|
||||
ground_track: float | None
|
||||
snr: float | None
|
||||
rssi: int | None
|
||||
hop_limit: int | None
|
||||
bitfield: int | None
|
||||
payload_b64: str | None
|
||||
raw: dict
|
||||
ingestor: str | None
|
||||
lora_freq: int
|
||||
modem_preset: str
|
||||
|
||||
|
||||
class _TelemetryEventRequired(TypedDict):
|
||||
"""Required fields shared by all :class:`TelemetryEvent` payloads."""
|
||||
|
||||
id: int
|
||||
rx_time: int
|
||||
rx_iso: str
|
||||
|
||||
|
||||
class TelemetryEvent(_TelemetryEventRequired, total=False):
|
||||
"""Payload for the ``/api/telemetry`` ingest route.
|
||||
|
||||
Maps to the ``TelemetryEvent`` contract described in ``CONTRACTS.md``.
|
||||
Metric keys beyond the required ones are open-ended; the web layer accepts
|
||||
any additional device, environment, power, or air-quality fields.
|
||||
"""
|
||||
|
||||
node_id: str | None
|
||||
node_num: int | None
|
||||
from_id: object
|
||||
to_id: object
|
||||
telemetry_time: int | None
|
||||
channel: int
|
||||
portnum: str | None
|
||||
hop_limit: int | None
|
||||
snr: float | None
|
||||
rssi: int | None
|
||||
bitfield: int | None
|
||||
payload_b64: str
|
||||
ingestor: str | None
|
||||
lora_freq: int
|
||||
modem_preset: str
|
||||
|
||||
# Metric keys are intentionally open-ended; the Ruby side is permissive and
|
||||
# evolves over time.
|
||||
|
||||
|
||||
class _NeighborEntryRequired(TypedDict):
|
||||
"""Required fields for a single entry within a :class:`NeighborsSnapshot`."""
|
||||
|
||||
rx_time: int
|
||||
rx_iso: str
|
||||
|
||||
|
||||
class NeighborEntry(_NeighborEntryRequired, total=False):
|
||||
"""A single observed neighbour node within a :class:`NeighborsSnapshot`.
|
||||
|
||||
Each entry describes one node heard by the reporting device, including
|
||||
optional signal-quality metrics.
|
||||
"""
|
||||
|
||||
neighbor_id: str
|
||||
neighbor_num: int | None
|
||||
snr: float | None
|
||||
|
||||
|
||||
class _NeighborsSnapshotRequired(TypedDict):
|
||||
"""Required fields shared by all :class:`NeighborsSnapshot` payloads."""
|
||||
|
||||
node_id: str
|
||||
rx_time: int
|
||||
rx_iso: str
|
||||
|
||||
|
||||
class NeighborsSnapshot(_NeighborsSnapshotRequired, total=False):
|
||||
"""Payload for the ``/api/neighbors`` ingest route.
|
||||
|
||||
Maps to the ``NeighborsSnapshot`` contract described in ``CONTRACTS.md``.
|
||||
Encapsulates the full list of neighbours heard by a single reporting node.
|
||||
"""
|
||||
|
||||
node_num: int | None
|
||||
neighbors: list[NeighborEntry]
|
||||
node_broadcast_interval_secs: int | None
|
||||
last_sent_by_id: str | None
|
||||
ingestor: str | None
|
||||
lora_freq: int
|
||||
modem_preset: str
|
||||
|
||||
|
||||
class _TraceEventRequired(TypedDict):
|
||||
"""Required fields shared by all :class:`TraceEvent` payloads."""
|
||||
|
||||
hops: list[int]
|
||||
rx_time: int
|
||||
rx_iso: str
|
||||
|
||||
|
||||
class TraceEvent(_TraceEventRequired, total=False):
|
||||
"""Payload for the ``/api/traceroutes`` ingest route.
|
||||
|
||||
Maps to the ``TraceEvent`` contract described in ``CONTRACTS.md``.
|
||||
The ``hops`` list contains node numbers in transmission order from
|
||||
source to destination.
|
||||
"""
|
||||
|
||||
id: int | None
|
||||
request_id: int | None
|
||||
src: int | None
|
||||
dest: int | None
|
||||
rssi: int | None
|
||||
snr: float | None
|
||||
elapsed_ms: int | None
|
||||
ingestor: str | None
|
||||
lora_freq: int
|
||||
modem_preset: str
|
||||
|
||||
|
||||
class IngestorHeartbeat(TypedDict):
|
||||
"""Payload for the ``/api/ingestors`` heartbeat route.
|
||||
|
||||
Maps to the ``IngestorHeartbeat`` contract described in ``CONTRACTS.md``.
|
||||
Sent periodically to signal that the ingestor process is alive and
|
||||
associated with a particular radio node.
|
||||
"""
|
||||
|
||||
node_id: str
|
||||
start_time: int
|
||||
last_seen_time: int
|
||||
version: str
|
||||
lora_freq: NotRequired[int]
|
||||
modem_preset: NotRequired[str]
|
||||
|
||||
|
||||
NodeUpsert = dict[str, dict]
|
||||
|
||||
|
||||
__all__ = [
|
||||
"IngestorHeartbeat",
|
||||
"MessageEvent",
|
||||
"NeighborEntry",
|
||||
"NeighborsSnapshot",
|
||||
"NodeUpsert",
|
||||
"PositionEvent",
|
||||
"TelemetryEvent",
|
||||
"TraceEvent",
|
||||
]
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,108 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Packet handlers that serialise mesh data and push it to the HTTP queue.
|
||||
|
||||
This package is organised into focused submodules:
|
||||
|
||||
- :mod:`._state` — shared mutable state (host node ID, packet timestamps)
|
||||
- :mod:`.radio` — radio metadata enrichment helpers
|
||||
- :mod:`.ignored` — debug-mode logging of dropped packets
|
||||
- :mod:`.position` — GPS position and traceroute handlers
|
||||
- :mod:`.telemetry` — device/environment telemetry and router heartbeat handlers
|
||||
- :mod:`.nodeinfo` — node information update handler
|
||||
- :mod:`.neighborinfo` — neighbour topology snapshot handler
|
||||
- :mod:`.generic` — packet dispatcher, node upsert, and the main receive callback
|
||||
|
||||
All public names from the original flat ``handlers`` module are re-exported
|
||||
here so existing callers (e.g. ``daemon.py``, ``protocols/``) require no
|
||||
changes.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from .. import queue as _queue
|
||||
from ._state import (
|
||||
_mark_packet_seen,
|
||||
host_node_id,
|
||||
last_packet_monotonic,
|
||||
register_host_node_id,
|
||||
)
|
||||
from .generic import (
|
||||
_coerce_emoji_codepoint,
|
||||
_is_encrypted_flag,
|
||||
_is_likely_reaction,
|
||||
_is_reaction_placeholder_text,
|
||||
_portnum_candidates,
|
||||
on_receive,
|
||||
store_packet_dict,
|
||||
upsert_node,
|
||||
)
|
||||
from .ignored import (
|
||||
_IGNORED_PACKET_LOCK,
|
||||
_IGNORED_PACKET_LOG_PATH,
|
||||
_record_ignored_packet,
|
||||
)
|
||||
from .neighborinfo import store_neighborinfo_packet
|
||||
from .nodeinfo import store_nodeinfo_packet
|
||||
from .position import (
|
||||
_normalize_trace_hops,
|
||||
base64_payload,
|
||||
store_position_packet,
|
||||
store_traceroute_packet,
|
||||
)
|
||||
from .radio import (
|
||||
_apply_radio_metadata,
|
||||
_apply_radio_metadata_to_nodes,
|
||||
_radio_metadata_fields,
|
||||
)
|
||||
from .telemetry import (
|
||||
_VALID_TELEMETRY_TYPES,
|
||||
store_router_heartbeat_packet,
|
||||
store_telemetry_packet,
|
||||
)
|
||||
|
||||
# Re-export the queue alias for any callers that reference handlers._queue_post_json
|
||||
_queue_post_json = _queue._queue_post_json
|
||||
|
||||
__all__ = [
|
||||
"_IGNORED_PACKET_LOCK",
|
||||
"_IGNORED_PACKET_LOG_PATH",
|
||||
"_VALID_TELEMETRY_TYPES",
|
||||
"_apply_radio_metadata",
|
||||
"_apply_radio_metadata_to_nodes",
|
||||
"_coerce_emoji_codepoint",
|
||||
"_is_encrypted_flag",
|
||||
"_is_likely_reaction",
|
||||
"_is_reaction_placeholder_text",
|
||||
"_mark_packet_seen",
|
||||
"_normalize_trace_hops",
|
||||
"_portnum_candidates",
|
||||
"_queue_post_json",
|
||||
"_radio_metadata_fields",
|
||||
"_record_ignored_packet",
|
||||
"base64_payload",
|
||||
"host_node_id",
|
||||
"last_packet_monotonic",
|
||||
"on_receive",
|
||||
"register_host_node_id",
|
||||
"store_neighborinfo_packet",
|
||||
"store_nodeinfo_packet",
|
||||
"store_packet_dict",
|
||||
"store_position_packet",
|
||||
"store_router_heartbeat_packet",
|
||||
"store_telemetry_packet",
|
||||
"store_traceroute_packet",
|
||||
"upsert_node",
|
||||
]
|
||||
@@ -1,202 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Shared mutable state and state accessors for the handlers subpackage.
|
||||
|
||||
All mutable globals that span multiple handler modules live here so that each
|
||||
handler submodule can import this module and get a consistent view of state
|
||||
without risking stale references from bare ``from ... import`` bindings.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
import time
|
||||
|
||||
from .. import config
|
||||
from ..serialization import _canonical_node_id
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Host device identity
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_host_node_id: str | None = None
|
||||
"""Canonical ``!xxxxxxxx`` identifier for the connected host device."""
|
||||
|
||||
_host_telemetry_last_rx: int | None = None
|
||||
"""Receive timestamp of the last accepted host telemetry packet."""
|
||||
|
||||
_HOST_TELEMETRY_INTERVAL_SECS: int = 60 * 60
|
||||
"""Minimum interval (seconds) between accepted host telemetry packets.
|
||||
|
||||
Meshtastic devices report their own telemetry at regular intervals. Accepting
|
||||
every packet would overwrite the host's profile too aggressively; this window
|
||||
throttles updates to at most once per hour.
|
||||
"""
|
||||
|
||||
_host_nodeinfo_last_seen: float | None = None
|
||||
"""Monotonic timestamp of the last accepted host NODEINFO upsert."""
|
||||
|
||||
_HOST_NODEINFO_INTERVAL_SECS: int = 60 * 60
|
||||
"""Minimum interval (seconds) between accepted host NODEINFO upserts.
|
||||
|
||||
The meshtastic library re-broadcasts the local node's NODEINFO to the mesh
|
||||
periodically. Accepting every broadcast would overwrite the host node record
|
||||
too aggressively; this window throttles self-NODEINFO upserts to at most once
|
||||
per hour.
|
||||
"""
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Packet receipt tracking
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_last_packet_monotonic: float | None = None
|
||||
"""Monotonic timestamp of the most recently processed packet."""
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public accessors
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def register_host_node_id(node_id: str | None) -> None:
|
||||
"""Record the canonical identifier for the connected host device.
|
||||
|
||||
Resetting the host node also clears the telemetry suppression window so
|
||||
the first telemetry packet from the new host is always accepted.
|
||||
|
||||
Parameters:
|
||||
node_id: Identifier reported by the connected device. ``None`` clears
|
||||
the current host assignment.
|
||||
"""
|
||||
|
||||
global _host_node_id, _host_telemetry_last_rx, _host_nodeinfo_last_seen
|
||||
canonical = _canonical_node_id(node_id)
|
||||
_host_node_id = canonical
|
||||
_host_telemetry_last_rx = None
|
||||
_host_nodeinfo_last_seen = None
|
||||
if canonical:
|
||||
config._debug_log(
|
||||
"Registered host device node id",
|
||||
context="handlers.host_device",
|
||||
host_node_id=canonical,
|
||||
)
|
||||
|
||||
|
||||
def host_node_id() -> str | None:
|
||||
"""Return the canonical identifier for the connected host device.
|
||||
|
||||
Returns:
|
||||
The canonical ``!xxxxxxxx`` node identifier, or ``None`` when no host
|
||||
has been registered yet.
|
||||
"""
|
||||
|
||||
return _host_node_id
|
||||
|
||||
|
||||
def _mark_host_telemetry_seen(rx_time: int) -> None:
|
||||
"""Update the last receive timestamp for the host telemetry window.
|
||||
|
||||
Parameters:
|
||||
rx_time: Unix timestamp of the accepted host telemetry packet.
|
||||
"""
|
||||
|
||||
global _host_telemetry_last_rx
|
||||
_host_telemetry_last_rx = rx_time
|
||||
|
||||
|
||||
def _host_telemetry_suppressed(rx_time: int) -> tuple[bool, int]:
|
||||
"""Return suppression state and minutes remaining for host telemetry.
|
||||
|
||||
Host telemetry is suppressed when it arrives within
|
||||
:data:`_HOST_TELEMETRY_INTERVAL_SECS` of the previous accepted packet.
|
||||
This avoids flooding the API with high-frequency device metrics from the
|
||||
locally connected node.
|
||||
|
||||
Parameters:
|
||||
rx_time: Unix timestamp of the candidate telemetry packet.
|
||||
|
||||
Returns:
|
||||
A ``(suppressed, minutes_remaining)`` tuple. ``suppressed`` is
|
||||
``True`` when the packet should be dropped; ``minutes_remaining``
|
||||
is the whole number of minutes until the next packet will be accepted.
|
||||
"""
|
||||
|
||||
if _host_telemetry_last_rx is None:
|
||||
return False, 0
|
||||
remaining_secs = (_host_telemetry_last_rx + _HOST_TELEMETRY_INTERVAL_SECS) - rx_time
|
||||
if remaining_secs <= 0:
|
||||
return False, 0
|
||||
return True, int(math.ceil(remaining_secs / 60.0))
|
||||
|
||||
|
||||
def _host_nodeinfo_suppressed(now: float) -> bool:
|
||||
"""Return ``True`` when a host NODEINFO upsert should be suppressed.
|
||||
|
||||
Self-NODEINFO upserts are throttled to at most once per
|
||||
:data:`_HOST_NODEINFO_INTERVAL_SECS` to prevent the meshtastic library's
|
||||
periodic rebroadcast from overwriting the host node record too aggressively.
|
||||
|
||||
Parameters:
|
||||
now: Current :func:`time.monotonic` value.
|
||||
|
||||
Returns:
|
||||
``True`` when the request should be dropped; ``False`` when it should
|
||||
proceed.
|
||||
"""
|
||||
if _host_nodeinfo_last_seen is None:
|
||||
return False
|
||||
return (now - _host_nodeinfo_last_seen) < _HOST_NODEINFO_INTERVAL_SECS
|
||||
|
||||
|
||||
def _mark_host_nodeinfo_seen(now: float) -> None:
|
||||
"""Record that a host NODEINFO upsert was accepted.
|
||||
|
||||
Parameters:
|
||||
now: Current :func:`time.monotonic` value from the accepted upsert.
|
||||
"""
|
||||
global _host_nodeinfo_last_seen
|
||||
_host_nodeinfo_last_seen = now
|
||||
|
||||
|
||||
def last_packet_monotonic() -> float | None:
|
||||
"""Return the monotonic timestamp of the most recently processed packet.
|
||||
|
||||
Returns:
|
||||
A :func:`time.monotonic` value, or ``None`` before any packet has been
|
||||
received.
|
||||
"""
|
||||
|
||||
return _last_packet_monotonic
|
||||
|
||||
|
||||
def _mark_packet_seen() -> None:
|
||||
"""Record that a packet has been processed by updating the monotonic clock."""
|
||||
|
||||
global _last_packet_monotonic
|
||||
_last_packet_monotonic = time.monotonic()
|
||||
|
||||
|
||||
__all__ = [
|
||||
"_HOST_NODEINFO_INTERVAL_SECS",
|
||||
"_HOST_TELEMETRY_INTERVAL_SECS",
|
||||
"_host_nodeinfo_suppressed",
|
||||
"_host_telemetry_suppressed",
|
||||
"_mark_host_nodeinfo_seen",
|
||||
"_mark_host_telemetry_seen",
|
||||
"_mark_packet_seen",
|
||||
"host_node_id",
|
||||
"last_packet_monotonic",
|
||||
"register_host_node_id",
|
||||
]
|
||||
@@ -1,604 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Generic packet dispatcher, node upsert, and the main receive callback."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import contextlib
|
||||
import importlib
|
||||
import json
|
||||
import sys
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
|
||||
from .. import channels, config, queue
|
||||
from ..serialization import (
|
||||
_canonical_node_id,
|
||||
_coerce_int,
|
||||
_first,
|
||||
_iso,
|
||||
_pkt_to_dict,
|
||||
upsert_payload,
|
||||
)
|
||||
from . import _state, ignored as _ignored_mod
|
||||
from .neighborinfo import store_neighborinfo_packet
|
||||
from .nodeinfo import store_nodeinfo_packet
|
||||
from .position import store_position_packet
|
||||
from .radio import _apply_radio_metadata, _apply_radio_metadata_to_nodes
|
||||
from .telemetry import store_router_heartbeat_packet, store_telemetry_packet
|
||||
from .position import store_traceroute_packet
|
||||
|
||||
|
||||
def _portnum_candidates(name: str) -> set[int]:
|
||||
"""Return Meshtastic port number candidates for ``name``.
|
||||
|
||||
Meshtastic ships two protobuf module layouts (legacy and modern). Both are
|
||||
probed so that port-number comparisons work regardless of which firmware
|
||||
version is installed.
|
||||
|
||||
Parameters:
|
||||
name: Port name to look up in Meshtastic ``PortNum`` enums.
|
||||
|
||||
Returns:
|
||||
Set of integer port numbers resolved from all available Meshtastic
|
||||
modules.
|
||||
"""
|
||||
|
||||
candidates: set[int] = set()
|
||||
for module_name in (
|
||||
"meshtastic.portnums_pb2",
|
||||
"meshtastic.protobuf.portnums_pb2",
|
||||
):
|
||||
module = sys.modules.get(module_name)
|
||||
if module is None:
|
||||
with contextlib.suppress(ModuleNotFoundError):
|
||||
module = importlib.import_module(module_name)
|
||||
if module is None:
|
||||
continue
|
||||
portnum_enum = getattr(module, "PortNum", None)
|
||||
value_lookup = getattr(portnum_enum, "Value", None) if portnum_enum else None
|
||||
if callable(value_lookup):
|
||||
with contextlib.suppress(Exception):
|
||||
candidate = _coerce_int(value_lookup(name))
|
||||
if candidate is not None:
|
||||
candidates.add(candidate)
|
||||
constant_value = getattr(module, name, None)
|
||||
candidate = _coerce_int(constant_value)
|
||||
if candidate is not None:
|
||||
candidates.add(candidate)
|
||||
return candidates
|
||||
|
||||
|
||||
def _coerce_emoji_codepoint(raw: object) -> str | None:
|
||||
"""Normalise an emoji candidate, converting numeric codepoints to characters.
|
||||
|
||||
Meshtastic firmware may transmit reaction emoji as a Unicode codepoint
|
||||
integer (e.g. ``128077`` for 👍) rather than as the character itself.
|
||||
Values above 127 are treated as codepoints and converted via :func:`chr`;
|
||||
small values (≤ 127) are preserved as strings so that slot markers such as
|
||||
``"1"`` pass through unchanged.
|
||||
|
||||
When a numeric value claims to be a codepoint but lies outside the valid
|
||||
Unicode range (``> 0x10FFFF``), ``None`` is returned rather than the
|
||||
decimal string form — storing a multi-digit integer as the emoji would
|
||||
leak garbage into the rendered chat (numeric strings of length > 1 are
|
||||
not valid slot markers either).
|
||||
|
||||
Parameters:
|
||||
raw: Raw emoji value from a decoded packet field.
|
||||
|
||||
Returns:
|
||||
Normalised emoji string, or ``None`` when *raw* is empty or invalid.
|
||||
"""
|
||||
|
||||
if raw is None:
|
||||
return None
|
||||
|
||||
# Numeric value (int / float) -------------------------------------------
|
||||
if isinstance(raw, (int, float)):
|
||||
n = int(raw)
|
||||
if n > 127:
|
||||
try:
|
||||
return chr(n)
|
||||
except (ValueError, OverflowError):
|
||||
# Value claimed to be a codepoint but is out of Unicode range;
|
||||
# do NOT preserve the decimal form (would render as garbage).
|
||||
return None
|
||||
text = str(raw).strip()
|
||||
return text or None
|
||||
|
||||
# String (possibly a digit-encoded codepoint) ---------------------------
|
||||
try:
|
||||
text = str(raw).strip()
|
||||
except Exception:
|
||||
return None
|
||||
if not text:
|
||||
return None
|
||||
if text.isdigit():
|
||||
n = int(text)
|
||||
if n > 127:
|
||||
try:
|
||||
return chr(n)
|
||||
except (ValueError, OverflowError):
|
||||
# See comment above — multi-digit numeric strings outside the
|
||||
# Unicode range are not valid emoji nor slot markers.
|
||||
return None
|
||||
return text
|
||||
|
||||
|
||||
#: Maximum Unicode codepoint length for text that may still qualify as a
|
||||
#: reaction placeholder. A bare emoji (single grapheme) is at most 2
|
||||
#: codepoints — for example a base character plus a single variation
|
||||
#: selector (U+FE0F). Multi-codepoint ZWJ families (👨👩👧, 🏳️🌈) are
|
||||
#: NOT accepted as placeholder text intentionally: matching them would
|
||||
#: also let through short CJK messages like ``"你好世界吗"`` (5 codepoints,
|
||||
#: no ASCII letters), causing real prose to be misclassified as a reaction.
|
||||
#: This constant must stay aligned with the JS frontend's
|
||||
#: ``isReactionPlaceholderText`` (``message-replies.js``); changing one
|
||||
#: side without the other re-introduces ingest/render disagreement.
|
||||
_REACTION_PLACEHOLDER_MAX_CODEPOINTS = 2
|
||||
|
||||
|
||||
def _is_reaction_placeholder_text(text: str | None) -> bool:
|
||||
"""Return ``True`` when *text* looks like a reaction slot or count marker.
|
||||
|
||||
Reaction packets carry either no text at all, a small numeric count (e.g.
|
||||
``"1"``, ``"3"``), or occasionally a bare emoji character. Anything that
|
||||
looks like substantive prose should cause the packet to be classified as a
|
||||
regular text message instead of a reaction.
|
||||
|
||||
Parameters:
|
||||
text: Message text to inspect (may be ``None``).
|
||||
|
||||
Returns:
|
||||
``True`` when *text* is absent, blank, a digit string, or a short
|
||||
non-ASCII-letter sequence (bare emoji).
|
||||
"""
|
||||
|
||||
if not text:
|
||||
return True
|
||||
stripped = text.strip()
|
||||
if not stripped:
|
||||
return True
|
||||
if stripped.isdigit():
|
||||
return True
|
||||
# Bare emoji heuristic — see _REACTION_PLACEHOLDER_MAX_CODEPOINTS.
|
||||
if len(stripped) <= _REACTION_PLACEHOLDER_MAX_CODEPOINTS and not any(
|
||||
c.isascii() and c.isalpha() for c in stripped
|
||||
):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _is_likely_reaction(
|
||||
portnum: str | None,
|
||||
portnum_int: int | None,
|
||||
reply_id: int | None,
|
||||
emoji: str | None,
|
||||
text: str | None,
|
||||
) -> bool:
|
||||
"""Determine whether a packet should be classified as a reaction.
|
||||
|
||||
A packet is a reaction when it carries the ``REACTION_APP`` portnum
|
||||
explicitly, **or** when it has both a ``reply_id`` and an ``emoji`` and its
|
||||
text content is absent or a mere placeholder (digit slot / bare emoji).
|
||||
|
||||
Parameters:
|
||||
portnum: String portnum label from the packet.
|
||||
portnum_int: Integer portnum, if available.
|
||||
reply_id: Reply-to message identifier.
|
||||
emoji: Normalised emoji string (after codepoint coercion).
|
||||
text: Message text extracted from the packet.
|
||||
|
||||
Returns:
|
||||
``True`` when the packet should be treated as a reaction.
|
||||
"""
|
||||
|
||||
if portnum == "REACTION_APP":
|
||||
return True
|
||||
reaction_port_candidates = _portnum_candidates("REACTION_APP")
|
||||
if portnum_int is not None and portnum_int in reaction_port_candidates:
|
||||
return True
|
||||
if reply_id is not None and emoji is not None:
|
||||
return _is_reaction_placeholder_text(text)
|
||||
return False
|
||||
|
||||
|
||||
def _is_encrypted_flag(value: object) -> bool:
|
||||
"""Return ``True`` when ``value`` represents an encrypted payload.
|
||||
|
||||
Meshtastic may express the encrypted flag as a boolean, an integer, or a
|
||||
string depending on how the packet was decoded. All representations are
|
||||
normalised to a Python bool.
|
||||
|
||||
Parameters:
|
||||
value: Raw encrypted field from a Meshtastic packet.
|
||||
|
||||
Returns:
|
||||
``True`` when the payload is considered encrypted, ``False`` otherwise.
|
||||
"""
|
||||
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
if isinstance(value, (int, float)):
|
||||
return value != 0
|
||||
if isinstance(value, str):
|
||||
normalized = value.strip().lower()
|
||||
if normalized in {"", "0", "false", "no"}:
|
||||
return False
|
||||
return True
|
||||
return bool(value)
|
||||
|
||||
|
||||
def upsert_node(node_id: object, node: object) -> None:
|
||||
"""Schedule an upsert for a single node.
|
||||
|
||||
Serialises ``node`` via :func:`upsert_payload`, enriches the result with
|
||||
radio metadata and the current host node identifier, then enqueues a POST
|
||||
to ``/api/nodes``.
|
||||
|
||||
Parameters:
|
||||
node_id: Canonical identifier for the node in the ``!xxxxxxxx`` format.
|
||||
node: Node object or mapping to serialise for the API payload.
|
||||
|
||||
Returns:
|
||||
``None``. The payload is forwarded to the shared HTTP queue.
|
||||
"""
|
||||
|
||||
payload = _apply_radio_metadata_to_nodes(upsert_payload(node_id, node))
|
||||
payload["ingestor"] = _state.host_node_id()
|
||||
queue._queue_post_json("/api/nodes", payload, priority=queue._NODE_POST_PRIORITY)
|
||||
|
||||
if config.DEBUG:
|
||||
from ..serialization import _get
|
||||
|
||||
user = _get(payload[node_id], "user") or {}
|
||||
short = _get(user, "shortName")
|
||||
long = _get(user, "longName")
|
||||
config._debug_log(
|
||||
"Queued node upsert payload",
|
||||
context="handlers.upsert_node",
|
||||
node_id=node_id,
|
||||
short_name=short,
|
||||
long_name=long,
|
||||
)
|
||||
|
||||
|
||||
def store_packet_dict(packet: Mapping) -> None:
|
||||
"""Route a decoded packet to the appropriate storage handler.
|
||||
|
||||
Inspects ``portnum`` (string and integer forms) and the presence of
|
||||
well-known decoded sub-sections to determine packet type, then delegates
|
||||
to the corresponding ``store_*`` handler.
|
||||
|
||||
Parameters:
|
||||
packet: Packet dictionary emitted by the mesh interface.
|
||||
|
||||
Returns:
|
||||
``None``. Side-effects depend on the specific handler invoked.
|
||||
"""
|
||||
|
||||
decoded = packet.get("decoded") or {}
|
||||
|
||||
portnum_raw = _first(decoded, "portnum", default=None)
|
||||
portnum = str(portnum_raw).upper() if portnum_raw is not None else None
|
||||
portnum_int = _coerce_int(portnum_raw)
|
||||
|
||||
telemetry_section = (
|
||||
decoded.get("telemetry") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if (
|
||||
portnum == "TELEMETRY_APP"
|
||||
or portnum_int == 65
|
||||
or isinstance(telemetry_section, Mapping)
|
||||
):
|
||||
store_telemetry_packet(packet, decoded)
|
||||
return
|
||||
|
||||
traceroute_section = (
|
||||
decoded.get("traceroute") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
traceroute_port_ints = _portnum_candidates("TRACEROUTE_APP")
|
||||
|
||||
if (
|
||||
portnum == "TRACEROUTE_APP"
|
||||
or (portnum_int is not None and portnum_int in traceroute_port_ints)
|
||||
or isinstance(traceroute_section, Mapping)
|
||||
):
|
||||
store_traceroute_packet(packet, decoded)
|
||||
return
|
||||
|
||||
if portnum in {"5", "NODEINFO_APP"}:
|
||||
store_nodeinfo_packet(packet, decoded)
|
||||
return
|
||||
|
||||
if portnum in {"4", "POSITION_APP"}:
|
||||
store_position_packet(packet, decoded)
|
||||
return
|
||||
|
||||
neighborinfo_section = (
|
||||
decoded.get("neighborinfo") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if portnum == "NEIGHBORINFO_APP" or isinstance(neighborinfo_section, Mapping):
|
||||
store_neighborinfo_packet(packet, decoded)
|
||||
return
|
||||
|
||||
store_forward_port_candidates = _portnum_candidates("STORE_FORWARD_APP")
|
||||
store_forward_section = (
|
||||
decoded.get("storeforward") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if portnum == "STORE_FORWARD_APP" or (
|
||||
portnum_int is not None and portnum_int in store_forward_port_candidates
|
||||
):
|
||||
if not isinstance(store_forward_section, Mapping):
|
||||
_ignored_mod._record_ignored_packet(
|
||||
packet, reason="unsupported-store-forward"
|
||||
)
|
||||
return
|
||||
rr = str(store_forward_section.get("rr") or "").upper()
|
||||
if rr == "ROUTER_HEARTBEAT":
|
||||
store_router_heartbeat_packet(packet)
|
||||
return
|
||||
_ignored_mod._record_ignored_packet(
|
||||
packet, reason="unsupported-store-forward-rr"
|
||||
)
|
||||
return
|
||||
|
||||
text = _first(decoded, "payload.text", "text", "data.text", default=None)
|
||||
encrypted = _first(decoded, "payload.encrypted", "encrypted", default=None)
|
||||
if encrypted is None:
|
||||
encrypted = _first(packet, "encrypted", default=None)
|
||||
reply_id_raw = _first(
|
||||
decoded,
|
||||
"payload.replyId",
|
||||
"payload.reply_id",
|
||||
"data.replyId",
|
||||
"data.reply_id",
|
||||
"replyId",
|
||||
"reply_id",
|
||||
default=None,
|
||||
)
|
||||
reply_id = _coerce_int(reply_id_raw)
|
||||
emoji_raw = _first(
|
||||
decoded,
|
||||
"payload.emoji",
|
||||
"data.emoji",
|
||||
"emoji",
|
||||
default=None,
|
||||
)
|
||||
emoji = _coerce_emoji_codepoint(emoji_raw)
|
||||
|
||||
routing_section = decoded.get("routing") if isinstance(decoded, Mapping) else None
|
||||
routing_port_candidates = _portnum_candidates("ROUTING_APP")
|
||||
if text is None and (
|
||||
portnum == "ROUTING_APP"
|
||||
or (portnum_int is not None and portnum_int in routing_port_candidates)
|
||||
or isinstance(routing_section, Mapping)
|
||||
):
|
||||
routing_payload = _first(decoded, "payload", "data", default=None)
|
||||
if routing_payload is not None:
|
||||
if isinstance(routing_payload, bytes):
|
||||
text = base64.b64encode(routing_payload).decode("ascii")
|
||||
elif isinstance(routing_payload, str):
|
||||
text = routing_payload
|
||||
else:
|
||||
try:
|
||||
text = json.dumps(routing_payload, ensure_ascii=True)
|
||||
except TypeError:
|
||||
text = str(routing_payload)
|
||||
if isinstance(text, str):
|
||||
text = text.strip() or None
|
||||
|
||||
allowed_port_values = {"1", "TEXT_MESSAGE_APP", "REACTION_APP", "ROUTING_APP"}
|
||||
allowed_port_ints = {1}
|
||||
|
||||
reaction_port_candidates = _portnum_candidates("REACTION_APP")
|
||||
for candidate in reaction_port_candidates:
|
||||
allowed_port_ints.add(candidate)
|
||||
allowed_port_values.add(str(candidate))
|
||||
|
||||
for candidate in routing_port_candidates:
|
||||
allowed_port_ints.add(candidate)
|
||||
allowed_port_values.add(str(candidate))
|
||||
|
||||
if isinstance(routing_section, Mapping) and portnum_int is not None:
|
||||
allowed_port_ints.add(portnum_int)
|
||||
allowed_port_values.add(str(portnum_int))
|
||||
|
||||
is_reaction_packet = _is_likely_reaction(
|
||||
portnum, portnum_int, reply_id, emoji, text
|
||||
)
|
||||
if is_reaction_packet and portnum_int is not None:
|
||||
allowed_port_ints.add(portnum_int)
|
||||
allowed_port_values.add(str(portnum_int))
|
||||
|
||||
if portnum and portnum not in allowed_port_values:
|
||||
if portnum_int not in allowed_port_ints:
|
||||
_ignored_mod._record_ignored_packet(packet, reason="unsupported-port")
|
||||
return
|
||||
|
||||
encrypted_flag = _is_encrypted_flag(encrypted)
|
||||
if not any([text, encrypted_flag, emoji is not None, reply_id is not None]):
|
||||
_ignored_mod._record_ignored_packet(packet, reason="no-message-payload")
|
||||
return
|
||||
|
||||
channel = _first(decoded, "channel", default=None)
|
||||
if channel is None:
|
||||
channel = _first(packet, "channel", default=0)
|
||||
try:
|
||||
channel = int(channel)
|
||||
except Exception:
|
||||
channel = 0
|
||||
|
||||
channel_name_value = channels.channel_name(channel)
|
||||
|
||||
pkt_id = _first(packet, "id", "packet_id", "packetId", default=None)
|
||||
if pkt_id is None:
|
||||
_ignored_mod._record_ignored_packet(packet, reason="missing-packet-id")
|
||||
return
|
||||
rx_time = int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
from_id = _first(packet, "fromId", "from_id", "from", default=None)
|
||||
to_id = _first(packet, "toId", "to_id", "to", default=None)
|
||||
|
||||
if (from_id is None or str(from_id) == "") and config.DEBUG:
|
||||
try:
|
||||
raw = json.dumps(packet, default=str)
|
||||
except Exception:
|
||||
raw = str(packet)
|
||||
config._debug_log(
|
||||
"Packet missing from_id",
|
||||
context="handlers.store_packet_dict",
|
||||
packet=raw,
|
||||
)
|
||||
|
||||
snr = _first(packet, "snr", "rx_snr", "rxSnr", default=None)
|
||||
rssi = _first(packet, "rssi", "rx_rssi", "rxRssi", default=None)
|
||||
hop = _first(packet, "hopLimit", "hop_limit", default=None)
|
||||
|
||||
to_id_normalized = str(to_id).strip() if to_id is not None else ""
|
||||
|
||||
if (
|
||||
not is_reaction_packet
|
||||
and channel == 0
|
||||
and not encrypted_flag
|
||||
and to_id_normalized
|
||||
and to_id_normalized.lower() != "^all"
|
||||
):
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Skipped direct message on primary channel",
|
||||
context="handlers.store_packet_dict",
|
||||
from_id=_canonical_node_id(from_id) or from_id,
|
||||
to_id=_canonical_node_id(to_id) or to_id,
|
||||
channel=channel,
|
||||
)
|
||||
_ignored_mod._record_ignored_packet(packet, reason="skipped-direct-message")
|
||||
return
|
||||
|
||||
if not channels.is_allowed_channel(channel_name_value):
|
||||
_ignored_mod._record_ignored_packet(packet, reason="disallowed-channel")
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Ignored packet on disallowed channel",
|
||||
context="handlers.store_packet_dict",
|
||||
channel=channel,
|
||||
channel_name=channel_name_value,
|
||||
allowed_channels=channels.allowed_channel_names(),
|
||||
)
|
||||
return
|
||||
|
||||
if channels.is_hidden_channel(channel_name_value):
|
||||
_ignored_mod._record_ignored_packet(packet, reason="hidden-channel")
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Ignored packet on hidden channel",
|
||||
context="handlers.store_packet_dict",
|
||||
channel=channel,
|
||||
channel_name=channel_name_value,
|
||||
)
|
||||
return
|
||||
|
||||
message_payload = {
|
||||
"id": int(pkt_id),
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": _iso(rx_time),
|
||||
"from_id": from_id,
|
||||
"to_id": to_id,
|
||||
"channel": channel,
|
||||
"portnum": str(portnum) if portnum is not None else None,
|
||||
"text": text,
|
||||
"encrypted": encrypted,
|
||||
"snr": float(snr) if snr is not None else None,
|
||||
"rssi": int(rssi) if rssi is not None else None,
|
||||
"hop_limit": int(hop) if hop is not None else None,
|
||||
"reply_id": reply_id,
|
||||
"emoji": emoji,
|
||||
"ingestor": _state.host_node_id(),
|
||||
}
|
||||
|
||||
if not encrypted_flag and channel_name_value:
|
||||
message_payload["channel_name"] = channel_name_value
|
||||
queue._queue_post_json(
|
||||
"/api/messages",
|
||||
_apply_radio_metadata(message_payload),
|
||||
priority=queue._MESSAGE_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
from_label = _canonical_node_id(from_id) or from_id
|
||||
to_label = _canonical_node_id(to_id) or to_id
|
||||
payload_desc = "Encrypted" if text is None and encrypted else text
|
||||
log_kwargs = {
|
||||
"context": "handlers.store_packet_dict",
|
||||
"from_id": from_label,
|
||||
"to_id": to_label,
|
||||
"channel": channel,
|
||||
"channel_display": channel_name_value or channel,
|
||||
"payload": payload_desc,
|
||||
}
|
||||
if channel_name_value:
|
||||
log_kwargs["channel_name"] = channel_name_value
|
||||
config._debug_log("Queued message payload", **log_kwargs)
|
||||
|
||||
|
||||
def on_receive(packet: object, interface: object) -> None:
|
||||
"""Callback registered with Meshtastic to capture incoming packets.
|
||||
|
||||
Subscribed to all ``meshtastic.receive.*`` pubsub topics. The packet is
|
||||
deduplicated via a ``_potatomesh_seen`` flag before being normalised and
|
||||
dispatched to :func:`store_packet_dict`.
|
||||
|
||||
Parameters:
|
||||
packet: Packet payload supplied by the Meshtastic pubsub topic.
|
||||
interface: Interface instance that produced the packet. Only used for
|
||||
compatibility with Meshtastic's callback signature.
|
||||
|
||||
Returns:
|
||||
``None``. Packets are serialised and enqueued asynchronously.
|
||||
"""
|
||||
|
||||
if isinstance(packet, dict):
|
||||
if packet.get("_potatomesh_seen"):
|
||||
return
|
||||
packet["_potatomesh_seen"] = True
|
||||
|
||||
_state._mark_packet_seen()
|
||||
|
||||
packet_dict = None
|
||||
try:
|
||||
packet_dict = _pkt_to_dict(packet)
|
||||
store_packet_dict(packet_dict)
|
||||
except Exception as exc:
|
||||
info = (
|
||||
list(packet_dict.keys()) if isinstance(packet_dict, dict) else type(packet)
|
||||
)
|
||||
config._debug_log(
|
||||
"Failed to store packet",
|
||||
context="handlers.on_receive",
|
||||
severity="warn",
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
packet_info=info,
|
||||
)
|
||||
|
||||
|
||||
__all__ = [
|
||||
"_is_encrypted_flag",
|
||||
"_portnum_candidates",
|
||||
"on_receive",
|
||||
"store_packet_dict",
|
||||
"upsert_node",
|
||||
]
|
||||
@@ -1,103 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Debug-mode logging of ignored Meshtastic packets.
|
||||
|
||||
When :data:`config.DEBUG` is set the ingestor appends a JSON record for each
|
||||
packet that is filtered out (unsupported port, missing fields, disallowed
|
||||
channel, etc.) to a plain-text log file. This aids offline debugging without
|
||||
adding overhead in production.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import json
|
||||
import threading
|
||||
from collections.abc import Mapping
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
from .. import config
|
||||
|
||||
_IGNORED_PACKET_LOG_PATH = (
|
||||
Path(__file__).resolve().parents[3] / "ignored-meshtastic.txt"
|
||||
)
|
||||
"""Filesystem path that stores ignored Meshtastic packets when debug mode is active."""
|
||||
|
||||
_IGNORED_PACKET_LOCK = threading.Lock()
|
||||
"""Lock serialising concurrent appends to :data:`_IGNORED_PACKET_LOG_PATH`."""
|
||||
|
||||
|
||||
def _ignored_packet_default(value: object) -> object:
|
||||
"""Return a JSON-serialisable representation for an ignored packet value.
|
||||
|
||||
Called as the ``default`` argument to :func:`json.dumps` when serialising
|
||||
ignored packet entries. Handles container types and raw bytes so the log
|
||||
file contains readable text rather than ``repr()`` fragments.
|
||||
|
||||
Parameters:
|
||||
value: Arbitrary value encountered during packet serialisation.
|
||||
|
||||
Returns:
|
||||
A JSON-compatible object derived from ``value``.
|
||||
"""
|
||||
|
||||
if isinstance(value, (list, tuple, set)):
|
||||
return list(value)
|
||||
if isinstance(value, bytes):
|
||||
return base64.b64encode(value).decode("ascii")
|
||||
if isinstance(value, Mapping):
|
||||
return {
|
||||
str(key): _ignored_packet_default(sub_value)
|
||||
for key, sub_value in value.items()
|
||||
}
|
||||
return str(value)
|
||||
|
||||
|
||||
def _record_ignored_packet(packet: Mapping | object, *, reason: str) -> None:
|
||||
"""Persist packet details to :data:`_IGNORED_PACKET_LOG_PATH` during debugging.
|
||||
|
||||
Does nothing when :data:`config.DEBUG` is ``False``. Each call appends a
|
||||
single newline-delimited JSON record with a timestamp, drop reason, and a
|
||||
sanitised copy of the packet.
|
||||
|
||||
Parameters:
|
||||
packet: Packet object or mapping to record.
|
||||
reason: Short machine-readable label describing why the packet was
|
||||
ignored (e.g. ``"unsupported-port"``, ``"missing-packet-id"``).
|
||||
"""
|
||||
|
||||
if not config.DEBUG:
|
||||
return
|
||||
|
||||
timestamp = datetime.now(timezone.utc).isoformat()
|
||||
entry = {
|
||||
"timestamp": timestamp,
|
||||
"reason": reason,
|
||||
"packet": _ignored_packet_default(packet),
|
||||
}
|
||||
payload = json.dumps(entry, ensure_ascii=False, sort_keys=True)
|
||||
with _IGNORED_PACKET_LOCK:
|
||||
_IGNORED_PACKET_LOG_PATH.parent.mkdir(parents=True, exist_ok=True)
|
||||
with _IGNORED_PACKET_LOG_PATH.open("a", encoding="utf-8") as handle:
|
||||
handle.write(f"{payload}\n")
|
||||
|
||||
|
||||
__all__ = [
|
||||
"_IGNORED_PACKET_LOCK",
|
||||
"_IGNORED_PACKET_LOG_PATH",
|
||||
"_ignored_packet_default",
|
||||
"_record_ignored_packet",
|
||||
]
|
||||
@@ -1,150 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Handler for neighbour-information packets."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
|
||||
from .. import config, queue
|
||||
from ..serialization import (
|
||||
_canonical_node_id,
|
||||
_coerce_float,
|
||||
_coerce_int,
|
||||
_first,
|
||||
_iso,
|
||||
_node_num_from_id,
|
||||
)
|
||||
from . import _state
|
||||
from .radio import _apply_radio_metadata
|
||||
|
||||
|
||||
def store_neighborinfo_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist neighbour information gathered from a packet.
|
||||
|
||||
Meshtastic nodes periodically broadcast the set of nodes they can hear
|
||||
directly along with the observed signal quality. This handler serialises
|
||||
that snapshot so the web dashboard can render a live RF topology graph.
|
||||
|
||||
Parameters:
|
||||
packet: Raw Meshtastic packet metadata.
|
||||
decoded: Decoded view containing the ``neighborinfo`` section.
|
||||
|
||||
Returns:
|
||||
``None``. The neighbour snapshot is queued for HTTP submission.
|
||||
"""
|
||||
|
||||
neighbor_section = (
|
||||
decoded.get("neighborinfo") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if not isinstance(neighbor_section, Mapping):
|
||||
return
|
||||
|
||||
node_ref = _first(
|
||||
neighbor_section,
|
||||
"nodeId",
|
||||
"node_id",
|
||||
default=_first(packet, "fromId", "from_id", "from", default=None),
|
||||
)
|
||||
node_id = _canonical_node_id(node_ref)
|
||||
if node_id is None:
|
||||
return
|
||||
|
||||
node_num = _coerce_int(_first(neighbor_section, "nodeId", "node_id", default=None))
|
||||
if node_num is None:
|
||||
node_num = _node_num_from_id(node_id)
|
||||
|
||||
node_broadcast_interval = _coerce_int(
|
||||
_first(
|
||||
neighbor_section,
|
||||
"nodeBroadcastIntervalSecs",
|
||||
"node_broadcast_interval_secs",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
last_sent_by_ref = _first(
|
||||
neighbor_section,
|
||||
"lastSentById",
|
||||
"last_sent_by_id",
|
||||
default=None,
|
||||
)
|
||||
last_sent_by_id = _canonical_node_id(last_sent_by_ref)
|
||||
|
||||
rx_time = _coerce_int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
if rx_time is None:
|
||||
rx_time = int(time.time())
|
||||
|
||||
neighbors_payload = neighbor_section.get("neighbors")
|
||||
neighbors_iterable = (
|
||||
neighbors_payload if isinstance(neighbors_payload, list) else []
|
||||
)
|
||||
|
||||
neighbor_entries: list[dict] = []
|
||||
for entry in neighbors_iterable:
|
||||
if not isinstance(entry, Mapping):
|
||||
continue
|
||||
neighbor_ref = _first(entry, "nodeId", "node_id", default=None)
|
||||
neighbor_id = _canonical_node_id(neighbor_ref)
|
||||
if neighbor_id is None:
|
||||
continue
|
||||
neighbor_num = _coerce_int(_first(entry, "nodeId", "node_id", default=None))
|
||||
if neighbor_num is None:
|
||||
neighbor_num = _node_num_from_id(neighbor_id)
|
||||
snr = _coerce_float(_first(entry, "snr", default=None))
|
||||
entry_rx_time = _coerce_int(_first(entry, "rxTime", "rx_time", default=None))
|
||||
if entry_rx_time is None:
|
||||
entry_rx_time = rx_time
|
||||
neighbor_entries.append(
|
||||
{
|
||||
"neighbor_id": neighbor_id,
|
||||
"neighbor_num": neighbor_num,
|
||||
"snr": snr,
|
||||
"rx_time": entry_rx_time,
|
||||
"rx_iso": _iso(entry_rx_time),
|
||||
}
|
||||
)
|
||||
|
||||
payload = {
|
||||
"node_id": node_id,
|
||||
"node_num": node_num,
|
||||
"neighbors": neighbor_entries,
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": _iso(rx_time),
|
||||
"ingestor": _state.host_node_id(),
|
||||
}
|
||||
|
||||
if node_broadcast_interval is not None:
|
||||
payload["node_broadcast_interval_secs"] = node_broadcast_interval
|
||||
if last_sent_by_id is not None:
|
||||
payload["last_sent_by_id"] = last_sent_by_id
|
||||
|
||||
queue._queue_post_json(
|
||||
"/api/neighbors",
|
||||
_apply_radio_metadata(payload),
|
||||
priority=queue._NEIGHBOR_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Queued neighborinfo payload",
|
||||
context="handlers.store_neighborinfo",
|
||||
node_id=node_id,
|
||||
neighbors=len(neighbor_entries),
|
||||
)
|
||||
|
||||
|
||||
__all__ = ["store_neighborinfo_packet"]
|
||||
@@ -1,234 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Handler for node-information packets."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
|
||||
from .. import config, queue
|
||||
from ..serialization import (
|
||||
_canonical_node_id,
|
||||
_coerce_int,
|
||||
_decode_nodeinfo_payload,
|
||||
_extract_payload_bytes,
|
||||
_first,
|
||||
_merge_mappings,
|
||||
_node_num_from_id,
|
||||
_node_to_dict,
|
||||
_nodeinfo_metrics_dict,
|
||||
_nodeinfo_position_dict,
|
||||
_nodeinfo_user_dict,
|
||||
)
|
||||
from . import _state
|
||||
from .radio import _apply_radio_metadata_to_nodes
|
||||
|
||||
|
||||
def store_nodeinfo_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist node information updates.
|
||||
|
||||
Node info packets carry user profile data (short name, long name, hardware
|
||||
model, public key) together with optional position and device-metrics
|
||||
snapshots. When a protobuf payload is present it is decoded first; any
|
||||
fields missing from the protobuf are filled in from the ``decoded`` dict
|
||||
so both firmware variants are handled.
|
||||
|
||||
Parameters:
|
||||
packet: Raw packet metadata describing the update.
|
||||
decoded: Decoded payload that may include ``user`` and ``position``
|
||||
sections.
|
||||
|
||||
Returns:
|
||||
``None``. The node payload is merged into the API queue.
|
||||
"""
|
||||
|
||||
payload_bytes = _extract_payload_bytes(decoded)
|
||||
node_info = _decode_nodeinfo_payload(payload_bytes)
|
||||
decoded_user = decoded.get("user")
|
||||
user_dict = _nodeinfo_user_dict(node_info, decoded_user)
|
||||
|
||||
node_info_fields = set()
|
||||
if node_info:
|
||||
node_info_fields = {field_desc.name for field_desc, _ in node_info.ListFields()}
|
||||
|
||||
node_id = None
|
||||
if isinstance(user_dict, Mapping):
|
||||
node_id = _canonical_node_id(user_dict.get("id"))
|
||||
|
||||
if node_id is None:
|
||||
node_id = _canonical_node_id(
|
||||
_first(packet, "fromId", "from_id", "from", default=None)
|
||||
)
|
||||
|
||||
if node_id is None:
|
||||
return
|
||||
|
||||
# Throttle self-NODEINFO upserts to at most once per hour. The meshtastic
|
||||
# library rebroadcasts the local node's NODEINFO periodically; accepting
|
||||
# every broadcast would overwrite the host node record too aggressively.
|
||||
if node_id == _state.host_node_id():
|
||||
_now = time.monotonic()
|
||||
if _state._host_nodeinfo_suppressed(_now):
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Suppressed host self-NODEINFO update within throttle window",
|
||||
context="handlers.store_nodeinfo",
|
||||
node_id=node_id,
|
||||
)
|
||||
return
|
||||
_state._mark_host_nodeinfo_seen(_now)
|
||||
|
||||
node_payload: dict = {}
|
||||
if user_dict:
|
||||
node_payload["user"] = user_dict
|
||||
|
||||
# Resolve node_num from protobuf first, then decoded dict, then from the
|
||||
# canonical ID as a last resort.
|
||||
node_num = None
|
||||
if node_info and "num" in node_info_fields:
|
||||
try:
|
||||
node_num = int(node_info.num)
|
||||
except (TypeError, ValueError):
|
||||
node_num = None
|
||||
if node_num is None:
|
||||
decoded_num = decoded.get("num")
|
||||
if decoded_num is not None:
|
||||
try:
|
||||
node_num = int(decoded_num)
|
||||
except (TypeError, ValueError):
|
||||
try:
|
||||
node_num = int(str(decoded_num).strip(), 0)
|
||||
except Exception:
|
||||
node_num = None
|
||||
if node_num is None:
|
||||
node_num = _node_num_from_id(node_id)
|
||||
if node_num is not None:
|
||||
node_payload["num"] = node_num
|
||||
|
||||
rx_time = int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
last_heard = None
|
||||
if node_info and "last_heard" in node_info_fields:
|
||||
try:
|
||||
last_heard = int(node_info.last_heard)
|
||||
except (TypeError, ValueError):
|
||||
last_heard = None
|
||||
if last_heard is None:
|
||||
decoded_last_heard = decoded.get("lastHeard")
|
||||
if decoded_last_heard is not None:
|
||||
try:
|
||||
last_heard = int(decoded_last_heard)
|
||||
except (TypeError, ValueError):
|
||||
last_heard = None
|
||||
if last_heard is None or last_heard < rx_time:
|
||||
last_heard = rx_time
|
||||
node_payload["lastHeard"] = last_heard
|
||||
|
||||
snr = None
|
||||
if node_info and "snr" in node_info_fields:
|
||||
try:
|
||||
snr = float(node_info.snr)
|
||||
except (TypeError, ValueError):
|
||||
snr = None
|
||||
if snr is None:
|
||||
snr = _first(packet, "snr", "rx_snr", "rxSnr", default=None)
|
||||
if snr is not None:
|
||||
try:
|
||||
snr = float(snr)
|
||||
except (TypeError, ValueError):
|
||||
snr = None
|
||||
if snr is not None:
|
||||
node_payload["snr"] = snr
|
||||
|
||||
hops = None
|
||||
if node_info and "hops_away" in node_info_fields:
|
||||
try:
|
||||
hops = int(node_info.hops_away)
|
||||
except (TypeError, ValueError):
|
||||
hops = None
|
||||
if hops is None:
|
||||
hops = decoded.get("hopsAway")
|
||||
if hops is not None:
|
||||
try:
|
||||
hops = int(hops)
|
||||
except (TypeError, ValueError):
|
||||
hops = None
|
||||
if hops is not None:
|
||||
node_payload["hopsAway"] = hops
|
||||
|
||||
if node_info and "channel" in node_info_fields:
|
||||
try:
|
||||
node_payload["channel"] = int(node_info.channel)
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
|
||||
if node_info and "via_mqtt" in node_info_fields:
|
||||
node_payload["viaMqtt"] = bool(node_info.via_mqtt)
|
||||
|
||||
if node_info and "is_favorite" in node_info_fields:
|
||||
node_payload["isFavorite"] = bool(node_info.is_favorite)
|
||||
elif "isFavorite" in decoded:
|
||||
node_payload["isFavorite"] = bool(decoded.get("isFavorite"))
|
||||
|
||||
if node_info and "is_ignored" in node_info_fields:
|
||||
node_payload["isIgnored"] = bool(node_info.is_ignored)
|
||||
if node_info and "is_key_manually_verified" in node_info_fields:
|
||||
node_payload["isKeyManuallyVerified"] = bool(node_info.is_key_manually_verified)
|
||||
|
||||
metrics = _nodeinfo_metrics_dict(node_info)
|
||||
decoded_metrics = decoded.get("deviceMetrics")
|
||||
if isinstance(decoded_metrics, Mapping):
|
||||
metrics = _merge_mappings(metrics, _node_to_dict(decoded_metrics))
|
||||
if metrics:
|
||||
node_payload["deviceMetrics"] = metrics
|
||||
|
||||
position = _nodeinfo_position_dict(node_info)
|
||||
decoded_position = decoded.get("position")
|
||||
if isinstance(decoded_position, Mapping):
|
||||
position = _merge_mappings(position, _node_to_dict(decoded_position))
|
||||
if position:
|
||||
node_payload["position"] = position
|
||||
|
||||
hop_limit = _first(packet, "hopLimit", "hop_limit", default=None)
|
||||
if hop_limit is not None and "hopLimit" not in node_payload:
|
||||
try:
|
||||
node_payload["hopLimit"] = int(hop_limit)
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
|
||||
nodes_payload = _apply_radio_metadata_to_nodes({node_id: node_payload})
|
||||
nodes_payload["ingestor"] = _state.host_node_id()
|
||||
queue._queue_post_json(
|
||||
"/api/nodes",
|
||||
nodes_payload,
|
||||
priority=queue._NODE_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
short = None
|
||||
long_name = None
|
||||
if isinstance(user_dict, Mapping):
|
||||
short = user_dict.get("shortName")
|
||||
long_name = user_dict.get("longName")
|
||||
config._debug_log(
|
||||
"Queued nodeinfo payload",
|
||||
context="handlers.store_nodeinfo",
|
||||
node_id=node_id,
|
||||
short_name=short,
|
||||
long_name=long_name,
|
||||
)
|
||||
|
||||
|
||||
__all__ = ["store_nodeinfo_packet"]
|
||||
@@ -1,413 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Handlers for position and traceroute packets."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
|
||||
from .. import config, queue
|
||||
from ..serialization import (
|
||||
_canonical_node_id,
|
||||
_coerce_float,
|
||||
_coerce_int,
|
||||
_extract_payload_bytes,
|
||||
_first,
|
||||
_iso,
|
||||
_node_num_from_id,
|
||||
_node_to_dict,
|
||||
_pkt_to_dict,
|
||||
)
|
||||
from . import _state
|
||||
from .ignored import _record_ignored_packet
|
||||
from .radio import _apply_radio_metadata
|
||||
|
||||
|
||||
def base64_payload(payload_bytes: bytes | None) -> str | None:
|
||||
"""Encode raw payload bytes as a Base64 string for JSON transport.
|
||||
|
||||
Parameters:
|
||||
payload_bytes: Optional raw bytes to encode. When ``None`` or empty,
|
||||
``None`` is returned so callers can omit the field.
|
||||
|
||||
Returns:
|
||||
The Base64-encoded ASCII string, or ``None`` when ``payload_bytes`` is
|
||||
falsy.
|
||||
"""
|
||||
|
||||
if not payload_bytes:
|
||||
return None
|
||||
return base64.b64encode(payload_bytes).decode("ascii")
|
||||
|
||||
|
||||
def _normalize_trace_hops(hops_value: object) -> list[int]:
|
||||
"""Coerce hop entries to integer node numbers, preserving order.
|
||||
|
||||
Each hop can arrive as a plain integer, a canonical node-ID string
|
||||
(``!xxxxxxxx``), or a mapping with a ``nodeId`` / ``node_id`` field.
|
||||
All forms are normalised to the raw 32-bit node number used by the API.
|
||||
|
||||
Parameters:
|
||||
hops_value: A single hop or list of hops in any supported form.
|
||||
|
||||
Returns:
|
||||
List of integer node numbers with ``None``-coerced entries dropped.
|
||||
"""
|
||||
|
||||
if hops_value is None:
|
||||
return []
|
||||
hop_entries = hops_value if isinstance(hops_value, list) else [hops_value]
|
||||
normalized: list[int] = []
|
||||
for hop in hop_entries:
|
||||
hop_value = hop
|
||||
if isinstance(hop, Mapping):
|
||||
hop_value = _first(hop, "node_id", "nodeId", "id", "num", default=None)
|
||||
|
||||
canonical = _canonical_node_id(hop_value)
|
||||
hop_id = _node_num_from_id(canonical or hop_value)
|
||||
if hop_id is None:
|
||||
hop_id = _coerce_int(hop_value)
|
||||
if hop_id is not None:
|
||||
normalized.append(hop_id)
|
||||
return normalized
|
||||
|
||||
|
||||
def store_position_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist a decoded GPS position packet to the API.
|
||||
|
||||
Extracts coordinates from both the integer-scaled (``latitudeI`` /
|
||||
``longitudeI``) and floating-point (``latitude`` / ``longitude``) forms
|
||||
that Meshtastic may produce depending on firmware version.
|
||||
|
||||
Parameters:
|
||||
packet: Raw packet metadata emitted by the Meshtastic interface.
|
||||
decoded: Decoded payload extracted from ``packet['decoded']``.
|
||||
|
||||
Returns:
|
||||
``None``. The formatted position payload is added to the HTTP queue.
|
||||
"""
|
||||
|
||||
node_ref = _first(packet, "fromId", "from_id", "from", default=None)
|
||||
if node_ref is None:
|
||||
node_ref = _first(decoded, "num", default=None)
|
||||
node_id = _canonical_node_id(node_ref)
|
||||
if node_id is None:
|
||||
return
|
||||
|
||||
node_num = _coerce_int(_first(decoded, "num", default=None))
|
||||
if node_num is None:
|
||||
node_num = _node_num_from_id(node_id)
|
||||
|
||||
pkt_id = _coerce_int(_first(packet, "id", "packet_id", "packetId", default=None))
|
||||
if pkt_id is None:
|
||||
return
|
||||
|
||||
rx_time = _coerce_int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
if rx_time is None:
|
||||
rx_time = int(time.time())
|
||||
|
||||
to_id = _first(packet, "toId", "to_id", "to", default=None)
|
||||
to_id = to_id if to_id not in {"", None} else None
|
||||
|
||||
position_section = decoded.get("position") if isinstance(decoded, Mapping) else None
|
||||
if not isinstance(position_section, Mapping):
|
||||
position_section = {}
|
||||
|
||||
# Meshtastic firmware may emit coordinates in one of two forms:
|
||||
# - Floating-point degrees: ``latitude`` / ``longitude``
|
||||
# - Integer-scaled (1e-7 degrees): ``latitudeI`` / ``longitudeI``
|
||||
# Try the float form first and fall back to the integer form when absent.
|
||||
latitude = _coerce_float(
|
||||
_first(position_section, "latitude", "raw.latitude", default=None)
|
||||
)
|
||||
if latitude is None:
|
||||
lat_i = _coerce_int(
|
||||
_first(
|
||||
position_section,
|
||||
"latitudeI",
|
||||
"latitude_i",
|
||||
"raw.latitude_i",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
if lat_i is not None:
|
||||
latitude = lat_i / 1e7
|
||||
|
||||
longitude = _coerce_float(
|
||||
_first(position_section, "longitude", "raw.longitude", default=None)
|
||||
)
|
||||
if longitude is None:
|
||||
lon_i = _coerce_int(
|
||||
_first(
|
||||
position_section,
|
||||
"longitudeI",
|
||||
"longitude_i",
|
||||
"raw.longitude_i",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
if lon_i is not None:
|
||||
longitude = lon_i / 1e7
|
||||
|
||||
altitude = _coerce_float(
|
||||
_first(position_section, "altitude", "raw.altitude", default=None)
|
||||
)
|
||||
position_time = _coerce_int(
|
||||
_first(position_section, "time", "raw.time", default=None)
|
||||
)
|
||||
location_source = _first(
|
||||
position_section,
|
||||
"locationSource",
|
||||
"location_source",
|
||||
"raw.location_source",
|
||||
default=None,
|
||||
)
|
||||
location_source = (
|
||||
str(location_source).strip() if location_source not in {None, ""} else None
|
||||
)
|
||||
|
||||
precision_bits = _coerce_int(
|
||||
_first(
|
||||
position_section,
|
||||
"precisionBits",
|
||||
"precision_bits",
|
||||
"raw.precision_bits",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
sats_in_view = _coerce_int(
|
||||
_first(
|
||||
position_section,
|
||||
"satsInView",
|
||||
"sats_in_view",
|
||||
"raw.sats_in_view",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
pdop = _coerce_float(
|
||||
_first(position_section, "PDOP", "pdop", "raw.PDOP", "raw.pdop", default=None)
|
||||
)
|
||||
ground_speed = _coerce_float(
|
||||
_first(
|
||||
position_section,
|
||||
"groundSpeed",
|
||||
"ground_speed",
|
||||
"raw.ground_speed",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
ground_track = _coerce_float(
|
||||
_first(
|
||||
position_section,
|
||||
"groundTrack",
|
||||
"ground_track",
|
||||
"raw.ground_track",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
snr = _coerce_float(_first(packet, "snr", "rx_snr", "rxSnr", default=None))
|
||||
rssi = _coerce_int(_first(packet, "rssi", "rx_rssi", "rxRssi", default=None))
|
||||
hop_limit = _coerce_int(_first(packet, "hopLimit", "hop_limit", default=None))
|
||||
bitfield = _coerce_int(_first(decoded, "bitfield", default=None))
|
||||
|
||||
payload_bytes = _extract_payload_bytes(decoded)
|
||||
payload_b64 = base64_payload(payload_bytes)
|
||||
|
||||
raw_section = decoded.get("raw") if isinstance(decoded, Mapping) else None
|
||||
raw_payload = _node_to_dict(raw_section) if raw_section else None
|
||||
if raw_payload is None and position_section:
|
||||
raw_position = (
|
||||
position_section.get("raw")
|
||||
if isinstance(position_section, Mapping)
|
||||
else None
|
||||
)
|
||||
if raw_position:
|
||||
raw_payload = _node_to_dict(raw_position)
|
||||
|
||||
position_payload = {
|
||||
"id": pkt_id,
|
||||
"node_id": node_id or node_ref,
|
||||
"node_num": node_num,
|
||||
"num": node_num,
|
||||
"from_id": node_id,
|
||||
"to_id": to_id,
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": _iso(rx_time),
|
||||
"latitude": latitude,
|
||||
"longitude": longitude,
|
||||
"altitude": altitude,
|
||||
"position_time": position_time,
|
||||
"location_source": location_source,
|
||||
"precision_bits": precision_bits,
|
||||
"sats_in_view": sats_in_view,
|
||||
"pdop": pdop,
|
||||
"ground_speed": ground_speed,
|
||||
"ground_track": ground_track,
|
||||
"snr": snr,
|
||||
"rssi": rssi,
|
||||
"hop_limit": hop_limit,
|
||||
"bitfield": bitfield,
|
||||
"payload_b64": payload_b64,
|
||||
"ingestor": _state.host_node_id(),
|
||||
}
|
||||
if raw_payload:
|
||||
position_payload["raw"] = raw_payload
|
||||
|
||||
queue._queue_post_json(
|
||||
"/api/positions",
|
||||
_apply_radio_metadata(position_payload),
|
||||
priority=queue._POSITION_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Queued position payload",
|
||||
context="handlers.store_position",
|
||||
node_id=node_id,
|
||||
latitude=latitude,
|
||||
longitude=longitude,
|
||||
position_time=position_time,
|
||||
)
|
||||
|
||||
|
||||
def store_traceroute_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist traceroute details and the observed hop path to the API.
|
||||
|
||||
Hop lists can arrive under several key names (``hops``, ``path``,
|
||||
``route``) and may appear at multiple nesting levels. All candidates are
|
||||
deduplicated and merged into a single ordered list.
|
||||
|
||||
Parameters:
|
||||
packet: Raw packet metadata from the Meshtastic interface.
|
||||
decoded: Decoded payload containing the traceroute section.
|
||||
|
||||
Returns:
|
||||
``None``. The traceroute payload is queued for HTTP submission, or
|
||||
silently dropped when identifiers are entirely absent.
|
||||
"""
|
||||
|
||||
traceroute_section = (
|
||||
decoded.get("traceroute") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
request_id = _coerce_int(
|
||||
_first(
|
||||
traceroute_section,
|
||||
"requestId",
|
||||
"request_id",
|
||||
default=_first(decoded, "req", "requestId", "request_id", default=None),
|
||||
)
|
||||
)
|
||||
pkt_id = _coerce_int(_first(packet, "id", "packet_id", "packetId", default=None))
|
||||
if pkt_id is None:
|
||||
pkt_id = request_id
|
||||
|
||||
rx_time = _coerce_int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
if rx_time is None:
|
||||
rx_time = int(time.time())
|
||||
|
||||
src = _coerce_int(
|
||||
_first(
|
||||
decoded,
|
||||
"src",
|
||||
"source",
|
||||
default=_first(packet, "fromId", "from_id", "from", default=None),
|
||||
)
|
||||
)
|
||||
dest = _coerce_int(
|
||||
_first(
|
||||
decoded,
|
||||
"dest",
|
||||
"destination",
|
||||
default=_first(packet, "toId", "to_id", "to", default=None),
|
||||
)
|
||||
)
|
||||
|
||||
metrics = traceroute_section if isinstance(traceroute_section, Mapping) else {}
|
||||
rssi = _coerce_int(
|
||||
_first(metrics, "rssi", default=_first(packet, "rssi", "rx_rssi", "rxRssi"))
|
||||
)
|
||||
snr = _coerce_float(
|
||||
_first(metrics, "snr", default=_first(packet, "snr", "rx_snr", "rxSnr"))
|
||||
)
|
||||
elapsed_ms = _coerce_int(
|
||||
_first(metrics, "elapsed_ms", "latency_ms", "latencyMs", default=None)
|
||||
)
|
||||
|
||||
# Hops can appear under multiple keys at different nesting levels; collect
|
||||
# all candidates and deduplicate while preserving first-seen order.
|
||||
hop_candidates = (
|
||||
_first(metrics, "hops", default=None),
|
||||
_first(metrics, "path", default=None),
|
||||
_first(metrics, "route", default=None),
|
||||
_first(decoded, "hops", default=None),
|
||||
_first(decoded, "path", default=None),
|
||||
(
|
||||
_first(traceroute_section, "route", default=None)
|
||||
if isinstance(traceroute_section, Mapping)
|
||||
else None
|
||||
),
|
||||
)
|
||||
hops: list[int] = []
|
||||
seen_hops: set[int] = set()
|
||||
for candidate in hop_candidates:
|
||||
for hop in _normalize_trace_hops(candidate):
|
||||
if hop in seen_hops:
|
||||
continue
|
||||
seen_hops.add(hop)
|
||||
hops.append(hop)
|
||||
|
||||
if pkt_id is None and request_id is None and not hops:
|
||||
_record_ignored_packet(packet, reason="traceroute-missing-identifiers")
|
||||
return
|
||||
|
||||
payload = {
|
||||
"id": pkt_id,
|
||||
"request_id": request_id,
|
||||
"src": src,
|
||||
"dest": dest,
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": _iso(rx_time),
|
||||
"hops": hops,
|
||||
"rssi": rssi,
|
||||
"snr": snr,
|
||||
"elapsed_ms": elapsed_ms,
|
||||
"ingestor": _state.host_node_id(),
|
||||
}
|
||||
|
||||
queue._queue_post_json(
|
||||
"/api/traces",
|
||||
_apply_radio_metadata(payload),
|
||||
priority=queue._TRACE_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Queued traceroute payload",
|
||||
context="handlers.store_traceroute_packet",
|
||||
request_id=request_id,
|
||||
src=src,
|
||||
dest=dest,
|
||||
hop_count=len(hops),
|
||||
)
|
||||
|
||||
|
||||
__all__ = [
|
||||
"base64_payload",
|
||||
"store_position_packet",
|
||||
"store_traceroute_packet",
|
||||
]
|
||||
@@ -1,94 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Radio metadata helpers for enriching API payloads.
|
||||
|
||||
LoRa radio parameters (frequency and modem preset) are captured once at
|
||||
connection time by :mod:`data.mesh_ingestor.interfaces` and stored on the
|
||||
:mod:`data.mesh_ingestor.config` module. The helpers here read those cached
|
||||
values and attach them to outgoing payloads so the web dashboard can display
|
||||
radio configuration alongside mesh data.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from .. import config
|
||||
|
||||
|
||||
def _radio_metadata_fields() -> dict[str, object]:
|
||||
"""Return the shared radio metadata fields for payload enrichment.
|
||||
|
||||
Reads ``LORA_FREQ`` and ``MODEM_PRESET`` from :mod:`config` and returns
|
||||
only the keys that have been populated (i.e. skips ``None`` values).
|
||||
|
||||
Returns:
|
||||
A dictionary containing zero, one, or both of ``lora_freq`` and
|
||||
``modem_preset`` depending on what is available.
|
||||
"""
|
||||
|
||||
metadata: dict[str, object] = {}
|
||||
freq = getattr(config, "LORA_FREQ", None)
|
||||
if freq is not None:
|
||||
metadata["lora_freq"] = freq
|
||||
preset = getattr(config, "MODEM_PRESET", None)
|
||||
if preset is not None:
|
||||
metadata["modem_preset"] = preset
|
||||
return metadata
|
||||
|
||||
|
||||
def _apply_radio_metadata(payload: dict) -> dict:
|
||||
"""Augment a flat payload dict with radio metadata when available.
|
||||
|
||||
Parameters:
|
||||
payload: Mutable dictionary that will receive radio metadata keys.
|
||||
|
||||
Returns:
|
||||
The same ``payload`` dict with radio metadata keys merged in-place.
|
||||
"""
|
||||
|
||||
metadata = _radio_metadata_fields()
|
||||
if metadata:
|
||||
payload.update(metadata)
|
||||
return payload
|
||||
|
||||
|
||||
def _apply_radio_metadata_to_nodes(payload: dict) -> dict:
|
||||
"""Attach radio metadata to each node entry stored in ``payload``.
|
||||
|
||||
Node upsert payloads are keyed by node ID; each value is a dict of node
|
||||
attributes. This function enriches every node-value dict with radio
|
||||
metadata so the dashboard can show the radio configuration that was active
|
||||
when the node was last heard.
|
||||
|
||||
Parameters:
|
||||
payload: Mapping of ``node_id → node_dict`` to enrich in-place.
|
||||
|
||||
Returns:
|
||||
The same ``payload`` dict after in-place mutation of its node entries.
|
||||
"""
|
||||
|
||||
metadata = _radio_metadata_fields()
|
||||
if not metadata:
|
||||
return payload
|
||||
for value in payload.values():
|
||||
if isinstance(value, dict):
|
||||
value.update(metadata)
|
||||
return payload
|
||||
|
||||
|
||||
__all__ = [
|
||||
"_apply_radio_metadata",
|
||||
"_apply_radio_metadata_to_nodes",
|
||||
"_radio_metadata_fields",
|
||||
]
|
||||
@@ -1,563 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Handlers for telemetry and router-heartbeat packets."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
|
||||
from .. import config, queue
|
||||
from ..serialization import (
|
||||
_canonical_node_id,
|
||||
_coerce_float,
|
||||
_coerce_int,
|
||||
_extract_payload_bytes,
|
||||
_first,
|
||||
_iso,
|
||||
_node_num_from_id,
|
||||
)
|
||||
from . import _state
|
||||
from .position import base64_payload
|
||||
from .radio import _apply_radio_metadata, _apply_radio_metadata_to_nodes
|
||||
|
||||
_VALID_TELEMETRY_TYPES: frozenset[str] = frozenset(
|
||||
{"device", "environment", "power", "air_quality"}
|
||||
)
|
||||
"""Allowed discriminator values for the ``telemetry_type`` field.
|
||||
|
||||
Meshtastic uses a protobuf ``oneof`` so only one metric sub-object can be
|
||||
populated per packet. Values outside this set indicate a firmware version
|
||||
that added a new type not yet handled here; those are logged and dropped to
|
||||
avoid persisting unexpected data shapes.
|
||||
"""
|
||||
|
||||
|
||||
def store_telemetry_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist telemetry metrics extracted from a packet.
|
||||
|
||||
Handles all four Meshtastic telemetry sub-types (device, environment,
|
||||
power, air quality) by extracting common fields first and then
|
||||
conditionally adding type-specific metric keys.
|
||||
|
||||
Host telemetry is rate-limited: if the locally connected node's own
|
||||
telemetry arrives within the suppression window it is silently dropped to
|
||||
avoid constant self-updates overwriting other node data.
|
||||
|
||||
Parameters:
|
||||
packet: Packet metadata received from the radio interface.
|
||||
decoded: Meshtastic-decoded view containing telemetry structures.
|
||||
|
||||
Returns:
|
||||
``None``. The telemetry payload is added to the HTTP queue.
|
||||
"""
|
||||
|
||||
telemetry_section = (
|
||||
decoded.get("telemetry") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if not isinstance(telemetry_section, Mapping):
|
||||
return
|
||||
|
||||
pkt_id = _coerce_int(_first(packet, "id", "packet_id", "packetId", default=None))
|
||||
if pkt_id is None:
|
||||
return
|
||||
|
||||
raw_from = _first(packet, "fromId", "from_id", "from", default=None)
|
||||
node_id = _canonical_node_id(raw_from)
|
||||
node_num = _coerce_int(_first(decoded, "num", "node_num", default=None))
|
||||
if node_num is None:
|
||||
node_num = _node_num_from_id(node_id or raw_from)
|
||||
|
||||
to_id = _first(packet, "toId", "to_id", "to", default=None)
|
||||
|
||||
raw_rx_time = _first(packet, "rxTime", "rx_time", default=time.time())
|
||||
try:
|
||||
rx_time = int(raw_rx_time)
|
||||
except (TypeError, ValueError):
|
||||
rx_time = int(time.time())
|
||||
rx_iso = _iso(rx_time)
|
||||
|
||||
host_id = _state.host_node_id()
|
||||
# The locally connected node broadcasts its own telemetry frequently.
|
||||
# Accepting every packet would overwrite the host's profile more often
|
||||
# than necessary; the suppression window (default 1 h) rate-limits
|
||||
# self-updates without blocking telemetry from other nodes.
|
||||
if host_id is not None and node_id == host_id:
|
||||
suppressed, minutes_remaining = _state._host_telemetry_suppressed(rx_time)
|
||||
if suppressed:
|
||||
config._debug_log(
|
||||
"Suppressed host telemetry update",
|
||||
context="handlers.store_telemetry",
|
||||
host_node_id=host_id,
|
||||
minutes_remaining=minutes_remaining,
|
||||
)
|
||||
return
|
||||
_state._mark_host_telemetry_seen(rx_time)
|
||||
|
||||
telemetry_time = _coerce_int(_first(telemetry_section, "time", default=None))
|
||||
|
||||
_dm = telemetry_section.get("deviceMetrics") or telemetry_section.get(
|
||||
"device_metrics"
|
||||
)
|
||||
_em = telemetry_section.get("environmentMetrics") or telemetry_section.get(
|
||||
"environment_metrics"
|
||||
)
|
||||
_pm = telemetry_section.get("powerMetrics") or telemetry_section.get(
|
||||
"power_metrics"
|
||||
)
|
||||
_aq = telemetry_section.get("airQualityMetrics") or telemetry_section.get(
|
||||
"air_quality_metrics"
|
||||
)
|
||||
# Priority order matters: deviceMetrics is checked first because the device
|
||||
# sub-object also carries a voltage field that overlaps with powerMetrics.
|
||||
# Meshtastic uses a protobuf oneof so only one sub-object can be populated per
|
||||
# packet; the elif chain handles any hypothetical overlap from future protocols.
|
||||
if isinstance(_dm, Mapping):
|
||||
telemetry_type: str | None = "device"
|
||||
elif isinstance(_em, Mapping):
|
||||
telemetry_type = "environment"
|
||||
elif isinstance(_pm, Mapping):
|
||||
telemetry_type = "power"
|
||||
elif isinstance(_aq, Mapping):
|
||||
telemetry_type = "air_quality"
|
||||
else:
|
||||
telemetry_type = None
|
||||
|
||||
if telemetry_type is not None and telemetry_type not in _VALID_TELEMETRY_TYPES:
|
||||
config._debug_log(
|
||||
"Unexpected telemetry_type value; dropping field",
|
||||
context="handlers.store_telemetry",
|
||||
severity="warning",
|
||||
always=True,
|
||||
telemetry_type=telemetry_type,
|
||||
)
|
||||
telemetry_type = None
|
||||
|
||||
channel = _coerce_int(_first(decoded, "channel", default=None))
|
||||
if channel is None:
|
||||
channel = _coerce_int(_first(packet, "channel", default=None))
|
||||
if channel is None:
|
||||
channel = 0
|
||||
|
||||
portnum = _first(decoded, "portnum", default=None)
|
||||
portnum = str(portnum) if portnum not in {None, ""} else None
|
||||
|
||||
bitfield = _coerce_int(_first(decoded, "bitfield", default=None))
|
||||
|
||||
snr = _coerce_float(_first(packet, "snr", "rx_snr", "rxSnr", default=None))
|
||||
rssi = _coerce_int(_first(packet, "rssi", "rx_rssi", "rxRssi", default=None))
|
||||
hop_limit = _coerce_int(_first(packet, "hopLimit", "hop_limit", default=None))
|
||||
|
||||
payload_bytes = _extract_payload_bytes(decoded)
|
||||
payload_b64 = base64_payload(payload_bytes) or ""
|
||||
|
||||
battery_level = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"batteryLevel",
|
||||
"battery_level",
|
||||
"deviceMetrics.batteryLevel",
|
||||
"environmentMetrics.battery_level",
|
||||
"deviceMetrics.battery_level",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
voltage = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"voltage",
|
||||
"environmentMetrics.voltage",
|
||||
"deviceMetrics.voltage",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
channel_utilization = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"channelUtilization",
|
||||
"channel_utilization",
|
||||
"deviceMetrics.channelUtilization",
|
||||
"deviceMetrics.channel_utilization",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
air_util_tx = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"airUtilTx",
|
||||
"air_util_tx",
|
||||
"deviceMetrics.airUtilTx",
|
||||
"deviceMetrics.air_util_tx",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
uptime_seconds = _coerce_int(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"uptimeSeconds",
|
||||
"uptime_seconds",
|
||||
"deviceMetrics.uptimeSeconds",
|
||||
"deviceMetrics.uptime_seconds",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
temperature = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"temperature",
|
||||
"environmentMetrics.temperature",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
relative_humidity = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"relativeHumidity",
|
||||
"relative_humidity",
|
||||
"environmentMetrics.relativeHumidity",
|
||||
"environmentMetrics.relative_humidity",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
barometric_pressure = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"barometricPressure",
|
||||
"barometric_pressure",
|
||||
"environmentMetrics.barometricPressure",
|
||||
"environmentMetrics.barometric_pressure",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
current = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"current",
|
||||
"deviceMetrics.current",
|
||||
"deviceMetrics.current_ma",
|
||||
"deviceMetrics.currentMa",
|
||||
"environmentMetrics.current",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
gas_resistance = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"gasResistance",
|
||||
"gas_resistance",
|
||||
"environmentMetrics.gasResistance",
|
||||
"environmentMetrics.gas_resistance",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
iaq = _coerce_int(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"iaq",
|
||||
"environmentMetrics.iaq",
|
||||
"environmentMetrics.iaqIndex",
|
||||
"environmentMetrics.iaq_index",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
distance = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"distance",
|
||||
"environmentMetrics.distance",
|
||||
"environmentMetrics.range",
|
||||
"environmentMetrics.rangeMeters",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
lux = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"lux",
|
||||
"environmentMetrics.lux",
|
||||
"environmentMetrics.illuminance",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
white_lux = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"whiteLux",
|
||||
"white_lux",
|
||||
"environmentMetrics.whiteLux",
|
||||
"environmentMetrics.white_lux",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
ir_lux = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"irLux",
|
||||
"ir_lux",
|
||||
"environmentMetrics.irLux",
|
||||
"environmentMetrics.ir_lux",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
uv_lux = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"uvLux",
|
||||
"uv_lux",
|
||||
"environmentMetrics.uvLux",
|
||||
"environmentMetrics.uv_lux",
|
||||
"environmentMetrics.uvIndex",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
wind_direction = _coerce_int(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"windDirection",
|
||||
"wind_direction",
|
||||
"environmentMetrics.windDirection",
|
||||
"environmentMetrics.wind_direction",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
wind_speed = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"windSpeed",
|
||||
"wind_speed",
|
||||
"environmentMetrics.windSpeed",
|
||||
"environmentMetrics.wind_speed",
|
||||
"environmentMetrics.windSpeedMps",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
wind_gust = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"windGust",
|
||||
"wind_gust",
|
||||
"environmentMetrics.windGust",
|
||||
"environmentMetrics.wind_gust",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
wind_lull = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"windLull",
|
||||
"wind_lull",
|
||||
"environmentMetrics.windLull",
|
||||
"environmentMetrics.wind_lull",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
weight = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"weight",
|
||||
"environmentMetrics.weight",
|
||||
"environmentMetrics.mass",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
radiation = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"radiation",
|
||||
"environmentMetrics.radiation",
|
||||
"environmentMetrics.radiationLevel",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
rainfall_1h = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"rainfall1h",
|
||||
"rainfall_1h",
|
||||
"environmentMetrics.rainfall1h",
|
||||
"environmentMetrics.rainfall_1h",
|
||||
"environmentMetrics.rainfallOneHour",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
rainfall_24h = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"rainfall24h",
|
||||
"rainfall_24h",
|
||||
"environmentMetrics.rainfall24h",
|
||||
"environmentMetrics.rainfall_24h",
|
||||
"environmentMetrics.rainfallTwentyFourHour",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
soil_moisture = _coerce_int(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"soilMoisture",
|
||||
"soil_moisture",
|
||||
"environmentMetrics.soilMoisture",
|
||||
"environmentMetrics.soil_moisture",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
soil_temperature = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"soilTemperature",
|
||||
"soil_temperature",
|
||||
"environmentMetrics.soilTemperature",
|
||||
"environmentMetrics.soil_temperature",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
telemetry_payload = {
|
||||
"id": pkt_id,
|
||||
"node_id": node_id,
|
||||
"node_num": node_num,
|
||||
"from_id": node_id or raw_from,
|
||||
"to_id": to_id,
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": rx_iso,
|
||||
"telemetry_time": telemetry_time,
|
||||
"channel": channel,
|
||||
"portnum": portnum,
|
||||
"bitfield": bitfield,
|
||||
"snr": snr,
|
||||
"rssi": rssi,
|
||||
"hop_limit": hop_limit,
|
||||
"payload_b64": payload_b64,
|
||||
"ingestor": _state.host_node_id(),
|
||||
}
|
||||
|
||||
# Conditionally include metric keys so the API ignores absent fields rather
|
||||
# than overwriting existing values with null.
|
||||
if battery_level is not None:
|
||||
telemetry_payload["battery_level"] = battery_level
|
||||
if voltage is not None:
|
||||
telemetry_payload["voltage"] = voltage
|
||||
if channel_utilization is not None:
|
||||
telemetry_payload["channel_utilization"] = channel_utilization
|
||||
if air_util_tx is not None:
|
||||
telemetry_payload["air_util_tx"] = air_util_tx
|
||||
if uptime_seconds is not None:
|
||||
telemetry_payload["uptime_seconds"] = uptime_seconds
|
||||
if temperature is not None:
|
||||
telemetry_payload["temperature"] = temperature
|
||||
if relative_humidity is not None:
|
||||
telemetry_payload["relative_humidity"] = relative_humidity
|
||||
if barometric_pressure is not None:
|
||||
telemetry_payload["barometric_pressure"] = barometric_pressure
|
||||
if current is not None:
|
||||
telemetry_payload["current"] = current
|
||||
if gas_resistance is not None:
|
||||
telemetry_payload["gas_resistance"] = gas_resistance
|
||||
if iaq is not None:
|
||||
telemetry_payload["iaq"] = iaq
|
||||
if distance is not None:
|
||||
telemetry_payload["distance"] = distance
|
||||
if lux is not None:
|
||||
telemetry_payload["lux"] = lux
|
||||
if white_lux is not None:
|
||||
telemetry_payload["white_lux"] = white_lux
|
||||
if ir_lux is not None:
|
||||
telemetry_payload["ir_lux"] = ir_lux
|
||||
if uv_lux is not None:
|
||||
telemetry_payload["uv_lux"] = uv_lux
|
||||
if wind_direction is not None:
|
||||
telemetry_payload["wind_direction"] = wind_direction
|
||||
if wind_speed is not None:
|
||||
telemetry_payload["wind_speed"] = wind_speed
|
||||
if wind_gust is not None:
|
||||
telemetry_payload["wind_gust"] = wind_gust
|
||||
if wind_lull is not None:
|
||||
telemetry_payload["wind_lull"] = wind_lull
|
||||
if weight is not None:
|
||||
telemetry_payload["weight"] = weight
|
||||
if radiation is not None:
|
||||
telemetry_payload["radiation"] = radiation
|
||||
if rainfall_1h is not None:
|
||||
telemetry_payload["rainfall_1h"] = rainfall_1h
|
||||
if rainfall_24h is not None:
|
||||
telemetry_payload["rainfall_24h"] = rainfall_24h
|
||||
if soil_moisture is not None:
|
||||
telemetry_payload["soil_moisture"] = soil_moisture
|
||||
if soil_temperature is not None:
|
||||
telemetry_payload["soil_temperature"] = soil_temperature
|
||||
if telemetry_type is not None:
|
||||
telemetry_payload["telemetry_type"] = telemetry_type
|
||||
|
||||
queue._queue_post_json(
|
||||
"/api/telemetry",
|
||||
_apply_radio_metadata(telemetry_payload),
|
||||
priority=queue._TELEMETRY_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Queued telemetry payload",
|
||||
context="handlers.store_telemetry",
|
||||
node_id=node_id,
|
||||
battery_level=battery_level,
|
||||
voltage=voltage,
|
||||
)
|
||||
|
||||
|
||||
def store_router_heartbeat_packet(packet: Mapping) -> None:
    """Record a ``STORE_FORWARD_APP ROUTER_HEARTBEAT`` as a node presence update.

    A router heartbeat carries no message payload — the only actionable
    signal is that the store-and-forward router was alive at the observed
    ``rx_time``. Only ``lastHeard`` is sent so the router's existing profile
    is never overwritten.

    Parameters:
        packet: Raw packet metadata.

    Returns:
        ``None``. A minimal node upsert is enqueued at default priority.
    """

    sender = _canonical_node_id(
        _first(packet, "fromId", "from_id", "from", default=None)
    )
    if sender is None:
        return

    heard_at = int(_first(packet, "rxTime", "rx_time", default=time.time()))

    upsert: dict = _apply_radio_metadata_to_nodes({sender: {"lastHeard": heard_at}})
    upsert["ingestor"] = _state.host_node_id()
    queue._queue_post_json(
        "/api/nodes", upsert, priority=queue._DEFAULT_POST_PRIORITY
    )

    if config.DEBUG:
        config._debug_log(
            "Queued router heartbeat node upsert",
            context="handlers.store_router_heartbeat",
            node_id=sender,
            rx_time=heard_at,
        )
|
||||
|
||||
|
||||
# Public handler entry points exposed by this module.
__all__ = [
    "store_router_heartbeat_packet",
    "store_telemetry_packet",
]
|
||||
@@ -113,7 +113,6 @@ def queue_ingestor_heartbeat(
|
||||
"start_time": STATE.start_time,
|
||||
"last_seen_time": now,
|
||||
"version": INGESTOR_VERSION,
|
||||
"protocol": getattr(config, "PROTOCOL", "meshtastic") or "meshtastic",
|
||||
}
|
||||
if getattr(config, "LORA_FREQ", None) is not None:
|
||||
payload["lora_freq"] = config.LORA_FREQ
|
||||
|
||||
@@ -0,0 +1,889 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Mesh interface discovery helpers for interacting with Meshtastic hardware."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import glob
|
||||
import importlib
|
||||
import ipaddress
|
||||
import math
|
||||
import re
|
||||
import sys
|
||||
import urllib.parse
|
||||
from collections.abc import Mapping
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
try: # pragma: no cover - dependency optional in tests
|
||||
import meshtastic # type: ignore
|
||||
except Exception: # pragma: no cover - dependency optional in tests
|
||||
meshtastic = None # type: ignore[assignment]
|
||||
|
||||
from . import channels, config, serialization
|
||||
|
||||
|
||||
def _ensure_mapping(value) -> Mapping | None:
|
||||
"""Return ``value`` as a mapping when conversion is possible."""
|
||||
|
||||
if isinstance(value, Mapping):
|
||||
return value
|
||||
if hasattr(value, "__dict__") and isinstance(value.__dict__, Mapping):
|
||||
return value.__dict__
|
||||
with contextlib.suppress(Exception):
|
||||
converted = serialization._node_to_dict(value)
|
||||
if isinstance(converted, Mapping):
|
||||
return converted
|
||||
return None
|
||||
|
||||
|
||||
def _is_nodeish_identifier(value: Any) -> bool:
|
||||
"""Return ``True`` when ``value`` resembles a Meshtastic node identifier."""
|
||||
|
||||
if isinstance(value, (int, float)):
|
||||
return False
|
||||
if not isinstance(value, str):
|
||||
return False
|
||||
|
||||
trimmed = value.strip()
|
||||
if not trimmed:
|
||||
return False
|
||||
if trimmed.startswith("^"):
|
||||
return True
|
||||
if trimmed.startswith("!"):
|
||||
trimmed = trimmed[1:]
|
||||
elif trimmed.lower().startswith("0x"):
|
||||
trimmed = trimmed[2:]
|
||||
elif not re.search(r"[a-fA-F]", trimmed):
|
||||
# Bare decimal strings should not be treated as node ids when labelled "id".
|
||||
return False
|
||||
|
||||
return bool(re.fullmatch(r"[0-9a-fA-F]{1,8}", trimmed))
|
||||
|
||||
|
||||
def _candidate_node_id(mapping: Mapping | None) -> str | None:
    """Extract a canonical node identifier from ``mapping`` when present.

    Probes well-known identifier keys first, then nested ``user``/``decoded``/
    ``payload`` sections, then any nested mapping at all, recursing as needed.
    Returns the first canonical id found, or ``None``.
    """

    if mapping is None:
        return None

    # Keys that unambiguously carry a node identifier, in preference order.
    node_keys = (
        "fromId",
        "from_id",
        "from",
        "nodeId",
        "node_id",
        "nodeNum",
        "node_num",
        "num",
        "userId",
        "user_id",
    )

    for key in node_keys:
        with contextlib.suppress(Exception):
            node_id = serialization._canonical_node_id(mapping.get(key))
            if node_id:
                return node_id

    # A bare "id" key is ambiguous (it may be a packet id), so it is only
    # honoured when its value already looks like a node identifier.
    with contextlib.suppress(Exception):
        value = mapping.get("id")
        if _is_nodeish_identifier(value):
            node_id = serialization._canonical_node_id(value)
            if node_id:
                return node_id

    user_section = _ensure_mapping(mapping.get("user"))
    if user_section is not None:
        for key in ("userId", "user_id", "num", "nodeNum", "node_num"):
            with contextlib.suppress(Exception):
                node_id = serialization._canonical_node_id(user_section.get(key))
                if node_id:
                    return node_id
        # Same "id" ambiguity rule applies inside the user section.
        with contextlib.suppress(Exception):
            user_id_value = user_section.get("id")
            if _is_nodeish_identifier(user_id_value):
                node_id = serialization._canonical_node_id(user_id_value)
                if node_id:
                    return node_id

    # Recurse into the well-known nested payload sections.
    decoded_section = _ensure_mapping(mapping.get("decoded"))
    if decoded_section is not None:
        node_id = _candidate_node_id(decoded_section)
        if node_id:
            return node_id

    payload_section = _ensure_mapping(mapping.get("payload"))
    if payload_section is not None:
        node_id = _candidate_node_id(payload_section)
        if node_id:
            return node_id

    for key in ("packet", "meta", "info"):
        node_id = _candidate_node_id(_ensure_mapping(mapping.get(key)))
        if node_id:
            return node_id

    # Last resort: scan every value (and sequence element) for a nested
    # mapping that yields an identifier.
    for value in mapping.values():
        if isinstance(value, (list, tuple)):
            for item in value:
                node_id = _candidate_node_id(_ensure_mapping(item))
                if node_id:
                    return node_id
        else:
            node_id = _candidate_node_id(_ensure_mapping(value))
            if node_id:
                return node_id

    return None
|
||||
|
||||
|
||||
def _extract_host_node_id(iface) -> str | None:
    """Return the canonical node identifier for the connected host device.

    Inspects the interface's self-description attributes (``myInfo`` and
    variants), which may be mappings, objects, or zero-argument callables,
    and falls back to the interface's ``myNodeNum`` attribute.
    """

    if iface is None:
        return None

    def _as_mapping(candidate) -> Mapping | None:
        # Accept plain mappings/objects, or call a zero-arg accessor and
        # coerce its result; any accessor failure yields None.
        mapping = _ensure_mapping(candidate)
        if mapping is not None:
            return mapping
        if callable(candidate):
            with contextlib.suppress(Exception):
                return _ensure_mapping(candidate())
        return None

    # Gather every self-description section the interface exposes, plus any
    # nested "info" sub-section, preserving attribute preference order.
    candidates: list[Mapping] = []
    for attr in ("myInfo", "my_node_info", "myNodeInfo", "my_node", "localNode"):
        mapping = _as_mapping(getattr(iface, attr, None))
        if mapping is None:
            continue
        candidates.append(mapping)
        nested_info = _ensure_mapping(mapping.get("info"))
        if nested_info:
            candidates.append(nested_info)

    for mapping in candidates:
        node_id = _candidate_node_id(mapping)
        if node_id:
            return node_id
        # Host-specific keys not covered by the generic probe above.
        for key in ("myNodeNum", "my_node_num", "myNodeId", "my_node_id"):
            node_id = serialization._canonical_node_id(mapping.get(key))
            if node_id:
                return node_id

    # Final fallback: the interface attribute itself.
    node_id = serialization._canonical_node_id(getattr(iface, "myNodeNum", None))
    if node_id:
        return node_id

    return None
|
||||
|
||||
|
||||
def _normalise_nodeinfo_packet(packet) -> dict | None:
    """Return ``packet`` as a plain ``dict``, filling in ``id`` when derivable."""

    source = _ensure_mapping(packet)
    if source is None:
        return None

    # Copy into a real dict; fall back to key-by-key copying for mapping
    # types that dict() cannot consume directly.
    try:
        result: dict = dict(source)
    except Exception:
        try:
            result = {key: source[key] for key in source}
        except Exception:
            return None

    derived = _candidate_node_id(result)
    if derived and result.get("id") != derived:
        result["id"] = derived

    return result
|
||||
|
||||
|
||||
if TYPE_CHECKING:  # pragma: no cover - import only used for type checking
    from meshtastic.ble_interface import BLEInterface as _BLEInterface

# Resolved lazily by _load_ble_interface(); stays None until BLE is requested.
BLEInterface = None
|
||||
|
||||
|
||||
def _patch_meshtastic_nodeinfo_handler() -> None:
    """Ensure Meshtastic nodeinfo packets always include an ``id`` field.

    Wraps the module-level ``_onNodeInfoReceive`` callback and the
    ``NodeInfoHandler`` class so missing-``id`` packets cannot raise.
    Silently does nothing when the meshtastic package is unavailable.
    """

    # Prefer an already-imported meshtastic module; otherwise try to import
    # it here so the patch also applies when it was imported lazily.
    module = sys.modules.get("meshtastic", meshtastic)
    if module is None:
        with contextlib.suppress(Exception):
            module = importlib.import_module("meshtastic")
    if module is None:
        return
    # Refresh this module's global so later references see the real package.
    globals()["meshtastic"] = module

    original = getattr(module, "_onNodeInfoReceive", None)
    if not callable(original):
        return

    mesh_interface_module = getattr(module, "mesh_interface", None)
    if mesh_interface_module is None:
        with contextlib.suppress(Exception):
            mesh_interface_module = importlib.import_module("meshtastic.mesh_interface")

    # Guard against double-wrapping on repeated invocation.
    if not getattr(original, "_potato_mesh_safe_wrapper", False):
        module._onNodeInfoReceive = _build_safe_nodeinfo_callback(original)

    _patch_nodeinfo_handler_class(mesh_interface_module, module)
|
||||
|
||||
|
||||
def _build_safe_nodeinfo_callback(original):
|
||||
"""Return a wrapper that injects a missing ``id`` before dispatching."""
|
||||
|
||||
def _safe_on_node_info_receive(iface, packet): # type: ignore[override]
|
||||
normalised = _normalise_nodeinfo_packet(packet)
|
||||
if normalised is not None:
|
||||
packet = normalised
|
||||
|
||||
try:
|
||||
return original(iface, packet)
|
||||
except KeyError as exc: # pragma: no cover - defensive only
|
||||
if exc.args and exc.args[0] == "id":
|
||||
return None
|
||||
raise
|
||||
|
||||
_safe_on_node_info_receive._potato_mesh_safe_wrapper = True # type: ignore[attr-defined]
|
||||
return _safe_on_node_info_receive
|
||||
|
||||
|
||||
def _update_nodeinfo_handler_aliases(original, replacement) -> None:
|
||||
"""Ensure Meshtastic modules reference the patched ``NodeInfoHandler``."""
|
||||
|
||||
for module_name, module in list(sys.modules.items()):
|
||||
if not module_name.startswith("meshtastic"):
|
||||
continue
|
||||
existing = getattr(module, "NodeInfoHandler", None)
|
||||
if existing is original:
|
||||
setattr(module, "NodeInfoHandler", replacement)
|
||||
|
||||
|
||||
def _patch_nodeinfo_handler_class(
    mesh_interface_module, meshtastic_module=None
) -> None:
    """Wrap ``NodeInfoHandler.onReceive`` to normalise packets before callbacks.

    Replaces the handler class with a subclass that injects a missing ``id``
    and swallows the resulting ``KeyError('id')``, then updates every module
    alias that still points at the original class. Idempotent: an already
    patched class is detected via ``_potato_mesh_safe_wrapper`` and skipped.
    """

    if mesh_interface_module is None:
        return

    handler_class = getattr(mesh_interface_module, "NodeInfoHandler", None)
    if handler_class is None:
        return
    if getattr(handler_class, "_potato_mesh_safe_wrapper", False):
        return

    original_on_receive = getattr(handler_class, "onReceive", None)
    if not callable(original_on_receive):
        return

    class _SafeNodeInfoHandler(handler_class):  # type: ignore[misc]
        """Subclass that guards against missing node identifiers."""

        def onReceive(self, iface, packet):  # type: ignore[override]
            normalised = _normalise_nodeinfo_packet(packet)
            if normalised is not None:
                packet = normalised

            try:
                return super().onReceive(iface, packet)
            except KeyError as exc:  # pragma: no cover - defensive only
                if exc.args and exc.args[0] == "id":
                    return None
                raise

    # Masquerade as the original class so logging/introspection and any
    # name-based lookups keep working.
    _SafeNodeInfoHandler.__name__ = handler_class.__name__
    _SafeNodeInfoHandler.__qualname__ = getattr(
        handler_class, "__qualname__", handler_class.__name__
    )
    _SafeNodeInfoHandler.__module__ = getattr(
        handler_class, "__module__", mesh_interface_module.__name__
    )
    _SafeNodeInfoHandler.__doc__ = getattr(
        handler_class, "__doc__", _SafeNodeInfoHandler.__doc__
    )
    _SafeNodeInfoHandler._potato_mesh_safe_wrapper = True  # type: ignore[attr-defined]

    setattr(mesh_interface_module, "NodeInfoHandler", _SafeNodeInfoHandler)
    # Also rewrite the top-level package alias and any other meshtastic
    # submodule that re-exported the original class.
    if meshtastic_module is None:
        meshtastic_module = globals().get("meshtastic")
    if meshtastic_module is not None:
        existing_top = getattr(meshtastic_module, "NodeInfoHandler", None)
        if existing_top is handler_class:
            setattr(meshtastic_module, "NodeInfoHandler", _SafeNodeInfoHandler)
    _update_nodeinfo_handler_aliases(handler_class, _SafeNodeInfoHandler)
|
||||
|
||||
|
||||
_patch_meshtastic_nodeinfo_handler()
|
||||
|
||||
|
||||
try: # pragma: no cover - optional dependency may be unavailable
|
||||
from meshtastic.serial_interface import SerialInterface # type: ignore
|
||||
except Exception: # pragma: no cover - optional dependency may be unavailable
|
||||
SerialInterface = None # type: ignore[assignment]
|
||||
|
||||
try: # pragma: no cover - optional dependency may be unavailable
|
||||
from meshtastic.tcp_interface import TCPInterface # type: ignore
|
||||
except Exception: # pragma: no cover - optional dependency may be unavailable
|
||||
TCPInterface = None # type: ignore[assignment]
|
||||
|
||||
|
||||
def _patch_meshtastic_ble_receive_loop() -> None:
    """Prevent ``UnboundLocalError`` crashes in Meshtastic's BLE reader.

    Replaces ``BLEInterface._receiveFromRadioImpl`` with a near-identical
    loop that always initialises ``payload`` before the GATT read, so the
    exception paths cannot reference an unbound local. Idempotent via the
    ``_potato_mesh_safe_wrapper`` marker; a no-op when the BLE module or
    any of its helpers is unavailable.
    """

    try:
        from meshtastic import ble_interface as _ble_interface_module  # type: ignore
    except Exception:  # pragma: no cover - dependency optional in tests
        return

    ble_class = getattr(_ble_interface_module, "BLEInterface", None)
    if ble_class is None:
        return

    original = getattr(ble_class, "_receiveFromRadioImpl", None)
    if not callable(original):
        return
    if getattr(original, "_potato_mesh_safe_wrapper", False):
        return

    # Borrow the upstream module's own helpers so the replacement loop
    # behaves exactly like the original implementation.
    FROMRADIO_UUID = getattr(_ble_interface_module, "FROMRADIO_UUID", None)
    BleakDBusError = getattr(_ble_interface_module, "BleakDBusError", ())
    BleakError = getattr(_ble_interface_module, "BleakError", ())
    logger = getattr(_ble_interface_module, "logger", None)
    time = getattr(_ble_interface_module, "time", None)

    if not FROMRADIO_UUID or logger is None or time is None:
        return

    def _safe_receive_from_radio(self):  # type: ignore[override]
        while self._want_receive:
            if self.should_read:
                self.should_read = False
                retries: int = 0
                while self._want_receive:
                    if self.client is None:
                        logger.debug("BLE client is None, shutting down")
                        self._want_receive = False
                        continue

                    # The pre-assignment here is the actual fix: payload is
                    # always bound even when read_gatt_char raises.
                    payload: bytes = b""
                    try:
                        payload = bytes(self.client.read_gatt_char(FROMRADIO_UUID))
                    except BleakDBusError as exc:
                        logger.debug("Device disconnected, shutting down %s", exc)
                        self._want_receive = False
                        payload = b""
                    except BleakError as exc:
                        if "Not connected" in str(exc):
                            logger.debug("Device disconnected, shutting down %s", exc)
                            self._want_receive = False
                            payload = b""
                        else:
                            raise ble_class.BLEError("Error reading BLE") from exc

                    if not payload:
                        if not self._want_receive:
                            break
                        # Empty reads are retried briefly before giving up
                        # until the next should_read wake-up.
                        if retries < 5:
                            time.sleep(0.1)
                            retries += 1
                            continue
                        break

                    logger.debug("FROMRADIO read: %s", payload.hex())
                    self._handleFromRadio(payload)
            else:
                time.sleep(0.01)

    _safe_receive_from_radio._potato_mesh_safe_wrapper = True  # type: ignore[attr-defined]
    ble_class._receiveFromRadioImpl = _safe_receive_from_radio
|
||||
|
||||
|
||||
_patch_meshtastic_ble_receive_loop()
|
||||
|
||||
|
||||
def _has_field(message: Any, field_name: str) -> bool:
|
||||
"""Return ``True`` when ``message`` advertises ``field_name`` via ``HasField``."""
|
||||
|
||||
if message is None:
|
||||
return False
|
||||
has_field = getattr(message, "HasField", None)
|
||||
if callable(has_field):
|
||||
try:
|
||||
return bool(has_field(field_name))
|
||||
except Exception: # pragma: no cover - defensive guard
|
||||
return False
|
||||
return hasattr(message, field_name)
|
||||
|
||||
|
||||
def _enum_name_from_field(message: Any, field_name: str, value: Any) -> str | None:
|
||||
"""Return the enum name for ``value`` using ``message`` descriptors."""
|
||||
|
||||
descriptor = getattr(message, "DESCRIPTOR", None)
|
||||
if descriptor is None:
|
||||
return None
|
||||
fields_by_name = getattr(descriptor, "fields_by_name", {})
|
||||
field_desc = fields_by_name.get(field_name)
|
||||
if field_desc is None:
|
||||
return None
|
||||
enum_type = getattr(field_desc, "enum_type", None)
|
||||
if enum_type is None:
|
||||
return None
|
||||
enum_values = getattr(enum_type, "values_by_number", {})
|
||||
enum_value = enum_values.get(value)
|
||||
if enum_value is None:
|
||||
return None
|
||||
return getattr(enum_value, "name", None)
|
||||
|
||||
|
||||
def _resolve_lora_message(local_config: Any) -> Any | None:
|
||||
"""Return the LoRa configuration sub-message from ``local_config``."""
|
||||
|
||||
if local_config is None:
|
||||
return None
|
||||
if _has_field(local_config, "lora"):
|
||||
candidate = getattr(local_config, "lora", None)
|
||||
if candidate is not None:
|
||||
return candidate
|
||||
radio_section = getattr(local_config, "radio", None)
|
||||
if radio_section is not None:
|
||||
if _has_field(radio_section, "lora"):
|
||||
return getattr(radio_section, "lora", None)
|
||||
if hasattr(radio_section, "lora"):
|
||||
return getattr(radio_section, "lora")
|
||||
if hasattr(local_config, "lora"):
|
||||
return getattr(local_config, "lora")
|
||||
return None
|
||||
|
||||
|
||||
def _region_frequency(lora_message: Any) -> int | float | str | None:
|
||||
"""Derive the LoRa region frequency in MHz or the region label from ``lora_message``.
|
||||
|
||||
Numeric override values are floored to the nearest MHz to align with the
|
||||
integer frequencies expected elsewhere in the ingestion pipeline.
|
||||
"""
|
||||
|
||||
if lora_message is None:
|
||||
return None
|
||||
|
||||
override_frequency = getattr(lora_message, "override_frequency", None)
|
||||
if override_frequency is not None:
|
||||
if isinstance(override_frequency, (int, float)):
|
||||
if override_frequency > 0:
|
||||
return math.floor(override_frequency)
|
||||
elif override_frequency:
|
||||
return override_frequency
|
||||
|
||||
region_value = getattr(lora_message, "region", None)
|
||||
if region_value is None:
|
||||
return None
|
||||
enum_name = _enum_name_from_field(lora_message, "region", region_value)
|
||||
if enum_name:
|
||||
digits = re.findall(r"\d+", enum_name)
|
||||
for token in digits:
|
||||
try:
|
||||
freq = int(token)
|
||||
except ValueError: # pragma: no cover - regex guarantees digits
|
||||
continue
|
||||
if freq >= 100:
|
||||
return freq
|
||||
for token in reversed(digits):
|
||||
try:
|
||||
return int(token)
|
||||
except ValueError: # pragma: no cover - defensive only
|
||||
continue
|
||||
return enum_name
|
||||
if isinstance(region_value, int) and region_value >= 100:
|
||||
return region_value
|
||||
if isinstance(region_value, str) and region_value:
|
||||
return region_value
|
||||
return None
|
||||
|
||||
|
||||
def _camelcase_enum_name(name: str | None) -> str | None:
|
||||
"""Convert ``name`` from ``SCREAMING_SNAKE`` to ``CamelCase``."""
|
||||
|
||||
if not name:
|
||||
return None
|
||||
parts = re.split(r"[^0-9A-Za-z]+", name.strip())
|
||||
camel_parts = [part.capitalize() for part in parts if part]
|
||||
if not camel_parts:
|
||||
return None
|
||||
return "".join(camel_parts)
|
||||
|
||||
|
||||
def _modem_preset(lora_message: Any) -> str | None:
|
||||
"""Return the CamelCase modem preset configured on ``lora_message``."""
|
||||
|
||||
if lora_message is None:
|
||||
return None
|
||||
descriptor = getattr(lora_message, "DESCRIPTOR", None)
|
||||
fields_by_name = getattr(descriptor, "fields_by_name", {}) if descriptor else {}
|
||||
if "modem_preset" in fields_by_name:
|
||||
preset_field = "modem_preset"
|
||||
elif "preset" in fields_by_name:
|
||||
preset_field = "preset"
|
||||
elif hasattr(lora_message, "modem_preset"):
|
||||
preset_field = "modem_preset"
|
||||
elif hasattr(lora_message, "preset"):
|
||||
preset_field = "preset"
|
||||
else:
|
||||
return None
|
||||
|
||||
preset_value = getattr(lora_message, preset_field, None)
|
||||
if preset_value is None:
|
||||
return None
|
||||
enum_name = _enum_name_from_field(lora_message, preset_field, preset_value)
|
||||
if isinstance(enum_name, str) and enum_name:
|
||||
return _camelcase_enum_name(enum_name)
|
||||
if isinstance(preset_value, str) and preset_value:
|
||||
return _camelcase_enum_name(preset_value)
|
||||
return None
|
||||
|
||||
|
||||
def _ensure_radio_metadata(iface: Any) -> None:
    """Populate cached LoRa metadata by inspecting ``iface`` when available.

    Fills ``config.LORA_FREQ`` and ``config.MODEM_PRESET`` only when they
    are still unset, so explicitly configured values always win. Logs once
    when anything was captured.
    """

    if iface is None:
        return

    # Best-effort: wait for the radio to finish sending its config; a
    # timeout or missing method is ignored (hardware dependent).
    try:
        wait_for_config = getattr(iface, "waitForConfig", None)
        if callable(wait_for_config):
            wait_for_config()
    except Exception:  # pragma: no cover - hardware dependent guard
        pass

    local_node = getattr(iface, "localNode", None)
    local_config = getattr(local_node, "localConfig", None) if local_node else None
    lora_message = _resolve_lora_message(local_config)
    if lora_message is None:
        return

    frequency = _region_frequency(lora_message)
    preset = _modem_preset(lora_message)

    # Only fill unset config slots; never overwrite existing values.
    updated = False
    if frequency is not None and getattr(config, "LORA_FREQ", None) is None:
        config.LORA_FREQ = frequency
        updated = True
    if preset is not None and getattr(config, "MODEM_PRESET", None) is None:
        config.MODEM_PRESET = preset
        updated = True

    if updated:
        config._debug_log(
            "Captured LoRa radio metadata",
            context="interfaces.ensure_radio_metadata",
            severity="info",
            always=True,
            lora_freq=frequency,
            modem_preset=preset,
        )
|
||||
|
||||
|
||||
def _ensure_channel_metadata(iface: Any) -> None:
    """Capture channel metadata from ``iface``, logging (never raising) on failure."""

    if iface is None:
        return

    try:
        channels.capture_from_interface(iface)
    except Exception as exc:  # pragma: no cover - defensive instrumentation
        # Channel metadata is nice-to-have; a capture failure must not
        # interrupt ingestion, so it is only logged.
        config._debug_log(
            "Failed to capture channel metadata",
            context="interfaces.ensure_channel_metadata",
            severity="warn",
            error_class=type(exc).__name__,
            error_message=str(exc),
        )
|
||||
|
||||
|
||||
# Default TCP port for network-attached Meshtastic radios.
_DEFAULT_TCP_PORT = 4403
# Fallback target probed when no serial device can be opened.
_DEFAULT_TCP_TARGET = "http://127.0.0.1"

# Glob patterns for likely radio serial devices (Linux first, then macOS).
_DEFAULT_SERIAL_PATTERNS = (
    "/dev/ttyACM*",
    "/dev/ttyUSB*",
    "/dev/tty.usbmodem*",
    "/dev/tty.usbserial*",
    "/dev/cu.usbmodem*",
    "/dev/cu.usbserial*",
)

# Support both MAC addresses (Linux/Windows) and UUIDs (macOS)
_BLE_ADDRESS_RE = re.compile(
    r"^(?:"
    r"(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}|"  # MAC address format
    r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}"  # UUID format
    r")$"
)
|
||||
|
||||
|
||||
class _DummySerialInterface:
|
||||
"""In-memory replacement for ``meshtastic.serial_interface.SerialInterface``."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.nodes: dict = {}
|
||||
|
||||
def close(self) -> None: # pragma: no cover - nothing to close
|
||||
pass
|
||||
|
||||
|
||||
def _parse_ble_target(value: str) -> str | None:
|
||||
"""Return a normalized BLE address (MAC or UUID) when ``value`` matches the format.
|
||||
|
||||
Parameters:
|
||||
value: User-provided target string.
|
||||
|
||||
Returns:
|
||||
The normalised MAC address or UUID, or ``None`` when validation fails.
|
||||
"""
|
||||
|
||||
if not value:
|
||||
return None
|
||||
value = value.strip()
|
||||
if not value:
|
||||
return None
|
||||
if _BLE_ADDRESS_RE.fullmatch(value):
|
||||
return value.upper()
|
||||
return None
|
||||
|
||||
|
||||
def _parse_network_target(value: str) -> tuple[str, int] | None:
|
||||
"""Return ``(host, port)`` when ``value`` is a numeric IP address string.
|
||||
|
||||
Only literal IPv4 or IPv6 addresses are accepted, optionally paired with a
|
||||
port or scheme. Callers that start from hostnames should resolve them to an
|
||||
address before invoking this helper.
|
||||
|
||||
Parameters:
|
||||
value: Numeric IP literal or URL describing the TCP interface.
|
||||
|
||||
Returns:
|
||||
A ``(host, port)`` tuple or ``None`` when parsing fails.
|
||||
"""
|
||||
|
||||
if not value:
|
||||
return None
|
||||
|
||||
value = value.strip()
|
||||
if not value:
|
||||
return None
|
||||
|
||||
def _validated_result(host: str | None, port: int | None) -> tuple[str, int] | None:
|
||||
if not host:
|
||||
return None
|
||||
try:
|
||||
ipaddress.ip_address(host)
|
||||
except ValueError:
|
||||
return None
|
||||
return host, port or _DEFAULT_TCP_PORT
|
||||
|
||||
parsed_values = []
|
||||
if "://" in value:
|
||||
parsed_values.append(urllib.parse.urlparse(value, scheme="tcp"))
|
||||
parsed_values.append(urllib.parse.urlparse(f"//{value}", scheme="tcp"))
|
||||
|
||||
for parsed in parsed_values:
|
||||
try:
|
||||
port = parsed.port
|
||||
except ValueError:
|
||||
port = None
|
||||
result = _validated_result(parsed.hostname, port)
|
||||
if result:
|
||||
return result
|
||||
|
||||
if value.count(":") == 1 and not value.startswith("["):
|
||||
host, _, port_text = value.partition(":")
|
||||
try:
|
||||
port = int(port_text) if port_text else None
|
||||
except ValueError:
|
||||
port = None
|
||||
result = _validated_result(host, port)
|
||||
if result:
|
||||
return result
|
||||
|
||||
return _validated_result(value, None)
|
||||
|
||||
|
||||
def _load_ble_interface():
    """Return :class:`meshtastic.ble_interface.BLEInterface` when available.

    Resolves the class lazily, caches it in the module-global
    ``BLEInterface``, and mirrors it onto already-imported mesh modules so
    their stale ``None`` aliases are refreshed.

    Returns:
        The resolved BLE interface class.

    Raises:
        RuntimeError: If the BLE dependencies are not installed.
    """

    global BLEInterface
    # Cached from a previous call — resolve only once per process.
    if BLEInterface is not None:
        return BLEInterface

    try:
        from meshtastic.ble_interface import BLEInterface as _resolved_interface
    except ImportError as exc:  # pragma: no cover - exercised in non-BLE envs
        raise RuntimeError(
            "BLE interface requested but the Meshtastic BLE dependencies are not installed. "
            "Install the 'meshtastic[ble]' extra to enable BLE support."
        ) from exc
    BLEInterface = _resolved_interface
    # Best-effort: update sibling modules that imported the old None alias.
    try:
        import sys

        for module_name in ("data.mesh_ingestor", "data.mesh"):
            mesh_module = sys.modules.get(module_name)
            if mesh_module is not None:
                setattr(mesh_module, "BLEInterface", BLEInterface)
    except Exception:  # pragma: no cover - defensive only
        pass
    return _resolved_interface
|
||||
|
||||
|
||||
def _create_serial_interface(port: str) -> tuple[object, str]:
    """Return an appropriate mesh interface for ``port``.

    Dispatches on the shape of ``port``: a disable sentinel yields the dummy
    interface, a BLE address yields a BLE interface, an IP literal yields a
    TCP interface, and anything else is treated as a serial device path.

    Parameters:
        port: User-supplied port string which may represent serial, BLE or TCP.

    Returns:
        ``(interface, resolved_target)`` describing the created interface.
    """

    port_value = (port or "").strip()

    if port_value.lower() in {"", "mock", "none", "null", "disabled"}:
        config._debug_log(
            "Using dummy serial interface",
            context="interfaces.serial",
            port=port_value,
        )
        return _DummySerialInterface(), "mock"

    ble_target = _parse_ble_target(port_value)
    if ble_target:
        # Determine if it's a MAC address or UUID
        address_type = "MAC" if ":" in ble_target else "UUID"
        config._debug_log(
            "Using BLE interface",
            context="interfaces.ble",
            address=ble_target,
            address_type=address_type,
        )
        return _load_ble_interface()(address=ble_target), ble_target

    network_target = _parse_network_target(port_value)
    if network_target:
        host, tcp_port = network_target
        config._debug_log(
            "Using TCP interface",
            context="interfaces.tcp",
            host=host,
            port=tcp_port,
        )
        return (
            TCPInterface(hostname=host, portNumber=tcp_port),
            f"tcp://{host}:{tcp_port}",
        )

    # Default: treat the target as a serial device path.
    config._debug_log(
        "Using serial interface",
        context="interfaces.serial",
        port=port_value,
    )
    return SerialInterface(devPath=port_value), port_value
|
||||
|
||||
|
||||
class NoAvailableMeshInterface(RuntimeError):
    """Signals that auto-discovery could not create any default mesh interface."""
|
||||
|
||||
|
||||
def _default_serial_targets() -> list[str]:
    """Return deduplicated candidate serial device paths for auto-discovery."""

    ordered: list[str] = []
    known: set[str] = set()
    for pattern in _DEFAULT_SERIAL_PATTERNS:
        # Sort each pattern's matches so probing order is deterministic.
        for device in sorted(glob.glob(pattern)):
            if device in known:
                continue
            known.add(device)
            ordered.append(device)
    # Always probe the common Linux default, even when globbing found nothing.
    if "/dev/ttyACM0" not in known:
        ordered.append("/dev/ttyACM0")
    return ordered
|
||||
|
||||
|
||||
def _create_default_interface() -> tuple[object, str]:
    """Attempt to create the default mesh interface, raising on failure.

    Probes every candidate serial device, then the local TCP fallback,
    accumulating failures so the final error lists everything that was tried.

    Returns:
        ``(interface, resolved_target)`` for the discovered connection.

    Raises:
        NoAvailableMeshInterface: When no usable connection can be created.
    """

    errors: list[tuple[str, Exception]] = []
    for candidate in _default_serial_targets():
        try:
            return _create_serial_interface(candidate)
        except Exception as exc:  # pragma: no cover - hardware dependent
            errors.append((candidate, exc))
            config._debug_log(
                "Failed to open serial candidate",
                context="interfaces.auto_discovery",
                target=candidate,
                error_class=exc.__class__.__name__,
                error_message=str(exc),
            )
    # No serial device worked — fall back to a local TCP radio.
    try:
        return _create_serial_interface(_DEFAULT_TCP_TARGET)
    except Exception as exc:  # pragma: no cover - network dependent
        errors.append((_DEFAULT_TCP_TARGET, exc))
        config._debug_log(
            "Failed to open TCP fallback",
            context="interfaces.auto_discovery",
            target=_DEFAULT_TCP_TARGET,
            error_class=exc.__class__.__name__,
            error_message=str(exc),
        )
    # Surface every attempted target in the raised error, chaining the
    # most recent failure as the cause.
    if errors:
        summary = "; ".join(f"{target}: {error}" for target, error in errors)
        raise NoAvailableMeshInterface(
            f"no mesh interface available ({summary})"
        ) from errors[-1][1]
    raise NoAvailableMeshInterface("no mesh interface available")
|
||||
|
||||
|
||||
# Names re-exported by this module (underscore-prefixed helpers included
# deliberately, so siblings can import them by name).
__all__ = [
    "BLEInterface",
    "NoAvailableMeshInterface",
    "_ensure_channel_metadata",
    "_ensure_radio_metadata",
    "_extract_host_node_id",
    "_DummySerialInterface",
    "_DEFAULT_TCP_PORT",
    "_DEFAULT_TCP_TARGET",
    "_create_default_interface",
    "_create_serial_interface",
    "_default_serial_targets",
    "_load_ble_interface",
    "_parse_ble_target",
    "_parse_network_target",
    "SerialInterface",
    "TCPInterface",
]
|
||||
@@ -1,108 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Mesh interface discovery helpers for interacting with Meshtastic hardware."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
# The patches subpackage applies meshtastic monkey-patches at import time so
|
||||
# subsequent calls (and any direct ``import meshtastic`` from elsewhere)
|
||||
# inherit the safe wrappers. Apply BEFORE pulling in factory.py because
|
||||
# factory.py imports ``meshtastic.serial_interface`` / ``meshtastic.tcp_interface``
|
||||
# and those modules transitively load NodeInfoHandler.
|
||||
from .patches import (
|
||||
_build_safe_nodeinfo_callback,
|
||||
_patch_meshtastic_ble_receive_loop,
|
||||
_patch_meshtastic_nodeinfo_handler,
|
||||
_patch_nodeinfo_handler_class,
|
||||
_update_nodeinfo_handler_aliases,
|
||||
apply_all as _apply_all_patches,
|
||||
)
|
||||
|
||||
_apply_all_patches()
|
||||
|
||||
from ._aliases import ( # noqa: E402 - keep grouped with sibling re-exports.
|
||||
_BLE_ADDRESS_RE,
|
||||
_DEFAULT_SERIAL_PATTERNS,
|
||||
_DEFAULT_TCP_PORT,
|
||||
_default_serial_targets,
|
||||
_parse_ble_target,
|
||||
)
|
||||
from .channels_meta import _ensure_channel_metadata # noqa: E402
|
||||
from .factory import ( # noqa: E402
|
||||
NoAvailableMeshInterface,
|
||||
_DummySerialInterface,
|
||||
_create_default_interface,
|
||||
_create_serial_interface,
|
||||
_load_ble_interface,
|
||||
)
|
||||
|
||||
# Resolve the meshtastic interface classes at package-load time so that
|
||||
# repeated imports (e.g. tests that pop ``data.mesh_ingestor.interfaces`` from
|
||||
# ``sys.modules`` and re-import after swapping ``meshtastic.*`` submodules)
|
||||
# pick up the freshly registered classes rather than whatever a cached
|
||||
# ``factory.py`` first resolved. ``factory.py`` no longer keeps duplicate
|
||||
# module-level globals; lookups go through the package surface only.
|
||||
BLEInterface = None
|
||||
"""Resolved on demand by :func:`_load_ble_interface` to keep BLE optional."""
|
||||
|
||||
try: # pragma: no cover - optional dependency may be unavailable
|
||||
from meshtastic.serial_interface import (
|
||||
SerialInterface,
|
||||
) # noqa: E402 # type: ignore
|
||||
except Exception: # pragma: no cover - optional dependency may be unavailable
|
||||
SerialInterface = None # type: ignore[assignment]
|
||||
|
||||
try: # pragma: no cover - optional dependency may be unavailable
|
||||
from meshtastic.tcp_interface import TCPInterface # noqa: E402 # type: ignore
|
||||
except Exception: # pragma: no cover - optional dependency may be unavailable
|
||||
TCPInterface = None # type: ignore[assignment]
|
||||
from .identity import ( # noqa: E402
|
||||
_candidate_node_id,
|
||||
_ensure_mapping,
|
||||
_extract_host_node_id,
|
||||
_is_nodeish_identifier,
|
||||
)
|
||||
from .nodeinfo_normalize import _normalise_nodeinfo_packet # noqa: E402
|
||||
from .radio import ( # noqa: E402
|
||||
_REGION_CHANNEL_PARAMS,
|
||||
_camelcase_enum_name,
|
||||
_computed_channel_frequency,
|
||||
_ensure_radio_metadata,
|
||||
_enum_name_from_field,
|
||||
_has_field,
|
||||
_modem_preset,
|
||||
_region_frequency,
|
||||
_resolve_lora_message,
|
||||
)
|
||||
from .targets import _DEFAULT_TCP_TARGET, _parse_network_target # noqa: E402
|
||||
|
||||
__all__ = [
|
||||
"BLEInterface",
|
||||
"NoAvailableMeshInterface",
|
||||
"_ensure_channel_metadata",
|
||||
"_ensure_radio_metadata",
|
||||
"_extract_host_node_id",
|
||||
"_DummySerialInterface",
|
||||
"_DEFAULT_TCP_PORT",
|
||||
"_DEFAULT_TCP_TARGET",
|
||||
"_create_default_interface",
|
||||
"_create_serial_interface",
|
||||
"_default_serial_targets",
|
||||
"_load_ble_interface",
|
||||
"_parse_ble_target",
|
||||
"_parse_network_target",
|
||||
"SerialInterface",
|
||||
"TCPInterface",
|
||||
]
|
||||
@@ -1,33 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Backward-compat aliases for renames hidden behind the package barrel."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from ..connection import (
|
||||
BLE_ADDRESS_RE,
|
||||
DEFAULT_SERIAL_PATTERNS,
|
||||
DEFAULT_TCP_PORT,
|
||||
default_serial_targets,
|
||||
parse_ble_target,
|
||||
)
|
||||
|
||||
# Private aliases so that existing internal callers and monkeypatching in
|
||||
# tests keep working without modification.
|
||||
_BLE_ADDRESS_RE = BLE_ADDRESS_RE
|
||||
_DEFAULT_TCP_PORT = DEFAULT_TCP_PORT
|
||||
_DEFAULT_SERIAL_PATTERNS = DEFAULT_SERIAL_PATTERNS
|
||||
_parse_ble_target = parse_ble_target
|
||||
_default_serial_targets = default_serial_targets
|
||||
@@ -1,39 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""One-shot channel metadata capture from a live Meshtastic interface."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from .. import channels, config
|
||||
|
||||
|
||||
def _ensure_channel_metadata(iface: Any) -> None:
    """Capture channel metadata by inspecting ``iface`` once per runtime.

    Parameters:
        iface: Live Meshtastic interface to inspect, or ``None`` to skip
            capture entirely.
    """

    if iface is None:
        return

    try:
        channels.capture_from_interface(iface)
    except Exception as exc:  # pragma: no cover - defensive instrumentation
        # Capture is best-effort: a failure is logged as a warning but must
        # never interrupt packet ingestion.
        config._debug_log(
            "Failed to capture channel metadata",
            context="interfaces.ensure_channel_metadata",
            severity="warn",
            error_class=exc.__class__.__name__,
            error_message=str(exc),
        )
|
||||
@@ -1,191 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Build Meshtastic interface objects from caller-supplied target strings."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from .. import config
|
||||
from ..connection import parse_ble_target
|
||||
from .targets import _DEFAULT_TCP_TARGET, _parse_network_target
|
||||
|
||||
if TYPE_CHECKING: # pragma: no cover - import only used for type checking
|
||||
from meshtastic.ble_interface import BLEInterface as _BLEInterface
|
||||
|
||||
|
||||
# All cached interface classes live on the parent package
|
||||
# (``data.mesh_ingestor.interfaces``). Tests set them via
|
||||
# ``monkeypatch.setattr(mesh, "BLEInterface", ...)`` and the package proxy
|
||||
# routes those writes through to ``interfaces``; keeping a duplicate global on
|
||||
# this submodule would cache the wrong value across tests because
|
||||
# ``monkeypatch`` only restores attributes it set. The ``__init__.py``
|
||||
# re-resolves ``SerialInterface``/``TCPInterface`` from ``meshtastic.*`` at
|
||||
# package-load time and assigns them to package-level attributes.
|
||||
|
||||
|
||||
class _DummySerialInterface:
|
||||
"""In-memory replacement for ``meshtastic.serial_interface.SerialInterface``."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.nodes: dict = {}
|
||||
|
||||
def close(self) -> None: # pragma: no cover - nothing to close
|
||||
"""No-op: the dummy interface holds no resources to release."""
|
||||
pass
|
||||
|
||||
|
||||
class NoAvailableMeshInterface(RuntimeError):
    """Raised when auto-discovery cannot produce any usable mesh interface."""
|
||||
|
||||
|
||||
def _load_ble_interface():
|
||||
"""Return :class:`meshtastic.ble_interface.BLEInterface` when available.
|
||||
|
||||
Returns:
|
||||
The resolved BLE interface class.
|
||||
|
||||
Raises:
|
||||
RuntimeError: If the BLE dependencies are not installed.
|
||||
"""
|
||||
|
||||
pkg = sys.modules.get("data.mesh_ingestor.interfaces")
|
||||
pkg_ble = getattr(pkg, "BLEInterface", None) if pkg is not None else None
|
||||
if pkg_ble is not None:
|
||||
return pkg_ble
|
||||
|
||||
try:
|
||||
from meshtastic.ble_interface import BLEInterface as _resolved_interface
|
||||
except ImportError as exc: # pragma: no cover - exercised in non-BLE envs
|
||||
raise RuntimeError(
|
||||
"BLE interface requested but the Meshtastic BLE dependencies are not installed. "
|
||||
"Install the 'meshtastic[ble]' extra to enable BLE support."
|
||||
) from exc
|
||||
if pkg is not None:
|
||||
setattr(pkg, "BLEInterface", _resolved_interface)
|
||||
for module_name in ("data.mesh_ingestor", "data.mesh"):
|
||||
mesh_module = sys.modules.get(module_name)
|
||||
if mesh_module is not None:
|
||||
setattr(mesh_module, "BLEInterface", _resolved_interface)
|
||||
return _resolved_interface
|
||||
|
||||
|
||||
def _create_serial_interface(port: str) -> tuple[object, str]:
    """Return an appropriate mesh interface for ``port``.

    Dispatches in priority order: dummy sentinel values, BLE addresses,
    TCP network targets, and finally plain serial device paths.

    Parameters:
        port: User-supplied port string which may represent serial, BLE or TCP.

    Returns:
        ``(interface, resolved_target)`` describing the created interface.
    """

    # Interface classes are resolved through the package surface so that test
    # monkeypatches installed there are honoured at call time.
    pkg = sys.modules["data.mesh_ingestor.interfaces"]

    target = (port or "").strip()

    # Sentinel values select the in-memory dummy interface.
    if target.lower() in {"", "mock", "none", "null", "disabled"}:
        config._debug_log(
            "Using dummy serial interface",
            context="interfaces.serial",
            port=target,
        )
        return _DummySerialInterface(), "mock"

    ble_address = parse_ble_target(target)
    if ble_address:
        # Determine if it's a MAC address or UUID
        address_kind = "MAC" if ":" in ble_address else "UUID"
        config._debug_log(
            "Using BLE interface",
            context="interfaces.ble",
            address=ble_address,
            address_type=address_kind,
        )
        return _load_ble_interface()(address=ble_address), ble_address

    tcp_target = _parse_network_target(target)
    if tcp_target:
        host, tcp_port = tcp_target
        config._debug_log(
            "Using TCP interface",
            context="interfaces.tcp",
            host=host,
            port=tcp_port,
        )
        # Resolve via the package so test fakes installed via ``sys.modules``
        # patches at ``meshtastic.tcp_interface`` propagate when interfaces
        # was imported earlier.
        tcp_cls = getattr(pkg, "TCPInterface", None)
        return (
            tcp_cls(hostname=host, portNumber=tcp_port),
            f"tcp://{host}:{tcp_port}",
        )

    # Anything else is treated as a serial device path.
    config._debug_log(
        "Using serial interface",
        context="interfaces.serial",
        port=target,
    )
    serial_cls = getattr(pkg, "SerialInterface", None)
    return serial_cls(devPath=target), target
|
||||
|
||||
|
||||
def _create_default_interface() -> tuple[object, str]:
    """Attempt to create the default mesh interface, raising on failure.

    Tries every auto-discovered serial candidate first, then the TCP
    fallback target, collecting each failure for the final error summary.

    Returns:
        ``(interface, resolved_target)`` for the discovered connection.

    Raises:
        NoAvailableMeshInterface: When no usable connection can be created.
    """

    # Resolve helpers via the package surface so that monkeypatches against
    # the backward-compat aliases (``mesh._default_serial_targets``,
    # ``mesh._create_serial_interface``) propagate at call time.
    pkg = sys.modules["data.mesh_ingestor.interfaces"]
    discover_targets = pkg._default_serial_targets
    open_target = pkg._create_serial_interface

    failures: list[tuple[str, Exception]] = []

    def _record_failure(target: str, exc: Exception, message: str) -> None:
        # Remember the failure for the summary raised below and emit a
        # structured debug entry for operators.
        failures.append((target, exc))
        config._debug_log(
            message,
            context="interfaces.auto_discovery",
            target=target,
            error_class=exc.__class__.__name__,
            error_message=str(exc),
        )

    for device in discover_targets():
        try:
            return open_target(device)
        except Exception as exc:  # pragma: no cover - hardware dependent
            _record_failure(device, exc, "Failed to open serial candidate")

    try:
        return open_target(_DEFAULT_TCP_TARGET)
    except Exception as exc:  # pragma: no cover - network dependent
        _record_failure(_DEFAULT_TCP_TARGET, exc, "Failed to open TCP fallback")

    if failures:
        summary = "; ".join(f"{target}: {error}" for target, error in failures)
        raise NoAvailableMeshInterface(
            f"no mesh interface available ({summary})"
        ) from failures[-1][1]
    raise NoAvailableMeshInterface(  # pragma: no cover - defensive only
        "no mesh interface available"
    )
|
||||
@@ -1,194 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Mapping/identifier helpers for Meshtastic interface objects."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import re
|
||||
from collections.abc import Mapping
|
||||
from typing import Any
|
||||
|
||||
from .. import serialization
|
||||
|
||||
|
||||
def _ensure_mapping(value) -> Mapping | None:
|
||||
"""Return ``value`` as a mapping when conversion is possible."""
|
||||
|
||||
if isinstance(value, Mapping):
|
||||
return value
|
||||
if hasattr(value, "__dict__") and isinstance(value.__dict__, Mapping):
|
||||
return value.__dict__
|
||||
with contextlib.suppress(Exception):
|
||||
converted = serialization._node_to_dict(value)
|
||||
if isinstance(converted, Mapping):
|
||||
return converted
|
||||
return None
|
||||
|
||||
|
||||
def _is_nodeish_identifier(value: Any) -> bool:
|
||||
"""Return ``True`` when ``value`` resembles a Meshtastic node identifier."""
|
||||
|
||||
if isinstance(value, (int, float)):
|
||||
return False
|
||||
if not isinstance(value, str):
|
||||
return False
|
||||
|
||||
trimmed = value.strip()
|
||||
if not trimmed:
|
||||
return False
|
||||
if trimmed.startswith("^"):
|
||||
return True
|
||||
if trimmed.startswith("!"):
|
||||
trimmed = trimmed[1:]
|
||||
elif trimmed.lower().startswith("0x"):
|
||||
trimmed = trimmed[2:]
|
||||
elif not re.search(r"[a-fA-F]", trimmed):
|
||||
# Bare decimal strings should not be treated as node ids when labelled "id".
|
||||
return False
|
||||
|
||||
return bool(re.fullmatch(r"[0-9a-fA-F]{1,8}", trimmed))
|
||||
|
||||
|
||||
def _candidate_node_id(mapping: Mapping | None) -> str | None:
    """Extract a canonical node identifier from ``mapping`` when present.

    Searches direct identifier keys first, then the generic ``id`` key, then
    nested ``user``/``decoded``/``payload``/``packet``/``meta``/``info``
    sections, and finally recurses into every remaining value. The first
    canonical id found wins.

    Parameters:
        mapping: Mapping-like packet section, or ``None``.

    Returns:
        A canonical node identifier string, or ``None`` when nothing matches.
    """

    if mapping is None:
        return None

    # Keys checked in priority order; earlier keys are more authoritative.
    node_keys = (
        "fromId",
        "from_id",
        "from",
        "nodeId",
        "node_id",
        "nodeNum",
        "node_num",
        "num",
        "userId",
        "user_id",
    )

    for key in node_keys:
        # suppress(Exception) guards against exotic mapping-likes whose
        # ``get`` raises, and against canonicalisation failures.
        with contextlib.suppress(Exception):
            node_id = serialization._canonical_node_id(mapping.get(key))
            if node_id:
                return node_id

    # The generic "id" key is ambiguous (it may be a packet id), so it is
    # only honoured when the value itself looks like a node identifier.
    with contextlib.suppress(Exception):
        value = mapping.get("id")
        if _is_nodeish_identifier(value):
            node_id = serialization._canonical_node_id(value)
            if node_id:
                return node_id

    # A nested "user" section carries its own identifier fields.
    user_section = _ensure_mapping(mapping.get("user"))
    if user_section is not None:
        for key in ("userId", "user_id", "num", "nodeNum", "node_num"):
            with contextlib.suppress(Exception):
                node_id = serialization._canonical_node_id(user_section.get(key))
                if node_id:
                    return node_id
        with contextlib.suppress(Exception):
            user_id_value = user_section.get("id")
            if _is_nodeish_identifier(user_id_value):
                node_id = serialization._canonical_node_id(user_id_value)
                if node_id:
                    return node_id

    # Recurse into well-known nested packet sections.
    decoded_section = _ensure_mapping(mapping.get("decoded"))
    if decoded_section is not None:
        node_id = _candidate_node_id(decoded_section)
        if node_id:
            return node_id

    payload_section = _ensure_mapping(mapping.get("payload"))
    if payload_section is not None:
        node_id = _candidate_node_id(payload_section)
        if node_id:
            return node_id

    for key in ("packet", "meta", "info"):
        node_id = _candidate_node_id(_ensure_mapping(mapping.get(key)))
        if node_id:
            return node_id

    # Last resort: recurse into every remaining value (and into sequence
    # elements), accepting the first identifier found anywhere.
    for value in mapping.values():
        if isinstance(value, (list, tuple)):
            for item in value:
                node_id = _candidate_node_id(_ensure_mapping(item))
                if node_id:
                    return node_id
        else:
            node_id = _candidate_node_id(_ensure_mapping(value))
            if node_id:
                return node_id

    return None
|
||||
|
||||
|
||||
def _extract_host_node_id(iface) -> str | None:
    """Return the canonical node identifier for the connected host device.

    Searches a sequence of well-known attribute names (``myInfo``,
    ``my_node_info``, etc.) on ``iface`` for a mapping that contains a
    recognisable node identifier, then falls back to the raw ``myNodeNum``
    integer attribute.

    Parameters:
        iface: Live Meshtastic interface object, or any object that exposes
            node-identity attributes in one of the expected forms.

    Returns:
        A canonical ``!xxxxxxxx`` node identifier, or ``None`` when no
        identifiable host node information is available.
    """

    if iface is None:
        return None

    def _as_mapping(candidate) -> Mapping | None:
        # Accept either a mapping-like value directly, or a zero-argument
        # callable (some interfaces expose accessor methods) whose result is
        # mapping-like; any error from the call is swallowed.
        mapping = _ensure_mapping(candidate)
        if mapping is not None:
            return mapping
        if callable(candidate):
            with contextlib.suppress(Exception):
                return _ensure_mapping(candidate())
        return None

    # Gather every identity-bearing section, including the nested "info"
    # sub-mapping some interface variants use.
    candidates: list[Mapping] = []
    for attr in ("myInfo", "my_node_info", "myNodeInfo", "my_node", "localNode"):
        mapping = _as_mapping(getattr(iface, attr, None))
        if mapping is None:
            continue
        candidates.append(mapping)
        nested_info = _ensure_mapping(mapping.get("info"))
        if nested_info:
            candidates.append(nested_info)

    for mapping in candidates:
        # Generic extraction first, then interface-specific "my node" keys.
        node_id = _candidate_node_id(mapping)
        if node_id:
            return node_id
        for key in ("myNodeNum", "my_node_num", "myNodeId", "my_node_id"):
            node_id = serialization._canonical_node_id(mapping.get(key))
            if node_id:
                return node_id

    # Final fallback: the bare integer attribute on the interface itself.
    node_id = serialization._canonical_node_id(getattr(iface, "myNodeNum", None))
    if node_id:
        return node_id

    return None
|
||||
@@ -1,41 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Inject a canonical ``id`` into Meshtastic nodeinfo packets when missing."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from .identity import _candidate_node_id, _ensure_mapping
|
||||
|
||||
|
||||
def _normalise_nodeinfo_packet(packet) -> dict | None:
    """Return ``packet`` as a plain ``dict`` with a guaranteed ``id`` when known.

    Returns ``None`` when ``packet`` cannot be viewed as a mapping or copied.
    """

    source = _ensure_mapping(packet)
    if source is None:
        return None

    # ``dict()`` handles ordinary mappings; fall back to manual key iteration
    # for mapping-likes whose constructor path raises.
    try:
        result: dict = dict(source)
    except Exception:
        try:
            result = {key: source[key] for key in source}
        except Exception:  # pragma: no cover - both copy strategies failed
            return None

    # Inject the inferred canonical id only when it is missing or stale.
    inferred = _candidate_node_id(result)
    if inferred and result.get("id") != inferred:
        result["id"] = inferred

    return result
|
||||
@@ -1,41 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Runtime monkey-patches applied to the upstream ``meshtastic`` library."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from .ble_receive import _patch_meshtastic_ble_receive_loop
|
||||
from .nodeinfo import (
|
||||
_build_safe_nodeinfo_callback,
|
||||
_patch_meshtastic_nodeinfo_handler,
|
||||
_patch_nodeinfo_handler_class,
|
||||
_update_nodeinfo_handler_aliases,
|
||||
)
|
||||
|
||||
|
||||
def apply_all() -> None:
    """Apply every meshtastic monkey-patch in the order required for safety."""
    # Harden the nodeinfo handler first so modules loaded by the later
    # patches already see the safe handler class.
    _patch_meshtastic_nodeinfo_handler()
    _patch_meshtastic_ble_receive_loop()
|
||||
|
||||
|
||||
__all__ = [
|
||||
"apply_all",
|
||||
"_build_safe_nodeinfo_callback",
|
||||
"_patch_meshtastic_ble_receive_loop",
|
||||
"_patch_meshtastic_nodeinfo_handler",
|
||||
"_patch_nodeinfo_handler_class",
|
||||
"_update_nodeinfo_handler_aliases",
|
||||
]
|
||||
@@ -1,93 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Patch the upstream Meshtastic BLE receive loop to avoid ``UnboundLocalError``."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
|
||||
def _patch_meshtastic_ble_receive_loop() -> None:
    """Prevent ``UnboundLocalError`` crashes in Meshtastic's BLE reader.

    Replaces ``BLEInterface._receiveFromRadioImpl`` with a wrapper that always
    initialises ``payload`` before use and shuts the loop down cleanly on
    disconnect errors. The patch is a no-op when meshtastic (or its BLE
    support) is unavailable, and is idempotent via a sentinel attribute.
    """

    try:
        from meshtastic import ble_interface as _ble_interface_module  # type: ignore
    except Exception:  # pragma: no cover - dependency optional in tests
        return

    ble_class = getattr(_ble_interface_module, "BLEInterface", None)
    if ble_class is None:  # pragma: no cover - exercised only without BLE class
        return

    original = getattr(ble_class, "_receiveFromRadioImpl", None)
    if not callable(original):  # pragma: no cover - upstream API regression guard
        return
    # Sentinel set below marks an already-patched method; never wrap twice.
    if getattr(original, "_potato_mesh_safe_wrapper", False):
        return

    # Capture the upstream module's own globals so the wrapper behaves exactly
    # like the original implementation it replaces.
    FROMRADIO_UUID = getattr(_ble_interface_module, "FROMRADIO_UUID", None)
    BleakDBusError = getattr(_ble_interface_module, "BleakDBusError", ())
    BleakError = getattr(_ble_interface_module, "BleakError", ())
    logger = getattr(_ble_interface_module, "logger", None)
    time = getattr(_ble_interface_module, "time", None)

    if (  # pragma: no cover - upstream API regression guard
        not FROMRADIO_UUID or logger is None or time is None
    ):
        return

    # The receive loop runs on a dedicated thread and only completes against a
    # live BLE adapter; the body is hardware-dependent and not unit-testable.
    def _safe_receive_from_radio(self):  # pragma: no cover - hardware dependent
        # type: ignore[override]
        while self._want_receive:
            if self.should_read:
                self.should_read = False
                retries: int = 0
                while self._want_receive:
                    if self.client is None:
                        logger.debug("BLE client is None, shutting down")
                        self._want_receive = False
                        continue

                    # ``payload`` is pre-initialised so the post-read checks
                    # below can never hit an unbound local.
                    payload: bytes = b""
                    try:
                        payload = bytes(self.client.read_gatt_char(FROMRADIO_UUID))
                    except BleakDBusError as exc:
                        logger.debug("Device disconnected, shutting down %s", exc)
                        self._want_receive = False
                        payload = b""
                    except BleakError as exc:
                        if "Not connected" in str(exc):
                            logger.debug("Device disconnected, shutting down %s", exc)
                            self._want_receive = False
                            payload = b""
                        else:
                            raise ble_class.BLEError("Error reading BLE") from exc

                    if not payload:
                        if not self._want_receive:
                            break
                        # Empty read: retry briefly before assuming the radio
                        # has drained its queue.
                        if retries < 5:
                            time.sleep(0.1)
                            retries += 1
                            continue
                        break

                    logger.debug("FROMRADIO read: %s", payload.hex())
                    self._handleFromRadio(payload)
            else:
                time.sleep(0.01)

    _safe_receive_from_radio._potato_mesh_safe_wrapper = True  # type: ignore[attr-defined]
    ble_class._receiveFromRadioImpl = _safe_receive_from_radio
|
||||
@@ -1,164 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Runtime patches that harden Meshtastic's nodeinfo handler against missing ``id`` fields."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import importlib
|
||||
import sys
|
||||
|
||||
try: # pragma: no cover - dependency optional in tests
|
||||
import meshtastic # type: ignore
|
||||
except Exception: # pragma: no cover - dependency optional in tests
|
||||
meshtastic = None # type: ignore[assignment]
|
||||
|
||||
from ..nodeinfo_normalize import _normalise_nodeinfo_packet
|
||||
|
||||
|
||||
def _patch_meshtastic_nodeinfo_handler() -> None:
    """Ensure Meshtastic nodeinfo packets always include an ``id`` field.

    Wraps the module-level ``meshtastic._onNodeInfoReceive`` callback and the
    ``NodeInfoHandler`` class so packets are normalised before dispatch.
    No-op when meshtastic cannot be imported.
    """

    # Prefer whatever is currently registered in sys.modules so reloaded or
    # monkeypatched meshtastic modules are patched, not a stale import.
    module = sys.modules.get("meshtastic", meshtastic)
    if module is None:  # pragma: no cover - re-import fallback for cold caches
        with contextlib.suppress(Exception):
            module = importlib.import_module("meshtastic")
    if module is None:  # pragma: no cover - exercised only without meshtastic
        return
    # Refresh this module's cached reference for subsequent calls.
    globals()["meshtastic"] = module

    original = getattr(module, "_onNodeInfoReceive", None)
    if not callable(original):  # pragma: no cover - upstream API regression guard
        return

    mesh_interface_module = getattr(module, "mesh_interface", None)
    if mesh_interface_module is None:
        with contextlib.suppress(Exception):
            mesh_interface_module = importlib.import_module("meshtastic.mesh_interface")

    # Replace the module-level handler only once; the sentinel attribute prevents
    # re-wrapping if _patch_meshtastic_nodeinfo_handler() is called again after
    # the interface module is reloaded or re-imported.
    if not getattr(original, "_potato_mesh_safe_wrapper", False):
        module._onNodeInfoReceive = _build_safe_nodeinfo_callback(original)

    _patch_nodeinfo_handler_class(mesh_interface_module, module)
|
||||
|
||||
|
||||
def _build_safe_nodeinfo_callback(original):
|
||||
"""Return a wrapper that injects a missing ``id`` before dispatching."""
|
||||
|
||||
def _safe_on_node_info_receive(iface, packet): # type: ignore[override]
|
||||
normalised = _normalise_nodeinfo_packet(packet)
|
||||
if normalised is not None:
|
||||
packet = normalised
|
||||
|
||||
try:
|
||||
return original(iface, packet)
|
||||
except KeyError as exc: # pragma: no cover - defensive only
|
||||
if exc.args and exc.args[0] == "id":
|
||||
return None
|
||||
raise
|
||||
|
||||
_safe_on_node_info_receive._potato_mesh_safe_wrapper = True # type: ignore[attr-defined]
|
||||
return _safe_on_node_info_receive
|
||||
|
||||
|
||||
def _update_nodeinfo_handler_aliases(original, replacement) -> None:
|
||||
"""Ensure Meshtastic modules reference the patched ``NodeInfoHandler``."""
|
||||
|
||||
for module_name, module in list(sys.modules.items()):
|
||||
if not module_name.startswith("meshtastic"):
|
||||
continue
|
||||
existing = getattr(module, "NodeInfoHandler", None)
|
||||
if existing is original:
|
||||
setattr(module, "NodeInfoHandler", replacement)
|
||||
|
||||
|
||||
def _patch_nodeinfo_handler_class(
|
||||
mesh_interface_module, meshtastic_module=None
|
||||
) -> None:
|
||||
"""Wrap ``NodeInfoHandler.onReceive`` to normalise packets before callbacks."""
|
||||
|
||||
if (
|
||||
mesh_interface_module is None
|
||||
): # pragma: no cover - exercised only without meshtastic
|
||||
return
|
||||
|
||||
handler_class = getattr(mesh_interface_module, "NodeInfoHandler", None)
|
||||
if handler_class is None: # pragma: no cover - upstream API regression guard
|
||||
return
|
||||
if getattr(
|
||||
handler_class, "_potato_mesh_safe_wrapper", False
|
||||
): # pragma: no cover - re-entry guard
|
||||
return
|
||||
|
||||
original_on_receive = getattr(handler_class, "onReceive", None)
|
||||
if not callable(
|
||||
original_on_receive
|
||||
): # pragma: no cover - upstream API regression guard
|
||||
return
|
||||
|
||||
class _SafeNodeInfoHandler(handler_class): # type: ignore[misc]
|
||||
"""Subclass that guards against missing node identifiers."""
|
||||
|
||||
def onReceive(self, iface, packet): # type: ignore[override]
|
||||
"""Normalise ``packet`` before dispatching to the parent handler.
|
||||
|
||||
Injects a canonical ``id`` field when one can be inferred from the
|
||||
packet's other fields, then delegates to the original
|
||||
``NodeInfoHandler.onReceive``. A ``KeyError`` on ``"id"`` is
|
||||
suppressed because some firmware versions omit the field entirely.
|
||||
|
||||
Parameters:
|
||||
iface: The Meshtastic interface that received the packet.
|
||||
packet: Raw nodeinfo packet dict, possibly lacking an ``id``
|
||||
key.
|
||||
|
||||
Returns:
|
||||
The return value of the parent handler, or ``None`` when a
|
||||
missing ``"id"`` key would otherwise raise.
|
||||
"""
|
||||
normalised = _normalise_nodeinfo_packet(packet)
|
||||
if normalised is not None:
|
||||
packet = normalised
|
||||
|
||||
try:
|
||||
return super().onReceive(iface, packet)
|
||||
except KeyError as exc: # pragma: no cover - defensive only
|
||||
if exc.args and exc.args[0] == "id":
|
||||
return None
|
||||
raise
|
||||
|
||||
_SafeNodeInfoHandler.__name__ = handler_class.__name__
|
||||
_SafeNodeInfoHandler.__qualname__ = getattr(
|
||||
handler_class, "__qualname__", handler_class.__name__
|
||||
)
|
||||
_SafeNodeInfoHandler.__module__ = getattr(
|
||||
handler_class, "__module__", mesh_interface_module.__name__
|
||||
)
|
||||
_SafeNodeInfoHandler.__doc__ = getattr(
|
||||
handler_class, "__doc__", _SafeNodeInfoHandler.__doc__
|
||||
)
|
||||
_SafeNodeInfoHandler._potato_mesh_safe_wrapper = True # type: ignore[attr-defined]
|
||||
|
||||
setattr(mesh_interface_module, "NodeInfoHandler", _SafeNodeInfoHandler)
|
||||
if meshtastic_module is None:
|
||||
meshtastic_module = globals().get("meshtastic")
|
||||
if meshtastic_module is not None:
|
||||
existing_top = getattr(meshtastic_module, "NodeInfoHandler", None)
|
||||
if existing_top is handler_class: # pragma: no cover - top-level re-export
|
||||
setattr(meshtastic_module, "NodeInfoHandler", _SafeNodeInfoHandler)
|
||||
_update_nodeinfo_handler_aliases(handler_class, _SafeNodeInfoHandler)
|
||||
@@ -1,292 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""LoRa region/frequency/preset derivation from a Meshtastic config protobuf."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
import re
|
||||
from typing import Any
|
||||
|
||||
from .. import config
|
||||
|
||||
|
||||
def _has_field(message: Any, field_name: str) -> bool:
|
||||
"""Return ``True`` when ``message`` advertises ``field_name`` via ``HasField``."""
|
||||
|
||||
if message is None:
|
||||
return False
|
||||
has_field = getattr(message, "HasField", None)
|
||||
if callable(has_field):
|
||||
try:
|
||||
return bool(has_field(field_name))
|
||||
except Exception: # pragma: no cover - defensive guard
|
||||
return False
|
||||
return hasattr(message, field_name)
|
||||
|
||||
|
||||
def _enum_name_from_field(message: Any, field_name: str, value: Any) -> str | None:
|
||||
"""Return the enum name for ``value`` using ``message`` descriptors."""
|
||||
|
||||
descriptor = getattr(message, "DESCRIPTOR", None)
|
||||
if descriptor is None:
|
||||
return None
|
||||
fields_by_name = getattr(descriptor, "fields_by_name", {})
|
||||
field_desc = fields_by_name.get(field_name)
|
||||
if field_desc is None:
|
||||
return None
|
||||
enum_type = getattr(field_desc, "enum_type", None)
|
||||
if enum_type is None:
|
||||
return None
|
||||
enum_values = getattr(enum_type, "values_by_number", {})
|
||||
enum_value = enum_values.get(value)
|
||||
if enum_value is None:
|
||||
return None
|
||||
return getattr(enum_value, "name", None)
|
||||
|
||||
|
||||
def _resolve_lora_message(local_config: Any) -> Any | None:
|
||||
"""Return the LoRa configuration sub-message from ``local_config``."""
|
||||
|
||||
if local_config is None:
|
||||
return None
|
||||
if _has_field(local_config, "lora"):
|
||||
candidate = getattr(local_config, "lora", None)
|
||||
if candidate is not None:
|
||||
return candidate
|
||||
radio_section = getattr(local_config, "radio", None)
|
||||
if radio_section is not None:
|
||||
if _has_field(radio_section, "lora"):
|
||||
return getattr(radio_section, "lora", None)
|
||||
if hasattr(radio_section, "lora"):
|
||||
return getattr(radio_section, "lora")
|
||||
if hasattr(local_config, "lora"):
|
||||
return getattr(local_config, "lora")
|
||||
return None
|
||||
|
||||
|
||||
# Maps Meshtastic region enum name to (base_freq_MHz, channel_spacing_MHz).
|
||||
# Values are derived from the Meshtastic firmware RegionInfo tables.
|
||||
# Used by _computed_channel_frequency to derive the actual radio frequency
|
||||
# from the region and channel index.
|
||||
_REGION_CHANNEL_PARAMS: dict[str, tuple[float, float]] = {
|
||||
"US": (902.0, 0.25), # 902–928 MHz; e.g. ch 52 ≈ 915 MHz at 250 kHz spacing
|
||||
"EU_433": (433.175, 0.2),
|
||||
"EU_868": (869.525, 0.5), # actual primary ≈ 869.525 MHz, not 868
|
||||
"CN": (470.0, 0.2),
|
||||
"JP": (920.875, 0.5),
|
||||
"ANZ": (916.0, 0.5),
|
||||
"KR": (921.9, 0.5),
|
||||
"TW": (923.0, 0.5),
|
||||
"RU": (868.9, 0.5),
|
||||
"IN": (865.0, 0.5),
|
||||
"NZ_865": (864.0, 0.5),
|
||||
"TH": (920.0, 0.5),
|
||||
"LORA_24": (2400.0, 0.5),
|
||||
"UA_433": (433.175, 0.2),
|
||||
"UA_868": (868.0, 0.5),
|
||||
"MY_433": (433.0, 0.2),
|
||||
"MY_919": (919.0, 0.5),
|
||||
"SG_923": (923.0, 0.5),
|
||||
"PH_433": (433.0, 0.2),
|
||||
"PH_868": (868.0, 0.5),
|
||||
"PH_915": (915.0, 0.5),
|
||||
"ANZ_433": (433.0, 0.2),
|
||||
"KZ_433": (433.0, 0.2),
|
||||
"KZ_863": (863.125, 0.5),
|
||||
"NP_865": (865.0, 0.5),
|
||||
"BR_902": (902.0, 0.25),
|
||||
# IL (Israel) is absent from meshtastic Python lib 2.7.8 protobufs; the
|
||||
# enum value is unresolvable at runtime. Operators on IL firmware should
|
||||
# set the FREQUENCY environment variable to override.
|
||||
}
|
||||
|
||||
|
||||
def _computed_channel_frequency(
|
||||
enum_name: str | None,
|
||||
channel_num: int | None,
|
||||
) -> int | None:
|
||||
"""Compute the floor MHz frequency for a known region and channel index.
|
||||
|
||||
Looks up *enum_name* in :data:`_REGION_CHANNEL_PARAMS` and returns
|
||||
``floor(base_freq + channel_num * spacing)``. Returns ``None`` when the
|
||||
region is not in the table. A missing or negative *channel_num* is
|
||||
treated as 0 so the base frequency is always usable.
|
||||
|
||||
Args:
|
||||
enum_name: Region enum name as returned by
|
||||
:func:`_enum_name_from_field`, e.g. ``"EU_868"`` or ``"US"``.
|
||||
channel_num: Zero-based channel index from the device LoRa config.
|
||||
|
||||
Returns:
|
||||
Floored MHz as :class:`int`, or ``None`` if the region is unknown.
|
||||
"""
|
||||
if enum_name is None:
|
||||
return None
|
||||
params = _REGION_CHANNEL_PARAMS.get(enum_name)
|
||||
if params is None:
|
||||
return None
|
||||
base, spacing = params
|
||||
idx = channel_num if (isinstance(channel_num, int) and channel_num >= 0) else 0
|
||||
return math.floor(base + idx * spacing)
|
||||
|
||||
|
||||
def _region_frequency(lora_message: Any) -> int | float | str | None:
|
||||
"""Derive the LoRa region frequency in MHz or the region label from ``lora_message``.
|
||||
|
||||
Frequency sources are tried in priority order:
|
||||
|
||||
1. ``override_frequency > 0`` — explicit radio override, floored to MHz.
|
||||
2. :data:`_REGION_CHANNEL_PARAMS` lookup + ``channel_num`` — actual
|
||||
band-plan frequency derived from the device's region and channel index,
|
||||
floored to MHz.
|
||||
3. Largest digit token ≥ 100 parsed from the region enum name string.
|
||||
4. Largest digit token < 100 from the enum name (reversed scan).
|
||||
5. Full enum name string, raw integer ≥ 100, or raw string as a label.
|
||||
|
||||
Args:
|
||||
lora_message: A LoRa config protobuf message or compatible object.
|
||||
|
||||
Returns:
|
||||
An integer MHz frequency, a fallback string label, or ``None``.
|
||||
"""
|
||||
|
||||
if lora_message is None:
|
||||
return None
|
||||
|
||||
# Step 1 — explicit radio override
|
||||
override_frequency = getattr(lora_message, "override_frequency", None)
|
||||
if override_frequency is not None:
|
||||
if isinstance(override_frequency, (int, float)):
|
||||
if override_frequency > 0:
|
||||
return math.floor(override_frequency)
|
||||
elif override_frequency:
|
||||
return override_frequency
|
||||
|
||||
region_value = getattr(lora_message, "region", None)
|
||||
if region_value is None:
|
||||
return None
|
||||
enum_name = _enum_name_from_field(lora_message, "region", region_value)
|
||||
|
||||
# Step 2 — lookup table + channel offset (actual band-plan frequency)
|
||||
if enum_name:
|
||||
channel_num = getattr(lora_message, "channel_num", None)
|
||||
computed = _computed_channel_frequency(enum_name, channel_num)
|
||||
if computed is not None:
|
||||
return computed
|
||||
|
||||
# Steps 3–5 — parse digits from enum name (fallback for unknown regions)
|
||||
if enum_name:
|
||||
digits = re.findall(r"\d+", enum_name)
|
||||
for token in digits:
|
||||
try:
|
||||
freq = int(token)
|
||||
except ValueError: # pragma: no cover - regex guarantees digits
|
||||
continue
|
||||
if freq >= 100:
|
||||
return freq
|
||||
for token in reversed(digits):
|
||||
try:
|
||||
return int(token)
|
||||
except ValueError: # pragma: no cover - defensive only
|
||||
continue
|
||||
return enum_name
|
||||
if isinstance(region_value, int) and region_value >= 100:
|
||||
return region_value
|
||||
if isinstance(region_value, str) and region_value:
|
||||
return region_value
|
||||
return None
|
||||
|
||||
|
||||
def _camelcase_enum_name(name: str | None) -> str | None:
|
||||
"""Convert ``name`` from ``SCREAMING_SNAKE`` to ``CamelCase``."""
|
||||
|
||||
if not name:
|
||||
return None
|
||||
parts = re.split(r"[^0-9A-Za-z]+", name.strip())
|
||||
camel_parts = [part.capitalize() for part in parts if part]
|
||||
if not camel_parts:
|
||||
return None
|
||||
return "".join(camel_parts)
|
||||
|
||||
|
||||
def _modem_preset(lora_message: Any) -> str | None:
|
||||
"""Return the CamelCase modem preset configured on ``lora_message``."""
|
||||
|
||||
if lora_message is None:
|
||||
return None
|
||||
descriptor = getattr(lora_message, "DESCRIPTOR", None)
|
||||
fields_by_name = getattr(descriptor, "fields_by_name", {}) if descriptor else {}
|
||||
if "modem_preset" in fields_by_name:
|
||||
preset_field = "modem_preset"
|
||||
elif "preset" in fields_by_name:
|
||||
preset_field = "preset"
|
||||
elif hasattr(lora_message, "modem_preset"):
|
||||
preset_field = "modem_preset"
|
||||
elif hasattr(lora_message, "preset"):
|
||||
preset_field = "preset"
|
||||
else:
|
||||
return None
|
||||
|
||||
preset_value = getattr(lora_message, preset_field, None)
|
||||
if preset_value is None:
|
||||
return None
|
||||
enum_name = _enum_name_from_field(lora_message, preset_field, preset_value)
|
||||
if isinstance(enum_name, str) and enum_name:
|
||||
return _camelcase_enum_name(enum_name)
|
||||
if isinstance(preset_value, str) and preset_value:
|
||||
return _camelcase_enum_name(preset_value)
|
||||
return None
|
||||
|
||||
|
||||
def _ensure_radio_metadata(iface: Any) -> None:
|
||||
"""Populate cached LoRa metadata by inspecting ``iface`` when available."""
|
||||
|
||||
if iface is None:
|
||||
return
|
||||
|
||||
try:
|
||||
wait_for_config = getattr(iface, "waitForConfig", None)
|
||||
if callable(wait_for_config):
|
||||
wait_for_config()
|
||||
except Exception: # pragma: no cover - hardware dependent guard
|
||||
pass
|
||||
|
||||
local_node = getattr(iface, "localNode", None)
|
||||
local_config = getattr(local_node, "localConfig", None) if local_node else None
|
||||
lora_message = _resolve_lora_message(local_config)
|
||||
if lora_message is None:
|
||||
return
|
||||
|
||||
frequency = _region_frequency(lora_message)
|
||||
preset = _modem_preset(lora_message)
|
||||
|
||||
updated = False
|
||||
if frequency is not None and getattr(config, "LORA_FREQ", None) is None:
|
||||
config.LORA_FREQ = frequency
|
||||
updated = True
|
||||
if preset is not None and getattr(config, "MODEM_PRESET", None) is None:
|
||||
config.MODEM_PRESET = preset
|
||||
updated = True
|
||||
|
||||
if updated:
|
||||
config._debug_log(
|
||||
"Captured LoRa radio metadata",
|
||||
context="interfaces.ensure_radio_metadata",
|
||||
severity="info",
|
||||
always=True,
|
||||
lora_freq=frequency,
|
||||
modem_preset=preset,
|
||||
)
|
||||
@@ -1,84 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Network target parsing helpers for Meshtastic interfaces."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import ipaddress
|
||||
import urllib.parse
|
||||
|
||||
from ..connection import DEFAULT_TCP_PORT
|
||||
|
||||
_DEFAULT_TCP_TARGET = "http://127.0.0.1"
|
||||
|
||||
|
||||
def _parse_network_target(value: str) -> tuple[str, int] | None:
|
||||
"""Return ``(host, port)`` when ``value`` is a numeric IP address string.
|
||||
|
||||
Only literal IPv4 or IPv6 addresses are accepted, optionally paired with a
|
||||
port or scheme. Callers that start from hostnames should resolve them to an
|
||||
address before invoking this helper.
|
||||
|
||||
Parameters:
|
||||
value: Numeric IP literal or URL describing the TCP interface.
|
||||
|
||||
Returns:
|
||||
A ``(host, port)`` tuple or ``None`` when parsing fails.
|
||||
"""
|
||||
|
||||
if not value:
|
||||
return None
|
||||
|
||||
value = value.strip()
|
||||
if not value:
|
||||
return None
|
||||
|
||||
def _validated_result(host: str | None, port: int | None) -> tuple[str, int] | None:
|
||||
if not host:
|
||||
return None
|
||||
try:
|
||||
ipaddress.ip_address(host)
|
||||
except ValueError:
|
||||
return None
|
||||
return host, port or DEFAULT_TCP_PORT
|
||||
|
||||
parsed_values = []
|
||||
if "://" in value:
|
||||
parsed_values.append(urllib.parse.urlparse(value, scheme="tcp"))
|
||||
parsed_values.append(urllib.parse.urlparse(f"//{value}", scheme="tcp"))
|
||||
|
||||
for parsed in parsed_values:
|
||||
try:
|
||||
port = parsed.port
|
||||
except ValueError:
|
||||
port = None
|
||||
result = _validated_result(parsed.hostname, port)
|
||||
if result:
|
||||
return result
|
||||
|
||||
# For bare "host:port" strings that urlparse may misparse, try a manual
|
||||
# partition. The `startswith("[")` guard excludes IPv6 bracket notation
|
||||
# (e.g. "[::1]:8080") because those already succeed via urlparse above.
|
||||
if value.count(":") == 1 and not value.startswith("["):
|
||||
host, _, port_text = value.partition(":")
|
||||
try:
|
||||
port = int(port_text) if port_text else None
|
||||
except ValueError:
|
||||
port = None
|
||||
result = _validated_result(host, port)
|
||||
if result: # pragma: no cover - urlparse handles all currently-known forms
|
||||
return result
|
||||
|
||||
return _validated_result(value, None)
|
||||
@@ -1,57 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""MeshProtocol interface for ingestion sources.
|
||||
|
||||
This module defines the seam so future protocols (MeshCore, Reticulum, ...) can
|
||||
be added without changing the web app ingest contract.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterable
|
||||
from typing import Protocol, runtime_checkable
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class MeshProtocol(Protocol):
|
||||
"""Abstract mesh protocol source."""
|
||||
|
||||
name: str
|
||||
|
||||
def subscribe(self) -> list[str]:
|
||||
"""Subscribe to any async receive callbacks and return topic names."""
|
||||
|
||||
def connect(
|
||||
self, *, active_candidate: str | None
|
||||
) -> tuple[object, str | None, str | None]:
|
||||
"""Create an interface connection.
|
||||
|
||||
Returns:
|
||||
(iface, resolved_target, next_active_candidate)
|
||||
"""
|
||||
|
||||
def extract_host_node_id(self, iface: object) -> str | None:
|
||||
"""Best-effort extraction of the connected host node id."""
|
||||
|
||||
def node_snapshot_items(self, iface: object) -> Iterable[tuple[str, object]]:
|
||||
"""Return iterable of (node_id, node_obj) for initial snapshot."""
|
||||
|
||||
|
||||
__all__ = [
|
||||
"MeshProtocol",
|
||||
]
|
||||
|
||||
# Backwards-compatibility alias — import Provider from here during transition.
|
||||
Provider = MeshProtocol
|
||||
@@ -1,115 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Node identity helpers shared across ingestor providers.
|
||||
|
||||
The web application keys nodes by a canonical textual identifier of the form
|
||||
``!%08x`` (lowercase hex). Both the Python collector and Ruby server accept
|
||||
several input forms (ints, ``0x`` hex strings, ``!`` hex strings, decimal
|
||||
strings). This module centralizes that normalization.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Final
|
||||
|
||||
CANONICAL_PREFIX: Final[str] = "!"
|
||||
|
||||
|
||||
def canonical_node_id(value: object) -> str | None:
|
||||
"""Convert ``value`` into canonical ``!xxxxxxxx`` form.
|
||||
|
||||
Parameters:
|
||||
value: Node reference which may be an int, float, or string.
|
||||
|
||||
Returns:
|
||||
Canonical node id string or ``None`` when parsing fails.
|
||||
"""
|
||||
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, (int, float)):
|
||||
try:
|
||||
num = int(value)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
if num < 0:
|
||||
return None
|
||||
return f"{CANONICAL_PREFIX}{num & 0xFFFFFFFF:08x}"
|
||||
if not isinstance(value, str):
|
||||
return None
|
||||
|
||||
trimmed = value.strip()
|
||||
if not trimmed:
|
||||
return None
|
||||
if trimmed.startswith("^"):
|
||||
# Meshtastic special destinations like "^all" are not node ids; callers
|
||||
# that already accept them should keep passing them through unchanged.
|
||||
return trimmed
|
||||
if trimmed.startswith(CANONICAL_PREFIX):
|
||||
body = trimmed[1:]
|
||||
elif trimmed.lower().startswith("0x"):
|
||||
body = trimmed[2:]
|
||||
elif trimmed.isdigit():
|
||||
try:
|
||||
return f"{CANONICAL_PREFIX}{int(trimmed, 10) & 0xFFFFFFFF:08x}"
|
||||
except ValueError:
|
||||
return None
|
||||
else:
|
||||
body = trimmed
|
||||
|
||||
if not body:
|
||||
return None
|
||||
try:
|
||||
return f"{CANONICAL_PREFIX}{int(body, 16) & 0xFFFFFFFF:08x}"
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def node_num_from_id(node_id: object) -> int | None:
|
||||
"""Extract the numeric node identifier from a canonical (or near-canonical) id."""
|
||||
|
||||
if node_id is None:
|
||||
return None
|
||||
if isinstance(node_id, (int, float)):
|
||||
try:
|
||||
num = int(node_id)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
return num if num >= 0 else None
|
||||
if not isinstance(node_id, str):
|
||||
return None
|
||||
|
||||
trimmed = node_id.strip()
|
||||
if not trimmed:
|
||||
return None
|
||||
if trimmed.startswith(CANONICAL_PREFIX):
|
||||
trimmed = trimmed[1:]
|
||||
if trimmed.lower().startswith("0x"):
|
||||
trimmed = trimmed[2:]
|
||||
try:
|
||||
return int(trimmed, 16)
|
||||
except ValueError:
|
||||
try:
|
||||
return int(trimmed, 10)
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
__all__ = [
|
||||
"CANONICAL_PREFIX",
|
||||
"canonical_node_id",
|
||||
"node_num_from_id",
|
||||
]
|
||||
@@ -1,44 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Protocol implementations.
|
||||
|
||||
This package contains protocol-specific implementations (Meshtastic,
|
||||
MeshCore, and others in the future).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from .meshtastic import MeshtasticProvider
|
||||
|
||||
|
||||
def __getattr__(name: str) -> object:
|
||||
"""Lazy-load protocol classes and exceptions that carry optional heavy dependencies.
|
||||
|
||||
``MeshcoreProvider`` and ``ClosedBeforeConnectedError`` are imported on
|
||||
demand so that the MeshCore library (once wired in) is not loaded at
|
||||
startup when ``PROTOCOL=meshtastic``.
|
||||
"""
|
||||
if name == "MeshcoreProvider":
|
||||
from .meshcore import MeshcoreProvider
|
||||
|
||||
return MeshcoreProvider
|
||||
if name == "ClosedBeforeConnectedError":
|
||||
from .meshcore import ClosedBeforeConnectedError
|
||||
|
||||
return ClosedBeforeConnectedError
|
||||
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
|
||||
|
||||
|
||||
__all__ = ["MeshtasticProvider", "MeshcoreProvider", "ClosedBeforeConnectedError"]
|
||||
@@ -1,161 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Runtime patches applied to the upstream ``meshcore`` library.
|
||||
|
||||
This module exists solely to paper over bugs in the third-party
|
||||
``meshcore-py`` package while we wait for upstream fixes. Each patch is
|
||||
narrow, idempotent, and preserves the original method on the target class so
|
||||
that it can be reverted cleanly once a fix ships upstream.
|
||||
|
||||
Current patches:
|
||||
|
||||
* :func:`_wrap_handle_rx` — guards :meth:`meshcore.reader.MessageReader.handle_rx`
|
||||
against unhandled exceptions raised while decoding a single radio frame.
|
||||
Upstream 2.3.6 (latest at the time of writing) raises ``IndexError`` at
|
||||
``reader.py:365`` when parsing a truncated ``DEVICE_INFO`` advertisement
|
||||
(``path_hash_mode = dbuf.read(1)[0]`` with an already-exhausted buffer).
|
||||
Because the frame is parsed inside a detached
|
||||
``asyncio.create_task(...)`` the resulting exception surfaces as a noisy
|
||||
``Task exception was never retrieved`` stderr dump and the decoded event
|
||||
for that frame is lost. See GitHub issue #754.
|
||||
|
||||
Apply the patches by calling :func:`apply` as early as possible after the
|
||||
``meshcore`` package is imported. Re-invoking :func:`apply` is a no-op.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from .. import config
|
||||
|
||||
# Sentinel attribute set on a patched method so repeated imports/tests do
|
||||
# not wrap the same function more than once. The name intentionally
|
||||
# includes the project slug so we can grep for it while diagnosing.
|
||||
_PATCH_MARKER = "_potato_mesh_patched"
|
||||
|
||||
# Cap on hex bytes dumped into the log per failure. Keeps the log line
|
||||
# under a few hundred characters even for maximum-sized frames.
|
||||
_PACKET_LOG_MAX_BYTES = 32
|
||||
|
||||
|
||||
def apply() -> bool:
|
||||
"""Install every known-needed patch on the upstream ``meshcore`` library.
|
||||
|
||||
Safe to call multiple times; each patch is individually idempotent.
|
||||
|
||||
Implicit contract with upstream: every patch here rebinds a method on
|
||||
the target *class*. This only affects call sites that perform an
|
||||
attribute lookup at call time (``reader.handle_rx(data)``) — not call
|
||||
sites that captured an unbound reference before :func:`apply` ran
|
||||
(``_rx = reader.handle_rx``). As of ``meshcore-py`` 2.3.6 the library
|
||||
always uses attribute-lookup-at-call, so this is fine; if a future
|
||||
release flips that, the patch silently no-ops and the original bug
|
||||
resurfaces. Spot-check after every upstream bump.
|
||||
|
||||
Returns:
|
||||
``True`` when at least one patch was installed during this call,
|
||||
``False`` when every patch had already been applied (or when the
|
||||
``meshcore`` library is not importable in this environment, e.g. a
|
||||
meshtastic-only test runner).
|
||||
"""
|
||||
try:
|
||||
import meshcore.reader as _reader # type: ignore[import-not-found]
|
||||
except ImportError:
|
||||
# Meshtastic-only runtimes never load this module's caller, but
|
||||
# imports from tests may still land here. Nothing to patch.
|
||||
return False
|
||||
|
||||
return _wrap_handle_rx(_reader.MessageReader)
|
||||
|
||||
|
||||
def _wrap_handle_rx(reader_cls: Any) -> bool:
|
||||
"""Wrap ``reader_cls.handle_rx`` with an exception-swallowing shim.
|
||||
|
||||
Parameters:
|
||||
reader_cls: The ``MessageReader`` class to patch in place.
|
||||
|
||||
Returns:
|
||||
``True`` when the wrap was installed on this call; ``False`` when
|
||||
the method had already been wrapped.
|
||||
"""
|
||||
original = getattr(reader_cls, "handle_rx", None)
|
||||
if original is None:
|
||||
return False
|
||||
if getattr(original, _PATCH_MARKER, False):
|
||||
return False
|
||||
|
||||
async def safe_handle_rx(self, data, *args, **kwargs): # type: ignore[no-untyped-def]
|
||||
"""Run the original ``handle_rx`` and convert hard failures to logs.
|
||||
|
||||
A single malformed frame would otherwise kill the
|
||||
``asyncio.create_task(reader.handle_rx(data))`` task spawned by the
|
||||
upstream connection layer, surfacing as ``Task exception was never
|
||||
retrieved`` in stderr and losing the event silently. We log once
|
||||
with the first few bytes of the offending frame for forensics and
|
||||
then return ``None`` so the task exits cleanly.
|
||||
"""
|
||||
try:
|
||||
return await original(self, data, *args, **kwargs)
|
||||
except Exception as exc: # noqa: BLE001 — deliberately broad: a
|
||||
# single malformed frame must not kill the reader. Narrower
|
||||
# excepts would hide future upstream failure modes (e.g.
|
||||
# ``struct.error``) the same way the current IndexError was
|
||||
# hidden before we added this shim.
|
||||
config._debug_log(
|
||||
"Suppressed meshcore reader exception on malformed frame",
|
||||
context="meshcore.reader.patch",
|
||||
severity="warning",
|
||||
always=True,
|
||||
error_class=type(exc).__name__,
|
||||
error_message=str(exc),
|
||||
packet_len=_safe_len(data),
|
||||
packet_hex=_hex_preview(data, _PACKET_LOG_MAX_BYTES),
|
||||
)
|
||||
return None
|
||||
|
||||
setattr(safe_handle_rx, _PATCH_MARKER, True)
|
||||
# Preserve the pre-patch method under a stable name so operators and
|
||||
# future maintainers can revert the patch with one line.
|
||||
reader_cls._orig_handle_rx = original
|
||||
reader_cls.handle_rx = safe_handle_rx
|
||||
return True
|
||||
|
||||
|
||||
def _safe_len(data: Any) -> int | None:
|
||||
"""Return ``len(data)`` or ``None`` when the object is not sized."""
|
||||
try:
|
||||
return len(data)
|
||||
except TypeError:
|
||||
return None
|
||||
|
||||
|
||||
def _hex_preview(data: Any, limit: int) -> str:
|
||||
"""Return the first *limit* bytes of ``data`` as a lowercase hex string.
|
||||
|
||||
Accepts anything that is a :class:`bytes`-like or supports ``bytes(data)``.
|
||||
On conversion failure returns an empty string — the log caller still gets
|
||||
the error class and message.
|
||||
"""
|
||||
try:
|
||||
if not isinstance(data, (bytes, bytearray, memoryview)):
|
||||
data = bytes(data)
|
||||
except Exception: # noqa: BLE001 — pure diagnostic path, never raise.
|
||||
return ""
|
||||
prefix = bytes(data[:limit])
|
||||
return prefix.hex()
|
||||
|
||||
|
||||
__all__ = ["apply"]
|
||||
@@ -1,170 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""MeshCore protocol implementation.
|
||||
|
||||
This package defines :class:`MeshcoreProvider`, which satisfies the
|
||||
:class:`~data.mesh_ingestor.mesh_protocol.MeshProtocol` interface for MeshCore
|
||||
nodes connected via serial port, BLE, or TCP/IP.
|
||||
|
||||
The protocol backend runs MeshCore's ``asyncio`` event loop in a background
|
||||
daemon thread so that incoming events are dispatched without blocking the
|
||||
synchronous daemon loop. Received contacts, channel messages, and direct
|
||||
messages are forwarded to the shared HTTP ingest queue via the same
|
||||
:mod:`~data.mesh_ingestor.handlers` helpers used by the Meshtastic protocol.
|
||||
|
||||
Connection type is detected automatically from the target string:
|
||||
|
||||
* **BLE** — MAC address (``AA:BB:CC:DD:EE:FF``) or UUID (macOS format).
|
||||
* **TCP** — ``host:port`` or ``[ipv6]:port`` (accepts hostnames).
|
||||
* **Serial** — any other non-empty string (e.g. ``/dev/ttyUSB0``).
|
||||
* **Auto** — ``None`` or empty: tries serial candidates from
|
||||
:func:`~data.mesh_ingestor.connection.default_serial_targets`.
|
||||
|
||||
Node identities are derived from the first four bytes (eight hex characters)
|
||||
of each contact's 32-byte public key, formatted as ``!xxxxxxxx`` to match
|
||||
the canonical node-ID schema used across the system. Ingested
|
||||
``user.shortName`` is the first two bytes (four hex characters) of the
|
||||
node ID, not the advertised name.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
# Apply upstream-library patches before any ``MeshCore`` instance is built,
|
||||
# otherwise the first malformed advertisement dies inside a detached asyncio
|
||||
# task before our handler can observe it. See
|
||||
# :mod:`data.mesh_ingestor.protocols._meshcore_patches` for the specific
|
||||
# upstream bugs covered.
|
||||
#
|
||||
# This mutates the upstream class at import time. The blast radius is
|
||||
# narrow because ``protocols/__init__.py`` exposes this package only through
|
||||
# a lazy ``__getattr__`` and the daemon resolves it only when
|
||||
# ``PROTOCOL=meshcore`` is active. Any future diagnostic CLI that imports
|
||||
# this package will inherit the shim.
|
||||
from .. import _meshcore_patches as _meshcore_patches
|
||||
|
||||
_meshcore_patches.apply()
|
||||
|
||||
# Re-expose meshcore-library symbols so existing test imports (and callers
|
||||
# that prefer a single import surface) keep working unchanged. Submodules
|
||||
# resolve these names at call time via ``sys.modules`` so monkey-patches
|
||||
# applied to the package surface during tests propagate.
|
||||
from meshcore import ( # noqa: E402 - patches must run before this import.
|
||||
BLEConnection,
|
||||
EventType,
|
||||
MeshCore,
|
||||
SerialConnection,
|
||||
TCPConnection,
|
||||
)
|
||||
|
||||
# Re-expose the ``data.mesh_ingestor`` modules that tests monkeypatch through
|
||||
# the meshcore namespace (``_mod.config._debug_log``, ``_mod._ingestors``,
|
||||
# ``_mod._queue``). Keeping these attributes preserves the call surface of
|
||||
# the pre-split ``meshcore.py`` module.
|
||||
from ... import config as config # noqa: E402
|
||||
from ... import ingestors as _ingestors # noqa: E402
|
||||
from ... import queue as _queue # noqa: E402
|
||||
from ...connection import default_serial_targets # noqa: E402
|
||||
|
||||
from ._constants import ( # noqa: E402 - keep grouped with sibling re-exports.
|
||||
_CHANNEL_PROBE_FALLBACK_MAX,
|
||||
_CONNECT_TIMEOUT_SECS,
|
||||
_DEFAULT_BAUDRATE,
|
||||
_MENTION_RE,
|
||||
_MESHCORE_ADV_TYPE_ROLE,
|
||||
_MESHCORE_ID_BITS,
|
||||
_MESHCORE_ID_MASK,
|
||||
)
|
||||
from .channels import _ensure_channel_names # noqa: E402
|
||||
from .connection import ( # noqa: E402
|
||||
_log_unhandled_loop_exception,
|
||||
_make_connection,
|
||||
)
|
||||
from .debug_log import ( # noqa: E402
|
||||
_IGNORED_MESSAGE_LOCK,
|
||||
_IGNORED_MESSAGE_LOG_PATH,
|
||||
_record_meshcore_message,
|
||||
_to_json_safe,
|
||||
)
|
||||
from .decode import ( # noqa: E402
|
||||
_contact_to_node_dict,
|
||||
_derive_modem_preset,
|
||||
_self_info_to_node_dict,
|
||||
)
|
||||
from .handlers import ( # noqa: E402
|
||||
_make_event_handlers,
|
||||
_process_contact_update,
|
||||
_process_contacts,
|
||||
_process_self_info,
|
||||
)
|
||||
from .identity import ( # noqa: E402
|
||||
_derive_synthetic_node_id,
|
||||
_meshcore_adv_type_to_role,
|
||||
_meshcore_node_id,
|
||||
_meshcore_short_name,
|
||||
_pubkey_prefix_to_node_id,
|
||||
)
|
||||
from .interface import ClosedBeforeConnectedError, _MeshcoreInterface # noqa: E402
|
||||
from .messages import ( # noqa: E402
|
||||
_derive_message_id,
|
||||
_extract_mention_names,
|
||||
_parse_sender_name,
|
||||
_synthetic_node_dict,
|
||||
)
|
||||
from .position import _store_meshcore_position # noqa: E402
|
||||
from .provider import MeshcoreProvider # noqa: E402
|
||||
from .runner import _run_meshcore # noqa: E402
|
||||
|
||||
__all__ = [
|
||||
"BLEConnection",
|
||||
"ClosedBeforeConnectedError",
|
||||
"EventType",
|
||||
"MeshCore",
|
||||
"MeshcoreProvider",
|
||||
"SerialConnection",
|
||||
"TCPConnection",
|
||||
"_CHANNEL_PROBE_FALLBACK_MAX",
|
||||
"_CONNECT_TIMEOUT_SECS",
|
||||
"_DEFAULT_BAUDRATE",
|
||||
"_IGNORED_MESSAGE_LOCK",
|
||||
"_IGNORED_MESSAGE_LOG_PATH",
|
||||
"_MENTION_RE",
|
||||
"_MESHCORE_ADV_TYPE_ROLE",
|
||||
"_MESHCORE_ID_BITS",
|
||||
"_MESHCORE_ID_MASK",
|
||||
"_MeshcoreInterface",
|
||||
"_contact_to_node_dict",
|
||||
"_derive_message_id",
|
||||
"_derive_modem_preset",
|
||||
"_derive_synthetic_node_id",
|
||||
"_ensure_channel_names",
|
||||
"_extract_mention_names",
|
||||
"_log_unhandled_loop_exception",
|
||||
"_make_connection",
|
||||
"_make_event_handlers",
|
||||
"_meshcore_adv_type_to_role",
|
||||
"_meshcore_node_id",
|
||||
"_meshcore_short_name",
|
||||
"_parse_sender_name",
|
||||
"_process_contact_update",
|
||||
"_process_contacts",
|
||||
"_process_self_info",
|
||||
"_pubkey_prefix_to_node_id",
|
||||
"_record_meshcore_message",
|
||||
"_run_meshcore",
|
||||
"_self_info_to_node_dict",
|
||||
"_store_meshcore_position",
|
||||
"_synthetic_node_dict",
|
||||
"_to_json_safe",
|
||||
]
|
||||
@@ -1,56 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Constants shared across MeshCore submodules.
|
||||
|
||||
Hoisted out of the original monolithic ``meshcore.py`` so that submodules can
|
||||
import only what they need without picking up unrelated side-effects.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
|
||||
_CONNECT_TIMEOUT_SECS: float = 30.0
|
||||
"""Seconds to wait for the MeshCore node to respond to the appstart handshake."""
|
||||
|
||||
_DEFAULT_BAUDRATE: int = 115200
|
||||
"""Default baud rate for MeshCore serial connections."""
|
||||
|
||||
# MeshCore ``ADV_TYPE_*`` (``AdvertDataHelpers.h``) → ``user.role`` for POST /api/nodes.
|
||||
_MESHCORE_ADV_TYPE_ROLE: dict[int, str] = {
|
||||
1: "COMPANION", # ADV_TYPE_CHAT
|
||||
2: "REPEATER", # ADV_TYPE_REPEATER
|
||||
3: "ROOM_SERVER", # ADV_TYPE_ROOM_SERVER
|
||||
4: "SENSOR", # ADV_TYPE_SENSOR
|
||||
}
|
||||
|
||||
_MESHCORE_ID_BITS = 53
|
||||
"""Width of the synthetic MeshCore message ID, in bits.
|
||||
|
||||
53 bits keeps the value within :js:data:`Number.MAX_SAFE_INTEGER`
|
||||
(``2**53 - 1``) so the JSON ID round-trips through the JavaScript frontend
|
||||
without precision loss, while giving roughly :math:`2^{26.5}` (~95 million)
|
||||
distinct messages of birthday-collision headroom.
|
||||
"""
|
||||
|
||||
_MESHCORE_ID_MASK = (1 << _MESHCORE_ID_BITS) - 1
|
||||
"""Bitmask applied to the SHA-256 prefix to clamp the id to 53 bits."""
|
||||
|
||||
# Fallback upper bound for channel index probing when the device query fails
|
||||
# or returns an older firmware version that omits ``max_channels``.
|
||||
_CHANNEL_PROBE_FALLBACK_MAX = 32
|
||||
|
||||
# Matches @[Name] mention patterns in MeshCore message bodies.
|
||||
_MENTION_RE = re.compile(r"@\[([^\]]+)\]")
|
||||
@@ -1,86 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Channel-name probing for MeshCore devices."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
|
||||
from ... import config
|
||||
from ._constants import _CHANNEL_PROBE_FALLBACK_MAX
|
||||
|
||||
|
||||
async def _ensure_channel_names(mc: object) -> None:
|
||||
"""Probe channel names from the device and populate the channel cache.
|
||||
|
||||
Queries the device for its authoritative channel count via
|
||||
:meth:`~meshcore.MeshCore.commands.send_device_query` (``max_channels``
|
||||
field of the ``DEVICE_INFO`` response), then iterates every index from 0
|
||||
through ``max_channels - 1``, requesting each via
|
||||
:meth:`~meshcore.MeshCore.commands.get_channel`. The responses arrive as
|
||||
:attr:`~meshcore.EventType.CHANNEL_INFO` events and are registered into
|
||||
the shared channel cache via :func:`~data.mesh_ingestor.channels.register_channel`.
|
||||
|
||||
Falls back to a probe bound of :data:`_CHANNEL_PROBE_FALLBACK_MAX` when the
|
||||
device query fails or returns an older firmware that omits ``max_channels``.
|
||||
|
||||
Probes every index without early-stopping on ``ERROR`` responses, so sparse
|
||||
configurations (e.g. slots 0 and 5 configured, slots 1–4 empty) are handled
|
||||
correctly. Only a hard exception (connection loss, timeout) aborts the loop.
|
||||
|
||||
Parameters:
|
||||
mc: Connected :class:`~meshcore.MeshCore` instance.
|
||||
"""
|
||||
# Deferred — see _make_event_handlers for the circular-dependency note.
|
||||
from ... import channels as _channels
|
||||
|
||||
# Look up ``EventType`` via the parent package so that test fakes installed
|
||||
# via ``monkeypatch.setattr(mod, "EventType", ...)`` apply at call time.
|
||||
pkg = sys.modules["data.mesh_ingestor.protocols.meshcore"]
|
||||
EventType = pkg.EventType
|
||||
|
||||
max_idx = _CHANNEL_PROBE_FALLBACK_MAX
|
||||
try:
|
||||
dev_evt = await mc.commands.send_device_query()
|
||||
if dev_evt.type == EventType.DEVICE_INFO:
|
||||
reported = (dev_evt.payload or {}).get("max_channels")
|
||||
if isinstance(reported, int) and reported > 0:
|
||||
max_idx = reported
|
||||
except Exception as exc:
|
||||
config._debug_log(
|
||||
"Device query failed; using fallback channel probe bound",
|
||||
context="meshcore.channels",
|
||||
severity="warning",
|
||||
fallback_max=max_idx,
|
||||
error=str(exc),
|
||||
)
|
||||
|
||||
for idx in range(max_idx):
|
||||
try:
|
||||
evt = await mc.commands.get_channel(idx)
|
||||
if evt.type == EventType.CHANNEL_INFO:
|
||||
name = (evt.payload or {}).get("channel_name", "")
|
||||
if name:
|
||||
_channels.register_channel(idx, name)
|
||||
# ERROR response — unconfigured slot; continue to next index
|
||||
except Exception as exc:
|
||||
config._debug_log(
|
||||
"Channel probe failed",
|
||||
context="meshcore.channels",
|
||||
severity="warning",
|
||||
channel_idx=idx,
|
||||
error=str(exc),
|
||||
)
|
||||
break
|
||||
@@ -1,95 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Connection routing and asyncio exception logging for MeshCore."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import sys
|
||||
|
||||
from ... import config
|
||||
from ...connection import parse_ble_target, parse_tcp_target
|
||||
|
||||
|
||||
def _make_connection(target: str, baudrate: int) -> object:
|
||||
"""Create the appropriate MeshCore connection object for *target*.
|
||||
|
||||
Routes to the correct ``meshcore`` connection class based on the target
|
||||
string format:
|
||||
|
||||
* BLE MAC / UUID → :class:`meshcore.BLEConnection`
|
||||
* ``host:port`` / ``[ipv6]:port`` → :class:`meshcore.TCPConnection`
|
||||
* anything else → :class:`meshcore.SerialConnection`
|
||||
|
||||
Parameters:
|
||||
target: Resolved, non-empty connection target.
|
||||
baudrate: Baud rate for serial connections (ignored for BLE/TCP).
|
||||
|
||||
Returns:
|
||||
An unconnected ``meshcore`` connection object.
|
||||
"""
|
||||
# Look up connection classes via the parent package so that test fakes
|
||||
# installed via ``monkeypatch.setattr(mod, "BLEConnection", ...)`` apply.
|
||||
pkg = sys.modules["data.mesh_ingestor.protocols.meshcore"]
|
||||
ble_addr = parse_ble_target(target)
|
||||
if ble_addr:
|
||||
return pkg.BLEConnection(address=ble_addr)
|
||||
|
||||
tcp_target = parse_tcp_target(target)
|
||||
if tcp_target:
|
||||
host, port = tcp_target
|
||||
return pkg.TCPConnection(host, port)
|
||||
|
||||
return pkg.SerialConnection(target, baudrate)
|
||||
|
||||
|
||||
def _log_unhandled_loop_exception(
|
||||
loop: asyncio.AbstractEventLoop, context: dict
|
||||
) -> None:
|
||||
"""Route asyncio's "unhandled task exception" warnings through our logger.
|
||||
|
||||
The upstream ``meshcore`` library spawns detached
|
||||
``asyncio.create_task`` tasks for every inbound radio frame. When one
|
||||
of those tasks raises and nobody awaits the future, asyncio's default
|
||||
handler writes ``Task exception was never retrieved`` to stderr. That
|
||||
bypasses our structured log pipeline and clutters container logs.
|
||||
This handler preserves the same information under
|
||||
``context=asyncio.unhandled`` so operators grep for one place.
|
||||
|
||||
Parameters:
|
||||
loop: Event loop that surfaced the exception (unused but required
|
||||
by the asyncio handler signature).
|
||||
context: Asyncio exception-context dictionary. Fields we care
|
||||
about: ``message`` (human summary) and ``exception`` (the raw
|
||||
exception object, when available).
|
||||
"""
|
||||
del loop
|
||||
exception = context.get("exception")
|
||||
task = context.get("task")
|
||||
task_name = None
|
||||
if task is not None:
|
||||
# Prefer the friendly ``get_name()``; fall back to ``repr`` for any
|
||||
# future Task-like object that does not implement it.
|
||||
get_name = getattr(task, "get_name", None)
|
||||
task_name = get_name() if callable(get_name) else repr(task)
|
||||
config._debug_log(
|
||||
context.get("message") or "Unhandled asyncio task exception",
|
||||
context="asyncio.unhandled",
|
||||
severity="error",
|
||||
always=True,
|
||||
error_class=type(exception).__name__ if exception else None,
|
||||
error_message=str(exception) if exception else None,
|
||||
task=task_name,
|
||||
)
|
||||
@@ -1,90 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""``DEBUG=1`` capture of unhandled MeshCore frames to ``ignored-meshcore.txt``."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import json
|
||||
import sys
|
||||
import threading
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
from ... import config
|
||||
|
||||
# This file lives one level deeper than the pre-split ``meshcore.py``
|
||||
# (``data/mesh_ingestor/protocols/meshcore/debug_log.py`` vs.
|
||||
# ``data/mesh_ingestor/protocols/meshcore.py``), so ``parents[4]`` here
|
||||
# (meshcore/ → protocols/ → mesh_ingestor/ → data/ → repo root) lands at
|
||||
# the same repo-root destination as ``parents[3]`` did in the original
|
||||
# module. The on-disk log path is therefore unchanged after the split.
|
||||
_IGNORED_MESSAGE_LOG_PATH = Path(__file__).resolve().parents[4] / "ignored-meshcore.txt"
|
||||
"""Filesystem path that stores raw MeshCore messages when ``DEBUG=1``."""
|
||||
|
||||
_IGNORED_MESSAGE_LOCK = threading.Lock()
|
||||
"""Lock guarding writes to :data:`_IGNORED_MESSAGE_LOG_PATH`."""
|
||||
|
||||
|
||||
def _to_json_safe(value: object) -> object:
|
||||
"""Recursively convert *value* to a JSON-serialisable form.
|
||||
|
||||
Handles the common types present in mesh protocol messages: dicts, lists,
|
||||
bytes (base64-encoded), and primitives. Anything else is coerced via
|
||||
``str()``.
|
||||
"""
|
||||
if isinstance(value, dict):
|
||||
return {str(k): _to_json_safe(v) for k, v in value.items()}
|
||||
if isinstance(value, (list, tuple, set)):
|
||||
return [_to_json_safe(v) for v in value]
|
||||
if isinstance(value, bytes):
|
||||
return base64.b64encode(value).decode("ascii")
|
||||
if isinstance(value, (str, int, float, bool)) or value is None:
|
||||
return value
|
||||
return str(value)
|
||||
|
||||
|
||||
def _record_meshcore_message(message: object, *, source: str) -> None:
|
||||
"""Persist a MeshCore message to :data:`ignored-meshcore.txt` when ``DEBUG=1``.
|
||||
|
||||
When ``DEBUG`` is not set the function returns immediately without any
|
||||
I/O so that production deployments are not burdened by file writes.
|
||||
|
||||
Parameters:
|
||||
message: The raw message object received from the MeshCore node.
|
||||
source: A short label describing where the message originated (e.g.
|
||||
a serial port path or BLE address).
|
||||
"""
|
||||
if not config.DEBUG:
|
||||
return
|
||||
|
||||
# Resolve path/lock via the parent package so test monkey-patches at
|
||||
# ``meshcore._IGNORED_MESSAGE_LOG_PATH`` (and ``_IGNORED_MESSAGE_LOCK``)
|
||||
# take effect at call time.
|
||||
pkg = sys.modules.get("data.mesh_ingestor.protocols.meshcore")
|
||||
log_path = getattr(pkg, "_IGNORED_MESSAGE_LOG_PATH", _IGNORED_MESSAGE_LOG_PATH)
|
||||
log_lock = getattr(pkg, "_IGNORED_MESSAGE_LOCK", _IGNORED_MESSAGE_LOCK)
|
||||
|
||||
timestamp = datetime.now(timezone.utc).isoformat()
|
||||
entry = {
|
||||
"message": _to_json_safe(message),
|
||||
"source": source,
|
||||
"timestamp": timestamp,
|
||||
}
|
||||
payload = json.dumps(entry, ensure_ascii=False, sort_keys=True)
|
||||
with log_lock:
|
||||
log_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
with log_path.open("a", encoding="utf-8") as fh:
|
||||
fh.write(f"{payload}\n")
|
||||
@@ -1,110 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Convert MeshCore contact / self-info payloads into ``POST /api/nodes`` dicts."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
|
||||
from .identity import (
|
||||
_meshcore_adv_type_to_role,
|
||||
_meshcore_node_id,
|
||||
_meshcore_short_name,
|
||||
)
|
||||
|
||||
|
||||
def _contact_to_node_dict(contact: dict) -> dict:
|
||||
"""Convert a MeshCore contact dict to a Meshtastic-ish node dict.
|
||||
|
||||
Parameters:
|
||||
contact: Contact dict from the MeshCore library. Expected keys
|
||||
include ``public_key``, ``type`` (``ADV_TYPE_*``), ``adv_name``,
|
||||
``last_advert``, ``adv_lat``, and ``adv_lon``.
|
||||
|
||||
Returns:
|
||||
Node dict compatible with the ``POST /api/nodes`` payload format.
|
||||
"""
|
||||
pub_key = contact.get("public_key", "")
|
||||
node_id = _meshcore_node_id(pub_key)
|
||||
name = (contact.get("adv_name") or "").strip()
|
||||
role = _meshcore_adv_type_to_role(contact.get("type"))
|
||||
node: dict = {
|
||||
"lastHeard": contact.get("last_advert"),
|
||||
"protocol": "meshcore",
|
||||
"user": {
|
||||
"longName": name,
|
||||
"shortName": _meshcore_short_name(node_id),
|
||||
"publicKey": pub_key,
|
||||
**({"role": role} if role is not None else {}),
|
||||
},
|
||||
}
|
||||
lat = contact.get("adv_lat")
|
||||
lon = contact.get("adv_lon")
|
||||
if lat is not None and lon is not None and (lat or lon):
|
||||
pos: dict = {"latitude": lat, "longitude": lon}
|
||||
last_advert = contact.get("last_advert")
|
||||
if last_advert is not None:
|
||||
pos["time"] = last_advert
|
||||
node["position"] = pos
|
||||
return node
|
||||
|
||||
|
||||
def _derive_modem_preset(sf: object, bw: object, cr: object) -> str | None:
|
||||
"""Return a compact radio-parameter string from spreading factor, bandwidth, and coding rate.
|
||||
|
||||
Parameters:
|
||||
sf: Spreading factor (int, e.g. ``12``).
|
||||
bw: Bandwidth in kHz (int or float, e.g. ``125.0``).
|
||||
cr: Coding rate denominator (int, e.g. ``5`` meaning 4/5).
|
||||
|
||||
Returns:
|
||||
A string such as ``"SF12/BW125/CR5"``, or ``None`` when any parameter
|
||||
is absent or zero (meaning the radio config was not reported).
|
||||
"""
|
||||
if not sf or not bw or not cr:
|
||||
return None
|
||||
return f"SF{int(sf)}/BW{int(bw)}/CR{int(cr)}"
|
||||
|
||||
|
||||
def _self_info_to_node_dict(self_info: dict) -> dict:
|
||||
"""Convert a MeshCore ``SELF_INFO`` payload to a Meshtastic-ish node dict.
|
||||
|
||||
Parameters:
|
||||
self_info: Payload dict from the ``SELF_INFO`` event. Expected keys
|
||||
include ``name``, ``public_key``, ``adv_type`` (``ADV_TYPE_*``),
|
||||
``adv_lat``, and ``adv_lon``.
|
||||
|
||||
Returns:
|
||||
Node dict compatible with the ``POST /api/nodes`` payload format.
|
||||
"""
|
||||
name = (self_info.get("name") or "").strip()
|
||||
pub_key = self_info.get("public_key", "")
|
||||
node_id = _meshcore_node_id(pub_key)
|
||||
role = _meshcore_adv_type_to_role(self_info.get("adv_type"))
|
||||
node: dict = {
|
||||
"lastHeard": int(time.time()),
|
||||
"protocol": "meshcore",
|
||||
"user": {
|
||||
"longName": name,
|
||||
"shortName": _meshcore_short_name(node_id),
|
||||
"publicKey": pub_key,
|
||||
**({"role": role} if role is not None else {}),
|
||||
},
|
||||
}
|
||||
lat = self_info.get("adv_lat")
|
||||
lon = self_info.get("adv_lon")
|
||||
if lat is not None and lon is not None and (lat or lon):
|
||||
node["position"] = {"latitude": lat, "longitude": lon, "time": int(time.time())}
|
||||
return node
|
||||
@@ -1,324 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Event-handler closures for MeshCore protocol messages."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
|
||||
from ... import config, ingestors as _ingestors
|
||||
from .decode import _contact_to_node_dict, _derive_modem_preset, _self_info_to_node_dict
|
||||
from .identity import _derive_synthetic_node_id, _meshcore_node_id
|
||||
from .interface import _MeshcoreInterface
|
||||
from .messages import (
|
||||
_derive_message_id,
|
||||
_extract_mention_names,
|
||||
_parse_sender_name,
|
||||
_synthetic_node_dict,
|
||||
)
|
||||
from .position import _store_meshcore_position
|
||||
|
||||
|
||||
def _process_self_info(
|
||||
payload: dict, iface: _MeshcoreInterface, handlers: object
|
||||
) -> None:
|
||||
"""Apply a ``SELF_INFO`` payload: set host_node_id, upsert the host node,
|
||||
and capture LoRa radio metadata into the shared config cache.
|
||||
|
||||
Parameters:
|
||||
payload: Event payload dict containing at minimum ``public_key`` and
|
||||
optionally ``name``, ``adv_lat``, ``adv_lon``, ``radio_freq``,
|
||||
``radio_bw``, ``radio_sf``, ``radio_cr``.
|
||||
iface: Active interface whose :attr:`host_node_id` will be updated.
|
||||
handlers: Module reference for :func:`~data.mesh_ingestor.handlers`
|
||||
functions (passed to avoid circular-import issues).
|
||||
"""
|
||||
# Cache the payload so node_snapshot_items / self_node_item can use it later.
|
||||
iface._self_info_payload = payload
|
||||
|
||||
pub_key = payload.get("public_key", "")
|
||||
node_id = _meshcore_node_id(pub_key)
|
||||
|
||||
# Capture radio metadata BEFORE upserting the node so that
|
||||
# _apply_radio_metadata_to_nodes finds populated values on the very first
|
||||
# SELF_INFO. Never overwrite a previously cached value.
|
||||
radio_freq = payload.get("radio_freq")
|
||||
if radio_freq is not None and getattr(config, "LORA_FREQ", None) is None:
|
||||
config.LORA_FREQ = radio_freq
|
||||
modem_preset = _derive_modem_preset(
|
||||
payload.get("radio_sf"), payload.get("radio_bw"), payload.get("radio_cr")
|
||||
)
|
||||
if modem_preset is not None and getattr(config, "MODEM_PRESET", None) is None:
|
||||
config.MODEM_PRESET = modem_preset
|
||||
|
||||
if node_id:
|
||||
iface.host_node_id = node_id
|
||||
handlers.register_host_node_id(node_id)
|
||||
# Queue the ingestor registration BEFORE any node upserts so the web
|
||||
# backend assigns the correct protocol to all subsequent records.
|
||||
# Radio metadata (LORA_FREQ, MODEM_PRESET) is captured just above and
|
||||
# will be included in the heartbeat payload by queue_ingestor_heartbeat.
|
||||
_ingestors.queue_ingestor_heartbeat(force=True, node_id=node_id)
|
||||
handlers.upsert_node(node_id, _self_info_to_node_dict(payload))
|
||||
lat = payload.get("adv_lat")
|
||||
lon = payload.get("adv_lon")
|
||||
if lat is not None and lon is not None and (lat or lon):
|
||||
_store_meshcore_position(
|
||||
node_id, lat, lon, int(time.time()), handlers.host_node_id()
|
||||
)
|
||||
|
||||
config._debug_log(
|
||||
"MeshCore radio metadata captured",
|
||||
context="meshcore.self_info.radio",
|
||||
severity="info",
|
||||
lora_freq=radio_freq,
|
||||
modem_preset=modem_preset,
|
||||
)
|
||||
|
||||
handlers._mark_packet_seen()
|
||||
config._debug_log(
|
||||
"MeshCore self-info received",
|
||||
context="meshcore.self_info",
|
||||
node_id=node_id,
|
||||
name=payload.get("name"),
|
||||
)
|
||||
|
||||
|
||||
def _process_contacts(
|
||||
contacts: dict, iface: _MeshcoreInterface, handlers: object
|
||||
) -> None:
|
||||
"""Apply a bulk ``CONTACTS`` payload: update the local snapshot and upsert nodes.
|
||||
|
||||
Parameters:
|
||||
contacts: Mapping of full ``public_key`` hex strings to contact dicts.
|
||||
iface: Active interface whose contact snapshot will be updated.
|
||||
handlers: Module reference for :func:`~data.mesh_ingestor.handlers`.
|
||||
"""
|
||||
for pub_key, contact in contacts.items():
|
||||
node_id = _meshcore_node_id(pub_key)
|
||||
if node_id is None:
|
||||
continue
|
||||
iface._update_contact(contact)
|
||||
handlers.upsert_node(node_id, _contact_to_node_dict(contact))
|
||||
lat = contact.get("adv_lat")
|
||||
lon = contact.get("adv_lon")
|
||||
if lat is not None and lon is not None and (lat or lon):
|
||||
_store_meshcore_position(
|
||||
node_id,
|
||||
lat,
|
||||
lon,
|
||||
contact.get("last_advert"),
|
||||
handlers.host_node_id(),
|
||||
)
|
||||
handlers._mark_packet_seen()
|
||||
|
||||
|
||||
def _process_contact_update(
|
||||
contact: dict, iface: _MeshcoreInterface, handlers: object
|
||||
) -> None:
|
||||
"""Apply a single ``NEW_CONTACT`` or ``NEXT_CONTACT`` event.
|
||||
|
||||
Parameters:
|
||||
contact: Contact dict containing at minimum ``public_key``.
|
||||
iface: Active interface whose contact snapshot will be updated.
|
||||
handlers: Module reference for :func:`~data.mesh_ingestor.handlers`.
|
||||
"""
|
||||
pub_key = contact.get("public_key", "")
|
||||
node_id = _meshcore_node_id(pub_key)
|
||||
if node_id is None:
|
||||
return
|
||||
iface._update_contact(contact)
|
||||
handlers.upsert_node(node_id, _contact_to_node_dict(contact))
|
||||
lat = contact.get("adv_lat")
|
||||
lon = contact.get("adv_lon")
|
||||
if lat is not None and lon is not None and (lat or lon):
|
||||
_store_meshcore_position(
|
||||
node_id,
|
||||
lat,
|
||||
lon,
|
||||
contact.get("last_advert"),
|
||||
handlers.host_node_id(),
|
||||
)
|
||||
handlers._mark_packet_seen()
|
||||
config._debug_log(
|
||||
"MeshCore contact updated",
|
||||
context="meshcore.contact",
|
||||
node_id=node_id,
|
||||
name=contact.get("adv_name"),
|
||||
)
|
||||
|
||||
|
||||
def _make_event_handlers(iface: _MeshcoreInterface, target: str | None) -> dict:
    """Build async callbacks for each relevant MeshCore event type.

    All callbacks are closures over *iface* and *target* so they can update
    connection state and forward data to the ingest queue without global state.

    Parameters:
        iface: The active :class:`_MeshcoreInterface` instance.
        target: Human-readable connection target for log messages.

    Returns:
        Mapping of ``EventType`` member name → async callback coroutine.
    """
    # Deferred imports to avoid a circular dependency: meshcore is imported by
    # protocols/__init__.py which is imported by the top-level mesh_ingestor
    # package, while handlers.py and channels.py import from that same package.
    from ... import channels as _channels
    from ... import handlers as _handlers

    async def on_channel_info(evt) -> None:
        """Register a channel index → name mapping from a CHANNEL_INFO event."""
        payload = evt.payload or {}
        idx = payload.get("channel_idx")
        name = payload.get("channel_name", "")
        # Skip entries with a missing index or an empty name.
        if idx is not None and name:
            _channels.register_channel(idx, name)

    async def on_self_info(evt) -> None:
        """Forward a SELF_INFO payload to the shared self-info processor."""
        _process_self_info(evt.payload or {}, iface, _handlers)

    async def on_contacts(evt) -> None:
        """Forward a full CONTACTS roster payload to the contacts processor."""
        _process_contacts(evt.payload or {}, iface, _handlers)

    async def on_contact_update(evt) -> None:
        """Forward a NEW_CONTACT / NEXT_CONTACT payload to the update processor."""
        _process_contact_update(evt.payload or {}, iface, _handlers)

    async def on_channel_msg(evt) -> None:
        """Ingest a broadcast channel message (CHANNEL_MSG_RECV)."""
        payload = evt.payload or {}
        sender_ts = payload.get("sender_timestamp")
        text = payload.get("text")
        # Both the sender timestamp and a non-empty text are required to
        # build a stable message fingerprint; drop the event otherwise.
        if sender_ts is None or not text:
            return

        rx_time = int(time.time())
        channel_idx = payload.get("channel_idx", 0)

        # MeshCore channel messages carry no sender identifier in the event
        # payload. Try to resolve the sender from the "SenderName: body"
        # convention embedded in the message text, matched against the known
        # contacts roster. When the contacts roster does not yet contain the
        # sender, create a synthetic placeholder node so that the message
        # receives a stable from_id and the UI can render a badge immediately.
        # The web app will migrate messages to the real node ID once the sender
        # is seen via a contact advertisement.
        sender_name = _parse_sender_name(text)
        from_id = iface.lookup_node_id_by_name(sender_name) if sender_name else None
        if from_id is None and sender_name:
            synthetic_id = _derive_synthetic_node_id(sender_name)
            # Only POST each synthetic node once per session.
            if synthetic_id not in iface._synthetic_node_ids:
                _handlers.upsert_node(synthetic_id, _synthetic_node_dict(sender_name))
                iface._synthetic_node_ids.add(synthetic_id)
            from_id = synthetic_id

        # Upsert synthetic placeholder nodes for any @[Name] mentions in the
        # message body whose names are not yet in the contacts roster. This
        # ensures mention badges resolve even before the mentioned node is seen.
        for mention_name in _extract_mention_names(text):
            if not iface.lookup_node_id_by_name(mention_name):
                mention_id = _derive_synthetic_node_id(mention_name)
                if mention_id not in iface._synthetic_node_ids:
                    _handlers.upsert_node(
                        mention_id, _synthetic_node_dict(mention_name)
                    )
                    iface._synthetic_node_ids.add(mention_id)

        # The dedup fingerprint uses the parsed sender name (lowercased and
        # stripped) rather than ``from_id``: each ingestor independently
        # resolves Alice to either her real ``!aabbccdd`` (when she is in its
        # contact roster) or to a synthetic id derived from her name; the
        # parsed name lives in the message text itself, so it is identical
        # across all receivers regardless of roster state.
        sender_identity = (sender_name or "").strip().lower()

        packet = {
            "id": _derive_message_id(
                sender_identity, sender_ts, f"c{channel_idx}", text
            ),
            "rxTime": rx_time,
            "rx_time": rx_time,
            "from_id": from_id,
            "to_id": "^all",
            "channel": channel_idx,
            # NOTE(review): payload keys are upper-case "SNR"/"RSSI" here,
            # matching what the meshcore event payload supplies — confirm
            # against the meshcore_py event schema.
            "snr": payload.get("SNR"),
            "rssi": payload.get("RSSI"),
            "protocol": "meshcore",
            "decoded": {
                "portnum": "TEXT_MESSAGE_APP",
                "text": text,
                "channel": channel_idx,
            },
        }
        _handlers._mark_packet_seen()
        _handlers.store_packet_dict(packet)
        config._debug_log(
            "MeshCore channel message",
            context="meshcore.channel_msg",
            channel=channel_idx,
            sender=sender_name,
            from_id=from_id,
        )

    async def on_contact_msg(evt) -> None:
        """Ingest a direct message addressed to the host (CONTACT_MSG_RECV)."""
        payload = evt.payload or {}
        sender_ts = payload.get("sender_timestamp")
        text = payload.get("text")
        if sender_ts is None or not text:
            return

        rx_time = int(time.time())
        pubkey_prefix = payload.get("pubkey_prefix", "")
        from_id = iface.lookup_node_id(pubkey_prefix)
        # ``pubkey_prefix`` is already a sender-side stable identifier (the
        # first six bytes of the sender's public key); ``"dm"`` namespaces
        # direct messages so they cannot collide with channel messages that
        # happen to share the other components.
        packet = {
            "id": _derive_message_id(pubkey_prefix or "", sender_ts, "dm", text),
            "rxTime": rx_time,
            "rx_time": rx_time,
            "from_id": from_id,
            "to_id": iface.host_node_id,
            "channel": 0,
            "snr": payload.get("SNR"),
            "protocol": "meshcore",
            "decoded": {
                "portnum": "TEXT_MESSAGE_APP",
                "text": text,
                "channel": 0,
            },
        }
        _handlers._mark_packet_seen()
        _handlers.store_packet_dict(packet)

    async def on_disconnected(evt) -> None:
        """Mark the interface disconnected and log the event."""
        iface.isConnected = False
        config._debug_log(
            "MeshCore node disconnected",
            context="meshcore.disconnect",
            target=target or "unknown",
            severity="warning",
            always=True,
        )

    return {
        "CHANNEL_INFO": on_channel_info,
        "SELF_INFO": on_self_info,
        "CONTACTS": on_contacts,
        "NEW_CONTACT": on_contact_update,
        "NEXT_CONTACT": on_contact_update,
        "CHANNEL_MSG_RECV": on_channel_msg,
        "CONTACT_MSG_RECV": on_contact_msg,
        "DISCONNECTED": on_disconnected,
    }
|
||||
@@ -1,125 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Pure helpers that derive canonical MeshCore node identifiers.
|
||||
|
||||
These helpers are deterministic and side-effect-free so they can be imported
|
||||
from anywhere in the MeshCore package without circular concerns.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
|
||||
from ._constants import _MESHCORE_ADV_TYPE_ROLE
|
||||
|
||||
|
||||
def _meshcore_node_id(public_key_hex: str | None) -> str | None:
|
||||
"""Derive a canonical ``!xxxxxxxx`` node ID from a MeshCore public key.
|
||||
|
||||
Uses the first four bytes (eight hex characters) of the 32-byte public
|
||||
key, formatted as ``!xxxxxxxx``.
|
||||
|
||||
Parameters:
|
||||
public_key_hex: 64-character lowercase hex string for the node's
|
||||
public key as returned by the MeshCore library.
|
||||
|
||||
Returns:
|
||||
Canonical ``!xxxxxxxx`` node ID string, or ``None`` when the key is
|
||||
absent or too short.
|
||||
"""
|
||||
if not public_key_hex or len(public_key_hex) < 8:
|
||||
return None
|
||||
return "!" + public_key_hex[:8].lower()
|
||||
|
||||
|
||||
def _meshcore_short_name(node_id: str | None) -> str:
|
||||
"""Derive a four-character short name from a canonical node ID.
|
||||
|
||||
Uses the first two bytes (four hex characters) of the ``!xxxxxxxx`` node
|
||||
ID. This keeps the short name consistent with the node ID itself — if the
|
||||
node ID is later replaced when the real public key is heard, the short name
|
||||
will update alongside it.
|
||||
|
||||
Parameters:
|
||||
node_id: Canonical ``!xxxxxxxx`` node ID string (as returned by
|
||||
:func:`_meshcore_node_id`).
|
||||
|
||||
Returns:
|
||||
Four lowercase hex characters (e.g. ``"cafe"``), or an empty string
|
||||
when the node ID is missing or too short.
|
||||
"""
|
||||
if not node_id:
|
||||
return ""
|
||||
raw = node_id.lstrip("!")
|
||||
if len(raw) < 4:
|
||||
return ""
|
||||
return raw[:4].lower()
|
||||
|
||||
|
||||
def _meshcore_adv_type_to_role(adv_type: object) -> str | None:
    """Translate a MeshCore ``ADV_TYPE_*`` code into a dashboard role string.

    The numeric codes follow MeshCore firmware ``AdvertDataHelpers.h``
    (``ADV_TYPE_CHAT``, ``ADV_TYPE_REPEATER``, …) and map onto the role keys
    used by the web dashboard's MeshCore palette (``COMPANION``,
    ``REPEATER``, …).

    Parameters:
        adv_type: Raw type byte from meshcore_py (typically ``int`` 0–4).
            Anything that is not an ``int`` (e.g. ``float``, ``None``) is
            rejected, and integer codes missing from the mapping table —
            such as future firmware types — yield ``None`` until the table
            is updated.

    Returns:
        Uppercase role string, or ``None`` when the value is unknown or
        should not override the web default (``ADV_TYPE_NONE`` /
        unrecognised).
    """
    if isinstance(adv_type, int):
        return _MESHCORE_ADV_TYPE_ROLE.get(adv_type)
    return None
|
||||
|
||||
|
||||
def _derive_synthetic_node_id(long_name: str) -> str:
|
||||
"""Derive a deterministic synthetic ``!xxxxxxxx`` node ID from a long name.
|
||||
|
||||
Uses the first four bytes of SHA-256(UTF-8 encoded name), formatted as
|
||||
``!xxxxxxxx``. The same long name always produces the same ID across
|
||||
restarts. The probability of collision with a real public-key-derived ID
|
||||
is ~1 in 4 billion per pair, which is negligible in practice.
|
||||
|
||||
Parameters:
|
||||
long_name: Node long name used as the hash input.
|
||||
|
||||
Returns:
|
||||
Canonical ``!xxxxxxxx`` node ID string.
|
||||
"""
|
||||
return "!" + hashlib.sha256(long_name.encode("utf-8")).hexdigest()[:8]
|
||||
|
||||
|
||||
def _pubkey_prefix_to_node_id(contacts: dict, pubkey_prefix: str) -> str | None:
    """Resolve a six-byte public-key prefix to a canonical node ID.

    Parameters:
        contacts: Mapping of full ``public_key`` hex strings to contact dicts.
        pubkey_prefix: Twelve-character hex string (six bytes) as used in
            MeshCore direct-message events.

    Returns:
        Canonical ``!xxxxxxxx`` node ID for the first matching contact, or
        ``None`` when no contact's public key starts with *pubkey_prefix*.
    """
    match = next(
        (key for key in contacts if key.startswith(pubkey_prefix)),
        None,
    )
    return None if match is None else _meshcore_node_id(match)
|
||||
@@ -1,159 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Live MeshCore interface and the connection-stage shutdown sentinel."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import threading
|
||||
|
||||
from .decode import _contact_to_node_dict
|
||||
from .identity import _meshcore_node_id, _pubkey_prefix_to_node_id
|
||||
|
||||
|
||||
class ClosedBeforeConnectedError(ConnectionError):
    """Signal that the interface was closed while the handshake was pending.

    Raised when :meth:`_MeshcoreInterface.close` runs before the connection
    coroutine has finished the device handshake. Subclassing
    :exc:`ConnectionError` keeps existing callers that only handle the base
    class working, while giving callers that must tell a user-initiated
    shutdown apart from a hardware failure a dedicated type to catch.
    """
|
||||
|
||||
|
||||
class _MeshcoreInterface:
    """Live MeshCore interface managing an asyncio event loop in a background thread.

    Holds connection state, a thread-safe snapshot of known contacts, and the
    handles needed to shut down cleanly when the daemon requests a disconnect.
    """

    host_node_id: str | None = None
    """Canonical ``!xxxxxxxx`` identifier for the connected host device."""

    def __init__(self, *, target: str | None) -> None:
        """Initialise the interface with the connection *target*.

        No I/O happens here: the event loop, thread, and stop event are
        populated later by the provider's connect path.
        """
        self._target = target
        # Underlying meshcore library object, set once connected.
        self._mc: object | None = None
        # Event loop running in the background thread (set by the runner).
        self._loop: asyncio.AbstractEventLoop | None = None
        # Background daemon thread that drives the event loop.
        self._thread: threading.Thread | None = None
        # Loop-local event used to request a clean shutdown.
        self._stop_event: asyncio.Event | None = None
        # Lock guarding ``_contacts`` — contacts are written from the asyncio
        # thread and read from the daemon thread.
        self._contacts_lock = threading.Lock()
        self._contacts: dict = {}
        self.isConnected: bool = False
        # Tracks synthetic node IDs already upserted this session to avoid
        # repeating the HTTP POST for every message from the same unknown sender.
        # This set is reset on reconnect (because _MeshcoreInterface is recreated),
        # which may cause extra upserts after a disconnect — the ON CONFLICT guard
        # in the Ruby web app ensures those are idempotent and safe.
        self._synthetic_node_ids: set[str] = set()
        self._self_info_payload: dict | None = None
        """Most recent SELF_INFO payload received from the device, or ``None``."""

    # ------------------------------------------------------------------
    # Contact management (called from the asyncio thread)
    # ------------------------------------------------------------------

    def _update_contact(self, contact: dict) -> None:
        """Thread-safely add or update a contact in the local snapshot.

        Contacts without a ``public_key`` are silently ignored since they
        cannot be keyed.

        Parameters:
            contact: Contact dict from a ``CONTACTS``, ``NEW_CONTACT``, or
                ``NEXT_CONTACT`` event.
        """
        pub_key = contact.get("public_key")
        if pub_key:
            with self._contacts_lock:
                self._contacts[pub_key] = contact

    def contacts_snapshot(self) -> list[tuple[str, dict]]:
        """Return a thread-safe snapshot of all known contacts as node entries.

        The dict items are copied under the lock; conversion to node dicts
        happens outside the lock to keep the critical section short.

        Returns:
            List of ``(canonical_node_id, node_dict)`` pairs, skipping any
            contact whose public key cannot be mapped to a valid node ID.
        """
        with self._contacts_lock:
            items = list(self._contacts.items())
        result = []
        for pub_key, contact in items:
            node_id = _meshcore_node_id(pub_key)
            if node_id is not None:
                result.append((node_id, _contact_to_node_dict(contact)))
        return result

    def lookup_node_id(self, pubkey_prefix: str) -> str | None:
        """Return the canonical node ID for the contact matching *pubkey_prefix*.

        Parameters:
            pubkey_prefix: Twelve-character hex string (six bytes) from a
                ``CONTACT_MSG_RECV`` event.

        Returns:
            Canonical ``!xxxxxxxx`` node ID, or ``None`` when no match.
        """
        with self._contacts_lock:
            return _pubkey_prefix_to_node_id(self._contacts, pubkey_prefix)

    def lookup_node_id_by_name(self, adv_name: str) -> str | None:
        """Return the canonical node ID for the contact whose ``adv_name`` matches.

        Used to resolve the sender of a MeshCore channel message from the
        ``"SenderName: body"`` text prefix when no ``pubkey_prefix`` is
        available in the event payload. The comparison is case-sensitive
        because ``adv_name`` values come verbatim from the MeshCore firmware.

        Parameters:
            adv_name: Advertised name to look up. Leading and trailing
                whitespace is stripped before comparison.

        Returns:
            Canonical ``!xxxxxxxx`` node ID, or ``None`` when no contact with
            that name is known.
        """
        name = adv_name.strip() if adv_name else ""
        if not name:
            return None
        with self._contacts_lock:
            # Linear scan — the contacts roster is expected to be small.
            for pub_key, contact in self._contacts.items():
                contact_name = (contact.get("adv_name") or "").strip()
                if contact_name == name:
                    return _meshcore_node_id(pub_key)
        return None

    # ------------------------------------------------------------------
    # Lifecycle
    # ------------------------------------------------------------------

    def close(self) -> None:
        """Signal the background event loop to stop and wait for the thread.

        Safe to call multiple times and from any thread.
        """
        self.isConnected = False
        loop = self._loop
        stop_event = self._stop_event
        if loop is not None and not loop.is_closed():
            try:
                # call_soon_threadsafe is required because close() may run on
                # a different thread than the one driving the loop.
                if stop_event is not None:
                    loop.call_soon_threadsafe(stop_event.set)
                else:
                    loop.call_soon_threadsafe(loop.stop)
            except RuntimeError:
                # The loop can close between the is_closed() check and the
                # call above; that race is benign during shutdown.
                pass
        thread = self._thread
        if thread is not None and thread.is_alive():
            # Bounded join so a wedged loop thread cannot hang shutdown.
            thread.join(timeout=5.0)
|
||||
@@ -1,130 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Sender-side fingerprinting and parsing helpers for MeshCore messages."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import time
|
||||
|
||||
from ._constants import _MENTION_RE, _MESHCORE_ID_MASK
|
||||
|
||||
|
||||
def _derive_message_id(
    sender_identity: str,
    sender_ts: int,
    discriminator: str,
    text: str,
) -> int:
    """Compute a stable 53-bit message ID from sender-side MeshCore fields.

    MeshCore assigns no firmware-side packet IDs, so this deterministic
    fingerprint of a physical transmission lets the same packet heard by
    multiple ingestors collapse to a single ``messages`` row through the
    ``messages.id`` PRIMARY KEY upsert path. Every fingerprint component is
    sender-side, so receivers with different clocks or roster state compute
    the same value.

    Parameters:
        sender_identity: Stable sender identifier shared across receivers.
            For channel messages this is the lowercased+stripped sender name
            parsed from the message text via :func:`_parse_sender_name`; for
            direct messages it is the sender's MeshCore ``pubkey_prefix``.
            Must be a string (use ``""`` when unavailable).
        sender_ts: Unix timestamp from the sender's clock (identical across
            receivers regardless of receiver-side clock skew).
        discriminator: Namespace tag separating message classes that could
            otherwise collide. ``"c<N>"`` is reserved for channel messages
            on channel ``N``; ``"dm"`` is reserved for direct messages.
        text: Message text exactly as transmitted by the sender.

    Returns:
        A non-negative 53-bit integer suitable for the ``id`` column, bounded
        by ``0 <= id <= (1 << 53) - 1`` so it survives the JSON → JavaScript
        number round-trip without precision loss.
    """
    # The ``v1:`` prefix lets us evolve the fingerprint format (e.g. add a
    # channel-secret hash) by bumping to ``v2:`` without colliding with
    # existing ids written under the v1 scheme.
    material = ":".join(("v1", sender_identity, str(sender_ts), discriminator, text))
    checksum = hashlib.sha256(material.encode("utf-8", errors="replace")).digest()
    return int.from_bytes(checksum[:7], byteorder="big") & _MESHCORE_ID_MASK
|
||||
|
||||
|
||||
def _parse_sender_name(text: str) -> str | None:
|
||||
"""Extract the sender name from a MeshCore channel message text.
|
||||
|
||||
MeshCore channel messages use the convention ``"SenderName: body"``.
|
||||
Only the first colon is treated as the separator; colons that appear in the
|
||||
body are preserved. The sender name is stripped of leading and trailing
|
||||
whitespace.
|
||||
|
||||
Parameters:
|
||||
text: Raw message text as stored in the database.
|
||||
|
||||
Returns:
|
||||
Stripped sender name string, or ``None`` when the text does not
|
||||
contain a colon or the portion before the colon is blank.
|
||||
"""
|
||||
colon_idx = text.find(":")
|
||||
if colon_idx < 0:
|
||||
return None
|
||||
name = text[:colon_idx].strip()
|
||||
return name if name else None
|
||||
|
||||
|
||||
def _extract_mention_names(text: str) -> list[str]:
    """Collect every ``@[Name]`` mention name found in a message body.

    Parameters:
        text: Raw message text that may contain ``@[Name]`` mention patterns.

    Returns:
        List of extracted name strings, empty when no mentions are present.
    """
    return list(_MENTION_RE.findall(text))
|
||||
|
||||
|
||||
def _synthetic_node_dict(long_name: str) -> dict:
|
||||
"""Build a synthetic node dict for an unknown MeshCore channel sender.
|
||||
|
||||
Synthetic nodes are placeholder entries created when a channel message
|
||||
arrives from a sender who is not yet in the connected device's contacts
|
||||
roster. They carry ``role=COMPANION`` (the only role capable of sending
|
||||
channel messages). The short name is intentionally omitted here — the
|
||||
Ruby web app derives it at query time via
|
||||
``meshcore_companion_display_short_name`` for all COMPANION nodes.
|
||||
|
||||
When the real contact advertisement is later received, the Ruby web app
|
||||
detects the matching long name, migrates all messages from the synthetic
|
||||
node ID to the real one, and removes the placeholder row.
|
||||
|
||||
Parameters:
|
||||
long_name: Sender name parsed from the ``"SenderName: body"`` prefix.
|
||||
|
||||
Returns:
|
||||
Node dict compatible with the ``POST /api/nodes`` payload format,
|
||||
with ``user.synthetic`` set to ``True``.
|
||||
"""
|
||||
return {
|
||||
"lastHeard": int(time.time()),
|
||||
"protocol": "meshcore",
|
||||
"user": {
|
||||
"longName": long_name,
|
||||
"shortName": "",
|
||||
"role": "COMPANION",
|
||||
"synthetic": True,
|
||||
},
|
||||
}
|
||||
@@ -1,69 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Forward MeshCore advertised positions to ``POST /api/positions``."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import time
|
||||
|
||||
from ... import queue as _queue
|
||||
from ...serialization import _iso, _node_num_from_id
|
||||
|
||||
|
||||
def _store_meshcore_position(
    node_id: str,
    lat: float,
    lon: float,
    position_time: int | None,
    ingestor: str | None,
) -> None:
    """Queue a ``POST /api/positions`` payload for an advertised position.

    MeshCore embeds position data in contact advertisements rather than
    issuing dedicated position packets. A stable pseudo-ID derived from the
    node identity plus the position timestamp lets the web app's
    ``ON CONFLICT`` clause de-duplicate repeated advertisements of the same
    position idempotently.

    Parameters:
        node_id: Canonical ``!xxxxxxxx`` node identifier.
        lat: Latitude in decimal degrees.
        lon: Longitude in decimal degrees.
        position_time: Unix timestamp from the contact's ``last_advert``
            field, or ``None`` to fall back to the current wall-clock time.
        ingestor: Canonical node ID of the host ingestor, or ``None``.
    """
    rx_time = int(time.time())
    effective_time = position_time or rx_time
    # Stable 63-bit pseudo-ID unique to (node, position_time): repeated
    # advertisements of the same position collapse to one row while
    # different nodes cannot collide.
    fingerprint = hashlib.sha256(f"{node_id}:{effective_time}".encode()).digest()
    pseudo_id = int.from_bytes(fingerprint[:8], "big") & 0x7FFFFFFFFFFFFFFF
    _queue._queue_post_json(
        "/api/positions",
        {
            "id": pseudo_id,
            "rx_time": rx_time,
            "rx_iso": _iso(rx_time),
            "node_id": node_id,
            "node_num": _node_num_from_id(node_id),
            "from_id": node_id,
            "latitude": lat,
            "longitude": lon,
            "position_time": effective_time,
            "ingestor": ingestor,
        },
    )
|
||||
@@ -1,196 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Public ``MeshcoreProvider`` satisfying the :class:`MeshProtocol` interface."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import sys
|
||||
import threading
|
||||
|
||||
from ... import config
|
||||
from ._constants import _CONNECT_TIMEOUT_SECS
|
||||
from .decode import _self_info_to_node_dict
|
||||
from .identity import _meshcore_node_id
|
||||
from .interface import _MeshcoreInterface
|
||||
|
||||
|
||||
class MeshcoreProvider:
|
||||
"""MeshCore ingestion provider.
|
||||
|
||||
Connects to a MeshCore node via serial port, BLE, or TCP/IP. The
|
||||
connection type is inferred from the target string; see :meth:`connect`
|
||||
for routing rules.
|
||||
|
||||
The provider runs MeshCore's ``asyncio`` event loop in a background daemon
|
||||
thread. Incoming ``SELF_INFO``, ``CONTACTS``, ``NEW_CONTACT``,
|
||||
``CHANNEL_MSG_RECV``, and ``CONTACT_MSG_RECV`` events are forwarded to the
|
||||
HTTP ingest queue via the shared handler functions.
|
||||
"""
|
||||
|
||||
name = "meshcore"
|
||||
|
||||
def subscribe(self) -> list[str]:
|
||||
"""Return subscribed topic names.
|
||||
|
||||
MeshCore uses an ``asyncio`` event system rather than a pubsub bus,
|
||||
so there are no topics to register at startup.
|
||||
"""
|
||||
return []
|
||||
|
||||
    def connect(
        self, *, active_candidate: str | None
    ) -> tuple[object, str | None, str | None]:
        """Connect to a MeshCore node via serial, BLE, or TCP.

        Starts an asyncio event loop in a background daemon thread, performs
        the MeshCore companion-protocol handshake, and blocks until the node's
        self-info is received or the timeout expires.

        Connection type is inferred from *active_candidate* (or
        :data:`~data.mesh_ingestor.config.CONNECTION`):

        * BLE MAC / UUID → :class:`meshcore.BLEConnection`
        * ``host:port`` → :class:`meshcore.TCPConnection`
        * serial path → :class:`meshcore.SerialConnection`
        * ``None`` / empty → first candidate from
          :func:`~data.mesh_ingestor.connection.default_serial_targets`

        Parameters:
            active_candidate: Previously resolved connection target, or
                ``None`` to fall back to
                :data:`~data.mesh_ingestor.config.CONNECTION`.

        Returns:
            ``(iface, resolved_target, next_active_candidate)`` matching the
            :class:`~data.mesh_ingestor.provider.Provider` contract.

        Raises:
            ConnectionError: When the node does not complete the handshake
                within :data:`_CONNECT_TIMEOUT_SECS` seconds.
        """
        target: str | None = active_candidate or config.CONNECTION

        if not target:
            # Look up via the package so test fakes installed via
            # ``monkeypatch.setattr(mod, "default_serial_targets", ...)`` apply.
            pkg = sys.modules["data.mesh_ingestor.protocols.meshcore"]
            candidates = pkg.default_serial_targets()
            target = candidates[0] if candidates else "/dev/ttyACM0"

        config._debug_log(
            "Connecting to MeshCore node",
            context="meshcore.connect",
            target=target,
        )

        iface = _MeshcoreInterface(target=target)
        # Set by the background runner once the handshake completes.
        connected_event = threading.Event()
        # One-slot mailbox the loop thread uses to hand back a
        # connection-stage exception for re-raising on this thread.
        error_holder: list = [None]

        # Resolve the runner + asyncio handler via the parent package so test
        # fakes installed via ``monkeypatch.setattr(mod, "_run_meshcore", ...)``
        # apply at call time.
        pkg = sys.modules["data.mesh_ingestor.protocols.meshcore"]

        def _run_loop() -> None:
            # Thread target: owns a fresh event loop for the lifetime of the
            # MeshCore session and guarantees it is closed on exit.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            # Second line of defence around issue #754: if a detached task
            # inside the upstream ``meshcore`` library ever raises an
            # exception we do not anticipate in ``_meshcore_patches``, funnel
            # it through our logger instead of the default handler (which
            # only writes ``Task exception was never retrieved`` to stderr).
            loop.set_exception_handler(pkg._log_unhandled_loop_exception)
            iface._loop = loop
            try:
                loop.run_until_complete(
                    pkg._run_meshcore(iface, target, connected_event, error_holder)
                )
            finally:
                loop.close()

        thread = threading.Thread(target=_run_loop, name="meshcore-loop", daemon=True)
        iface._thread = thread
        thread.start()

        if not connected_event.wait(timeout=_CONNECT_TIMEOUT_SECS):
            iface.close()
            raise ConnectionError(
                f"Timed out waiting for MeshCore node at {target!r} "
                f"after {_CONNECT_TIMEOUT_SECS:g}s."
            )

        if error_holder[0] is not None:
            # The runner signalled the event but reported a handshake error;
            # surface it to the caller after cleaning up the thread/loop.
            iface.close()
            raise error_holder[0]

        return iface, target, target
|
||||
|
||||
def extract_host_node_id(self, iface: object) -> str | None:
|
||||
"""Return the canonical ``!xxxxxxxx`` host node ID from the interface.
|
||||
|
||||
Parameters:
|
||||
iface: Active :class:`_MeshcoreInterface` returned by
|
||||
:meth:`connect`.
|
||||
"""
|
||||
return getattr(iface, "host_node_id", None)
|
||||
|
||||
def self_node_item(self, iface: object) -> tuple[str, dict] | None:
|
||||
"""Return the ``(node_id, node_dict)`` pair for the host self-node.
|
||||
|
||||
Uses the most recently cached ``SELF_INFO`` payload stored on the
|
||||
interface. Returns ``None`` when no SELF_INFO has been received yet
|
||||
or when the public key cannot be mapped to a valid node ID.
|
||||
|
||||
Parameters:
|
||||
iface: Active :class:`_MeshcoreInterface` instance.
|
||||
|
||||
Returns:
|
||||
``(canonical_node_id, node_dict)`` tuple or ``None``.
|
||||
"""
|
||||
if not isinstance(iface, _MeshcoreInterface):
|
||||
return None
|
||||
payload = getattr(iface, "_self_info_payload", None)
|
||||
if not payload:
|
||||
return None
|
||||
node_id = _meshcore_node_id(payload.get("public_key", ""))
|
||||
if not node_id:
|
||||
return None
|
||||
return node_id, _self_info_to_node_dict(payload)
|
||||
|
||||
def node_snapshot_items(self, iface: object) -> list[tuple[str, dict]]:
|
||||
"""Return a snapshot of all known MeshCore contacts as node entries.
|
||||
|
||||
Includes the host self-node when a ``SELF_INFO`` payload has already
|
||||
been received, so that the initial snapshot sent by the daemon
|
||||
covers the local device even when the background event loop delivers
|
||||
``SELF_INFO`` before the snapshot is taken.
|
||||
|
||||
Parameters:
|
||||
iface: Active :class:`_MeshcoreInterface` instance. Any other
|
||||
object type causes an empty list to be returned.
|
||||
|
||||
Returns:
|
||||
List of ``(canonical_node_id, node_dict)`` pairs suitable for
|
||||
passing to :func:`~data.mesh_ingestor.handlers.upsert_node`.
|
||||
"""
|
||||
if not isinstance(iface, _MeshcoreInterface):
|
||||
return []
|
||||
items: list[tuple[str, dict]] = list(iface.contacts_snapshot())
|
||||
self_item = self.self_node_item(iface)
|
||||
if self_item is not None:
|
||||
items.append(self_item)
|
||||
return items
|
||||
@@ -1,152 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Asyncio entry point that drives a MeshCore connection from a worker thread."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import sys
|
||||
import threading
|
||||
|
||||
from ... import config
|
||||
from ._constants import _DEFAULT_BAUDRATE
|
||||
from .channels import _ensure_channel_names
|
||||
from .connection import _make_connection
|
||||
from .handlers import _make_event_handlers
|
||||
from .interface import ClosedBeforeConnectedError, _MeshcoreInterface
|
||||
|
||||
|
||||
async def _run_meshcore(
|
||||
iface: _MeshcoreInterface,
|
||||
target: str,
|
||||
connected_event: threading.Event,
|
||||
error_holder: list,
|
||||
) -> None:
|
||||
"""Connect to a MeshCore node and keep the event loop running until closed.
|
||||
|
||||
This coroutine is the single entry point for the background asyncio thread.
|
||||
It connects the MeshCore library, registers event handlers, fetches the
|
||||
initial contact list, starts auto-message polling, and then waits for the
|
||||
:attr:`_MeshcoreInterface._stop_event` to be set.
|
||||
|
||||
Parameters:
|
||||
iface: Shared interface object for state and contact tracking.
|
||||
target: Resolved, non-empty connection target (serial, BLE, or TCP).
|
||||
connected_event: Threading event signalled when the connection
|
||||
succeeds or fails, to unblock the calling ``connect()`` method.
|
||||
error_holder: Single-element list; set to the raised exception when
|
||||
the connection attempt fails so the caller can re-raise it.
|
||||
"""
|
||||
# Install early so :meth:`_MeshcoreInterface.close` can signal shutdown with
|
||||
# ``stop_event.set()`` instead of ``loop.stop()`` while ``connect()`` or the
|
||||
# ``finally`` disconnect is still running (avoids RuntimeError from
|
||||
# :meth:`asyncio.loop.run_until_complete`).
|
||||
stop_event = asyncio.Event()
|
||||
iface._stop_event = stop_event
|
||||
|
||||
# Resolve meshcore-library symbols via the parent package so test fakes
|
||||
# installed via ``monkeypatch.setattr(mod, "MeshCore", ...)`` apply.
|
||||
pkg = sys.modules["data.mesh_ingestor.protocols.meshcore"]
|
||||
MeshCore = pkg.MeshCore
|
||||
EventType = pkg.EventType
|
||||
|
||||
mc = None
|
||||
try:
|
||||
cx = _make_connection(target, _DEFAULT_BAUDRATE)
|
||||
mc = MeshCore(cx)
|
||||
iface._mc = mc
|
||||
|
||||
handlers_map = _make_event_handlers(iface, target)
|
||||
for event_name, callback in handlers_map.items():
|
||||
mc.subscribe(EventType[event_name], callback)
|
||||
|
||||
_handled_types = frozenset(EventType[n] for n in handlers_map)
|
||||
# Bookkeeping events that require no action and should not be logged.
|
||||
_silent_types = frozenset(
|
||||
{
|
||||
EventType.CONNECTED,
|
||||
EventType.ACK,
|
||||
EventType.OK,
|
||||
EventType.ERROR,
|
||||
EventType.NO_MORE_MSGS,
|
||||
EventType.MESSAGES_WAITING,
|
||||
EventType.MSG_SENT,
|
||||
EventType.CURRENT_TIME,
|
||||
}
|
||||
)
|
||||
|
||||
async def _on_unhandled(evt) -> None:
|
||||
if evt.type in _handled_types or evt.type in _silent_types:
|
||||
return
|
||||
# Look up via the parent package so test fakes installed via
|
||||
# ``monkeypatch.setattr(mod, "_record_meshcore_message", ...)`` apply.
|
||||
pkg._record_meshcore_message(
|
||||
evt.payload,
|
||||
source=f"{target or 'auto'}:{evt.type.name}",
|
||||
)
|
||||
|
||||
mc.subscribe(None, _on_unhandled)
|
||||
|
||||
result = await mc.connect()
|
||||
if result is None:
|
||||
raise ConnectionError(
|
||||
f"MeshCore node at {target!r} did not respond to the appstart "
|
||||
"handshake. Ensure the device is running MeshCore companion-mode "
|
||||
"firmware."
|
||||
)
|
||||
|
||||
if stop_event.is_set():
|
||||
raise ClosedBeforeConnectedError(
|
||||
"Mesh interface close was requested before the connection could be completed."
|
||||
)
|
||||
|
||||
iface.isConnected = True
|
||||
connected_event.set()
|
||||
|
||||
try:
|
||||
await mc.ensure_contacts()
|
||||
except Exception as exc:
|
||||
config._debug_log(
|
||||
"Failed to fetch initial contacts",
|
||||
context="meshcore.contacts",
|
||||
severity="warning",
|
||||
always=True,
|
||||
error=str(exc),
|
||||
)
|
||||
|
||||
try:
|
||||
await _ensure_channel_names(mc)
|
||||
except Exception as exc:
|
||||
config._debug_log(
|
||||
"Failed to fetch channel names",
|
||||
context="meshcore.channels",
|
||||
severity="warning",
|
||||
error=str(exc),
|
||||
)
|
||||
|
||||
await mc.start_auto_message_fetching()
|
||||
|
||||
await stop_event.wait()
|
||||
|
||||
except Exception as exc:
|
||||
if not connected_event.is_set():
|
||||
error_holder[0] = exc
|
||||
connected_event.set()
|
||||
finally:
|
||||
if mc is not None:
|
||||
try:
|
||||
await mc.disconnect()
|
||||
except Exception:
|
||||
pass
|
||||
@@ -1,100 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Meshtastic protocol implementation."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pubsub import pub
|
||||
|
||||
from .. import config, daemon as _daemon, handlers, interfaces
|
||||
from ..utils import _retry_dict_snapshot
|
||||
|
||||
|
||||
class MeshtasticProvider:
|
||||
"""Meshtastic ingestion protocol (current default)."""
|
||||
|
||||
name = "meshtastic"
|
||||
|
||||
def __init__(self):
|
||||
self._subscribed: list[str] = []
|
||||
|
||||
def subscribe(self) -> list[str]:
|
||||
"""Subscribe Meshtastic pubsub receive topics."""
|
||||
|
||||
if self._subscribed:
|
||||
return list(self._subscribed)
|
||||
|
||||
subscribed = []
|
||||
for topic in _daemon._RECEIVE_TOPICS:
|
||||
try:
|
||||
pub.subscribe(handlers.on_receive, topic)
|
||||
subscribed.append(topic)
|
||||
except Exception as exc: # pragma: no cover
|
||||
config._debug_log(f"failed to subscribe to {topic!r}: {exc}")
|
||||
self._subscribed = subscribed
|
||||
return list(subscribed)
|
||||
|
||||
def connect(
|
||||
self, *, active_candidate: str | None
|
||||
) -> tuple[object, str | None, str | None]:
|
||||
"""Create a Meshtastic interface using the existing interface helpers."""
|
||||
|
||||
iface = None
|
||||
resolved_target = None
|
||||
next_candidate = active_candidate
|
||||
|
||||
if active_candidate:
|
||||
iface, resolved_target = interfaces._create_serial_interface(
|
||||
active_candidate
|
||||
)
|
||||
else:
|
||||
iface, resolved_target = interfaces._create_default_interface()
|
||||
next_candidate = resolved_target
|
||||
|
||||
interfaces._ensure_radio_metadata(iface)
|
||||
interfaces._ensure_channel_metadata(iface)
|
||||
|
||||
return iface, resolved_target, next_candidate
|
||||
|
||||
def extract_host_node_id(self, iface: object) -> str | None:
|
||||
return interfaces._extract_host_node_id(iface)
|
||||
|
||||
def node_snapshot_items(self, iface: object) -> list[tuple[str, object]]:
|
||||
"""Return a stable snapshot of all known nodes from ``iface``.
|
||||
|
||||
Uses :func:`~data.mesh_ingestor.utils._retry_dict_snapshot` to
|
||||
tolerate concurrent modifications from the Meshtastic background
|
||||
thread.
|
||||
|
||||
Parameters:
|
||||
iface: Live Meshtastic interface whose ``nodes`` dict to snapshot.
|
||||
|
||||
Returns:
|
||||
List of ``(node_id, node_dict)`` tuples, or an empty list when
|
||||
the snapshot fails after retries.
|
||||
"""
|
||||
|
||||
nodes = getattr(iface, "nodes", {}) or {}
|
||||
result = _retry_dict_snapshot(lambda: list(nodes.items()))
|
||||
if result is None:
|
||||
config._debug_log(
|
||||
"Skipping node snapshot due to concurrent modification",
|
||||
context="meshtastic.snapshot",
|
||||
)
|
||||
return []
|
||||
return result
|
||||
|
||||
|
||||
__all__ = ["MeshtasticProvider"]
|
||||
+30
-348
@@ -73,61 +73,52 @@ def _payload_key_value_pairs(payload: Mapping[str, object]) -> str:
|
||||
return " ".join(pairs)
|
||||
|
||||
|
||||
_INGESTOR_POST_PRIORITY = 0
|
||||
_CHANNEL_POST_PRIORITY = 10
|
||||
_NODE_POST_PRIORITY = 20
|
||||
_MESSAGE_POST_PRIORITY = 30
|
||||
_NEIGHBOR_POST_PRIORITY = 40
|
||||
_TRACE_POST_PRIORITY = 50
|
||||
_POSITION_POST_PRIORITY = 60
|
||||
_TELEMETRY_POST_PRIORITY = 70
|
||||
_MESSAGE_POST_PRIORITY = 10
|
||||
_INGESTOR_POST_PRIORITY = 80
|
||||
_NEIGHBOR_POST_PRIORITY = 20
|
||||
_TRACE_POST_PRIORITY = 25
|
||||
_POSITION_POST_PRIORITY = 30
|
||||
_TELEMETRY_POST_PRIORITY = 40
|
||||
_NODE_POST_PRIORITY = 50
|
||||
_DEFAULT_POST_PRIORITY = 90
|
||||
|
||||
_MAX_SEND_RETRIES = 3
|
||||
"""Maximum number of times a failed POST item is re-queued before being dropped."""
|
||||
|
||||
|
||||
@dataclass
|
||||
class QueueState:
|
||||
"""Mutable state for the HTTP POST priority queue."""
|
||||
|
||||
lock: threading.Lock = field(default_factory=threading.Lock)
|
||||
# Heap tuple: (priority, counter, path, payload, retries).
|
||||
queue: list[tuple[int, int, str, dict, int]] = field(default_factory=list)
|
||||
queue: list[tuple[int, int, str, dict]] = field(default_factory=list)
|
||||
counter: Iterable[int] = field(default_factory=itertools.count)
|
||||
active: bool = False
|
||||
# Background drain thread. When the drainer is alive, _queue_post_json
|
||||
# signals drain_event instead of blocking the caller with HTTP calls.
|
||||
drain_event: threading.Event = field(default_factory=threading.Event)
|
||||
drainer: threading.Thread | None = None
|
||||
# Set to request the drainer thread to exit its loop cleanly.
|
||||
shutdown: threading.Event = field(default_factory=threading.Event)
|
||||
|
||||
|
||||
STATE = QueueState()
|
||||
|
||||
|
||||
def _send_single(
|
||||
instance: str,
|
||||
api_token: str,
|
||||
def _post_json(
|
||||
path: str,
|
||||
payload: dict,
|
||||
) -> bool:
|
||||
"""Transmit a single JSON payload to one instance.
|
||||
*,
|
||||
instance: str | None = None,
|
||||
api_token: str | None = None,
|
||||
) -> None:
|
||||
"""Send a JSON payload to the configured web API.
|
||||
|
||||
Parameters:
|
||||
instance: Base URL of the target instance.
|
||||
api_token: Bearer token for this instance (may be empty).
|
||||
path: API path relative to the instance root.
|
||||
path: API path relative to the configured instance root.
|
||||
payload: JSON-serialisable body to transmit.
|
||||
|
||||
Returns:
|
||||
``True`` when the request succeeded, ``False`` on failure.
|
||||
instance: Optional override for :data:`config.INSTANCE`.
|
||||
api_token: Optional override for :data:`config.API_TOKEN`.
|
||||
"""
|
||||
|
||||
if not instance:
|
||||
return True
|
||||
if instance is None:
|
||||
instance = config.INSTANCE
|
||||
if api_token is None:
|
||||
api_token = config.API_TOKEN
|
||||
|
||||
if not instance:
|
||||
return
|
||||
url = f"{instance}{path}"
|
||||
data = json.dumps(payload).encode("utf-8")
|
||||
|
||||
@@ -152,80 +143,15 @@ def _send_single(
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||
resp.read()
|
||||
return True
|
||||
except Exception as exc:
|
||||
except Exception as exc: # pragma: no cover - exercised in production
|
||||
config._debug_log(
|
||||
"POST request failed",
|
||||
context="queue.post_json",
|
||||
severity="warn",
|
||||
always=True,
|
||||
url=url,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
return False
|
||||
|
||||
|
||||
def _post_json(
|
||||
path: str,
|
||||
payload: dict,
|
||||
*,
|
||||
instance: str | None = None,
|
||||
api_token: str | None = None,
|
||||
) -> bool:
|
||||
"""Send a JSON payload to one or more configured web API instances.
|
||||
|
||||
When ``instance`` is provided explicitly the payload is sent to that
|
||||
single target. Otherwise every ``(url, token)`` pair in
|
||||
:data:`config.INSTANCES` receives the payload independently so that
|
||||
one failure does not block delivery to the remaining targets.
|
||||
|
||||
Parameters:
|
||||
path: API path relative to the instance root.
|
||||
payload: JSON-serialisable body to transmit.
|
||||
instance: Optional single-instance override.
|
||||
api_token: Optional token override (only used with ``instance``).
|
||||
|
||||
Returns:
|
||||
``True`` when at least one instance received the payload
|
||||
successfully, ``False`` when all targets failed. A missing
|
||||
configuration is not a transient failure and returns ``True``
|
||||
(retrying would not help).
|
||||
"""
|
||||
|
||||
if instance is not None:
|
||||
if not instance:
|
||||
return True
|
||||
return _send_single(instance, api_token or "", path, payload)
|
||||
|
||||
targets: tuple[tuple[str, str], ...] = config.INSTANCES
|
||||
if not targets:
|
||||
# Backward-compatible fallback for callers that only set
|
||||
# config.INSTANCE / config.API_TOKEN directly.
|
||||
inst = config.INSTANCE
|
||||
if not inst:
|
||||
try:
|
||||
config._debug_log(
|
||||
"No target instances configured; discarding payload",
|
||||
context="queue.post_json",
|
||||
severity="error",
|
||||
always=True,
|
||||
path=path,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
return False
|
||||
return _send_single(inst, api_token or config.API_TOKEN, path, payload)
|
||||
|
||||
any_ok = False
|
||||
any_attempted = False
|
||||
for inst, token in targets:
|
||||
if not inst:
|
||||
continue
|
||||
any_attempted = True
|
||||
if _send_single(inst, token, path, payload):
|
||||
any_ok = True
|
||||
return any_ok or not any_attempted
|
||||
|
||||
|
||||
def _enqueue_post_json(
|
||||
@@ -234,7 +160,6 @@ def _enqueue_post_json(
|
||||
priority: int,
|
||||
*,
|
||||
state: QueueState = STATE,
|
||||
retries: int = 0,
|
||||
) -> None:
|
||||
"""Store a POST request in the priority queue.
|
||||
|
||||
@@ -243,17 +168,11 @@ def _enqueue_post_json(
|
||||
payload: JSON-serialisable body.
|
||||
priority: Lower values execute first.
|
||||
state: Shared queue state, injectable for testing.
|
||||
retries: Number of prior failed send attempts for this item.
|
||||
"""
|
||||
|
||||
with state.lock:
|
||||
counter = next(state.counter)
|
||||
# Heap tuple: (priority, counter, path, payload, retries). Lower
|
||||
# priority values are dequeued first (min-heap semantics). The
|
||||
# monotonically increasing counter breaks ties so equal-priority
|
||||
# items are processed in FIFO order without comparing the
|
||||
# non-orderable payload dict.
|
||||
heapq.heappush(state.queue, (priority, counter, path, payload, retries))
|
||||
heapq.heappush(state.queue, (priority, counter, path, payload))
|
||||
|
||||
|
||||
def _drain_post_queue(
|
||||
@@ -261,12 +180,6 @@ def _drain_post_queue(
|
||||
) -> None:
|
||||
"""Process queued POST requests in priority order.
|
||||
|
||||
When the *send* callable returns ``False`` (transient failure) the item
|
||||
is re-queued up to :data:`_MAX_SEND_RETRIES` times. Items exceeding
|
||||
the limit are dropped with a warning. Custom *send* callables that
|
||||
return ``None`` (the typical test/heartbeat pattern) are never retried
|
||||
— the ``result is False`` identity check ensures backward compatibility.
|
||||
|
||||
Parameters:
|
||||
state: Queue container holding pending items.
|
||||
send: Optional callable used to transmit requests.
|
||||
@@ -281,184 +194,13 @@ def _drain_post_queue(
|
||||
if not state.queue:
|
||||
state.active = False
|
||||
return
|
||||
item = heapq.heappop(state.queue)
|
||||
|
||||
# Support both 5-tuple (current) and 4-tuple (legacy/test) items.
|
||||
if len(item) >= 5:
|
||||
priority, _idx, path, payload, retries = item[:5]
|
||||
else:
|
||||
priority, _idx, path, payload = item[:4]
|
||||
retries = 0
|
||||
|
||||
result = send(path, payload)
|
||||
|
||||
# Only retry when the send callable explicitly signals failure
|
||||
# (returns False). Custom send callables (tests, heartbeat)
|
||||
# return None and must NOT be treated as failures.
|
||||
if result is False:
|
||||
if retries < _MAX_SEND_RETRIES:
|
||||
_enqueue_post_json(
|
||||
path, payload, priority, state=state, retries=retries + 1
|
||||
)
|
||||
else:
|
||||
try:
|
||||
config._debug_log(
|
||||
"Dropping item after max retries",
|
||||
context="queue.drain",
|
||||
severity="warn",
|
||||
always=True,
|
||||
path=path,
|
||||
retries=retries,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
_priority, _idx, path, payload = heapq.heappop(state.queue)
|
||||
send(path, payload)
|
||||
finally:
|
||||
with state.lock:
|
||||
state.active = False
|
||||
|
||||
|
||||
_QUEUE_DEPTH_WARNING_THRESHOLD = 100
|
||||
"""Log a warning when the queue grows past this many items."""
|
||||
|
||||
|
||||
def _queue_drainer_loop(state: QueueState = STATE) -> None:
|
||||
"""Body of the background queue-drain daemon thread.
|
||||
|
||||
Blocks on :attr:`QueueState.drain_event`, clears it, then empties the
|
||||
queue by calling :func:`_drain_post_queue`. The thread is created as a
|
||||
daemon so it terminates automatically when the process exits.
|
||||
|
||||
The loop exits cleanly when :attr:`QueueState.shutdown` is set, allowing
|
||||
tests (and graceful-shutdown paths) to join the thread instead of leaking
|
||||
daemon threads that accumulate across a test run.
|
||||
|
||||
The loop is deliberately hardened so that **no** :class:`Exception` can
|
||||
kill the thread. The ``_debug_log`` calls inside the error handler are
|
||||
themselves wrapped in ``try/except`` to prevent cascading failures
|
||||
(e.g. ``BrokenPipeError`` from ``print()`` to a closed stdout).
|
||||
|
||||
.. note::
|
||||
There is a benign race between ``drain_event.clear()`` and the end
|
||||
of :func:`_drain_post_queue`: a signal arriving in that window is
|
||||
consumed by ``clear()`` but the item is still drained because the
|
||||
drain loop empties the queue completely. However, an item enqueued
|
||||
*after* the drain loop finds the queue empty and *before*
|
||||
``wait()`` re-blocks will sit until the next ``drain_event.set()``
|
||||
call (i.e. the next enqueue). This is acceptable for a best-effort
|
||||
ingestor — maximum extra latency equals the inter-packet interval.
|
||||
|
||||
Parameters:
|
||||
state: Queue state instance to drain.
|
||||
"""
|
||||
try:
|
||||
config._debug_log(
|
||||
"Queue drainer thread started",
|
||||
context="queue.drainer",
|
||||
severity="info",
|
||||
always=True,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
while not state.shutdown.is_set():
|
||||
state.drain_event.wait(timeout=1.0)
|
||||
if state.shutdown.is_set():
|
||||
break
|
||||
state.drain_event.clear()
|
||||
|
||||
depth = len(state.queue)
|
||||
if depth > _QUEUE_DEPTH_WARNING_THRESHOLD:
|
||||
try:
|
||||
config._debug_log(
|
||||
"Queue depth warning",
|
||||
context="queue.drainer",
|
||||
severity="warn",
|
||||
always=True,
|
||||
depth=depth,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
_drain_post_queue(state)
|
||||
except Exception as exc:
|
||||
try:
|
||||
config._debug_log(
|
||||
"Queue drainer error",
|
||||
context="queue.drainer",
|
||||
severity="error",
|
||||
always=True,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
config._debug_log(
|
||||
"Queue drainer thread exiting",
|
||||
context="queue.drainer",
|
||||
severity="info",
|
||||
always=True,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def _start_queue_drainer(state: QueueState = STATE) -> None:
|
||||
"""Idempotently start the background queue-drain thread.
|
||||
|
||||
Calling this function when a drainer thread is already alive is a
|
||||
no-op. The thread is created as a daemon so it does not prevent
|
||||
process exit. The check-and-start is performed under :attr:`state.lock`
|
||||
to avoid starting duplicate threads under concurrent callers.
|
||||
|
||||
If items are already in the queue when the drainer is started,
|
||||
:attr:`QueueState.drain_event` is signalled immediately so they are not
|
||||
stranded waiting for the next packet to arrive.
|
||||
|
||||
Parameters:
|
||||
state: Queue state whose :func:`_queue_drainer_loop` to start.
|
||||
"""
|
||||
with state.lock:
|
||||
if state.drainer is not None and state.drainer.is_alive():
|
||||
return
|
||||
# Reset in case the prior thread was stopped or crashed while
|
||||
# shutdown was already set.
|
||||
state.shutdown.clear()
|
||||
t = threading.Thread(
|
||||
target=_queue_drainer_loop,
|
||||
args=(state,),
|
||||
name="queue-drainer",
|
||||
daemon=True,
|
||||
)
|
||||
t.start()
|
||||
state.drainer = t
|
||||
if state.queue:
|
||||
state.drain_event.set()
|
||||
|
||||
|
||||
def _stop_queue_drainer(state: QueueState = STATE, timeout: float = 5.0) -> None:
|
||||
"""Signal the drainer thread to exit and wait for it to finish.
|
||||
|
||||
Sets :attr:`QueueState.shutdown` and :attr:`QueueState.drain_event` so
|
||||
the loop wakes up, observes the shutdown flag, and terminates. After
|
||||
joining (up to *timeout* seconds) the drainer reference is cleared.
|
||||
|
||||
Safe to call when no drainer is running (no-op).
|
||||
|
||||
Parameters:
|
||||
state: Queue state whose drainer to stop.
|
||||
timeout: Maximum seconds to wait for the thread to finish.
|
||||
"""
|
||||
if state.drainer is None or not state.drainer.is_alive():
|
||||
return
|
||||
state.shutdown.set()
|
||||
state.drain_event.set()
|
||||
state.drainer.join(timeout=timeout)
|
||||
state.drainer = None
|
||||
|
||||
|
||||
def _queue_post_json(
|
||||
path: str,
|
||||
payload: dict,
|
||||
@@ -467,32 +209,14 @@ def _queue_post_json(
|
||||
state: QueueState = STATE,
|
||||
send: Callable[[str, dict], None] | None = None,
|
||||
) -> None:
|
||||
"""Queue a POST request and wake the drain thread (or drain inline).
|
||||
|
||||
When a background drainer thread is running (started via
|
||||
:func:`_start_queue_drainer`), this function enqueues the item and
|
||||
signals :attr:`QueueState.drain_event` without blocking — the drain
|
||||
happens on the dedicated thread. This keeps the caller's thread (which
|
||||
may be the Meshtastic asyncio I/O thread) free to process serial events.
|
||||
|
||||
When no background drainer is alive the call falls back to a
|
||||
synchronous inline drain. This path is used by tests (which pass a
|
||||
``send`` override via :func:`_fresh_state`) and for any standalone use
|
||||
without calling :func:`_start_queue_drainer`.
|
||||
|
||||
.. note::
|
||||
The background drainer is used **only** when no custom ``send``
|
||||
override is provided (i.e. the production ``_post_json`` path).
|
||||
Any caller that supplies a custom ``send`` (tests, heartbeat
|
||||
helpers) always gets the synchronous inline drain so its transport
|
||||
is honoured correctly.
|
||||
"""Queue a POST request and start processing if idle.
|
||||
|
||||
Parameters:
|
||||
path: API path for the request.
|
||||
payload: JSON payload to send.
|
||||
priority: Scheduling priority where lower values run first.
|
||||
state: Queue container used to store pending requests.
|
||||
send: Optional transport override (synchronous fallback only).
|
||||
send: Optional transport override, primarily for tests.
|
||||
"""
|
||||
|
||||
if send is None:
|
||||
@@ -512,42 +236,6 @@ def _queue_post_json(
|
||||
)
|
||||
|
||||
_enqueue_post_json(path, payload, priority, state=state)
|
||||
|
||||
# Use the background drainer only when it is alive AND no custom send
|
||||
# override is in play. A custom send (used by tests and callers such as
|
||||
# ingestors.queue_ingestor_heartbeat) must be honoured synchronously
|
||||
# because the background drainer always calls _drain_post_queue without
|
||||
# a send override.
|
||||
#
|
||||
# The ``is`` check is intentional: _post_json is a module-level function
|
||||
# so identity comparison reliably detects the "no override" default that
|
||||
# was assigned at the top of this function.
|
||||
if send is _post_json:
|
||||
if state.drainer is not None and state.drainer.is_alive():
|
||||
state.drain_event.set()
|
||||
return
|
||||
|
||||
# The drainer was previously started but has died (e.g. unhandled
|
||||
# exception). Restart it so the caller stays non-blocking and the
|
||||
# MeshCore asyncio event loop is not stalled by inline HTTP calls.
|
||||
if state.drainer is not None:
|
||||
try:
|
||||
config._debug_log(
|
||||
"Restarting dead queue drainer thread",
|
||||
context="queue.queue_post_json",
|
||||
severity="warn",
|
||||
always=True,
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
_start_queue_drainer(state)
|
||||
# If the restart succeeded, delegate to the background thread.
|
||||
if state.drainer is not None and state.drainer.is_alive():
|
||||
state.drain_event.set()
|
||||
return
|
||||
|
||||
# Synchronous fallback: no drainer was ever started, the restart
|
||||
# failed, or a custom send override is in play.
|
||||
with state.lock:
|
||||
if state.active:
|
||||
return
|
||||
@@ -570,23 +258,17 @@ def _clear_post_queue(state: QueueState = STATE) -> None:
|
||||
__all__ = [
|
||||
"STATE",
|
||||
"QueueState",
|
||||
"_CHANNEL_POST_PRIORITY",
|
||||
"_DEFAULT_POST_PRIORITY",
|
||||
"_INGESTOR_POST_PRIORITY",
|
||||
"_MAX_SEND_RETRIES",
|
||||
"_MESSAGE_POST_PRIORITY",
|
||||
"_INGESTOR_POST_PRIORITY",
|
||||
"_NEIGHBOR_POST_PRIORITY",
|
||||
"_NODE_POST_PRIORITY",
|
||||
"_POSITION_POST_PRIORITY",
|
||||
"_QUEUE_DEPTH_WARNING_THRESHOLD",
|
||||
"_TRACE_POST_PRIORITY",
|
||||
"_TELEMETRY_POST_PRIORITY",
|
||||
"_clear_post_queue",
|
||||
"_drain_post_queue",
|
||||
"_enqueue_post_json",
|
||||
"_post_json",
|
||||
"_queue_drainer_loop",
|
||||
"_queue_post_json",
|
||||
"_start_queue_drainer",
|
||||
"_stop_queue_drainer",
|
||||
]
|
||||
|
||||
@@ -33,9 +33,6 @@ from google.protobuf.json_format import MessageToDict
|
||||
from google.protobuf.message import DecodeError
|
||||
from google.protobuf.message import Message as ProtoMessage
|
||||
|
||||
from .node_identity import canonical_node_id as _canonical_node_id
|
||||
from .node_identity import node_num_from_id as _node_num_from_id
|
||||
|
||||
_CLI_ROLE_MODULE_NAMES: tuple[str, ...] = (
|
||||
"meshtastic.cli.common",
|
||||
"meshtastic.cli.roles",
|
||||
@@ -128,10 +125,6 @@ def _load_cli_role_lookup() -> dict[int, str]:
|
||||
mapping[key_int] = str(value)
|
||||
return mapping
|
||||
|
||||
# Iterate through candidate module paths in preference order. The CLI
|
||||
# package ships several role-enum locations across versions; we stop at
|
||||
# the first module that yields a non-empty mapping so we do not silently
|
||||
# merge partial enums from two different meshtastic-cli releases.
|
||||
for module_name in _CLI_ROLE_MODULE_NAMES:
|
||||
try:
|
||||
module = importlib.import_module(module_name)
|
||||
@@ -436,6 +429,91 @@ def _pkt_to_dict(packet) -> dict:
|
||||
return {"_unparsed": str(packet)}
|
||||
|
||||
|
||||
def _canonical_node_id(value) -> str | None:
|
||||
"""Convert node identifiers into the canonical ``!xxxxxxxx`` format.
|
||||
|
||||
Parameters:
|
||||
value: Input identifier which may be an int, float or string.
|
||||
|
||||
Returns:
|
||||
The canonical identifier or ``None`` if conversion fails.
|
||||
"""
|
||||
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, (int, float)):
|
||||
try:
|
||||
num = int(value)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
if num < 0:
|
||||
return None
|
||||
return f"!{num & 0xFFFFFFFF:08x}"
|
||||
if not isinstance(value, str):
|
||||
return None
|
||||
|
||||
trimmed = value.strip()
|
||||
if not trimmed:
|
||||
return None
|
||||
if trimmed.startswith("^"):
|
||||
return trimmed
|
||||
if trimmed.startswith("!"):
|
||||
body = trimmed[1:]
|
||||
elif trimmed.lower().startswith("0x"):
|
||||
body = trimmed[2:]
|
||||
elif trimmed.isdigit():
|
||||
try:
|
||||
return f"!{int(trimmed, 10) & 0xFFFFFFFF:08x}"
|
||||
except ValueError:
|
||||
return None
|
||||
else:
|
||||
body = trimmed
|
||||
|
||||
if not body:
|
||||
return None
|
||||
try:
|
||||
return f"!{int(body, 16) & 0xFFFFFFFF:08x}"
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def _node_num_from_id(node_id) -> int | None:
|
||||
"""Extract the numeric node ID from a canonical identifier.
|
||||
|
||||
Parameters:
|
||||
node_id: Identifier value accepted by :func:`_canonical_node_id`.
|
||||
|
||||
Returns:
|
||||
The numeric node ID or ``None`` when parsing fails.
|
||||
"""
|
||||
|
||||
if node_id is None:
|
||||
return None
|
||||
if isinstance(node_id, (int, float)):
|
||||
try:
|
||||
num = int(node_id)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
return num if num >= 0 else None
|
||||
if not isinstance(node_id, str):
|
||||
return None
|
||||
|
||||
trimmed = node_id.strip()
|
||||
if not trimmed:
|
||||
return None
|
||||
if trimmed.startswith("!"):
|
||||
trimmed = trimmed[1:]
|
||||
if trimmed.lower().startswith("0x"):
|
||||
trimmed = trimmed[2:]
|
||||
try:
|
||||
return int(trimmed, 16)
|
||||
except ValueError:
|
||||
try:
|
||||
return int(trimmed, 10)
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def _merge_mappings(base, extra):
|
||||
"""Merge two mapping-like objects recursively.
|
||||
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Shared utility helpers for the mesh ingestor package."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from typing import Callable, TypeVar
|
||||
|
||||
_T = TypeVar("_T")


def _retry_dict_snapshot(fn: Callable[[], _T], retries: int = 3) -> _T | None:
    """Call ``fn()`` retrying on concurrent dictionary-modification errors.

    Meshtastic's node dictionary is mutated by a background thread, so
    iterating it can raise :class:`RuntimeError` with the message
    "dictionary changed size during iteration". This helper retries the
    call up to ``retries`` times, yielding to the thread scheduler between
    attempts via ``time.sleep(0)``.

    Parameters:
        fn: Zero-argument callable that performs the iteration.
        retries: Maximum number of attempts before giving up.

    Returns:
        The return value of ``fn`` on success, or ``None`` when all retries
        are exhausted.
    """

    # Always make at least one attempt, even for retries <= 0.
    remaining = max(1, retries)
    while remaining > 0:
        remaining -= 1
        try:
            return fn()
        except RuntimeError as err:
            # Retry only the specific concurrent-modification message;
            # any other RuntimeError is a genuine bug and must propagate.
            if "dictionary changed size during iteration" not in str(err):
                raise
            # Give the mutating thread a chance to finish before retrying.
            time.sleep(0)
    return None


__all__ = ["_retry_dict_snapshot"]
|
||||
+1
-3
@@ -29,9 +29,7 @@ CREATE TABLE IF NOT EXISTS messages (
|
||||
modem_preset TEXT,
|
||||
channel_name TEXT,
|
||||
reply_id INTEGER,
|
||||
emoji TEXT,
|
||||
ingestor TEXT,
|
||||
protocol TEXT NOT NULL DEFAULT 'meshtastic'
|
||||
emoji TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_rx_time ON messages(rx_time);
|
||||
|
||||
@@ -1,39 +0,0 @@
|
||||
-- Copyright © 2025-26 l5yth & contributors
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
-- Add a protocol column to every entity and event table so records from
|
||||
-- different mesh backends (meshtastic, meshcore, reticulum, …) can co-exist
|
||||
-- in the same database and be queried independently.
|
||||
--
|
||||
-- Existing rows default to 'meshtastic' for backward compatibility.
|
||||
|
||||
-- Run all ALTERs atomically: a partially applied migration must not leave
-- some tables with the protocol column and others without it.
BEGIN;
ALTER TABLE ingestors ADD COLUMN protocol TEXT NOT NULL DEFAULT 'meshtastic';
ALTER TABLE nodes ADD COLUMN protocol TEXT NOT NULL DEFAULT 'meshtastic';
ALTER TABLE messages ADD COLUMN protocol TEXT NOT NULL DEFAULT 'meshtastic';
ALTER TABLE positions ADD COLUMN protocol TEXT NOT NULL DEFAULT 'meshtastic';
ALTER TABLE telemetry ADD COLUMN protocol TEXT NOT NULL DEFAULT 'meshtastic';
ALTER TABLE traces ADD COLUMN protocol TEXT NOT NULL DEFAULT 'meshtastic';
ALTER TABLE neighbors ADD COLUMN protocol TEXT NOT NULL DEFAULT 'meshtastic';

-- Indices to support ?protocol= filtering on every entity endpoint without
-- full table scans as multi-protocol traffic grows.
-- IF NOT EXISTS keeps index creation idempotent on re-run.
CREATE INDEX IF NOT EXISTS idx_ingestors_protocol ON ingestors(protocol);
CREATE INDEX IF NOT EXISTS idx_nodes_protocol ON nodes(protocol);
CREATE INDEX IF NOT EXISTS idx_messages_protocol ON messages(protocol);
CREATE INDEX IF NOT EXISTS idx_positions_protocol ON positions(protocol);
CREATE INDEX IF NOT EXISTS idx_telemetry_protocol ON telemetry(protocol);
CREATE INDEX IF NOT EXISTS idx_traces_protocol ON traces(protocol);
CREATE INDEX IF NOT EXISTS idx_neighbors_protocol ON neighbors(protocol);
COMMIT;
|
||||
@@ -1,47 +0,0 @@
|
||||
-- Copyright © 2025-26 l5yth & contributors
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
-- Add telemetry subtype discriminator to enable per-chart type filtering.
|
||||
-- Backfills existing rows using field-presence heuristics that mirror
|
||||
-- classifySnapshot() in node-page.js, so historical data is classified
|
||||
-- consistently regardless of whether the new ingestors are deployed yet.
|
||||
|
||||
-- Run atomically so the column add and its backfill succeed or fail together.
BEGIN;
-- Nullable by design: rows whose fields match no heuristic below stay NULL
-- ("unclassified") rather than being forced into a wrong category.
ALTER TABLE telemetry ADD COLUMN telemetry_type TEXT;

-- The UPDATEs below run in priority order; each one only touches rows the
-- previous ones left NULL, so device > power > environment on overlap.

-- Device metrics: battery/channel fields are exclusive to device_metrics
UPDATE telemetry SET telemetry_type = 'device'
WHERE telemetry_type IS NULL
  AND (battery_level IS NOT NULL OR channel_utilization IS NOT NULL
       OR air_util_tx IS NOT NULL OR uptime_seconds IS NOT NULL);

-- Power sensor: current is the unambiguous power-sensor discriminator.
-- voltage is intentionally excluded here: device_metrics also stores a voltage
-- reading (~4.2 V for battery), so using voltage alone would misclassify device
-- rows whose four device-discriminator fields (battery_level, channel_utilization,
-- air_util_tx, uptime_seconds) happen to be NULL. Rows that have only voltage
-- and no other classifiable fields are left as NULL (unclassified), which is
-- more accurate than a wrong classification.
UPDATE telemetry SET telemetry_type = 'power'
WHERE telemetry_type IS NULL
  AND current IS NOT NULL;

-- Environment: temperature/humidity/pressure
UPDATE telemetry SET telemetry_type = 'environment'
WHERE telemetry_type IS NULL
  AND (temperature IS NOT NULL OR relative_humidity IS NOT NULL
       OR barometric_pressure IS NOT NULL OR iaq IS NOT NULL
       OR gas_resistance IS NOT NULL);

COMMIT;
|
||||
@@ -17,8 +17,6 @@ CREATE TABLE IF NOT EXISTS neighbors (
|
||||
neighbor_id TEXT NOT NULL,
|
||||
snr REAL,
|
||||
rx_time INTEGER NOT NULL,
|
||||
ingestor TEXT,
|
||||
protocol TEXT NOT NULL DEFAULT 'meshtastic',
|
||||
PRIMARY KEY (node_id, neighbor_id),
|
||||
FOREIGN KEY (node_id) REFERENCES nodes(node_id) ON DELETE CASCADE,
|
||||
FOREIGN KEY (neighbor_id) REFERENCES nodes(node_id) ON DELETE CASCADE
|
||||
|
||||
+1
-4
@@ -41,12 +41,9 @@ CREATE TABLE IF NOT EXISTS nodes (
|
||||
longitude REAL,
|
||||
altitude REAL,
|
||||
lora_freq INTEGER,
|
||||
modem_preset TEXT,
|
||||
protocol TEXT NOT NULL DEFAULT 'meshtastic',
|
||||
synthetic BOOLEAN NOT NULL DEFAULT 0
|
||||
modem_preset TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_nodes_last_heard ON nodes(last_heard);
|
||||
CREATE INDEX IF NOT EXISTS idx_nodes_hw_model ON nodes(hw_model);
|
||||
CREATE INDEX IF NOT EXISTS idx_nodes_latlon ON nodes(latitude, longitude);
|
||||
CREATE INDEX IF NOT EXISTS idx_nodes_long_name ON nodes(long_name);
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user