Compare commits

..

46 Commits

Author SHA1 Message Date
l5y bb7a09cb6f web: decryption confidence scoring 2026-01-11 08:38:24 +01:00
l5y fed8b9e124 matrix: config loading now merges optional TOML with CLI/env/secret inputs (#617)
* matrix: config loading now merges optional TOML with CLI/env/secret inputs

* matrix: fix tests

* matrix: address review comments

* matrix: fix tests

* matrix: cover missing unit test vectors
2026-01-10 23:39:53 +01:00
l5y 60e734086f matrix: logs only non-sensitive config fields (#616)
* matrix: logs only non-sensitive config fields

* matrix: run fmt
2026-01-10 21:06:51 +01:00
l5y c3181e9bd5 web: decrypted takes precedence (#614)
* web: decrypted takes precedence

* web: run rufo

* web: fix tests

* web: fix tests

* web: cover missing unit test vectors

* web: fix tests
2026-01-10 13:13:55 +01:00
l5y f4fa487b2d Add Apache headers to missing sources (#615) 2026-01-10 13:07:47 +01:00
l5y e0237108c6 web: decrypt PSK-1 unencrypted messages on arrival (#611)
* web: decrypt PSK-1 unencrypted messages on arrival

* web: address review comments

* web: use proper psk to decrypt instead of alias

* cover missing unit test vectors

* tests: run black formatter

* web: fix tests

* web: refine decryption data processing logic

* web: address review comments

* web: cover missing unit test vectors

* web: cover missing unit test vectors

* web: cover missing unit test vectors

* web: cover missing unit test vectors
2026-01-10 12:33:59 +01:00
l5y d7a636251d web: daemonize federation worker pool to avoid deadlocks on stuck announcements (#610)
* web: daemonize federation worker pool to avoid deadlocks on stuck announcements

* web: address review comments

* web: address review comments
2026-01-09 09:12:25 +01:00
l5y 108573b100 web: add announcement banner (#609)
* web: add announcement banner

* web: cover missing unit test vectors
2026-01-08 21:17:59 +01:00
l5y 36f55e6b79 l5y chore version 0510 (#608)
* chore: bump version to 0.5.10

* chore: bump version to 0.5.10

* chore: update changelog
2026-01-08 16:20:14 +01:00
l5y b4dd72e7eb matrix: listen for synapse on port 41448 (#607)
* matrix: listen for synapse on port 41448

* matrix: address review comments

* matrix: address review comments

* matrix: cover missing unit test vectors

* matrix: cover missing unit test vectors
2026-01-08 15:51:31 +01:00
l5y f5f2e977a1 web: collapse federation map legend (#604)
* web: collapse federation map legend

* web: cover missing unit test vectors
2026-01-06 17:31:20 +01:00
l5y e9a0dc0d59 web: fix stale node queries (#603) 2026-01-06 16:13:04 +01:00
l5y d75c395514 matrix: move short name to display name (#602)
* matrix: move short name to display name

* matrix: run fmt
2026-01-05 23:24:27 +01:00
l5y b08f951780 ci: update ruby to 4 (#601)
* ci: update ruby to 4

* ci: update dispatch triggers
2026-01-05 23:23:56 +01:00
l5y 955431ac18 web: display traces of last 28 days if available (#599)
* web: display traces of last 28 days if available

* web: address review comments

* web: fix tests

* web: fix tests
2026-01-05 21:22:16 +01:00
l5y 7f40abf92a web: establish menu structure (#597)
* web: establish menu structure

* web: cover missing unit test vectors

* web: fix tests
2026-01-05 21:18:51 +01:00
l5y c157fd481b matrix: fixed the text-message checkpoint regression (#595)
* matrix: fixed the text-message checkpoint regression

* matrix: improve formatting

* matrix: fix tests
2026-01-05 18:20:25 +01:00
l5y a6fc7145bc matrix: cache seen messages by rx_time not id (#594)
* matrix: cache seen messages by rx_time not id

* matrix: fix review comments

* matrix: fix review comments

* matrix: cover missing unit test vectors

* matrix: fix tests
2026-01-05 17:34:54 +01:00
l5y ca05cbb2c5 web: hide the default '0' tab when not active (#593) 2026-01-05 16:26:56 +01:00
l5y 5c79572c4d matrix: fix empty bridge state json (#592)
* matrix: fix empty bridge state json

* matrix: fix tests
2026-01-05 16:11:24 +01:00
l5y 6fd8e5ad12 web: allow certain charts to overflow upper bounds (#585)
* web: allow certain charts to overflow upper bounds

* web: cover missing unit test vectors
2025-12-31 15:15:18 +01:00
l5y 09fbc32e48 ingestor: support ROUTING_APP messages (#584)
* ingestor: support ROUTING_APP messages

* data: cover missing unit test vectors

* data: address review comments

* tests: fix
2025-12-31 13:13:34 +01:00
l5y 4591d5acd6 ci: run nix flake check on ci (#583)
* ci: run nix flake check on ci

* ci: fix tests
2025-12-31 12:58:37 +01:00
l5y 6c711f80b4 web: hide legend by default (#582)
* web: hide legend by default

* web: run rufo
2025-12-31 12:42:53 +01:00
Benjamin Grosse e61e701240 nix flake (#577) 2025-12-31 12:00:11 +01:00
apo-mak 42f4e80a26 Support BLE UUID format for macOS Bluetooth devices (#575)
* Initial plan

* Add BLE UUID support for macOS devices

Co-authored-by: apo-mak <25563515+apo-mak@users.noreply.github.com>

* docs: Add UUID format example for macOS BLE connections

Co-authored-by: apo-mak <25563515+apo-mak@users.noreply.github.com>

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: apo-mak <25563515+apo-mak@users.noreply.github.com>
2025-12-20 20:21:59 +01:00
l5y 4dc03f33ca web: add mesh.qrp.ro as seed node (#573) 2025-12-17 10:48:51 +01:00
l5y 5572c6cd12 web: ensure unknown nodes for messages and traces (#572) 2025-12-17 10:21:03 +01:00
l5y 4f7e66de82 chore: bump version to 0.5.9 (#569) 2025-12-16 21:14:10 +00:00
l5y c1898037c0 web: add secondary seed node jmrp.io (#568) 2025-12-16 21:38:41 +01:00
l5y efc5f64279 data: implement whitelist for ingestor (#567)
* data: implement whitelist for ingestor

* data: run black

* data: cover missing unit test vectors
2025-12-16 21:11:53 +01:00
l5y 636a203254 web: add ?since= parameter to all apis (#566) 2025-12-16 20:24:31 +01:00
l5y 2e78fa7a3a matrix: fix docker build 2025-12-16 19:26:31 +01:00
l5y e74f985630 matrix: fix docker build (#564) 2025-12-16 18:52:07 +01:00
l5y e4facd7f26 web: fix federation signature validation and create fallback (#563)
* web: fix federation signature validation and create fallback

* web: cover missing unit test vectors
2025-12-16 10:52:59 +01:00
l5y f533362f8a chore: update readme (#561) 2025-12-16 08:54:31 +01:00
l5y 175a8f368f matrix: add docker file for bridge (#556)
* matrix: add docker file for bridge

* matrix: address review comments

* matrix: address review comments

* matrix: address review comments

* matrix: address review comments

* matrix: address review comments
2025-12-16 08:53:01 +01:00
l5y 872bcbd529 matrix: add health checks to startup (#555)
* matrix: add health checks to startup

* matrix: address review comments

* matrix: cover missing unit test vectors

* matrix: cover missing unit test vectors
2025-12-15 22:53:32 +01:00
l5y 8811f71e53 matrix: omit the api part in base url (#554)
* matrix: omit the api part in base url

* matrix: address review comments
2025-12-15 22:04:01 +01:00
l5y fec649a159 app: add utility coverage tests for main.dart (#552)
* Add utility coverage tests for main.dart

* Add channel names to message sorting tests

* Fix MeshMessage sort test construction

* chore: run dart formatter
2025-12-15 11:03:51 +01:00
l5y 9e3f481401 Add unit tests for daemon helpers (#553) 2025-12-15 08:43:13 +01:00
l5y 1a497864a7 chore: bump version to 0.5.8 (#551)
* chore: bump version to 0.5.8

* chore: add missing license headers
2025-12-15 08:29:27 +01:00
l5y 06fb90513f data: track ingestors heartbeat (#549)
* data: track ingestors heartbeat

* data: address review comments

* cover missing unit test vectors

* cover missing unit test vectors
2025-12-14 18:42:17 +01:00
l5y b5eecb1ec1 Harden instance selector navigation URLs (#550)
* Harden instance selector navigation URLs

* Cover malformed instance URL handling
2025-12-14 18:40:41 +01:00
l5y 0e211aebdd data: hide channels that have been flagged for ignoring (#548)
* data: hide channels that have been flagged for ignoring

* data: address review comments
2025-12-14 16:47:44 +01:00
l5y 96b62d7e14 web: fix limit when counting remote nodes (#547) 2025-12-14 15:05:19 +01:00
111 changed files with 14522 additions and 510 deletions
+27 -5
View File
@@ -43,7 +43,7 @@ jobs:
strategy:
matrix:
service: [web, ingestor]
service: [web, ingestor, matrix-bridge]
architecture:
- { name: linux-amd64, platform: linux/amd64, label: "Linux x86_64", os: linux, architecture: amd64 }
- { name: linux-arm64, platform: linux/arm64, label: "Linux ARM64", os: linux, architecture: arm64 }
@@ -109,8 +109,8 @@ jobs:
uses: docker/build-push-action@v5
with:
context: .
file: ./${{ matrix.service == 'web' && 'web/Dockerfile' || 'data/Dockerfile' }}
target: production
file: ${{ matrix.service == 'web' && './web/Dockerfile' || matrix.service == 'ingestor' && './data/Dockerfile' || './matrix/Dockerfile' }}
target: ${{ matrix.service == 'matrix-bridge' && 'runtime' || 'production' }}
platforms: ${{ matrix.architecture.platform }}
push: true
tags: |
@@ -119,12 +119,12 @@ jobs:
${{ steps.tagging.outputs.include_latest == 'true' && format('{0}/{1}-{2}-{3}:latest', env.REGISTRY, env.IMAGE_PREFIX, matrix.service, matrix.architecture.name) || '' }}
labels: |
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.description=PotatoMesh ${{ matrix.service == 'web' && 'Web Application' || 'Python Ingestor' }} for ${{ matrix.architecture.label }}
org.opencontainers.image.description=PotatoMesh ${{ matrix.service == 'web' && 'Web Application' || matrix.service == 'ingestor' && 'Python Ingestor' || 'Matrix Bridge' }} for ${{ matrix.architecture.label }}
org.opencontainers.image.licenses=Apache-2.0
org.opencontainers.image.version=${{ steps.version.outputs.version }}
org.opencontainers.image.created=${{ github.event.head_commit.timestamp }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.title=PotatoMesh ${{ matrix.service == 'web' && 'Web' || 'Ingestor' }} (${{ matrix.architecture.label }})
org.opencontainers.image.title=PotatoMesh ${{ matrix.service == 'web' && 'Web' || matrix.service == 'ingestor' && 'Ingestor' || 'Matrix Bridge' }} (${{ matrix.architecture.label }})
org.opencontainers.image.vendor=PotatoMesh
org.opencontainers.image.architecture=${{ matrix.architecture.architecture }}
org.opencontainers.image.os=${{ matrix.architecture.os }}
@@ -208,6 +208,19 @@ jobs:
VERSION=${GITHUB_REF#refs/tags/v}
echo "version=$VERSION" >> $GITHUB_OUTPUT
- name: Determine tagging strategy
id: tagging
run: |
VERSION="${{ steps.version.outputs.version }}"
if echo "$VERSION" | grep -E -- '-(rc|beta|alpha|dev)'; then
INCLUDE_LATEST=false
else
INCLUDE_LATEST=true
fi
echo "include_latest=$INCLUDE_LATEST" >> $GITHUB_OUTPUT
- name: Publish release summary
run: |
echo "## 🚀 PotatoMesh Images Published to GHCR" >> $GITHUB_STEP_SUMMARY
@@ -234,4 +247,13 @@ jobs:
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-armv7:latest\` - Linux ARMv7" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
fi
# Matrix bridge images
echo "### 🧩 Matrix Bridge" >> $GITHUB_STEP_SUMMARY
if [ "${{ steps.tagging.outputs.include_latest }}" = "true" ]; then
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-matrix-bridge-linux-amd64:latest\` - Linux x86_64" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-matrix-bridge-linux-arm64:latest\` - Linux ARM64" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-matrix-bridge-linux-armv7:latest\` - Linux ARMv7" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
fi
+1
View File
@@ -20,6 +20,7 @@ on:
pull_request:
branches: [ "main" ]
paths:
- '.github/**'
- 'web/**'
- 'tests/**'
+1
View File
@@ -20,6 +20,7 @@ on:
pull_request:
branches: [ "main" ]
paths:
- '.github/**'
- 'app/**'
- 'tests/**'
+35
View File
@@ -0,0 +1,35 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Nix
on:
push:
branches: [ "main" ]
pull_request:
branches: [ "main" ]
jobs:
flake-check:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install Nix
uses: cachix/install-nix-action@v30
with:
extra_nix_config: |
experimental-features = nix-command flakes
- name: Run flake checks
run: nix flake check
+1
View File
@@ -20,6 +20,7 @@ on:
pull_request:
branches: [ "main" ]
paths:
- '.github/**'
- 'data/**'
- 'tests/**'
+2 -1
View File
@@ -20,6 +20,7 @@ on:
pull_request:
branches: [ "main" ]
paths:
- '.github/**'
- 'web/**'
- 'tests/**'
@@ -34,7 +35,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
ruby-version: ['3.3', '3.4']
ruby-version: ['3.4', '4.0']
steps:
- uses: actions/checkout@v5
+4
View File
@@ -17,11 +17,15 @@ The repository splits runtime and ingestion logic. `web/` holds the Sinatra dash
`data/` hosts the Python Meshtastic ingestor plus migrations and CLI scripts. API fixtures and end-to-end harnesses live in `tests/`. Dockerfiles and compose files support containerized workflows.
`matrix/` contains the Rust Matrix bridge; build with `cargo build --release` or `docker build -f matrix/Dockerfile .`, and keep bridge config under `matrix/Config.toml` when running locally.
## Build, Test, and Development Commands
Run dependency installs inside `web/`: `bundle install` for gems and `npm ci` for JavaScript tooling. Start the app with `cd web && API_TOKEN=dev ./app.sh` for local work or `bundle exec rackup -p 41447` when integrating elsewhere.
Prep ingestion with `python -m venv .venv && pip install -r data/requirements.txt`; `./data/mesh.sh` streams from live radios. `docker-compose -f docker-compose.dev.yml up` brings up the full stack.
Container images publish via `.github/workflows/docker.yml` as `potato-mesh-{service}-linux-$arch` (`web`, `ingestor`, `matrix-bridge`), using the Dockerfiles in `web/`, `data/`, and `matrix/`.
## Coding Style & Naming Conventions
Use two-space indentation for Ruby and keep `# frozen_string_literal: true` at the top of new files. Keep Ruby classes/modules in `CamelCase`, filenames in `snake_case.rb`, and feature specs in `*_spec.rb`.
+101
View File
@@ -1,5 +1,106 @@
# CHANGELOG
## v0.5.9
* Matrix: listen for synapse on port 41448 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/607>
* Web: collapse federation map legend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/604>
* Web: fix stale node queries by @l5yth in <https://github.com/l5yth/potato-mesh/pull/603>
* Matrix: move short name to display name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/602>
* Ci: update ruby to 4 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/601>
* Web: display traces of last 28 days if available by @l5yth in <https://github.com/l5yth/potato-mesh/pull/599>
* Web: establish menu structure by @l5yth in <https://github.com/l5yth/potato-mesh/pull/597>
* Matrix: fixed the text-message checkpoint regression by @l5yth in <https://github.com/l5yth/potato-mesh/pull/595>
* Matrix: cache seen messages by rx_time not id by @l5yth in <https://github.com/l5yth/potato-mesh/pull/594>
* Web: hide the default '0' tab when not active by @l5yth in <https://github.com/l5yth/potato-mesh/pull/593>
* Matrix: fix empty bridge state json by @l5yth in <https://github.com/l5yth/potato-mesh/pull/592>
* Web: allow certain charts to overflow upper bounds by @l5yth in <https://github.com/l5yth/potato-mesh/pull/585>
* Ingestor: support ROUTING_APP messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/584>
* Ci: run nix flake check on ci by @l5yth in <https://github.com/l5yth/potato-mesh/pull/583>
* Web: hide legend by default by @l5yth in <https://github.com/l5yth/potato-mesh/pull/582>
* Nix flake by @benjajaja in <https://github.com/l5yth/potato-mesh/pull/577>
* Support BLE UUID format for macOS Bluetooth devices by @apo-mak in <https://github.com/l5yth/potato-mesh/pull/575>
* Web: add mesh.qrp.ro as seed node by @l5yth in <https://github.com/l5yth/potato-mesh/pull/573>
* Web: ensure unknown nodes for messages and traces by @l5yth in <https://github.com/l5yth/potato-mesh/pull/572>
* Chore: bump version to 0.5.9 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/569>
## v0.5.8
* Web: add secondary seed node jmrp.io by @l5yth in <https://github.com/l5yth/potato-mesh/pull/568>
* Data: implement whitelist for ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/567>
* Web: add ?since= parameter to all apis by @l5yth in <https://github.com/l5yth/potato-mesh/pull/566>
* Matrix: fix docker build by @l5yth in <https://github.com/l5yth/potato-mesh/pull/565>
* Matrix: fix docker build by @l5yth in <https://github.com/l5yth/potato-mesh/pull/564>
* Web: fix federation signature validation and create fallback by @l5yth in <https://github.com/l5yth/potato-mesh/pull/563>
* Chore: update readme by @l5yth in <https://github.com/l5yth/potato-mesh/pull/561>
* Matrix: add docker file for bridge by @l5yth in <https://github.com/l5yth/potato-mesh/pull/556>
* Matrix: add health checks to startup by @l5yth in <https://github.com/l5yth/potato-mesh/pull/555>
* Matrix: omit the api part in base url by @l5yth in <https://github.com/l5yth/potato-mesh/pull/554>
* App: add utility coverage tests for main.dart by @l5yth in <https://github.com/l5yth/potato-mesh/pull/552>
* Data: add thorough daemon unit tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/553>
* Chore: bump version to 0.5.8 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/551>
## v0.5.7
* Data: track ingestors heartbeat by @l5yth in <https://github.com/l5yth/potato-mesh/pull/549>
* Harden instance selector navigation URLs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/550>
* Data: hide channels that have been flagged for ignoring by @l5yth in <https://github.com/l5yth/potato-mesh/pull/548>
* Web: fix limit when counting remote nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/547>
* Web: improve instances map and table view by @l5yth in <https://github.com/l5yth/potato-mesh/pull/546>
* Web: fix traces submission with optional fields on udp by @l5yth in <https://github.com/l5yth/potato-mesh/pull/545>
* Chore: bump version to 0.5.7 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/542>
* Handle zero telemetry aggregates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/538>
* Web: fix telemetry api to return current in amperes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/541>
* Web: fix traces rendering by @l5yth in <https://github.com/l5yth/potato-mesh/pull/535>
* Normalize numeric node roles to canonical labels by @l5yth in <https://github.com/l5yth/potato-mesh/pull/539>
* Use INSTANCE_DOMAIN env for ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/536>
* Web: further refine the federation page by @l5yth in <https://github.com/l5yth/potato-mesh/pull/534>
* Add Federation Map by @apo-mak in <https://github.com/l5yth/potato-mesh/pull/532>
* Add contact link to the instance data by @apo-mak in <https://github.com/l5yth/potato-mesh/pull/533>
* Matrix: create potato-matrix-bridge by @l5yth in <https://github.com/l5yth/potato-mesh/pull/528>
## v0.5.6
* Web: display sats in view by @l5yth in <https://github.com/l5yth/potato-mesh/pull/523>
* Web: display air quality in separate chart by @l5yth in <https://github.com/l5yth/potato-mesh/pull/521>
* Ci: Add macOS and Ubuntu builds to Flutter workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/519>
* Web: add current to charts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/520>
* App: fix notification icon by @l5yth in <https://github.com/l5yth/potato-mesh/pull/518>
* Spec: update test fixtures by @l5yth in <https://github.com/l5yth/potato-mesh/pull/517>
* App: generate proper icons by @l5yth in <https://github.com/l5yth/potato-mesh/pull/516>
* Web: fix favicon by @l5yth in <https://github.com/l5yth/potato-mesh/pull/515>
* Web: add ?since= parameter to api/messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/512>
* App: implement notifications by @l5yth in <https://github.com/l5yth/potato-mesh/pull/511>
* App: add theme selector by @l5yth in <https://github.com/l5yth/potato-mesh/pull/507>
* App: further harden refresh logic and prefer local first by @l5yth in <https://github.com/l5yth/potato-mesh/pull/506>
* Ci: fix app artifacts for tags by @l5yth in <https://github.com/l5yth/potato-mesh/pull/504>
* Ci: build app artifacts for tags by @l5yth in <https://github.com/l5yth/potato-mesh/pull/503>
* App: add persistence by @l5yth in <https://github.com/l5yth/potato-mesh/pull/501>
* App: instance and chat mvp by @l5yth in <https://github.com/l5yth/potato-mesh/pull/498>
* App: add instance selector to settings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/497>
* App: add scaffolding gitignore by @l5yth in <https://github.com/l5yth/potato-mesh/pull/496>
* Handle reaction app packets without reply id by @l5yth in <https://github.com/l5yth/potato-mesh/pull/495>
* Render reaction multiplier counts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/494>
* Add comprehensive tests for Flutter reader by @l5yth in <https://github.com/l5yth/potato-mesh/pull/491>
* Map numeric role ids to canonical Meshtastic roles by @l5yth in <https://github.com/l5yth/potato-mesh/pull/489>
* Update node detail hydration for traces by @l5yth in <https://github.com/l5yth/potato-mesh/pull/490>
* Add mobile Flutter CI workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/488>
* Align OCI labels in docker workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/487>
* Add Meshtastic reader Flutter app by @l5yth in <https://github.com/l5yth/potato-mesh/pull/483>
* Handle pre-release Docker tagging by @l5yth in <https://github.com/l5yth/potato-mesh/pull/486>
* Web: remove range from charts labels by @l5yth in <https://github.com/l5yth/potato-mesh/pull/485>
* Floor override frequencies to MHz integers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/476>
* Prevent message ids from being treated as node identifiers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/475>
* Fix 1 after emojis in reply. by @Alexkurd in <https://github.com/l5yth/potato-mesh/pull/464>
* Add frequency and preset to node table by @l5yth in <https://github.com/l5yth/potato-mesh/pull/472>
* Subscribe to traceroute app pubsub topic by @l5yth in <https://github.com/l5yth/potato-mesh/pull/471>
* Aggregate telemetry over the last 7 days by @l5yth in <https://github.com/l5yth/potato-mesh/pull/470>
* Address missing id field ingestor bug by @l5yth in <https://github.com/l5yth/potato-mesh/pull/469>
* Merge secondary channels by name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/468>
* Rate limit host device telemetry by @l5yth in <https://github.com/l5yth/potato-mesh/pull/467>
* Add traceroutes to frontend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/466>
* Feat: implement traceroute app packet handling across the stack by @l5yth in <https://github.com/l5yth/potato-mesh/pull/463>
* Bump version and update changelog by @l5yth in <https://github.com/l5yth/potato-mesh/pull/462>
## v0.5.5
* Added comprehensive helper unit tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/457>
+2
View File
@@ -53,6 +53,8 @@ Additional environment variables are optional:
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom (disables the auto-fit checkbox when set). |
| `MAX_DISTANCE` | `42` | Maximum relationship distance (km) before edges are hidden. |
| `DEBUG` | `0` | Enables verbose logging across services when set to `1`. |
| `ALLOWED_CHANNELS` | _unset_ | Comma-separated channel names the ingestor accepts; other channels are skipped before hidden filters. |
| `HIDDEN_CHANNELS` | _unset_ | Comma-separated channel names the ingestor skips when forwarding packets. |
| `FEDERATION` | `1` | Controls whether the instance announces itself and crawls peers (`1`) or stays isolated (`0`). |
| `PRIVATE` | `0` | Restricts public visibility and disables chat/message endpoints when set to `1`. |
| `CONNECTION` | `/dev/ttyACM0` | Serial device, TCP endpoint, or Bluetooth target used by the ingestor to reach the radio. |
+74 -8
View File
@@ -7,13 +7,20 @@
[![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/l5yth/potato-mesh/issues)
[![Matrix Chat](https://img.shields.io/badge/matrix-%23potatomesh:dod.ngo-blue)](https://matrix.to/#/#potatomesh:dod.ngo)
A federated Meshtastic-powered node dashboard for your local community. _No MQTT clutter, just local LoRa aether._
A federated, Meshtastic-powered node dashboard for your local community.
_No MQTT clutter, just local LoRa aether._
* Web app with chat window and map view showing nodes, neighbors, telemetry, and messages.
* API to POST (authenticated) and to GET nodes and messages.
* Shows new node notifications (first seen) in chat.
* Web dashboard with chat window and map view showing nodes, positions, neighbors,
trace routes, telemetry, and messages.
* API to POST (authenticated) and to GET nodes, messages, and telemetry.
* Shows new node notifications (first seen) and telemetry logs in chat.
* Allows searching and filtering for nodes in map and table view.
* Federated: _automatically_ forms a federation with other communities running
Potato Mesh!
* Supplemental Python ingestor to feed the POST APIs of the Web app with data remotely.
* Supports multiple ingestors per instance.
* Matrix bridge that posts Meshtastic messages to a defined matrix channel (no
radio required).
* Mobile app to _read_ messages on your local aether (no radio required).
Live demo for Berlin #MediumFast: [potatomesh.net](https://potatomesh.net)
@@ -58,6 +65,7 @@ RACK_ENV="production" \
APP_ENV="production" \
API_TOKEN="SuperSecureTokenReally" \
INSTANCE_DOMAIN="https://potatomesh.net" \
MAP_CENTER="53.55,13.42" \
exec ruby app.rb -p 41447 -o 0.0.0.0
```
@@ -68,6 +76,7 @@ exec ruby app.rb -p 41447 -o 0.0.0.0
* Provide a strong `API_TOKEN` value to authorize POST requests against the API.
* Configure `INSTANCE_DOMAIN` with the public URL of your deployment so vanity
links and generated metadata resolve correctly.
* Don't forget to set a `MAP_CENTER` to point to your local region.
The web app can be configured with environment variables (defaults shown):
@@ -79,10 +88,13 @@ The web app can be configured with environment variables (defaults shown):
| `CHANNEL` | `"#LongFast"` | Default channel name displayed in the UI. |
| `FREQUENCY` | `"915MHz"` | Default frequency description displayed in the UI. |
| `CONTACT_LINK` | `"#potatomesh:dod.ngo"` | Chat link or Matrix alias rendered in the footer and overlays. |
| `ANNOUNCEMENT` | _unset_ | Optional announcement banner text rendered above the header on every page. |
| `MAP_CENTER` | `38.761944,-27.090833` | Latitude and longitude that centre the map on load. |
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom applied on first load; disables auto-fit when provided. |
| `MAX_DISTANCE` | `42` | Maximum distance (km) before node relationships are hidden on the map. |
| `DEBUG` | `0` | Set to `1` for verbose logging in the web and ingestor services. |
| `ALLOWED_CHANNELS` | _unset_ | Comma-separated channel names the ingestor accepts; when set, all other channels are skipped before hidden filters. |
| `HIDDEN_CHANNELS` | _unset_ | Comma-separated channel names the ingestor will ignore when forwarding packets. |
| `FEDERATION` | `1` | Set to `1` to announce your instance and crawl peers, or `0` to disable federation. Private mode overrides this. |
| `PRIVATE` | `0` | Set to `1` to hide the chat UI, disable message APIs, and exclude hidden clients from public listings. |
@@ -133,7 +145,9 @@ The web app contains an API:
* GET `/api/messages?limit=100&encrypted=false&since=0` - returns the latest 100 messages newer than the provided unix timestamp (defaults to `since=0` to return full history; disabled when `PRIVATE=1`)
* GET `/api/telemetry?limit=100` - returns the latest 100 telemetry data
* GET `/api/neighbors?limit=100` - returns the latest 100 neighbor tuples
* GET `/api/traces?limit=100` - returns the latest 100 trace-routes caught
* GET `/api/instances` - returns known potato-mesh instances in other locations
* GET `/api/ingestors` - returns active potato-mesh python ingestors that feed data
* GET `/metrics`- metrics for the prometheus endpoint
* GET `/version`- information about the potato-mesh instance
* POST `/api/nodes` - upserts nodes provided as JSON object mapping node ids to node data (requires `Authorization: Bearer <API_TOKEN>`)
@@ -141,6 +155,7 @@ The web app contains an API:
* POST `/api/messages` - appends messages provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`; disabled when `PRIVATE=1`)
* POST `/api/telemetry` - appends telemetry provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/neighbors` - appends neighbor tuples provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/traces` - appends caught traces routes provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
The `API_TOKEN` environment variable must be set to a non-empty value and match the token supplied in the `Authorization` header for `POST` requests.
@@ -188,10 +203,52 @@ Run the script with `INSTANCE_DOMAIN` and `API_TOKEN` to keep updating
node records and parsing new incoming messages. Enable debug output with `DEBUG=1`,
specify the connection target with `CONNECTION` (default `/dev/ttyACM0`) or set it to
an IP address (for example `192.168.1.20:4403`) to use the Meshtastic TCP
interface. `CONNECTION` also accepts Bluetooth device addresses (e.g.,
`ED:4D:9E:95:CF:60`) and the script attempts a BLE connection if available. The
ingestor will still honor the legacy `POTATOMESH_INSTANCE` variable when
`INSTANCE_DOMAIN` is unset to ease upgrades from earlier deployments.
interface. `CONNECTION` also accepts Bluetooth device addresses in MAC format (e.g.,
`ED:4D:9E:95:CF:60`) or UUID format for macOS (e.g., `C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E`)
and the script attempts a BLE connection if available. To keep
ingestion limited, set `ALLOWED_CHANNELS` to a comma-separated whitelist (for
example `ALLOWED_CHANNELS="Chat,Ops"`); packets on other channels are discarded.
Use `HIDDEN_CHANNELS` to block specific channels from the web UI even when they
appear in the allowlist.
## Nix
For the dev shell, run:
```bash
nix develop
```
The shell provides Ruby plus the Python ingestor dependencies (including `meshtastic`
and `protobuf`). To sanity-check that the ingestor starts, run `python -m data.mesh`
with the usual environment variables (`INSTANCE_DOMAIN`, `API_TOKEN`, `CONNECTION`).
To run the packaged apps directly:
```bash
nix run .#web
nix run .#ingestor
```
Minimal NixOS module snippet:
```nix
services.potato-mesh = {
enable = true;
apiTokenFile = config.sops.secrets.potato-mesh-api-token.path;
dataDir = "/var/lib/potato-mesh";
port = 41447;
instanceDomain = "https://mesh.me";
siteName = "Nix Mesh";
contactLink = "homeserver.mx";
mapCenter = "28.96,-13.56";
frequency = "868MHz";
ingestor = {
enable = true;
connection = "192.168.X.Y:4403";
};
};
```
## Docker
@@ -201,12 +258,21 @@ Docker images are published on Github for each release:
docker pull ghcr.io/l5yth/potato-mesh/web:latest # newest release
docker pull ghcr.io/l5yth/potato-mesh/web:v0.5.5 # pinned historical release
docker pull ghcr.io/l5yth/potato-mesh/ingestor:latest
docker pull ghcr.io/l5yth/potato-mesh/matrix-bridge:latest
```
Feel free to run the [configure.sh](./configure.sh) script to set up your
environment. See the [Docker guide](DOCKER.md) for more details and custom
deployment instructions.
## Matrix Bridge
A Matrix bridge is currently being worked on. It requests messages from a configured
potato-mesh instance and forwards them to a specified Matrix channel; see
[matrix/README.md](./matrix/README.md).
![matrix bridge](./scrot-0.6.png)
## Mobile App
A mobile _reader_ app is currently being worked on. Stay tuned for releases and updates.
+15
View File
@@ -1,3 +1,18 @@
/*
* Copyright © 2025-26 l5yth & contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
plugins {
id("com.android.application")
id("kotlin-android")
@@ -1,3 +1,16 @@
// Copyright © 2025-26 l5yth & contributors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package net.potatomesh.reader
import io.flutter.embedding.android.FlutterActivity
+15
View File
@@ -1,3 +1,18 @@
/*
* Copyright © 2025-26 l5yth & contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
allprojects {
repositories {
google()
+15
View File
@@ -1,3 +1,18 @@
/*
* Copyright © 2025-26 l5yth & contributors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
pluginManagement {
val flutterSdkPath =
run {
+13 -1
View File
@@ -1,5 +1,18 @@
#!/usr/bin/env bash
# Copyright © 2025-26 l5yth & contributors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export GIT_TAG="$(git describe --tags --abbrev=0)"
export GIT_COMMITS="$(git rev-list --count ${GIT_TAG}..HEAD)"
export GIT_SHA="$(git rev-parse --short=9 HEAD)"
@@ -12,4 +25,3 @@ flutter run \
--dart-define=GIT_SHA="${GIT_SHA}" \
--dart-define=GIT_DIRTY="${GIT_DIRTY}" \
--device-id 38151FDJH00D4C
+2 -2
View File
@@ -15,11 +15,11 @@
<key>CFBundlePackageType</key>
<string>FMWK</string>
<key>CFBundleShortVersionString</key>
<string>0.5.7</string>
<string>0.5.10</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>0.5.7</string>
<string>0.5.10</string>
<key>MinimumOSVersion</key>
<string>14.0</string>
</dict>
+13
View File
@@ -1,3 +1,16 @@
// Copyright © 2025-26 l5yth & contributors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import Flutter
import UIKit
+13
View File
@@ -1 +1,14 @@
// Copyright © 2025-26 l5yth & contributors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#import "GeneratedPluginRegistrant.h"
+13
View File
@@ -1,3 +1,16 @@
// Copyright © 2025-26 l5yth & contributors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import Flutter
import UIKit
import XCTest
+1 -1
View File
@@ -1,7 +1,7 @@
name: potato_mesh_reader
description: Meshtastic Reader — read-only view for PotatoMesh messages.
publish_to: "none"
version: 0.5.7
version: 0.5.10
environment:
sdk: ">=3.4.0 <4.0.0"
+13 -1
View File
@@ -1,5 +1,18 @@
#!/usr/bin/env bash
# Copyright © 2025-26 l5yth & contributors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -euo pipefail
export GIT_TAG="$(git describe --tags --abbrev=0)"
@@ -27,4 +40,3 @@ fi
export APK_DIR="build/app/outputs/flutter-apk"
mv -v "${APK_DIR}/app-release.apk" "${APK_DIR}/potatomesh-reader-android-${TAG_NAME}.apk"
(cd "${APK_DIR}" && sha256sum "potatomesh-reader-android-${TAG_NAME}.apk" > "potatomesh-reader-android-${TAG_NAME}.apk.sha256sum")
+128
View File
@@ -0,0 +1,128 @@
// Copyright © 2025-26 l5yth & contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import 'package:flutter/material.dart';
import 'package:flutter_test/flutter_test.dart';
import 'package:potato_mesh_reader/main.dart';
// Test entry point covering the reader app's bootstrap progress model,
// instance-version summary, message sorting, and loading-screen widgets.
void main() {
  TestWidgetsFlutterBinding.ensureInitialized();

  test('BootstrapProgress renders stage, counts, and detail', () {
    // A fully-populated progress combines stage, counter, and detail text.
    const progress = BootstrapProgress(
      stage: 'Downloading',
      current: 2,
      total: 5,
      detail: 'instances',
    );
    expect(progress.label, 'Downloading 2/5 • instances');
    // With only a stage set, the label falls back to the bare stage name.
    const fallback = BootstrapProgress(stage: 'Starting');
    expect(fallback.label, 'Starting');
  });

  test('InstanceVersion summary prefers populated fields', () {
    // All display fields present: summary joins them with middle dots.
    const populated = InstanceVersion(
      name: 'BerlinMesh',
      channel: '#MediumFast',
      frequency: '868MHz',
      instanceDomain: 'potatomesh.net',
    );
    expect(populated.summary, 'BerlinMesh · #MediumFast · 868MHz');
    // Empty/null fields collapse to the 'Unknown' placeholder.
    const minimal = InstanceVersion(
      name: '',
      channel: null,
      frequency: null,
      instanceDomain: null,
    );
    expect(minimal.summary, 'Unknown');
  });

  test('sortMessagesByRxTime keeps unknown timestamps in place', () {
    // Local factory producing a message with fixed radio metadata so only
    // the id/text/timestamp vary between cases.
    MeshMessage buildMessage({
      required int id,
      required String text,
      required String rxIso,
      DateTime? rxTime,
    }) {
      return MeshMessage(
        id: id,
        rxTime: rxTime,
        rxIso: rxIso,
        fromId: '!$id',
        nodeId: '!$id',
        toId: '^',
        channelName: '#general',
        channel: 1,
        portnum: 'TEXT',
        text: text,
        rssi: -50,
        snr: 1.0,
        hopLimit: 1,
      );
    }

    final withTime = buildMessage(
      id: 2,
      rxTime: DateTime.utc(2024, 1, 1, 12, 1),
      rxIso: '2024-01-01T12:01:00Z',
      text: 'timed',
    );
    final withoutTime = buildMessage(
      id: 1,
      rxTime: null,
      rxIso: 'unknown',
      text: 'unknown',
    );
    final laterTime = buildMessage(
      id: 3,
      rxTime: DateTime.utc(2024, 1, 1, 12, 5),
      rxIso: '2024-01-01T12:05:00Z',
      text: 'later',
    );
    // Messages without an rxTime must not be moved; timestamped messages
    // are ordered chronologically around them.
    final sorted = sortMessagesByRxTime([withoutTime, laterTime, withTime]);
    expect(sorted.first.id, withoutTime.id,
        reason: 'messages without rxTime should retain position');
    expect(sorted[1].id, withTime.id,
        reason: 'messages with timestamps should be ordered chronologically');
    expect(sorted.last.id, laterTime.id);
  });

  testWidgets('LoadingScreen displays progress label and icon', (tester) async {
    const screen = LoadingScreen(
      progress: BootstrapProgress(stage: 'Fetching'),
    );
    await tester.pumpWidget(const MaterialApp(home: screen));
    // Spinner, stage label, and the branded semantics node must all render.
    expect(find.byType(CircularProgressIndicator), findsOneWidget);
    expect(find.text('Fetching'), findsOneWidget);
    expect(find.bySemanticsLabel('PotatoMesh'), findsOneWidget);
  });

  testWidgets('LoadingScreen surfaces errors', (tester) async {
    const screen = LoadingScreen(
      progress: BootstrapProgress(stage: 'Loading'),
      error: 'boom',
    );
    await tester.pumpWidget(const MaterialApp(home: screen));
    // The error message is rendered with the 'Failed to load:' prefix.
    expect(find.textContaining('Failed to load: boom'), findsOneWidget);
  });
}
+13
View File
@@ -1,3 +1,16 @@
// Copyright © 2025-26 l5yth & contributors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This is a basic Flutter widget test.
//
// To perform an interaction with a widget in your test, use the WidgetTester
+19
View File
@@ -76,6 +76,8 @@ CHANNEL=$(grep "^CHANNEL=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo
FREQUENCY=$(grep "^FREQUENCY=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "915MHz")
FEDERATION=$(grep "^FEDERATION=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "1")
PRIVATE=$(grep "^PRIVATE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "0")
HIDDEN_CHANNELS=$(grep "^HIDDEN_CHANNELS=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
ALLOWED_CHANNELS=$(grep "^ALLOWED_CHANNELS=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
MAP_CENTER=$(grep "^MAP_CENTER=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "38.761944,-27.090833")
MAP_ZOOM=$(grep "^MAP_ZOOM=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
MAX_DISTANCE=$(grep "^MAX_DISTANCE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "42")
@@ -126,6 +128,11 @@ echo "-------------------"
echo "Private mode hides public mesh messages from unauthenticated visitors."
echo "Set to 1 to hide public feeds or 0 to keep them visible."
read_with_default "Enable private mode (1=yes, 0=no)" "$PRIVATE" PRIVATE
echo "Provide a comma-separated whitelist of channel names to ingest (optional)."
echo "When set, only listed channels are ingested unless explicitly hidden below."
read_with_default "Allowed channels" "$ALLOWED_CHANNELS" ALLOWED_CHANNELS
echo "Provide a comma-separated list of channel names to hide from the web UI (optional)."
read_with_default "Hidden channels" "$HIDDEN_CHANNELS" HIDDEN_CHANNELS
echo ""
echo "🛠 Docker Settings"
@@ -196,6 +203,16 @@ update_env "POTATOMESH_IMAGE_TAG" "$POTATOMESH_IMAGE_TAG"
update_env "FEDERATION" "$FEDERATION"
update_env "PRIVATE" "$PRIVATE"
update_env "CONNECTION" "$CONNECTION"
if [ -n "$ALLOWED_CHANNELS" ]; then
update_env "ALLOWED_CHANNELS" "\"$ALLOWED_CHANNELS\""
else
sed -i.bak '/^ALLOWED_CHANNELS=.*/d' .env
fi
if [ -n "$HIDDEN_CHANNELS" ]; then
update_env "HIDDEN_CHANNELS" "\"$HIDDEN_CHANNELS\""
else
sed -i.bak '/^HIDDEN_CHANNELS=.*/d' .env
fi
if [ -n "$INSTANCE_DOMAIN" ]; then
update_env "INSTANCE_DOMAIN" "$INSTANCE_DOMAIN"
else
@@ -244,6 +261,8 @@ echo " API Token: ${API_TOKEN:0:8}..."
echo " Docker Image Arch: $POTATOMESH_IMAGE_ARCH"
echo " Docker Image Tag: $POTATOMESH_IMAGE_TAG"
echo " Private Mode: ${PRIVATE}"
echo " Allowed Channels: ${ALLOWED_CHANNELS:-'All'}"
echo " Hidden Channels: ${HIDDEN_CHANNELS:-'None'}"
echo " Instance Domain: ${INSTANCE_DOMAIN:-'Auto-detected'}"
if [ "${FEDERATION:-1}" = "0" ]; then
echo " Federation: Disabled"
+4
View File
@@ -50,6 +50,8 @@ USER potatomesh
ENV CONNECTION=/dev/ttyACM0 \
CHANNEL_INDEX=0 \
DEBUG=0 \
ALLOWED_CHANNELS="" \
HIDDEN_CHANNELS="" \
INSTANCE_DOMAIN="" \
API_TOKEN=""
@@ -75,6 +77,8 @@ USER ContainerUser
ENV CONNECTION=/dev/ttyACM0 \
CHANNEL_INDEX=0 \
DEBUG=0 \
ALLOWED_CHANNELS="" \
HIDDEN_CHANNELS="" \
INSTANCE_DOMAIN="" \
API_TOKEN=""
+1 -1
View File
@@ -18,7 +18,7 @@ The ``data.mesh`` module exposes helpers for reading Meshtastic node and
message information before forwarding it to the accompanying web application.
"""
VERSION = "0.5.7"
VERSION = "0.5.10"
"""Semantic version identifier shared with the dashboard and front-end."""
__version__ = VERSION
+26
View File
@@ -0,0 +1,26 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- Use write-ahead logging so concurrent readers are not blocked by writers.
PRAGMA journal_mode=WAL;

-- One row per ingestor, keyed by its mesh node identifier.
-- start_time / last_seen_time: presumably Unix epoch seconds — confirm
-- against the writer before relying on the unit.
CREATE TABLE IF NOT EXISTS ingestors (
    node_id TEXT PRIMARY KEY,
    start_time INTEGER NOT NULL,
    last_seen_time INTEGER NOT NULL,
    version TEXT,
    lora_freq INTEGER,
    modem_preset TEXT
);

-- Speeds up queries that filter or order ingestors by recency.
CREATE INDEX IF NOT EXISTS idx_ingestors_last_seen ON ingestors(last_seen_time);
+38 -2
View File
@@ -21,7 +21,17 @@ import threading as threading # re-exported for compatibility
import sys
import types
from . import channels, config, daemon, handlers, interfaces, queue, serialization
from .. import VERSION as _PACKAGE_VERSION
from . import (
channels,
config,
daemon,
handlers,
ingestors,
interfaces,
queue,
serialization,
)
__all__: list[str] = []
@@ -40,7 +50,15 @@ def _export_constants() -> None:
__all__.extend(["json", "urllib", "glob", "threading", "signal"])
for _module in (channels, daemon, handlers, interfaces, queue, serialization):
for _module in (
channels,
daemon,
handlers,
interfaces,
queue,
serialization,
ingestors,
):
_reexport(_module)
_export_constants()
@@ -52,11 +70,14 @@ _CONFIG_ATTRS = {
"DEBUG",
"INSTANCE",
"API_TOKEN",
"ALLOWED_CHANNELS",
"HIDDEN_CHANNELS",
"LORA_FREQ",
"MODEM_PRESET",
"_RECONNECT_INITIAL_DELAY_SECS",
"_RECONNECT_MAX_DELAY_SECS",
"_CLOSE_TIMEOUT_SECS",
"_INGESTOR_HEARTBEAT_SECS",
"_debug_log",
}
@@ -70,9 +91,16 @@ _HANDLER_ATTRS = set(handlers.__all__)
_DAEMON_ATTRS = set(daemon.__all__)
_SERIALIZATION_ATTRS = set(serialization.__all__)
_INTERFACE_EXPORTS = set(interfaces.__all__)
_INGESTOR_ATTRS = set(ingestors.__all__)
# Re-export the package version for callers that previously referenced
# data.mesh_ingestor.VERSION directly.
VERSION = _PACKAGE_VERSION
__all__.append("VERSION")
__all__.extend(sorted(_CONFIG_ATTRS))
__all__.extend(sorted(_INTERFACE_ATTRS))
__all__.append("VERSION")
class _MeshIngestorModule(types.ModuleType):
@@ -87,6 +115,10 @@ class _MeshIngestorModule(types.ModuleType):
return getattr(interfaces, name)
if name in _INTERFACE_EXPORTS:
return getattr(interfaces, name)
if name in _INGESTOR_ATTRS:
return getattr(ingestors, name)
if name == "VERSION":
return VERSION
raise AttributeError(name)
def __setattr__(self, name: str, value): # type: ignore[override]
@@ -121,6 +153,10 @@ class _MeshIngestorModule(types.ModuleType):
setattr(serialization, name, value)
super().__setattr__(name, getattr(serialization, name, value))
handled = True
if name in _INGESTOR_ATTRS:
setattr(ingestors, name, value)
super().__setattr__(name, getattr(ingestors, name, value))
handled = True
if handled:
return
super().__setattr__(name, value)
+52
View File
@@ -222,6 +222,54 @@ def channel_name(channel_index: int | None) -> str | None:
return _CHANNEL_LOOKUP.get(int(channel_index))
def hidden_channel_names() -> tuple[str, ...]:
    """Return the configured set of hidden channel names."""
    configured = getattr(config, "HIDDEN_CHANNELS", ())
    return tuple(configured)
def allowed_channel_names() -> tuple[str, ...]:
    """Return the configured set of explicitly allowed channel names."""
    configured = getattr(config, "ALLOWED_CHANNELS", ())
    return tuple(configured)
def is_allowed_channel(channel_name_value: str | None) -> bool:
    """Return ``True`` when ``channel_name_value`` is permitted by policy.

    An empty allowlist means every channel is permitted. Otherwise the
    (stripped) name must match an allowlist entry case-insensitively.
    """
    allowlist = getattr(config, "ALLOWED_CHANNELS", ())
    if not allowlist:
        # No allowlist configured: everything passes.
        return True
    if channel_name_value is None:
        return False
    candidate = channel_name_value.strip()
    if not candidate:
        return False
    folded = candidate.casefold()
    return any(folded == entry.casefold() for entry in allowlist)
def is_hidden_channel(channel_name_value: str | None) -> bool:
    """Return ``True`` when ``channel_name_value`` is configured as hidden.

    Comparison is case-insensitive on the stripped name; ``None`` and
    blank names are never considered hidden.
    """
    if channel_name_value is None:
        return False
    candidate = channel_name_value.strip()
    if not candidate:
        return False
    folded = candidate.casefold()
    hidden_names = getattr(config, "HIDDEN_CHANNELS", ())
    return any(folded == hidden.casefold() for hidden in hidden_names)
def _reset_channel_cache() -> None:
"""Clear cached channel data. Intended for use in tests only."""
@@ -234,5 +282,9 @@ __all__ = [
"capture_from_interface",
"channel_mappings",
"channel_name",
"allowed_channel_names",
"hidden_channel_names",
"is_allowed_channel",
"is_hidden_channel",
"_reset_channel_cache",
]
+50
View File
@@ -46,6 +46,9 @@ DEFAULT_ENERGY_ONLINE_DURATION_SECS = 300.0
DEFAULT_ENERGY_SLEEP_SECS = float(6 * 60 * 60)
"""Sleep duration used when energy saving mode is active."""
DEFAULT_INGESTOR_HEARTBEAT_SECS = float(60 * 60)
"""Interval between ingestor heartbeat announcements."""
CONNECTION = os.environ.get("CONNECTION") or os.environ.get("MESH_SERIAL")
"""Optional connection target for the mesh interface.
@@ -63,6 +66,49 @@ CHANNEL_INDEX = int(os.environ.get("CHANNEL_INDEX", str(DEFAULT_CHANNEL_INDEX)))
DEBUG = os.environ.get("DEBUG") == "1"
def _parse_channel_names(raw_value: str | None) -> tuple[str, ...]:
"""Normalise a comma-separated list of channel names.
Parameters:
raw_value: Raw environment string containing channel names separated by
commas. ``None`` and empty segments are ignored.
Returns:
A tuple of unique, non-empty channel names preserving input order while
deduplicating case-insensitively.
"""
if not raw_value:
return ()
normalized_entries: list[str] = []
seen: set[str] = set()
for part in raw_value.split(","):
name = part.strip()
if not name:
continue
key = name.casefold()
if key in seen:
continue
seen.add(key)
normalized_entries.append(name)
return tuple(normalized_entries)
def _parse_hidden_channels(raw_value: str | None) -> tuple[str, ...]:
    """Compatibility wrapper that parses hidden channel names.

    Delegates to :func:`_parse_channel_names`; kept under its original name
    so earlier callers and tests continue to work.
    """
    return _parse_channel_names(raw_value)
HIDDEN_CHANNELS = _parse_hidden_channels(os.environ.get("HIDDEN_CHANNELS"))
"""Channel names configured to be ignored by the ingestor."""
ALLOWED_CHANNELS = _parse_channel_names(os.environ.get("ALLOWED_CHANNELS"))
"""Explicitly permitted channel names; when set, other channels are ignored."""
def _resolve_instance_domain() -> str:
"""Resolve the configured instance domain from the environment.
@@ -100,6 +146,7 @@ _CLOSE_TIMEOUT_SECS = DEFAULT_CLOSE_TIMEOUT_SECS
_INACTIVITY_RECONNECT_SECS = DEFAULT_INACTIVITY_RECONNECT_SECS
_ENERGY_ONLINE_DURATION_SECS = DEFAULT_ENERGY_ONLINE_DURATION_SECS
_ENERGY_SLEEP_SECS = DEFAULT_ENERGY_SLEEP_SECS
_INGESTOR_HEARTBEAT_SECS = DEFAULT_INGESTOR_HEARTBEAT_SECS
# Backwards compatibility shim for legacy imports.
PORT = CONNECTION
@@ -144,6 +191,8 @@ __all__ = [
"SNAPSHOT_SECS",
"CHANNEL_INDEX",
"DEBUG",
"HIDDEN_CHANNELS",
"ALLOWED_CHANNELS",
"INSTANCE",
"API_TOKEN",
"ENERGY_SAVING",
@@ -155,6 +204,7 @@ __all__ = [
"_INACTIVITY_RECONNECT_SECS",
"_ENERGY_ONLINE_DURATION_SECS",
"_ENERGY_SLEEP_SECS",
"_INGESTOR_HEARTBEAT_SECS",
"_debug_log",
]
+43 -1
View File
@@ -23,7 +23,7 @@ import time
from pubsub import pub
from . import config, handlers, interfaces
from . import config, handlers, ingestors, interfaces
_RECEIVE_TOPICS = (
"meshtastic.receive",
@@ -169,6 +169,41 @@ def _is_ble_interface(iface_obj) -> bool:
return "ble_interface" in module_name
def _process_ingestor_heartbeat(iface, *, ingestor_announcement_sent: bool) -> bool:
    """Send ingestor liveness heartbeats when a host id is known.

    Parameters:
        iface: Active mesh interface used to extract a host node id when absent.
        ingestor_announcement_sent: Whether an initial heartbeat has already
            been sent during the current session.

    Returns:
        Updated ``ingestor_announcement_sent`` flag reflecting whether an
        initial heartbeat was transmitted.
    """
    host_id = handlers.host_node_id()
    # Fall back to probing the interface when no host id is registered yet.
    if host_id is None and iface is not None:
        discovered = interfaces._extract_host_node_id(iface)
        if discovered:
            handlers.register_host_node_id(discovered)
            host_id = handlers.host_node_id()
    if not host_id:
        # Nothing to announce without a host id; keep the flag unchanged.
        return ingestor_announcement_sent
    ingestors.set_ingestor_node_id(host_id)
    # Force the first heartbeat of a session through regardless of the
    # ingestors module's own scheduling.
    sent_now = ingestors.queue_ingestor_heartbeat(
        force=not ingestor_announcement_sent
    )
    if sent_now and not ingestor_announcement_sent:
        return True
    return ingestor_announcement_sent
iface_cls = getattr(iface_obj, "__class__", None)
if iface_cls is None:
return False
module_name = getattr(iface_cls, "__module__", "") or ""
return "ble_interface" in module_name
def _connected_state(candidate) -> bool | None:
"""Return the connection state advertised by ``candidate``.
@@ -233,6 +268,7 @@ def main(existing_interface=None) -> None:
inactivity_reconnect_secs = max(
0.0, getattr(config, "_INACTIVITY_RECONNECT_SECS", 0.0)
)
ingestor_announcement_sent = False
energy_saving_enabled = config.ENERGY_SAVING
energy_online_secs = max(0.0, config._ENERGY_ONLINE_DURATION_SECS)
@@ -288,6 +324,7 @@ def main(existing_interface=None) -> None:
handlers.register_host_node_id(
interfaces._extract_host_node_id(iface)
)
ingestors.set_ingestor_node_id(handlers.host_node_id())
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
initial_snapshot_sent = False
if not announced_target and resolved_target:
@@ -501,6 +538,10 @@ def main(existing_interface=None) -> None:
iface_connected_at = None
continue
ingestor_announcement_sent = _process_ingestor_heartbeat(
iface, ingestor_announcement_sent=ingestor_announcement_sent
)
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
stop.wait(config.SNAPSHOT_SECS)
except KeyboardInterrupt: # pragma: no cover - interactive only
@@ -520,6 +561,7 @@ __all__ = [
"_node_items_snapshot",
"_subscribe_receive_topics",
"_is_ble_interface",
"_process_ingestor_heartbeat",
"_connected_state",
"main",
]
+86
View File
@@ -0,0 +1,86 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decode Meshtastic protobuf payloads from stdin JSON."""
from __future__ import annotations
import base64
import json
import os
import sys
from typing import Any, Dict, Tuple
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Drop this script's own directory from the import path before importing
# third-party modules — presumably so sibling files do not shadow the
# installed ``meshtastic``/``google`` packages; confirm against deployment.
if SCRIPT_DIR in sys.path:
    sys.path.remove(SCRIPT_DIR)

from google.protobuf.json_format import MessageToDict
from meshtastic.protobuf import mesh_pb2, telemetry_pb2

# Supported Meshtastic port numbers mapped to (port name, protobuf class).
# Ports absent from this map are reported as "unsupported-port" by the decoder.
PORTNUM_MAP: Dict[int, Tuple[str, Any]] = {
    3: ("POSITION_APP", mesh_pb2.Position),
    4: ("NODEINFO_APP", mesh_pb2.NodeInfo),
    5: ("ROUTING_APP", mesh_pb2.Routing),
    67: ("TELEMETRY_APP", telemetry_pb2.Telemetry),
    70: ("TRACEROUTE_APP", mesh_pb2.RouteDiscovery),
    71: ("NEIGHBORINFO_APP", mesh_pb2.NeighborInfo),
}
def _decode_payload(portnum: int, payload_b64: str) -> dict[str, Any]:
    """Decode a base64-encoded protobuf payload for a supported port.

    Parameters:
        portnum: Meshtastic port number selecting the protobuf message class.
        payload_b64: Base64-encoded serialized protobuf payload.

    Returns:
        A dict with ``portnum``/``type``/``payload`` on success, or a dict
        with an ``error`` key describing the failure.
    """
    entry = PORTNUM_MAP.get(portnum)
    if entry is None:
        return {"error": "unsupported-port", "portnum": portnum}
    try:
        # validate=True rejects non-base64 characters instead of ignoring them.
        raw_bytes = base64.b64decode(payload_b64, validate=True)
    except Exception as exc:
        return {"error": f"invalid-payload: {exc}"}
    name, message_cls = entry
    message = message_cls()
    try:
        message.ParseFromString(raw_bytes)
    except Exception as exc:
        return {"error": f"decode-failed: {exc}", "portnum": portnum, "type": name}
    return {
        "portnum": portnum,
        "type": name,
        "payload": MessageToDict(message, preserving_proto_field_name=True),
    }
def main() -> int:
    """Read a JSON request from stdin, decode its payload, write JSON to stdout.

    The request must be a JSON object containing an integer ``portnum`` and a
    base64 string ``payload_b64``. Any failure is reported as a JSON object
    with an ``error`` key.

    Returns:
        ``0`` on success, ``1`` when the request is malformed.
    """
    raw = sys.stdin.read()
    try:
        request = json.loads(raw)
    except json.JSONDecodeError as exc:
        sys.stdout.write(json.dumps({"error": f"invalid-json: {exc}"}))
        return 1
    # json.loads accepts any top-level JSON value (list, string, number);
    # reject non-objects explicitly instead of crashing with AttributeError
    # on the .get() calls below.
    if not isinstance(request, dict):
        sys.stdout.write(json.dumps({"error": "invalid-request"}))
        return 1
    portnum = request.get("portnum")
    payload_b64 = request.get("payload_b64")
    if not isinstance(portnum, int):
        sys.stdout.write(json.dumps({"error": "missing-portnum"}))
        return 1
    if not isinstance(payload_b64, str):
        sys.stdout.write(json.dumps({"error": "missing-payload"}))
        return 1
    result = _decode_payload(portnum, payload_b64)
    sys.stdout.write(json.dumps(result))
    return 0
if __name__ == "__main__":
raise SystemExit(main())
+94 -51
View File
@@ -100,6 +100,41 @@ from .serialization import (
)
def _portnum_candidates(name: str) -> set[int]:
    """Return Meshtastic port number candidates for ``name``.

    Parameters:
        name: Port name to look up in Meshtastic ``PortNum`` enums.

    Returns:
        Set of integer port numbers resolved from Meshtastic modules.
    """
    resolved: set[int] = set()
    # Meshtastic has shipped the portnum module under two import paths.
    for module_name in (
        "meshtastic.portnums_pb2",
        "meshtastic.protobuf.portnums_pb2",
    ):
        module = sys.modules.get(module_name)
        if module is None:
            with contextlib.suppress(ModuleNotFoundError):
                module = importlib.import_module(module_name)
        if module is None:
            continue
        enum = getattr(module, "PortNum", None)
        lookup = getattr(enum, "Value", None) if enum else None
        # Prefer the enum's Value() lookup when it is available.
        if callable(lookup):
            with contextlib.suppress(Exception):
                enum_value = _coerce_int(lookup(name))
                if enum_value is not None:
                    resolved.add(enum_value)
        # Some builds also expose the value as a module-level constant.
        direct_value = _coerce_int(getattr(module, name, None))
        if direct_value is not None:
            resolved.add(direct_value)
    return resolved
def register_host_node_id(node_id: str | None) -> None:
"""Record the canonical identifier for the connected host device.
@@ -1280,28 +1315,7 @@ def store_packet_dict(packet: Mapping) -> None:
traceroute_section = (
decoded.get("traceroute") if isinstance(decoded, Mapping) else None
)
traceroute_port_ints: set[int] = set()
for module_name in (
"meshtastic.portnums_pb2",
"meshtastic.protobuf.portnums_pb2",
):
module = sys.modules.get(module_name)
if module is None:
with contextlib.suppress(ModuleNotFoundError):
module = importlib.import_module(module_name)
if module is None:
continue
portnum_enum = getattr(module, "PortNum", None)
value_lookup = getattr(portnum_enum, "Value", None) if portnum_enum else None
if callable(value_lookup):
with contextlib.suppress(Exception):
candidate = _coerce_int(value_lookup("TRACEROUTE_APP"))
if candidate is not None:
traceroute_port_ints.add(candidate)
constant_value = getattr(module, "TRACEROUTE_APP", None)
candidate = _coerce_int(constant_value)
if candidate is not None:
traceroute_port_ints.add(candidate)
traceroute_port_ints = _portnum_candidates("TRACEROUTE_APP")
if (
portnum == "TRACEROUTE_APP"
@@ -1359,36 +1373,43 @@ def store_packet_dict(packet: Mapping) -> None:
if emoji_text:
emoji = emoji_text
allowed_port_values = {"1", "TEXT_MESSAGE_APP", "REACTION_APP"}
routing_section = decoded.get("routing") if isinstance(decoded, Mapping) else None
routing_port_candidates = _portnum_candidates("ROUTING_APP")
if text is None and (
portnum == "ROUTING_APP"
or (portnum_int is not None and portnum_int in routing_port_candidates)
or isinstance(routing_section, Mapping)
):
routing_payload = _first(decoded, "payload", "data", default=None)
if routing_payload is not None:
if isinstance(routing_payload, bytes):
text = base64.b64encode(routing_payload).decode("ascii")
elif isinstance(routing_payload, str):
text = routing_payload
else:
try:
text = json.dumps(routing_payload, ensure_ascii=True)
except TypeError:
text = str(routing_payload)
if isinstance(text, str):
text = text.strip() or None
allowed_port_values = {"1", "TEXT_MESSAGE_APP", "REACTION_APP", "ROUTING_APP"}
allowed_port_ints = {1}
reaction_port_candidates: set[int] = set()
for module_name in (
"meshtastic.portnums_pb2",
"meshtastic.protobuf.portnums_pb2",
):
module = sys.modules.get(module_name)
if module is None:
with contextlib.suppress(ModuleNotFoundError):
module = importlib.import_module(module_name)
if module is None:
continue
portnum_enum = getattr(module, "PortNum", None)
value_lookup = getattr(portnum_enum, "Value", None) if portnum_enum else None
if callable(value_lookup):
with contextlib.suppress(Exception):
candidate = _coerce_int(value_lookup("REACTION_APP"))
if candidate is not None:
reaction_port_candidates.add(candidate)
constant_value = getattr(module, "REACTION_APP", None)
candidate = _coerce_int(constant_value)
if candidate is not None:
reaction_port_candidates.add(candidate)
reaction_port_candidates = _portnum_candidates("REACTION_APP")
for candidate in reaction_port_candidates:
allowed_port_ints.add(candidate)
allowed_port_values.add(str(candidate))
for candidate in routing_port_candidates:
allowed_port_ints.add(candidate)
allowed_port_values.add(str(candidate))
if isinstance(routing_section, Mapping) and portnum_int is not None:
allowed_port_ints.add(portnum_int)
allowed_port_values.add(str(portnum_int))
is_reaction_packet = portnum == "REACTION_APP" or (
reply_id is not None and emoji is not None
)
@@ -1414,6 +1435,8 @@ def store_packet_dict(packet: Mapping) -> None:
except Exception:
channel = 0
channel_name_value = channels.channel_name(channel)
pkt_id = _first(packet, "id", "packet_id", "packetId", default=None)
if pkt_id is None:
_record_ignored_packet(packet, reason="missing-packet-id")
@@ -1459,6 +1482,29 @@ def store_packet_dict(packet: Mapping) -> None:
_record_ignored_packet(packet, reason="skipped-direct-message")
return
if not channels.is_allowed_channel(channel_name_value):
_record_ignored_packet(packet, reason="disallowed-channel")
if config.DEBUG:
config._debug_log(
"Ignored packet on disallowed channel",
context="handlers.store_packet_dict",
channel=channel,
channel_name=channel_name_value,
allowed_channels=channels.allowed_channel_names(),
)
return
if channels.is_hidden_channel(channel_name_value):
_record_ignored_packet(packet, reason="hidden-channel")
if config.DEBUG:
config._debug_log(
"Ignored packet on hidden channel",
context="handlers.store_packet_dict",
channel=channel,
channel_name=channel_name_value,
)
return
message_payload = {
"id": int(pkt_id),
"rx_time": rx_time,
@@ -1476,11 +1522,8 @@ def store_packet_dict(packet: Mapping) -> None:
"emoji": emoji,
}
channel_name_value = None
if not encrypted_flag:
channel_name_value = channels.channel_name(channel)
if channel_name_value:
message_payload["channel_name"] = channel_name_value
if not encrypted_flag and channel_name_value:
message_payload["channel_name"] = channel_name_value
_queue_post_json(
"/api/messages",
_apply_radio_metadata(message_payload),
+139
View File
@@ -0,0 +1,139 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for tracking ingestor identity and liveness announcements."""
from __future__ import annotations
import time
from dataclasses import dataclass, field
from typing import Callable
from .. import VERSION as INGESTOR_VERSION
from . import config, queue
from .serialization import _canonical_node_id
HEARTBEAT_INTERVAL_SECS = 60 * 60
"""Default interval between ingestor heartbeat announcements."""
@dataclass
class _IngestorState:
"""Mutable ingestor identity and heartbeat tracking data."""
start_time: int = field(default_factory=lambda: int(time.time()))
last_heartbeat: int | None = None
node_id: str | None = None
STATE = _IngestorState()
"""Shared ingestor identity state."""
# Alias retained for clarity without exporting into the top-level mesh module to
# avoid colliding with the HTTP queue state.
INGESTOR_STATE = STATE
def ingestor_start_time() -> int:
    """Return the unix timestamp recorded when this ingestor process booted.

    Returns:
        Seconds since the epoch captured at module initialisation.
    """
    return STATE.start_time
def set_ingestor_node_id(node_id: str | None) -> str | None:
    """Record the canonical host node identifier for the ingestor.

    Parameters:
        node_id: Raw node identifier reported by the connected device.

    Returns:
        Canonical node identifier in ``!xxxxxxxx`` form, or ``None`` when the
        provided value cannot be normalised.
    """
    canonical = _canonical_node_id(node_id)
    if canonical is not None and STATE.node_id != canonical:
        # A new identity resets the heartbeat schedule so the next
        # announcement for this node fires immediately.
        STATE.node_id = canonical
        STATE.last_heartbeat = None
    return canonical
def queue_ingestor_heartbeat(
    *,
    force: bool = False,
    send: Callable[[str, dict], None] | None = None,
    node_id: str | None = None,
) -> bool:
    """Queue a heartbeat payload advertising ingestor liveness.

    Parameters:
        force: When ``True``, bypasses the heartbeat interval guard so an
            announcement is queued immediately.
        send: Optional transport callable used for tests; defaults to the queue
            dispatcher.
        node_id: Optional node identifier to register before sending. When
            omitted the previously recorded identifier is reused.

    Returns:
        ``True`` when a heartbeat payload was queued, ``False`` otherwise.
    """
    # Register a freshly supplied identifier before deciding whether to send.
    if node_id is not None:
        candidate = _canonical_node_id(node_id)
        if candidate:
            set_ingestor_node_id(candidate)
    host = STATE.node_id
    if host is None:
        # Without a known identity there is nothing meaningful to announce.
        return False
    now = int(time.time())
    configured = getattr(config, "_INGESTOR_HEARTBEAT_SECS", HEARTBEAT_INTERVAL_SECS)
    interval = max(0, int(configured))
    previous = STATE.last_heartbeat
    if not force and previous is not None and now - previous < interval:
        return False
    payload = {
        "node_id": host,
        "start_time": STATE.start_time,
        "last_seen_time": now,
        "version": INGESTOR_VERSION,
    }
    # Radio metadata is optional; only advertise fields the config provides.
    lora_freq = getattr(config, "LORA_FREQ", None)
    if lora_freq is not None:
        payload["lora_freq"] = lora_freq
    modem_preset = getattr(config, "MODEM_PRESET", None)
    if modem_preset is not None:
        payload["modem_preset"] = modem_preset
    queue._queue_post_json(
        "/api/ingestors",
        payload,
        priority=getattr(
            queue, "_INGESTOR_POST_PRIORITY", queue._DEFAULT_POST_PRIORITY
        ),
        send=send,
    )
    STATE.last_heartbeat = now
    return True
# Public surface of this module; kept sorted (uppercase names first) so new
# exports slot in predictably. STATE itself is deliberately omitted — callers
# should use the INGESTOR_STATE alias (see comment at its definition).
__all__ = [
    "HEARTBEAT_INTERVAL_SECS",
    "INGESTOR_STATE",
    "ingestor_start_time",
    "queue_ingestor_heartbeat",
    "set_ingestor_node_id",
]
+12 -3
View File
@@ -628,7 +628,13 @@ _DEFAULT_SERIAL_PATTERNS = (
"/dev/cu.usbserial*",
)
_BLE_ADDRESS_RE = re.compile(r"^(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$")
# Support both MAC addresses (Linux/Windows) and UUIDs (macOS)
_BLE_ADDRESS_RE = re.compile(
r"^(?:"
r"(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}|" # MAC address format
r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" # UUID format
r")$"
)
class _DummySerialInterface:
@@ -642,13 +648,13 @@ class _DummySerialInterface:
def _parse_ble_target(value: str) -> str | None:
"""Return an uppercase BLE MAC address when ``value`` matches the format.
"""Return a normalized BLE address (MAC or UUID) when ``value`` matches the format.
Parameters:
value: User-provided target string.
Returns:
The normalised MAC address or ``None`` when validation fails.
The normalised MAC address or UUID, or ``None`` when validation fails.
"""
if not value:
@@ -772,10 +778,13 @@ def _create_serial_interface(port: str) -> tuple[object, str]:
return _DummySerialInterface(), "mock"
ble_target = _parse_ble_target(port_value)
if ble_target:
# Determine if it's a MAC address or UUID
address_type = "MAC" if ":" in ble_target else "UUID"
config._debug_log(
"Using BLE interface",
context="interfaces.ble",
address=ble_target,
address_type=address_type,
)
return _load_ble_interface()(address=ble_target), ble_target
network_target = _parse_network_target(port_value)
+2
View File
@@ -74,6 +74,7 @@ def _payload_key_value_pairs(payload: Mapping[str, object]) -> str:
_MESSAGE_POST_PRIORITY = 10
_INGESTOR_POST_PRIORITY = 80
_NEIGHBOR_POST_PRIORITY = 20
_TRACE_POST_PRIORITY = 25
_POSITION_POST_PRIORITY = 30
@@ -259,6 +260,7 @@ __all__ = [
"QueueState",
"_DEFAULT_POST_PRIORITY",
"_MESSAGE_POST_PRIORITY",
"_INGESTOR_POST_PRIORITY",
"_NEIGHBOR_POST_PRIORITY",
"_NODE_POST_PRIORITY",
"_POSITION_POST_PRIORITY",
+3 -1
View File
@@ -29,7 +29,9 @@ CREATE TABLE IF NOT EXISTS messages (
modem_preset TEXT,
channel_name TEXT,
reply_id INTEGER,
emoji TEXT
emoji TEXT,
decrypted INTEGER NOT NULL DEFAULT 0,
decryption_confidence REAL
);
CREATE INDEX IF NOT EXISTS idx_messages_rx_time ON messages(rx_time);
+39
View File
@@ -49,6 +49,8 @@ x-ingestor-base: &ingestor-base
environment:
CONNECTION: ${CONNECTION:-/dev/ttyACM0}
CHANNEL_INDEX: ${CHANNEL_INDEX:-0}
ALLOWED_CHANNELS: ${ALLOWED_CHANNELS:-""}
HIDDEN_CHANNELS: ${HIDDEN_CHANNELS:-""}
API_TOKEN: ${API_TOKEN}
INSTANCE_DOMAIN: ${INSTANCE_DOMAIN}
POTATOMESH_INSTANCE: ${POTATOMESH_INSTANCE:-http://web:41447}
@@ -75,6 +77,21 @@ x-ingestor-base: &ingestor-base
memory: 128M
cpus: '0.1'
x-matrix-bridge-base: &matrix-bridge-base
image: ghcr.io/l5yth/potato-mesh-matrix-bridge-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:${POTATOMESH_IMAGE_TAG:-latest}
volumes:
- potatomesh_matrix_bridge_state:/app
- ./matrix/Config.toml:/app/Config.toml:ro
restart: unless-stopped
deploy:
resources:
limits:
memory: 128M
cpus: '0.1'
reservations:
memory: 64M
cpus: '0.05'
services:
web:
<<: *web-base
@@ -108,6 +125,26 @@ services:
profiles:
- bridge
matrix-bridge:
<<: *matrix-bridge-base
network_mode: host
depends_on:
- web
extra_hosts:
- "web:127.0.0.1"
matrix-bridge-bridge:
<<: *matrix-bridge-base
container_name: potatomesh-matrix-bridge
networks:
- potatomesh-network
depends_on:
- web-bridge
ports:
- "41448:41448"
profiles:
- bridge
volumes:
potatomesh_data:
driver: local
@@ -115,6 +152,8 @@ volumes:
driver: local
potatomesh_logs:
driver: local
potatomesh_matrix_bridge_state:
driver: local
networks:
potatomesh-network:
Generated
+61
View File
@@ -0,0 +1,61 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1766070988,
"narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "c6245e83d836d0433170a16eb185cefe0572f8b8",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}
+384
View File
@@ -0,0 +1,384 @@
{
description = "PotatoMesh - A federated, Meshtastic-powered node dashboard";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
};
outputs = { self, nixpkgs, flake-utils }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = nixpkgs.legacyPackages.${system};
# Python environment for the ingestor
pythonEnv = pkgs.python3.withPackages (ps: with ps; [
meshtastic
protobuf
requests
]);
# Web app wrapper script
# Wrapper that stages the Ruby web app into a writable per-user directory
# (Nix store paths are read-only) and launches it with bundler.
webApp = pkgs.writeShellApplication {
  name = "potato-mesh-web";
  runtimeInputs = [ pkgs.ruby pkgs.bundler pkgs.sqlite pkgs.git pkgs.gnumake pkgs.gcc ];
  text = ''
    # Resolve the writable base directory, honouring the XDG convention.
    if [ -n "''${XDG_DATA_HOME:-}" ]; then
      BASEDIR="$XDG_DATA_HOME"
    else
      BASEDIR="$HOME/.local/share/potato-mesh"
    fi
    WORKDIR="$BASEDIR/web"
    mkdir -p "$WORKDIR"
    # Copy app files if not present or outdated; .src_path records which store
    # path was last installed so a rebuilt flake triggers a re-copy.
    APP_SRC="${./web}"
    DATA_SRC="${./data}"
    if [ ! -f "$WORKDIR/.installed" ] || [ "$APP_SRC" != "$(cat "$WORKDIR/.src_path" 2>/dev/null)" ]; then
      # Copy web app
      cp -rT "$APP_SRC" "$WORKDIR/"
      chmod -R u+w "$WORKDIR"
      # Copy data directory (contains SQL schemas)
      mkdir -p "$BASEDIR/data"
      cp -rT "$DATA_SRC" "$BASEDIR/data/"
      chmod -R u+w "$BASEDIR/data"
      echo "$APP_SRC" > "$WORKDIR/.src_path"
      # Removing the marker forces a fresh bundle install below.
      rm -f "$WORKDIR/.installed"
    fi
    cd "$WORKDIR"
    # Install gems if needed (vendored so the install stays self-contained).
    if [ ! -f ".installed" ]; then
      bundle config set --local path 'vendor/bundle'
      bundle install
      touch .installed
    fi
    exec bundle exec ruby app.rb -p "''${PORT:-41447}" -o "''${HOST:-0.0.0.0}"
  '';
};
# Ingestor wrapper script
# Wrapper for the Python ingestor; stages the data/ package into a writable
# directory because it is executed as `python -m data.mesh` from its parent.
ingestor = pkgs.writeShellApplication {
  name = "potato-mesh-ingestor";
  runtimeInputs = [ pythonEnv ];
  text = ''
    # The ingestor needs to run from parent directory with data/ folder
    if [ -n "''${XDG_DATA_HOME:-}" ]; then
      BASEDIR="$XDG_DATA_HOME"
    else
      BASEDIR="$HOME/.local/share/potato-mesh"
    fi
    # Unlike the web wrapper, this only copies once and never refreshes on a
    # new store path — NOTE(review): confirm this staleness is intended.
    if [ ! -d "$BASEDIR/data" ]; then
      mkdir -p "$BASEDIR"
      cp -rT "${./data}" "$BASEDIR/data/"
      chmod -R u+w "$BASEDIR/data"
    fi
    cd "$BASEDIR"
    exec python -m data.mesh
  '';
};
in {
packages = {
web = webApp;
ingestor = ingestor;
default = webApp;
};
apps = {
web = {
type = "app";
program = "${webApp}/bin/potato-mesh-web";
};
ingestor = {
type = "app";
program = "${ingestor}/bin/potato-mesh-ingestor";
};
default = self.apps.${system}.web;
};
devShells.default = pkgs.mkShell {
buildInputs = [
pkgs.ruby
pkgs.bundler
pythonEnv
pkgs.sqlite
];
shellHook = ''
echo "PotatoMesh development shell"
echo " - Ruby: $(ruby --version)"
echo " - Python: $(python --version)"
echo ""
echo "To run the web app: cd web && bundle install && ./app.sh"
echo "To run the ingestor: cd data && python mesh.py"
'';
};
# NixOS VM regression test: asserts both systemd units are generated with the
# configured data directory wired into XDG_DATA_HOME and WorkingDirectory.
checks.potato-mesh-nixos = pkgs.testers.nixosTest {
  name = "potato-mesh-data-dir";
  nodes.machine = { lib, ... }: {
    imports = [ self.nixosModules.default ];
    services.potato-mesh = {
      enable = true;
      apiToken = "test-token";
      dataDir = "/var/lib/potato-mesh";
      ingestor.enable = true;
    };
    # The ingestor would try to open a serial device inside the VM; keep the
    # unit installed but do not start it at boot.
    systemd.services.potato-mesh-ingestor.wantedBy = lib.mkForce [];
  };
  testScript = ''
    # testScript is Python: the original `machine.start` (no parentheses) was
    # a bare attribute reference and never booted the VM.
    machine.start()
    machine.succeed("grep -q 'XDG_DATA_HOME=/var/lib/potato-mesh' /etc/systemd/system/potato-mesh-web.service")
    machine.succeed("grep -q 'XDG_DATA_HOME=/var/lib/potato-mesh' /etc/systemd/system/potato-mesh-ingestor.service")
    machine.succeed("grep -q 'WorkingDirectory=/var/lib/potato-mesh' /etc/systemd/system/potato-mesh-web.service")
    machine.succeed("grep -q 'WorkingDirectory=/var/lib/potato-mesh' /etc/systemd/system/potato-mesh-ingestor.service")
  '';
};
}
) // {
# NixOS module
nixosModules.default = { config, lib, pkgs, ... }:
let
cfg = config.services.potato-mesh;
in {
options.services.potato-mesh = {
enable = lib.mkEnableOption "PotatoMesh web dashboard";
package = lib.mkOption {
type = lib.types.package;
default = self.packages.${pkgs.system}.web;
description = "The potato-mesh web package to use";
};
port = lib.mkOption {
type = lib.types.port;
default = 41447;
description = "Port to listen on";
};
host = lib.mkOption {
type = lib.types.str;
default = "0.0.0.0";
description = "Host to bind to";
};
apiToken = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Shared secret that authorizes ingestors and API clients making POST requests. Warning: visible in nix store. Prefer apiTokenFile for production.";
};
apiTokenFile = lib.mkOption {
type = lib.types.nullOr lib.types.path;
default = null;
description = "File containing API_TOKEN=<secret> (recommended for production)";
};
instanceDomain = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Public hostname used for metadata, federation, and generated API links";
};
siteName = lib.mkOption {
type = lib.types.str;
default = "PotatoMesh Demo";
description = "Title and header displayed in the UI";
};
channel = lib.mkOption {
type = lib.types.str;
default = "#LongFast";
description = "Default channel name displayed in the UI";
};
frequency = lib.mkOption {
type = lib.types.str;
default = "915MHz";
description = "Default frequency description displayed in the UI";
};
contactLink = lib.mkOption {
type = lib.types.str;
default = "#potatomesh:dod.ngo";
description = "Chat link or Matrix alias rendered in the footer and overlays";
};
mapCenter = lib.mkOption {
type = lib.types.str;
default = "38.761944,-27.090833";
description = "Latitude and longitude that centre the map on load";
};
mapZoom = lib.mkOption {
type = lib.types.nullOr lib.types.int;
default = null;
description = "Fixed Leaflet zoom applied on first load; disables auto-fit when provided";
};
maxDistance = lib.mkOption {
type = lib.types.int;
default = 42;
description = "Maximum distance (km) before node relationships are hidden on the map";
};
debug = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Enable verbose logging";
};
allowedChannels = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Comma-separated channel names the ingestor accepts";
};
hiddenChannels = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
description = "Comma-separated channel names the ingestor will ignore";
};
federation = lib.mkOption {
type = lib.types.bool;
default = true;
description = "Announce instance and crawl peers";
};
private = lib.mkOption {
type = lib.types.bool;
default = false;
description = "Hide chat UI, disable message APIs, and exclude hidden clients from public listings";
};
dataDir = lib.mkOption {
type = lib.types.path;
default = "/var/lib/potato-mesh";
description = "Directory to store database and configuration";
};
user = lib.mkOption {
type = lib.types.str;
default = "potato-mesh";
description = "User to run the service as";
};
group = lib.mkOption {
type = lib.types.str;
default = "potato-mesh";
description = "Group to run the service as";
};
# Ingestor options
ingestor = {
enable = lib.mkEnableOption "PotatoMesh Python ingestor";
package = lib.mkOption {
type = lib.types.package;
default = self.packages.${pkgs.system}.ingestor;
description = "The potato-mesh ingestor package to use";
};
connection = lib.mkOption {
type = lib.types.str;
default = "/dev/ttyACM0";
description = "Connection target: serial port, IP:port for TCP, or Bluetooth address for BLE";
};
};
};
config = lib.mkIf cfg.enable {
users.users.${cfg.user} = {
isSystemUser = true;
group = cfg.group;
home = cfg.dataDir;
createHome = true;
};
users.groups.${cfg.group} = {};
systemd.services.potato-mesh-web = {
description = "PotatoMesh Web Dashboard";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" ];
environment = {
RACK_ENV = "production";
APP_ENV = "production";
PORT = toString cfg.port;
HOST = cfg.host;
SITE_NAME = cfg.siteName;
CHANNEL = cfg.channel;
FREQUENCY = cfg.frequency;
CONTACT_LINK = cfg.contactLink;
MAP_CENTER = cfg.mapCenter;
MAX_DISTANCE = toString cfg.maxDistance;
DEBUG = if cfg.debug then "1" else "0";
FEDERATION = if cfg.federation then "1" else "0";
PRIVATE = if cfg.private then "1" else "0";
XDG_DATA_HOME = cfg.dataDir;
XDG_CONFIG_HOME = "${cfg.dataDir}/config";
} // lib.optionalAttrs (cfg.instanceDomain != null) {
INSTANCE_DOMAIN = cfg.instanceDomain;
} // lib.optionalAttrs (cfg.mapZoom != null) {
MAP_ZOOM = toString cfg.mapZoom;
} // lib.optionalAttrs (cfg.allowedChannels != null) {
ALLOWED_CHANNELS = cfg.allowedChannels;
} // lib.optionalAttrs (cfg.hiddenChannels != null) {
HIDDEN_CHANNELS = cfg.hiddenChannels;
} // lib.optionalAttrs (cfg.apiToken != null) {
API_TOKEN = cfg.apiToken;
};
serviceConfig = {
Type = "simple";
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.dataDir;
ExecStart = "${cfg.package}/bin/potato-mesh-web";
Restart = "always";
RestartSec = 5;
} // lib.optionalAttrs (cfg.apiTokenFile != null) {
EnvironmentFile = cfg.apiTokenFile;
};
};
systemd.services.potato-mesh-ingestor = lib.mkIf cfg.ingestor.enable {
description = "PotatoMesh Python Ingestor";
wantedBy = [ "multi-user.target" ];
after = [ "network.target" "potato-mesh-web.service" ];
requires = [ "potato-mesh-web.service" ];
environment = {
INSTANCE_DOMAIN = "http://127.0.0.1:${toString cfg.port}";
CONNECTION = cfg.ingestor.connection;
DEBUG = if cfg.debug then "1" else "0";
XDG_DATA_HOME = cfg.dataDir;
} // lib.optionalAttrs (cfg.allowedChannels != null) {
ALLOWED_CHANNELS = cfg.allowedChannels;
} // lib.optionalAttrs (cfg.hiddenChannels != null) {
HIDDEN_CHANNELS = cfg.hiddenChannels;
} // lib.optionalAttrs (cfg.apiToken != null) {
API_TOKEN = cfg.apiToken;
};
serviceConfig = {
Type = "simple";
User = cfg.user;
Group = cfg.group;
WorkingDirectory = cfg.dataDir;
ExecStart = "${cfg.ingestor.package}/bin/potato-mesh-ingestor";
Restart = "always";
RestartSec = 10;
} // lib.optionalAttrs (cfg.apiTokenFile != null) {
EnvironmentFile = cfg.apiTokenFile;
};
};
};
};
};
}
+1 -1
View File
@@ -1,3 +1,3 @@
target/
Cargo.lock
coverage.lcov
bridge_state.json
+2316
View File
File diff suppressed because it is too large Load Diff
+19 -2
View File
@@ -1,6 +1,20 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[package]
name = "potatomesh-matrix-bridge"
version = "0.5.7"
version = "0.5.10"
edition = "2021"
[dependencies]
@@ -13,8 +27,11 @@ anyhow = "1"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] }
urlencoding = "2"
axum = { version = "0.7", features = ["json"] }
clap = { version = "4", features = ["derive"] }
[dev-dependencies]
tempfile = "3"
mockito = "1"
serial_test = "3"
serial_test = "3"
tower = "0.5"
+8 -6
View File
@@ -1,18 +1,20 @@
[potatomesh]
# Base URL without trailing slash
base_url = "https://potatomesh.net/api"
# Base domain (with or without trailing slash)
base_url = "https://potatomesh.net"
# Poll interval in seconds
poll_interval_secs = 60
[matrix]
# Homeserver base URL (client API) without trailing slash
homeserver = "https://matrix.example.org"
homeserver = "https://matrix.dod.ngo"
# Appservice access token (from your registration.yaml)
as_token = "YOUR_APPSERVICE_AS_TOKEN"
as_token = "INVALID_TOKEN_NOT_WORKING"
# Homeserver token used to authenticate Synapse callbacks
hs_token = "INVALID_TOKEN_NOT_WORKING"
# Server name (domain) part of Matrix user IDs
server_name = "example.org"
server_name = "dod.ngo"
# Room ID to send into (must be joined by the appservice / puppets)
room_id = "!yourroomid:example.org"
room_id = "!sXabOBXbVObAlZQEUs:c-base.org" # "#potato-bridge:c-base.org"
[state]
# Where to persist last seen message id (optional but recommended)
+44
View File
@@ -0,0 +1,44 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Stage 1: compile the bridge with the pinned toolchain. Cache mounts keep the
# cargo registry/git checkouts across builds without baking them into layers.
FROM rust:1.92-bookworm AS builder
WORKDIR /app
COPY matrix/Cargo.toml matrix/Cargo.lock ./
COPY matrix/src ./src
RUN --mount=type=cache,target=/usr/local/cargo/registry \
    --mount=type=cache,target=/usr/local/cargo/git \
    cargo build --release --locked

# Stage 2: minimal runtime image. ca-certificates for outbound TLS to the
# homeserver/PotatoMesh API; gosu lets the entrypoint drop root after fixing
# state-file ownership.
FROM debian:bookworm-slim AS runtime
RUN apt-get update \
    && apt-get install -y --no-install-recommends ca-certificates gosu \
    && rm -rf /var/lib/apt/lists/*
# Dedicated non-root user; entrypoint execs the bridge as this account.
RUN useradd --create-home --uid 10001 --shell /usr/sbin/nologin potatomesh
WORKDIR /app
COPY --from=builder /app/target/release/potatomesh-matrix-bridge /usr/local/bin/potatomesh-matrix-bridge
# Shipped for reference only; the real config is mounted at /app/Config.toml.
COPY matrix/Config.toml /app/Config.example.toml
COPY matrix/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
# Appservice callback listener port (see matrix/README).
EXPOSE 41448
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
+121 -21
View File
@@ -2,9 +2,11 @@
A small Rust daemon that bridges **PotatoMesh** LoRa messages into a **Matrix** room.
![matrix bridge](../scrot-0.6.png)
For each PotatoMesh node, the bridge creates (or uses) a **Matrix puppet user**:
- Matrix localpart: the hex node id (without `!`), e.g. `!67fc83cb``@67fc83cb:example.org`
- Matrix localpart: `potato_` + the hex node id (without `!`), e.g. `!67fc83cb``@potato_67fc83cb:example.org`
- Matrix display name: the node's `long_name` from the PotatoMesh API
Messages from PotatoMesh are periodically fetched and forwarded to a single Matrix room as those puppet users.
@@ -13,10 +15,10 @@ Messages from PotatoMesh are periodically fetched and forwarded to a single Matr
## Features
- Polls `https://potatomesh.net/api/messages` (or any configured base URL)
- Looks up node metadata via `GET /nodes/{hex}` and caches it
- Polls `https://potatomesh.net/api/messages` (deriving `/api` from the configured base domain)
- Looks up node metadata via `GET /api/nodes/{hex}` and caches it
- One Matrix user per node:
- username: hex node id
- username: `potato_{hex node id}`
- display name: `long_name`
- Forwards `TEXT_MESSAGE_APP` messages into a single Matrix room
- Persists last-seen message ID to avoid duplicates across restarts
@@ -26,12 +28,12 @@ Messages from PotatoMesh are periodically fetched and forwarded to a single Matr
## Architecture Overview
- **PotatoMesh side**
- `GET /messages` returns an array of messages
- `GET /nodes/{hex}` returns node metadata (including `long_name`)
- `GET /api/messages` returns an array of messages
- `GET /api/nodes/{hex}` returns node metadata (including `long_name`)
- **Matrix side**
- Uses the Matrix Client-Server API with an **appservice access token**
- Impersonates puppet users via `user_id=@{hex}:{server_name}&access_token={as_token}`
- Impersonates puppet users via `user_id=@potato_{hex}:{server_name}&access_token={as_token}`
- Sends `m.room.message` events into a configured room
This is **not** a full appservice framework; it just speaks the minimal HTTP needed.
@@ -43,25 +45,33 @@ This is **not** a full appservice framework; it just speaks the minimal HTTP nee
- Rust (stable) and `cargo`
- A Matrix homeserver you control (e.g. Synapse)
- An **application service registration** on your homeserver that:
- Whitelists the puppet user namespace (e.g. `@[0-9a-f]{8}:example.org`)
- Whitelists the puppet user namespace (e.g. `@potato_[0-9a-f]{8}:example.org`)
- Provides an `as_token` the bridge can use
- Network access from the bridge host to:
- `https://potatomesh.net/api` (or your configured PotatoMesh API)
- `https://potatomesh.net/` (bridge appends `/api`)
- Your Matrix homeserver (`https://matrix.example.org`)
---
## Configuration
All configuration is in `Config.toml` in the project root.
Configuration can come from a TOML file, CLI flags, environment variables, or secret files. The bridge merges inputs in this order (highest to lowest):
Example:
1. CLI flags
2. Environment variables
3. Secret files (`*_FILE` paths or container defaults)
4. TOML config file
5. Container defaults (paths + poll interval)
If no TOML file is provided, required values must be supplied via CLI/env/secret inputs.
Example TOML:
```toml
[potatomesh]
# Base URL without trailing slash
base_url = "https://potatomesh.net/api"
# Base domain (bridge will call {base_url}/api)
base_url = "https://potatomesh.net/"
# Poll interval in seconds
poll_interval_secs = 10
@@ -70,6 +80,8 @@ poll_interval_secs = 10
homeserver = "https://matrix.example.org"
# Appservice access token (from your registration.yaml)
as_token = "YOUR_APPSERVICE_AS_TOKEN"
# Appservice homeserver token (must match registration hs_token)
hs_token = "SECRET_HS_TOKEN"
# Server name (domain) part of Matrix user IDs
server_name = "example.org"
# Room ID to send into (must be joined by the appservice / puppets)
@@ -80,11 +92,65 @@ room_id = "!yourroomid:example.org"
state_file = "bridge_state.json"
````
The `hs_token` is used to validate inbound appservice transactions. Keep it identical in `Config.toml` and your Matrix appservice registration file.
### CLI Flags
Run `potatomesh-matrix-bridge --help` for the full list. Common flags:
* `--config PATH`
* `--state-file PATH`
* `--potatomesh-base-url URL`
* `--potatomesh-poll-interval-secs SECS`
* `--matrix-homeserver URL`
* `--matrix-as-token TOKEN`
* `--matrix-as-token-file PATH`
* `--matrix-hs-token TOKEN`
* `--matrix-hs-token-file PATH`
* `--matrix-server-name NAME`
* `--matrix-room-id ROOM`
* `--container` / `--no-container`
* `--secrets-dir PATH`
### Environment Variables
* `POTATOMESH_CONFIG`
* `POTATOMESH_BASE_URL`
* `POTATOMESH_POLL_INTERVAL_SECS`
* `MATRIX_HOMESERVER`
* `MATRIX_AS_TOKEN`
* `MATRIX_AS_TOKEN_FILE`
* `MATRIX_HS_TOKEN`
* `MATRIX_HS_TOKEN_FILE`
* `MATRIX_SERVER_NAME`
* `MATRIX_ROOM_ID`
* `STATE_FILE`
* `POTATOMESH_CONTAINER`
* `POTATOMESH_SECRETS_DIR`
### Secret Files
If you supply `*_FILE` values, the bridge reads the secret contents and trims whitespace. When running inside a container, the bridge also checks the default secrets directory (default: `/run/secrets`) for:
* `matrix_as_token`
* `matrix_hs_token`
### Container Defaults
Container detection checks `POTATOMESH_CONTAINER`, `CONTAINER`, and `/proc/1/cgroup`. When detected (or forced with `--container`), defaults shift to:
* Config path: `/app/Config.toml`
* State file: `/app/bridge_state.json`
* Secrets dir: `/run/secrets`
* Poll interval: 15 seconds (if not otherwise configured)
Set `POTATOMESH_CONTAINER=0` or `--no-container` to opt out of container defaults.
### PotatoMesh API
The bridge assumes:
* Messages: `GET {base_url}/messages` JSON array, for example:
* Messages: `GET {base_url}/api/messages` JSON array, for example:
```json
[
@@ -108,7 +174,7 @@ The bridge assumes:
]
```
* Nodes: `GET {base_url}/nodes/{hex}` JSON, for example:
* Nodes: `GET {base_url}/api/nodes/{hex}` JSON, for example:
```json
{
@@ -122,7 +188,7 @@ The bridge assumes:
}
```
Node hex ID is derived from `node_id` by stripping the leading `!` and using the remainder as the Matrix localpart.
Node hex ID is derived from `node_id` by stripping the leading `!` and using the remainder inside the puppet localpart prefix (`potato_{hex}`).
---
@@ -134,7 +200,7 @@ A minimal example sketch (you **must** adjust URLs, secrets, namespaces):
```yaml
id: potatomesh-bridge
url: "http://your-bridge-host:8080" # not used by this bridge if it only calls out
url: "http://your-bridge-host:41448"
as_token: "YOUR_APPSERVICE_AS_TOKEN"
hs_token: "SECRET_HS_TOKEN"
sender_localpart: "potatomesh-bridge"
@@ -142,13 +208,15 @@ rate_limited: false
namespaces:
users:
- exclusive: true
regex: "@[0-9a-f]{8}:example.org"
regex: "@potato_[0-9a-f]{8}:example.org"
```
For this bridge, only the `as_token` and `namespaces.users` actually matter. The bridge does not accept inbound events; it only uses the `as_token` to call the homeserver.
This bridge listens for Synapse appservice callbacks on port `41448` so it can log inbound transaction payloads. It still only forwards messages one way (PotatoMesh → Matrix), so inbound Matrix events are acknowledged but not bridged. The `as_token` and `namespaces.users` entries remain required for outbound calls, and the `url` should point at the listener.
In Synapse's `homeserver.yaml`, add the registration file under `app_service_config_files`, restart, and invite a puppet user to your target room (or use room ID directly).
The bridge validates inbound appservice callbacks by comparing the `access_token` query param to `hs_token` in `Config.toml`, so keep those values in sync.
---
## Build
@@ -170,6 +238,38 @@ target/release/potatomesh-matrix-bridge
---
## Docker
Build the container from the repo root with the included `matrix/Dockerfile`:
```bash
docker build -f matrix/Dockerfile -t potatomesh-matrix-bridge .
```
Provide your config at `/app/Config.toml` (or use CLI/env/secret overrides) and persist the bridge state file by mounting volumes. Minimal example:
```bash
docker run --rm \
-p 41448:41448 \
-v bridge_state:/app \
-v "$(pwd)/matrix/Config.toml:/app/Config.toml:ro" \
potatomesh-matrix-bridge
```
If you prefer to isolate the state file from the config, mount it directly instead of the whole `/app` directory:
```bash
docker run --rm \
-p 41448:41448 \
-v "$(pwd)/bridge_state.json:/app/bridge_state.json" \
-v "$(pwd)/matrix/Config.toml:/app/Config.toml:ro" \
potatomesh-matrix-bridge
```
The image ships `Config.example.toml` for reference. If `/app/Config.toml` is absent, set the required values via environment variables, CLI flags, or secrets instead.
---
## Run
Ensure `Config.toml` is present and valid, then:
@@ -193,7 +293,7 @@ The bridge will:
3. For each new `TEXT_MESSAGE_APP`:
* Fetch node info.
* Ensure puppet is registered (`@potato_{hex}:{server_name}`).
* Set puppet display name to `long_name`.
* Send a formatted text message into `room_id` as that puppet.
* Update and persist `bridge_state.json`.
@@ -204,7 +304,7 @@ Delete `bridge_state.json` if you want it to replay all currently available mess
## Development
Run tests:
```bash
cargo test
+40
View File
@@ -0,0 +1,40 @@
#!/bin/sh
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Container entrypoint: sets container-mode environment defaults, prepares
# the state file location, then drops root and launches the bridge binary.

# Abort on the first failing command.
set -e

# Default to container-aware configuration paths unless explicitly overridden.
: "${POTATOMESH_CONTAINER:=1}"
: "${POTATOMESH_SECRETS_DIR:=/run/secrets}"
export POTATOMESH_CONTAINER
export POTATOMESH_SECRETS_DIR

# Default state file path from Config.toml unless overridden.
STATE_FILE="${STATE_FILE:-/app/bridge_state.json}"
STATE_DIR="$(dirname "$STATE_FILE")"

# Ensure state directory exists and is writable by the non-root user without
# touching the read-only config bind mount.
if [ ! -d "$STATE_DIR" ]; then
  mkdir -p "$STATE_DIR"
fi

# Best-effort ownership fix; ignore if the underlying volume is read-only.
chown potatomesh:potatomesh "$STATE_DIR" 2>/dev/null || true
touch "$STATE_FILE" 2>/dev/null || true
chown potatomesh:potatomesh "$STATE_FILE" 2>/dev/null || true

# Replace this shell with the bridge process, running as the unprivileged
# `potatomesh` user via gosu; remaining CLI arguments are forwarded as-is.
exec gosu potatomesh potatomesh-matrix-bridge "$@"
+105
View File
@@ -0,0 +1,105 @@
// Copyright © 2025-26 l5yth & contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use clap::{ArgAction, Parser};
#[cfg(not(test))]
use crate::config::{ConfigInputs, ConfigOverrides};
/// CLI arguments for the Matrix bridge.
///
/// Every value is optional: anything left unset falls back to environment
/// variables, the optional TOML config file, or built-in defaults when the
/// inputs are merged by the config loader (CLI values take precedence over
/// environment values in that merge).
#[derive(Debug, Parser)]
#[command(
    name = "potatomesh-matrix-bridge",
    version,
    about = "PotatoMesh Matrix bridge"
)]
pub struct Cli {
    /// Path to the configuration TOML file.
    #[arg(long, value_name = "PATH")]
    pub config: Option<String>,
    /// Path to the bridge state file.
    #[arg(long, value_name = "PATH")]
    pub state_file: Option<String>,
    /// PotatoMesh base URL.
    #[arg(long, value_name = "URL")]
    pub potatomesh_base_url: Option<String>,
    /// Poll interval in seconds.
    #[arg(long, value_name = "SECS")]
    pub potatomesh_poll_interval_secs: Option<u64>,
    /// Matrix homeserver base URL.
    #[arg(long, value_name = "URL")]
    pub matrix_homeserver: Option<String>,
    /// Matrix appservice access token.
    #[arg(long, value_name = "TOKEN")]
    pub matrix_as_token: Option<String>,
    /// Path to a secret file containing the Matrix appservice access token.
    #[arg(long, value_name = "PATH")]
    pub matrix_as_token_file: Option<String>,
    /// Matrix homeserver token for inbound appservice requests.
    #[arg(long, value_name = "TOKEN")]
    pub matrix_hs_token: Option<String>,
    /// Path to a secret file containing the Matrix homeserver token.
    #[arg(long, value_name = "PATH")]
    pub matrix_hs_token_file: Option<String>,
    /// Matrix server name (domain).
    #[arg(long, value_name = "NAME")]
    pub matrix_server_name: Option<String>,
    /// Matrix room id to forward into.
    #[arg(long, value_name = "ROOM")]
    pub matrix_room_id: Option<String>,
    /// Force container defaults (overrides detection).
    #[arg(long, action = ArgAction::SetTrue)]
    pub container: bool,
    /// Disable container defaults (overrides detection).
    #[arg(long, action = ArgAction::SetTrue)]
    pub no_container: bool,
    /// Directory to search for default secret files.
    #[arg(long, value_name = "PATH")]
    pub secrets_dir: Option<String>,
}
impl Cli {
    /// Translate parsed CLI flags into the configuration input set consumed
    /// by the config loader. Inline tokens and token-file paths are passed
    /// through untouched; the loader decides their relative precedence.
    #[cfg(not(test))]
    pub fn to_inputs(&self) -> ConfigInputs {
        let overrides = ConfigOverrides {
            potatomesh_base_url: self.potatomesh_base_url.clone(),
            potatomesh_poll_interval_secs: self.potatomesh_poll_interval_secs,
            matrix_homeserver: self.matrix_homeserver.clone(),
            matrix_as_token: self.matrix_as_token.clone(),
            matrix_as_token_file: self.matrix_as_token_file.clone(),
            matrix_hs_token: self.matrix_hs_token.clone(),
            matrix_hs_token_file: self.matrix_hs_token_file.clone(),
            matrix_server_name: self.matrix_server_name.clone(),
            matrix_room_id: self.matrix_room_id.clone(),
            state_file: self.state_file.clone(),
        };
        ConfigInputs {
            config_path: self.config.clone(),
            secrets_dir: self.secrets_dir.clone(),
            container_override: resolve_container_override(self.container, self.no_container),
            // The CLI never carries a CONTAINER-style hint; that comes from env.
            container_hint: None,
            overrides,
        }
    }
}
/// Resolve container override flags into an optional boolean.
#[cfg(not(test))]
fn resolve_container_override(container: bool, no_container: bool) -> Option<bool> {
match (container, no_container) {
(true, false) => Some(true),
(false, true) => Some(false),
_ => None,
}
}
+859 -24
View File
@@ -1,25 +1,51 @@
// Copyright © 2025-26 l5yth & contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::Deserialize;
use std::{fs, path::Path};
const DEFAULT_CONFIG_PATH: &str = "Config.toml";
const CONTAINER_CONFIG_PATH: &str = "/app/Config.toml";
const DEFAULT_STATE_FILE: &str = "bridge_state.json";
const CONTAINER_STATE_FILE: &str = "/app/bridge_state.json";
const DEFAULT_SECRETS_DIR: &str = "/run/secrets";
const CONTAINER_POLL_INTERVAL_SECS: u64 = 15;
/// PotatoMesh API settings.
#[derive(Debug, Deserialize, Clone)]
pub struct PotatomeshConfig {
    /// Base URL of the PotatoMesh instance to poll.
    pub base_url: String,
    /// Seconds to wait between polls of the PotatoMesh API.
    pub poll_interval_secs: u64,
}

/// Matrix appservice settings for the bridge.
#[derive(Debug, Deserialize, Clone)]
pub struct MatrixConfig {
    /// Homeserver base URL for outbound appservice calls.
    pub homeserver: String,
    /// Appservice token the bridge uses to call the homeserver.
    pub as_token: String,
    /// Homeserver token expected on inbound appservice requests.
    pub hs_token: String,
    /// Matrix server name (domain) used to build puppet user ids.
    pub server_name: String,
    /// Room id that bridged messages are forwarded into.
    pub room_id: String,
}

/// State file configuration for the bridge.
#[derive(Debug, Deserialize, Clone)]
pub struct StateConfig {
    /// Path of the JSON file where bridge progress is persisted.
    pub state_file: String,
}
/// Full configuration loaded for the bridge runtime.
#[derive(Debug, Deserialize, Clone)]
pub struct Config {
pub potatomesh: PotatomeshConfig,
@@ -27,19 +53,447 @@ pub struct Config {
pub state: StateConfig,
}
/// TOML-deserializable counterpart of `PotatomeshConfig` where every field is
/// optional, so a config file may supply only a subset of values.
#[derive(Debug, Deserialize, Clone, Default)]
struct PartialPotatomeshConfig {
    #[serde(default)]
    base_url: Option<String>,
    #[serde(default)]
    poll_interval_secs: Option<u64>,
}

/// Optional-field counterpart of `MatrixConfig` used while merging sources.
#[derive(Debug, Deserialize, Clone, Default)]
struct PartialMatrixConfig {
    #[serde(default)]
    homeserver: Option<String>,
    #[serde(default)]
    as_token: Option<String>,
    #[serde(default)]
    hs_token: Option<String>,
    #[serde(default)]
    server_name: Option<String>,
    #[serde(default)]
    room_id: Option<String>,
}

/// Optional-field counterpart of `StateConfig` used while merging sources.
#[derive(Debug, Deserialize, Clone, Default)]
struct PartialStateConfig {
    #[serde(default)]
    state_file: Option<String>,
}

/// Aggregate of all partial sections; the working representation during
/// config merging before required fields are validated.
#[derive(Debug, Deserialize, Clone, Default)]
struct PartialConfig {
    #[serde(default)]
    potatomesh: PartialPotatomeshConfig,
    #[serde(default)]
    matrix: PartialMatrixConfig,
    #[serde(default)]
    state: PartialStateConfig,
}
/// Replace `target` with `incoming` when `incoming` holds a value; leave
/// `target` untouched when `incoming` is `None`.
fn merge_option<T>(target: &mut Option<T>, incoming: Option<T>) {
    if let Some(value) = incoming {
        *target = Some(value);
    }
}
/// CLI or environment overrides for configuration fields.
///
/// Every field is optional; `None` means "no override supplied from this
/// source". Tokens may alternatively be supplied as secret-file paths via
/// the matching `*_file` fields.
#[derive(Debug, Clone, Default)]
pub struct ConfigOverrides {
    pub potatomesh_base_url: Option<String>,
    pub potatomesh_poll_interval_secs: Option<u64>,
    pub matrix_homeserver: Option<String>,
    pub matrix_as_token: Option<String>,
    pub matrix_as_token_file: Option<String>,
    pub matrix_hs_token: Option<String>,
    pub matrix_hs_token_file: Option<String>,
    pub matrix_server_name: Option<String>,
    pub matrix_room_id: Option<String>,
    pub state_file: Option<String>,
}
impl ConfigOverrides {
    /// Apply every override except the token / token-file fields onto a
    /// partial config parsed from TOML. Present values replace the TOML
    /// values; `None` overrides leave them intact. Tokens are skipped here
    /// on purpose — `resolve_token` merges them later so secret files keep
    /// their precedence.
    fn apply_non_token_overrides(&self, cfg: &mut PartialConfig) {
        merge_option(
            &mut cfg.potatomesh.base_url,
            self.potatomesh_base_url.clone(),
        );
        merge_option(
            &mut cfg.potatomesh.poll_interval_secs,
            self.potatomesh_poll_interval_secs,
        );
        merge_option(&mut cfg.matrix.homeserver, self.matrix_homeserver.clone());
        merge_option(&mut cfg.matrix.server_name, self.matrix_server_name.clone());
        merge_option(&mut cfg.matrix.room_id, self.matrix_room_id.clone());
        merge_option(&mut cfg.state.state_file, self.state_file.clone());
    }

    /// Merge `self` (lower priority) with `higher` (higher priority),
    /// preferring `higher`'s values field by field.
    ///
    /// Token special case: when `higher` supplies a token *file*, `higher`'s
    /// inline token is taken as-is (possibly `None`) instead of falling back
    /// to `self`'s inline token — otherwise a lower-priority inline token
    /// would shadow the higher-priority secret file.
    fn merge(self, higher: ConfigOverrides) -> ConfigOverrides {
        let matrix_as_token = if higher.matrix_as_token_file.is_some() {
            higher.matrix_as_token
        } else {
            higher.matrix_as_token.or(self.matrix_as_token)
        };
        let matrix_hs_token = if higher.matrix_hs_token_file.is_some() {
            higher.matrix_hs_token
        } else {
            higher.matrix_hs_token.or(self.matrix_hs_token)
        };
        ConfigOverrides {
            potatomesh_base_url: higher.potatomesh_base_url.or(self.potatomesh_base_url),
            potatomesh_poll_interval_secs: higher
                .potatomesh_poll_interval_secs
                .or(self.potatomesh_poll_interval_secs),
            matrix_homeserver: higher.matrix_homeserver.or(self.matrix_homeserver),
            matrix_as_token,
            matrix_as_token_file: higher.matrix_as_token_file.or(self.matrix_as_token_file),
            matrix_hs_token,
            matrix_hs_token_file: higher.matrix_hs_token_file.or(self.matrix_hs_token_file),
            matrix_server_name: higher.matrix_server_name.or(self.matrix_server_name),
            matrix_room_id: higher.matrix_room_id.or(self.matrix_room_id),
            state_file: higher.state_file.or(self.state_file),
        }
    }
}
/// Inputs gathered from CLI flags or environment variables.
#[derive(Debug, Clone, Default)]
pub struct ConfigInputs {
    /// Explicit config file path, if given (skips default path probing).
    pub config_path: Option<String>,
    /// Explicit secrets directory, if given.
    pub secrets_dir: Option<String>,
    /// Forced container mode (`Some`) or defer to detection (`None`).
    pub container_override: Option<bool>,
    /// Raw CONTAINER-style environment hint used for detection.
    pub container_hint: Option<String>,
    /// Field-level overrides for the merged configuration.
    pub overrides: ConfigOverrides,
}
impl ConfigInputs {
    /// Merge two input sets, preferring values from `higher`.
    ///
    /// Scalar fields use simple "higher wins when present" semantics; the
    /// nested overrides delegate to `ConfigOverrides::merge`, which also
    /// handles the token / token-file precedence rule.
    pub fn merge(self, higher: ConfigInputs) -> ConfigInputs {
        ConfigInputs {
            config_path: higher.config_path.or(self.config_path),
            secrets_dir: higher.secrets_dir.or(self.secrets_dir),
            container_override: higher.container_override.or(self.container_override),
            container_hint: higher.container_hint.or(self.container_hint),
            overrides: self.overrides.merge(higher.overrides),
        }
    }

    /// Load configuration inputs from the process environment.
    ///
    /// Blank or whitespace-only variables are treated as unset. Fails when
    /// `POTATOMESH_POLL_INTERVAL_SECS` or `POTATOMESH_CONTAINER` hold values
    /// that cannot be parsed.
    #[cfg(not(test))]
    pub fn from_env() -> anyhow::Result<Self> {
        let overrides = ConfigOverrides {
            potatomesh_base_url: env_var("POTATOMESH_BASE_URL"),
            potatomesh_poll_interval_secs: parse_u64_env("POTATOMESH_POLL_INTERVAL_SECS")?,
            matrix_homeserver: env_var("MATRIX_HOMESERVER"),
            matrix_as_token: env_var("MATRIX_AS_TOKEN"),
            matrix_as_token_file: env_var("MATRIX_AS_TOKEN_FILE"),
            matrix_hs_token: env_var("MATRIX_HS_TOKEN"),
            matrix_hs_token_file: env_var("MATRIX_HS_TOKEN_FILE"),
            matrix_server_name: env_var("MATRIX_SERVER_NAME"),
            matrix_room_id: env_var("MATRIX_ROOM_ID"),
            state_file: env_var("STATE_FILE"),
        };
        Ok(ConfigInputs {
            config_path: env_var("POTATOMESH_CONFIG"),
            secrets_dir: env_var("POTATOMESH_SECRETS_DIR"),
            container_override: parse_bool_env("POTATOMESH_CONTAINER")?,
            container_hint: env_var("CONTAINER"),
            overrides,
        })
    }
}
impl Config {
    /// Load a full Config from a TOML file.
    ///
    /// Test-only helper: production code goes through the merging loader
    /// instead. Fails if the file is unreadable or not valid TOML.
    #[cfg(test)]
    pub fn load_from_file(path: &str) -> anyhow::Result<Self> {
        let contents = fs::read_to_string(path)?;
        let cfg = toml::from_str(&contents)?;
        Ok(cfg)
    }
}
pub fn from_default_path() -> anyhow::Result<Self> {
let path = "Config.toml";
if !Path::new(path).exists() {
anyhow::bail!("Config file {path} not found");
/// Load a Config by merging CLI/env overrides with an optional TOML file.
#[cfg(not(test))]
pub fn load(cli_inputs: ConfigInputs) -> anyhow::Result<Config> {
let env_inputs = ConfigInputs::from_env()?;
let cgroup_hint = read_cgroup();
load_from_sources(cli_inputs, env_inputs, cgroup_hint.as_deref())
}
/// Load configuration by merging CLI/env inputs and an optional config file.
///
/// Precedence (highest first): CLI inputs, environment inputs, TOML file,
/// built-in defaults. Fails with a single error listing every missing
/// required field.
fn load_from_sources(
    cli_inputs: ConfigInputs,
    env_inputs: ConfigInputs,
    cgroup_hint: Option<&str>,
) -> anyhow::Result<Config> {
    // CLI wins over env: CLI is the `higher` argument of merge().
    let merged_inputs = env_inputs.merge(cli_inputs);
    let container = detect_container(
        merged_inputs.container_override,
        merged_inputs.container_hint.as_deref(),
        cgroup_hint,
    );
    let defaults = default_paths(container);
    // The optional TOML file forms the base; overrides are layered on top.
    let base_cfg = resolve_base_config(&merged_inputs, &defaults)?;
    let mut cfg = base_cfg.unwrap_or_default();
    merged_inputs.overrides.apply_non_token_overrides(&mut cfg);
    let secrets_dir = resolve_secrets_dir(&merged_inputs, container, &defaults);
    // Tokens are resolved separately so secret files participate in precedence.
    let as_token = resolve_token(
        cfg.matrix.as_token.clone(),
        merged_inputs.overrides.matrix_as_token.clone(),
        merged_inputs.overrides.matrix_as_token_file.as_deref(),
        secrets_dir.as_deref(),
        "matrix_as_token",
    )?;
    let hs_token = resolve_token(
        cfg.matrix.hs_token.clone(),
        merged_inputs.overrides.matrix_hs_token.clone(),
        merged_inputs.overrides.matrix_hs_token_file.as_deref(),
        secrets_dir.as_deref(),
        "matrix_hs_token",
    )?;
    // Container runs get a default poll interval; host runs must specify one.
    if cfg.potatomesh.poll_interval_secs.is_none() && container {
        cfg.potatomesh.poll_interval_secs = Some(defaults.poll_interval_secs);
    }
    if cfg.state.state_file.is_none() {
        cfg.state.state_file = Some(defaults.state_file);
    }
    let missing = collect_missing_fields(&cfg, &as_token, &hs_token);
    if !missing.is_empty() {
        anyhow::bail!(
            "Missing required configuration values: {}",
            missing.join(", ")
        );
    }
    // The unwraps below are safe: collect_missing_fields() just verified
    // that every required field is present.
    Ok(Config {
        potatomesh: PotatomeshConfig {
            base_url: cfg.potatomesh.base_url.unwrap(),
            poll_interval_secs: cfg.potatomesh.poll_interval_secs.unwrap(),
        },
        matrix: MatrixConfig {
            homeserver: cfg.matrix.homeserver.unwrap(),
            as_token: as_token.unwrap(),
            hs_token: hs_token.unwrap(),
            server_name: cfg.matrix.server_name.unwrap(),
            room_id: cfg.matrix.room_id.unwrap(),
        },
        state: StateConfig {
            state_file: cfg.state.state_file.unwrap(),
        },
    })
}
/// Collect the missing required field identifiers for error reporting.
/// Entries are emitted in a fixed order mirroring the TOML layout.
fn collect_missing_fields(
    cfg: &PartialConfig,
    as_token: &Option<String>,
    hs_token: &Option<String>,
) -> Vec<&'static str> {
    let requirements = [
        (cfg.potatomesh.base_url.is_some(), "potatomesh.base_url"),
        (
            cfg.potatomesh.poll_interval_secs.is_some(),
            "potatomesh.poll_interval_secs",
        ),
        (cfg.matrix.homeserver.is_some(), "matrix.homeserver"),
        (as_token.is_some(), "matrix.as_token"),
        (hs_token.is_some(), "matrix.hs_token"),
        (cfg.matrix.server_name.is_some(), "matrix.server_name"),
        (cfg.matrix.room_id.is_some(), "matrix.room_id"),
        (cfg.state.state_file.is_some(), "state.state_file"),
    ];
    requirements
        .iter()
        .filter(|(present, _)| !present)
        .map(|(_, name)| *name)
        .collect()
}
/// Resolve the base TOML config file, honoring explicit config paths.
///
/// An explicitly supplied path is always loaded (and may fail). Otherwise
/// the container default path is probed first, then the host default; when
/// neither exists the result is `Ok(None)`.
fn resolve_base_config(
    inputs: &ConfigInputs,
    defaults: &DefaultPaths,
) -> anyhow::Result<Option<PartialConfig>> {
    if let Some(path) = &inputs.config_path {
        return load_partial_from_file(path).map(Some);
    }
    for candidate in [defaults.config_path.as_str(), DEFAULT_CONFIG_PATH] {
        if Path::new(candidate).exists() {
            return load_partial_from_file(candidate).map(Some);
        }
    }
    Ok(None)
}
/// Decide which secrets directory to use: an explicitly supplied directory
/// wins, the container default applies in container mode, and host mode
/// uses no secrets directory at all.
fn resolve_secrets_dir(
    inputs: &ConfigInputs,
    container: bool,
    defaults: &DefaultPaths,
) -> Option<String> {
    inputs
        .secrets_dir
        .clone()
        .or_else(|| container.then(|| defaults.secrets_dir.clone()))
}
/// Resolve a token value from explicit values, secret files, or config file values.
fn resolve_token(
base_value: Option<String>,
explicit_value: Option<String>,
explicit_file: Option<&str>,
secrets_dir: Option<&str>,
default_secret_name: &str,
) -> anyhow::Result<Option<String>> {
if let Some(value) = explicit_value {
return Ok(Some(value));
}
if let Some(path) = explicit_file {
return Ok(Some(read_secret_file(path)?));
}
if let Some(dir) = secrets_dir {
let default_path = Path::new(dir).join(default_secret_name);
if default_path.exists() {
return Ok(Some(read_secret_file(
default_path
.to_str()
.ok_or_else(|| anyhow::anyhow!("Invalid secret file path"))?,
)?));
}
Self::load_from_file(path)
}
Ok(base_value)
}
/// Read and trim a secret file from disk.
/// Fails when the file is unreadable or contains only whitespace.
fn read_secret_file(path: &str) -> anyhow::Result<String> {
    let raw = fs::read_to_string(path)?;
    let token = raw.trim().to_string();
    if token.is_empty() {
        anyhow::bail!("Secret file {path} is empty");
    }
    Ok(token)
}
/// Load a partial config from a TOML file.
/// Fails when the file is unreadable or not valid TOML.
fn load_partial_from_file(path: &str) -> anyhow::Result<PartialConfig> {
    let raw = fs::read_to_string(path)?;
    Ok(toml::from_str(&raw)?)
}
/// Compute default paths and intervals based on container mode.
///
/// Only the config and state paths differ between modes; the secrets
/// directory and poll interval defaults are shared (the caller applies the
/// poll interval default in container mode only).
fn default_paths(container: bool) -> DefaultPaths {
    let (config_path, state_file) = if container {
        (CONTAINER_CONFIG_PATH, CONTAINER_STATE_FILE)
    } else {
        (DEFAULT_CONFIG_PATH, DEFAULT_STATE_FILE)
    };
    DefaultPaths {
        config_path: config_path.to_string(),
        state_file: state_file.to_string(),
        secrets_dir: DEFAULT_SECRETS_DIR.to_string(),
        poll_interval_secs: CONTAINER_POLL_INTERVAL_SECS,
    }
}
/// Default filesystem locations and timing values, varying by container mode.
#[derive(Debug, Clone)]
struct DefaultPaths {
    // TOML config path probed when none is given explicitly.
    config_path: String,
    // Default bridge state file path.
    state_file: String,
    // Directory searched for conventional secret files.
    secrets_dir: String,
    // Poll interval applied as a default (only in container mode).
    poll_interval_secs: u64,
}
/// Detect whether the bridge is running inside a container.
///
/// An explicit override always wins. Otherwise any non-blank CONTAINER-style
/// env hint means "container". Failing that, the cgroup contents are scanned
/// for well-known container runtime markers.
fn detect_container(
    override_value: Option<bool>,
    env_hint: Option<&str>,
    cgroup_hint: Option<&str>,
) -> bool {
    if let Some(forced) = override_value {
        return forced;
    }
    let env_says_container = env_hint.is_some_and(|hint| !hint.trim().is_empty());
    if env_says_container {
        return true;
    }
    match cgroup_hint {
        Some(cgroup) => {
            let lowered = cgroup.to_ascii_lowercase();
            ["docker", "kubepods", "containerd", "podman"]
                .iter()
                .any(|marker| lowered.contains(marker))
        }
        None => false,
    }
}
/// Read the primary cgroup file for container detection.
/// Returns `None` when /proc/1/cgroup is unreadable (e.g. non-Linux hosts).
#[cfg(not(test))]
fn read_cgroup() -> Option<String> {
    fs::read_to_string("/proc/1/cgroup").ok()
}
/// Read and trim an environment variable value.
#[cfg(not(test))]
fn env_var(key: &str) -> Option<String> {
std::env::var(key).ok().filter(|v| !v.trim().is_empty())
}
/// Parse a u64 environment variable value.
/// Absent or blank variables yield `Ok(None)`; unparsable values fail.
#[cfg(not(test))]
fn parse_u64_env(key: &str) -> anyhow::Result<Option<u64>> {
    let raw = match env_var(key) {
        Some(raw) => raw,
        None => return Ok(None),
    };
    raw.parse::<u64>()
        .map(Some)
        .map_err(|e| anyhow::anyhow!("Invalid {key} value: {e}"))
}
/// Parse a boolean environment variable value.
/// Absent or blank variables yield `Ok(None)`; invalid values fail.
#[cfg(not(test))]
fn parse_bool_env(key: &str) -> anyhow::Result<Option<bool>> {
    env_var(key)
        .map(|value| parse_bool_value(key, &value))
        .transpose()
}
/// Parse a boolean string with standard truthy/falsy values.
/// Accepts 1/true/yes/on and 0/false/no/off, case- and whitespace-insensitive.
#[cfg(not(test))]
fn parse_bool_value(key: &str, value: &str) -> anyhow::Result<bool> {
    const TRUTHY: [&str; 4] = ["1", "true", "yes", "on"];
    const FALSY: [&str; 4] = ["0", "false", "no", "off"];
    let normalized = value.trim().to_ascii_lowercase();
    if TRUTHY.contains(&normalized.as_str()) {
        Ok(true)
    } else if FALSY.contains(&normalized.as_str()) {
        Ok(false)
    } else {
        anyhow::bail!("Invalid {key} value: {value}")
    }
}
@@ -48,17 +502,55 @@ mod tests {
use super::*;
use serial_test::serial;
use std::io::Write;
use std::path::{Path, PathBuf};
/// Test helper that switches the process working directory on construction
/// and restores the original directory when dropped (RAII).
struct CwdGuard {
    // Directory to restore on drop.
    original: PathBuf,
}

impl CwdGuard {
    /// Switch to the provided path and restore the original cwd on drop.
    fn enter(path: &Path) -> Self {
        // If the current dir is unreadable, remember "/" as a safe fallback.
        let original = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("/"));
        std::env::set_current_dir(path).unwrap();
        Self { original }
    }
}

impl Drop for CwdGuard {
    fn drop(&mut self) {
        // Fall back to "/" if the original directory vanished meanwhile
        // (e.g. a tempdir that was already cleaned up).
        if std::env::set_current_dir(&self.original).is_err() {
            let _ = std::env::set_current_dir("/");
        }
    }
}
/// Build a ConfigOverrides carrying every required field, so tests can
/// exercise the loader without a TOML file; token-file fields stay unset.
fn minimal_overrides() -> ConfigOverrides {
    ConfigOverrides {
        potatomesh_base_url: Some("https://potatomesh.net/".to_string()),
        potatomesh_poll_interval_secs: Some(10),
        matrix_homeserver: Some("https://matrix.example.org".to_string()),
        matrix_as_token: Some("AS_TOKEN".to_string()),
        matrix_hs_token: Some("HS_TOKEN".to_string()),
        matrix_server_name: Some("example.org".to_string()),
        matrix_room_id: Some("!roomid:example.org".to_string()),
        state_file: Some("bridge_state.json".to_string()),
        matrix_as_token_file: None,
        matrix_hs_token_file: None,
    }
}
#[test]
fn parse_minimal_config_from_toml_str() {
let toml_str = r#"
[potatomesh]
base_url = "https://potatomesh.net/api"
base_url = "https://potatomesh.net/"
poll_interval_secs = 10
[matrix]
homeserver = "https://matrix.example.org"
as_token = "AS_TOKEN"
hs_token = "HS_TOKEN"
server_name = "example.org"
room_id = "!roomid:example.org"
@@ -67,11 +559,12 @@ mod tests {
"#;
let cfg: Config = toml::from_str(toml_str).expect("toml should parse");
assert_eq!(cfg.potatomesh.base_url, "https://potatomesh.net/api");
assert_eq!(cfg.potatomesh.base_url, "https://potatomesh.net/");
assert_eq!(cfg.potatomesh.poll_interval_secs, 10);
assert_eq!(cfg.matrix.homeserver, "https://matrix.example.org");
assert_eq!(cfg.matrix.as_token, "AS_TOKEN");
assert_eq!(cfg.matrix.hs_token, "HS_TOKEN");
assert_eq!(cfg.matrix.server_name, "example.org");
assert_eq!(cfg.matrix.room_id, "!roomid:example.org");
@@ -88,12 +581,13 @@ mod tests {
fn load_from_file_valid_file() {
let toml_str = r#"
[potatomesh]
base_url = "https://potatomesh.net/api"
base_url = "https://potatomesh.net/"
poll_interval_secs = 10
[matrix]
homeserver = "https://matrix.example.org"
as_token = "AS_TOKEN"
hs_token = "HS_TOKEN"
server_name = "example.org"
room_id = "!roomid:example.org"
@@ -107,37 +601,378 @@ mod tests {
}
#[test]
#[serial]
fn from_default_path_not_found() {
let tmp_dir = tempfile::tempdir().unwrap();
std::env::set_current_dir(tmp_dir.path()).unwrap();
let result = Config::from_default_path();
assert!(result.is_err());
fn detect_container_prefers_override() {
assert!(detect_container(Some(true), None, None));
assert!(!detect_container(
Some(false),
Some("docker"),
Some("docker")
));
}
#[test]
#[serial]
fn from_default_path_found() {
fn detect_container_from_hint_or_cgroup() {
assert!(detect_container(None, Some("docker"), None));
assert!(detect_container(None, None, Some("kubepods")));
assert!(!detect_container(None, None, Some("")));
}
#[test]
fn load_uses_cli_overrides_over_env() {
let toml_str = r#"
[potatomesh]
base_url = "https://potatomesh.net/api"
poll_interval_secs = 10
base_url = "https://potatomesh.net/"
poll_interval_secs = 5
[matrix]
homeserver = "https://matrix.example.org"
as_token = "AS_TOKEN"
hs_token = "HS_TOKEN"
server_name = "example.org"
room_id = "!roomid:example.org"
[state]
state_file = "bridge_state.json"
"#;
let tmp_dir = tempfile::tempdir().unwrap();
let file_path = tmp_dir.path().join("Config.toml");
let mut file = std::fs::File::create(file_path).unwrap();
let mut file = tempfile::NamedTempFile::new().unwrap();
write!(file, "{}", toml_str).unwrap();
std::env::set_current_dir(tmp_dir.path()).unwrap();
let result = Config::from_default_path();
assert!(result.is_ok());
let env_inputs = ConfigInputs {
config_path: Some(file.path().to_str().unwrap().to_string()),
overrides: ConfigOverrides {
potatomesh_base_url: Some("https://env.example/".to_string()),
..minimal_overrides()
},
..ConfigInputs::default()
};
let cli_inputs = ConfigInputs {
overrides: ConfigOverrides {
potatomesh_base_url: Some("https://cli.example/".to_string()),
..ConfigOverrides::default()
},
..ConfigInputs::default()
};
let cfg = load_from_sources(cli_inputs, env_inputs, None).unwrap();
assert_eq!(cfg.potatomesh.base_url, "https://cli.example/");
}
#[test]
#[serial]
fn load_uses_container_secret_defaults() {
let tmp_dir = tempfile::tempdir().unwrap();
let _guard = CwdGuard::enter(tmp_dir.path());
let secrets_dir = tmp_dir.path();
fs::write(secrets_dir.join("matrix_as_token"), "FROM_SECRET").unwrap();
let cli_inputs = ConfigInputs {
secrets_dir: Some(secrets_dir.to_string_lossy().to_string()),
container_override: Some(true),
overrides: ConfigOverrides {
potatomesh_base_url: Some("https://potatomesh.net/".to_string()),
potatomesh_poll_interval_secs: Some(10),
matrix_homeserver: Some("https://matrix.example.org".to_string()),
matrix_hs_token: Some("HS_TOKEN".to_string()),
matrix_server_name: Some("example.org".to_string()),
matrix_room_id: Some("!roomid:example.org".to_string()),
state_file: Some("bridge_state.json".to_string()),
..ConfigOverrides::default()
},
..ConfigInputs::default()
};
let cfg = load_from_sources(cli_inputs, ConfigInputs::default(), None).unwrap();
assert_eq!(cfg.matrix.as_token, "FROM_SECRET");
}
#[test]
fn resolve_token_prefers_explicit_value() {
let tmp_dir = tempfile::tempdir().unwrap();
let token_file = tmp_dir.path().join("token");
fs::write(&token_file, "FROM_FILE").unwrap();
let resolved = resolve_token(
Some("FROM_BASE".to_string()),
Some("FROM_EXPLICIT".to_string()),
Some(token_file.to_str().unwrap()),
Some(tmp_dir.path().to_str().unwrap()),
"matrix_as_token",
)
.unwrap();
assert_eq!(resolved, Some("FROM_EXPLICIT".to_string()));
}
#[test]
fn resolve_token_reads_explicit_file() {
let tmp_dir = tempfile::tempdir().unwrap();
let token_file = tmp_dir.path().join("token");
fs::write(&token_file, "FROM_FILE").unwrap();
let resolved = resolve_token(
None,
None,
Some(token_file.to_str().unwrap()),
None,
"matrix_as_token",
)
.unwrap();
assert_eq!(resolved, Some("FROM_FILE".to_string()));
}
#[test]
fn resolve_token_reads_default_secret_file() {
let tmp_dir = tempfile::tempdir().unwrap();
fs::write(tmp_dir.path().join("matrix_hs_token"), "FROM_SECRET").unwrap();
let resolved = resolve_token(
None,
None,
None,
Some(tmp_dir.path().to_str().unwrap()),
"matrix_hs_token",
)
.unwrap();
assert_eq!(resolved, Some("FROM_SECRET".to_string()));
}
#[test]
fn resolve_token_errors_on_empty_secret_file() {
let tmp_dir = tempfile::tempdir().unwrap();
let token_file = tmp_dir.path().join("token");
fs::write(&token_file, " ").unwrap();
let result = resolve_token(
None,
None,
Some(token_file.to_str().unwrap()),
None,
"matrix_as_token",
);
assert!(result.is_err());
}
#[test]
fn resolve_secrets_dir_prefers_explicit() {
let defaults = DefaultPaths {
config_path: "Config.toml".to_string(),
state_file: DEFAULT_STATE_FILE.to_string(),
secrets_dir: "default".to_string(),
poll_interval_secs: CONTAINER_POLL_INTERVAL_SECS,
};
let inputs = ConfigInputs {
secrets_dir: Some("explicit".to_string()),
..ConfigInputs::default()
};
let resolved = resolve_secrets_dir(&inputs, true, &defaults);
assert_eq!(resolved, Some("explicit".to_string()));
}
#[test]
fn resolve_secrets_dir_container_default() {
let defaults = DefaultPaths {
config_path: "Config.toml".to_string(),
state_file: DEFAULT_STATE_FILE.to_string(),
secrets_dir: "default".to_string(),
poll_interval_secs: CONTAINER_POLL_INTERVAL_SECS,
};
let inputs = ConfigInputs::default();
let resolved = resolve_secrets_dir(&inputs, true, &defaults);
assert_eq!(resolved, Some("default".to_string()));
assert_eq!(resolve_secrets_dir(&inputs, false, &defaults), None);
}
#[test]
#[serial]
fn resolve_base_config_prefers_explicit_path() {
let tmp_dir = tempfile::tempdir().unwrap();
let _guard = CwdGuard::enter(tmp_dir.path());
let config_path = tmp_dir.path().join("explicit.toml");
fs::write(
&config_path,
r#"[potatomesh]
base_url = "https://potatomesh.net/"
poll_interval_secs = 10
[matrix]
homeserver = "https://matrix.example.org"
as_token = "AS_TOKEN"
hs_token = "HS_TOKEN"
server_name = "example.org"
room_id = "!roomid:example.org"
[state]
state_file = "bridge_state.json"
"#,
)
.unwrap();
let defaults = default_paths(false);
let inputs = ConfigInputs {
config_path: Some(config_path.to_string_lossy().to_string()),
..ConfigInputs::default()
};
let resolved = resolve_base_config(&inputs, &defaults).unwrap();
assert!(resolved.is_some());
}
#[test]
#[serial]
fn resolve_base_config_uses_container_path_when_present() {
let tmp_dir = tempfile::tempdir().unwrap();
let _guard = CwdGuard::enter(tmp_dir.path());
let config_path = tmp_dir.path().join("container.toml");
fs::write(
&config_path,
r#"[potatomesh]
base_url = "https://potatomesh.net/"
poll_interval_secs = 10
[matrix]
homeserver = "https://matrix.example.org"
as_token = "AS_TOKEN"
hs_token = "HS_TOKEN"
server_name = "example.org"
room_id = "!roomid:example.org"
[state]
state_file = "bridge_state.json"
"#,
)
.unwrap();
let defaults = DefaultPaths {
config_path: config_path.to_string_lossy().to_string(),
state_file: DEFAULT_STATE_FILE.to_string(),
secrets_dir: DEFAULT_SECRETS_DIR.to_string(),
poll_interval_secs: CONTAINER_POLL_INTERVAL_SECS,
};
let resolved = resolve_base_config(&ConfigInputs::default(), &defaults).unwrap();
assert!(resolved.is_some());
}
#[test]
#[serial]
fn resolve_base_config_uses_host_path_when_present() {
let tmp_dir = tempfile::tempdir().unwrap();
let _guard = CwdGuard::enter(tmp_dir.path());
fs::write(
"Config.toml",
r#"[potatomesh]
base_url = "https://potatomesh.net/"
poll_interval_secs = 10
[matrix]
homeserver = "https://matrix.example.org"
as_token = "AS_TOKEN"
hs_token = "HS_TOKEN"
server_name = "example.org"
room_id = "!roomid:example.org"
[state]
state_file = "bridge_state.json"
"#,
)
.unwrap();
let defaults = default_paths(false);
let resolved = resolve_base_config(&ConfigInputs::default(), &defaults).unwrap();
assert!(resolved.is_some());
}
#[test]
#[serial]
fn resolve_base_config_returns_none_when_missing() {
let tmp_dir = tempfile::tempdir().unwrap();
let _guard = CwdGuard::enter(tmp_dir.path());
let defaults = default_paths(false);
let resolved = resolve_base_config(&ConfigInputs::default(), &defaults).unwrap();
assert!(resolved.is_none());
}
#[test]
#[serial]
fn load_prefers_cli_token_file_over_env_value() {
let tmp_dir = tempfile::tempdir().unwrap();
let _guard = CwdGuard::enter(tmp_dir.path());
let token_file = tmp_dir.path().join("as_token");
fs::write(&token_file, "CLI_SECRET").unwrap();
let env_inputs = ConfigInputs {
overrides: ConfigOverrides {
potatomesh_base_url: Some("https://potatomesh.net/".to_string()),
potatomesh_poll_interval_secs: Some(10),
matrix_homeserver: Some("https://matrix.example.org".to_string()),
matrix_as_token: Some("ENV_TOKEN".to_string()),
matrix_hs_token: Some("HS_TOKEN".to_string()),
matrix_server_name: Some("example.org".to_string()),
matrix_room_id: Some("!roomid:example.org".to_string()),
..ConfigOverrides::default()
},
..ConfigInputs::default()
};
let cli_inputs = ConfigInputs {
overrides: ConfigOverrides {
matrix_as_token_file: Some(token_file.to_string_lossy().to_string()),
..ConfigOverrides::default()
},
..ConfigInputs::default()
};
let cfg = load_from_sources(cli_inputs, env_inputs, None).unwrap();
assert_eq!(cfg.matrix.as_token, "CLI_SECRET");
}
#[test]
#[serial]
fn load_uses_container_default_poll_interval() {
    // Container mode without an explicit interval must fall back to the
    // container-specific default.
    let workdir = tempfile::tempdir().unwrap();
    let _cwd = CwdGuard::enter(workdir.path());

    let mut overrides = ConfigOverrides::default();
    overrides.potatomesh_base_url = Some("https://potatomesh.net/".to_string());
    overrides.matrix_homeserver = Some("https://matrix.example.org".to_string());
    overrides.matrix_as_token = Some("AS_TOKEN".to_string());
    overrides.matrix_hs_token = Some("HS_TOKEN".to_string());
    overrides.matrix_server_name = Some("example.org".to_string());
    overrides.matrix_room_id = Some("!roomid:example.org".to_string());
    let cli_inputs = ConfigInputs {
        container_override: Some(true),
        overrides,
        ..ConfigInputs::default()
    };

    let cfg = load_from_sources(cli_inputs, ConfigInputs::default(), None).unwrap();
    assert_eq!(
        cfg.potatomesh.poll_interval_secs,
        CONTAINER_POLL_INTERVAL_SECS
    );
}
#[test]
#[serial]
fn load_uses_default_state_path_when_missing() {
    // With no state-file override from any source, the compiled-in default applies.
    let workdir = tempfile::tempdir().unwrap();
    let _cwd = CwdGuard::enter(workdir.path());

    let mut overrides = ConfigOverrides::default();
    overrides.potatomesh_base_url = Some("https://potatomesh.net/".to_string());
    overrides.potatomesh_poll_interval_secs = Some(10);
    overrides.matrix_homeserver = Some("https://matrix.example.org".to_string());
    overrides.matrix_as_token = Some("AS_TOKEN".to_string());
    overrides.matrix_hs_token = Some("HS_TOKEN".to_string());
    overrides.matrix_server_name = Some("example.org".to_string());
    overrides.matrix_room_id = Some("!roomid:example.org".to_string());
    let cli_inputs = ConfigInputs {
        overrides,
        ..ConfigInputs::default()
    };

    let cfg = load_from_sources(cli_inputs, ConfigInputs::default(), None).unwrap();
    assert_eq!(cfg.state.state_file, DEFAULT_STATE_FILE);
}
}
+589 -79
View File
@@ -1,20 +1,54 @@
// Copyright © 2025-26 l5yth & contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod cli;
mod config;
mod matrix;
mod matrix_server;
mod potatomesh;
use std::{fs, path::Path};
use std::{fs, net::SocketAddr, path::Path};
use anyhow::Result;
use tokio::time::{sleep, Duration};
#[cfg(not(test))]
use clap::Parser;
use tokio::time::Duration;
use tracing::{error, info};
#[cfg(not(test))]
use crate::cli::Cli;
#[cfg(not(test))]
use crate::config::Config;
use crate::matrix::MatrixAppserviceClient;
use crate::potatomesh::{PotatoClient, PotatoMessage};
use crate::matrix_server::run_synapse_listener;
use crate::potatomesh::{FetchParams, PotatoClient, PotatoMessage, PotatoNode};
#[cfg(not(test))]
use tokio::time::sleep;
/// Persisted checkpoint describing how far the bridge has consumed the
/// PotatoMesh message stream. Serialized to JSON on disk via serde; the
/// `#[serde(default)]` fields keep older state files loadable.
#[derive(Debug, serde::Serialize, serde::Deserialize, Default)]
pub struct BridgeState {
    /// Id of the most recently processed message.
    last_message_id: Option<u64>,
    /// Highest rx_time observed; used to build incremental fetch queries.
    #[serde(default)]
    last_rx_time: Option<u64>,
    /// Message ids seen at the current last_rx_time for de-duplication.
    #[serde(default)]
    last_rx_time_ids: Vec<u64>,
    /// Legacy checkpoint timestamp used before last_rx_time was added.
    /// Read for migration only; never written back (skip_serializing).
    #[serde(default, skip_serializing)]
    last_checked_at: Option<u64>,
}
impl BridgeState {
@@ -23,7 +57,15 @@ impl BridgeState {
return Ok(Self::default());
}
let data = fs::read_to_string(path)?;
let s: Self = serde_json::from_str(&data)?;
// Treat empty/whitespace-only files as a fresh state.
if data.trim().is_empty() {
return Ok(Self::default());
}
let mut s: Self = serde_json::from_str(&data)?;
if s.last_rx_time.is_none() {
s.last_rx_time = s.last_checked_at;
}
s.last_checked_at = None;
Ok(s)
}
@@ -34,20 +76,131 @@ impl BridgeState {
}
fn should_forward(&self, msg: &PotatoMessage) -> bool {
match self.last_message_id {
None => true,
Some(last) => msg.id > last,
match self.last_rx_time {
None => match self.last_message_id {
None => true,
Some(last_id) => msg.id > last_id,
},
Some(last_ts) => {
if msg.rx_time > last_ts {
true
} else if msg.rx_time < last_ts {
false
} else {
!self.last_rx_time_ids.contains(&msg.id)
}
}
}
}
fn update_with(&mut self, msg: &PotatoMessage) {
self.last_message_id = Some(match self.last_message_id {
None => msg.id,
Some(last) => last.max(msg.id),
});
self.last_message_id = Some(msg.id);
if self.last_rx_time.is_none() || Some(msg.rx_time) > self.last_rx_time {
self.last_rx_time = Some(msg.rx_time);
self.last_rx_time_ids = vec![msg.id];
} else if Some(msg.rx_time) == self.last_rx_time && !self.last_rx_time_ids.contains(&msg.id)
{
self.last_rx_time_ids.push(msg.id);
}
}
}
/// Derive the PotatoMesh fetch window from the persisted bridge state.
///
/// - Fresh state (no message id yet): unbounded fetch, no `since`.
/// - Id plus rx_time checkpoint: incremental fetch from that timestamp.
/// - Legacy id-only state (no rx_time): small bounded window of 10.
fn build_fetch_params(state: &BridgeState) -> FetchParams {
    match (state.last_message_id, state.last_rx_time) {
        (None, _) => FetchParams {
            limit: None,
            since: None,
        },
        (Some(_), Some(ts)) => FetchParams {
            limit: None,
            since: Some(ts),
        },
        (Some(_), None) => FetchParams {
            limit: Some(10),
            since: None,
        },
    }
}
/// Persist the bridge state and log any write errors.
fn persist_state(state: &BridgeState, state_path: &str) {
if let Err(e) = state.save(state_path) {
error!("Error saving state: {:?}", e);
}
}
/// Emit an info log for the latest bridge state snapshot.
///
/// Kept as a dedicated helper so state logging is uniform across call
/// sites (and exercised directly by `log_state_update_emits_info`).
fn log_state_update(state: &BridgeState) {
    info!("Updated state: {:?}", state);
}
/// Emit a sanitized config log without sensitive tokens.
///
/// Deliberately logs only endpoint/identity fields; `as_token` and
/// `hs_token` are intentionally omitted so secrets never reach the logs.
#[cfg(not(test))]
fn log_config(cfg: &Config) {
    info!(
        potatomesh_base_url = cfg.potatomesh.base_url.as_str(),
        matrix_homeserver = cfg.matrix.homeserver.as_str(),
        matrix_server_name = cfg.matrix.server_name.as_str(),
        matrix_room_id = cfg.matrix.room_id.as_str(),
        state_file = cfg.state.state_file.as_str(),
        "Loaded config"
    );
}
/// Run one poll cycle: fetch new PotatoMesh messages and forward text
/// messages to Matrix.
///
/// Messages are processed in `rx_time` order. Non-text ports advance the
/// checkpoint without being bridged; state is persisted after every
/// processed message. A failed `handle_message` leaves state untouched,
/// so that message is retried on the next poll.
async fn poll_once(
    potato: &PotatoClient,
    matrix: &MatrixAppserviceClient,
    state: &mut BridgeState,
    state_path: &str,
) {
    let params = build_fetch_params(state);
    match potato.fetch_messages(params).await {
        Ok(mut msgs) => {
            // sort by rx_time so we process by actual receipt time
            msgs.sort_by_key(|m| m.rx_time);
            for msg in &msgs {
                if !state.should_forward(msg) {
                    continue;
                }
                // Filter to the ports you care about
                if let Some(port) = &msg.portnum {
                    if port != "TEXT_MESSAGE_APP" {
                        // Non-text traffic still moves the checkpoint forward
                        // so it is not re-fetched next cycle.
                        state.update_with(msg);
                        log_state_update(state);
                        persist_state(state, state_path);
                        continue;
                    }
                }
                // NOTE(review): a message with portnum == None falls through
                // and is treated as text — confirm that is intended.
                if let Err(e) = handle_message(potato, matrix, state, msg).await {
                    error!("Error handling message {}: {:?}", msg.id, e);
                    continue;
                }
                // persist after each processed message
                persist_state(state, state_path);
            }
        }
        Err(e) => {
            error!("Error fetching PotatoMesh messages: {:?}", e);
        }
    }
}
fn spawn_synapse_listener(addr: SocketAddr, token: String) -> tokio::task::JoinHandle<()> {
tokio::spawn(async move {
if let Err(e) = run_synapse_listener(addr, token).await {
error!("Synapse listener failed: {:?}", e);
}
})
}
#[cfg(not(test))]
#[tokio::main]
async fn main() -> Result<()> {
// Logging: RUST_LOG=info,bridge=debug,reqwest=warn ...
@@ -59,12 +212,19 @@ async fn main() -> Result<()> {
)
.init();
let cfg = Config::from_default_path()?;
info!("Loaded config: {:?}", cfg);
let cli = Cli::parse();
let cfg = config::load(cli.to_inputs())?;
log_config(&cfg);
let http = reqwest::Client::builder().build()?;
let potato = PotatoClient::new(http.clone(), cfg.potatomesh.clone());
potato.health_check().await?;
let matrix = MatrixAppserviceClient::new(http.clone(), cfg.matrix.clone());
matrix.health_check().await?;
let synapse_addr = SocketAddr::from(([0, 0, 0, 0], 41448));
let synapse_token = cfg.matrix.hs_token.clone();
let _synapse_handle = spawn_synapse_listener(synapse_addr, synapse_token);
let state_path = &cfg.state.state_file;
let mut state = BridgeState::load(state_path)?;
@@ -73,36 +233,7 @@ async fn main() -> Result<()> {
let poll_interval = Duration::from_secs(cfg.potatomesh.poll_interval_secs);
loop {
match potato.fetch_messages().await {
Ok(mut msgs) => {
// sort by id ascending so we process in order
msgs.sort_by_key(|m| m.id);
for msg in msgs {
if !state.should_forward(&msg) {
continue;
}
// Filter to the ports you care about
if msg.portnum != "TEXT_MESSAGE_APP" {
state.update_with(&msg);
continue;
}
if let Err(e) = handle_message(&potato, &matrix, &mut state, &msg).await {
error!("Error handling message {}: {:?}", msg.id, e);
}
// persist after each processed message
if let Err(e) = state.save(state_path) {
error!("Error saving state: {:?}", e);
}
}
}
Err(e) => {
error!("Error fetching PotatoMesh messages: {:?}", e);
}
}
poll_once(&potato, &matrix, &mut state, state_path).await;
sleep(poll_interval).await;
}
@@ -120,32 +251,79 @@ async fn handle_message(
// Ensure puppet exists & has display name
matrix.ensure_user_registered(&localpart).await?;
matrix.set_display_name(&user_id, &node.long_name).await?;
matrix.ensure_user_joined_room(&user_id).await?;
let display_name = display_name_for_node(&node);
matrix.set_display_name(&user_id, &display_name).await?;
// Format the bridged message
let short = node
.short_name
.clone()
.unwrap_or_else(|| node.long_name.clone());
let body = format!(
"[{short}] {text}\n({from_id}{to_id}, RSSI {rssi} dB, SNR {snr} dB, {chan}/{preset})",
short = short,
text = msg.text,
from_id = msg.from_id,
to_id = msg.to_id,
rssi = msg.rssi,
snr = msg.snr,
chan = msg.channel_name,
preset = msg.modem_preset,
let preset_short = modem_preset_short(&msg.modem_preset);
let prefix = format!(
"[{freq}][{preset_short}][{channel}]",
freq = msg.lora_freq,
preset_short = preset_short,
channel = msg.channel_name,
);
let (body, formatted_body) = format_message_bodies(&prefix, &msg.text);
matrix.send_text_message_as(&user_id, &body).await?;
matrix
.send_formatted_message_as(&user_id, &body, &formatted_body)
.await?;
info!("Bridged message: {:?}", msg);
state.update_with(msg);
log_state_update(state);
Ok(())
}
/// Build a compact modem preset label like "LF" for "LongFast".
///
/// Collects the ASCII uppercase initials of the preset; if there are none
/// (e.g. an all-lowercase name), falls back to the first two characters.
fn modem_preset_short(preset: &str) -> String {
    let initials: String = preset.chars().filter(char::is_ascii_uppercase).collect();
    if initials.is_empty() {
        preset.chars().take(2).collect()
    } else {
        initials
    }
}
/// Build plain text + HTML message bodies with inline-code metadata.
///
/// The plain body wraps the prefix in backticks; the HTML body wraps the
/// escaped prefix in `<code>` and escapes the message text as well.
fn format_message_bodies(prefix: &str, text: &str) -> (String, String) {
    let plain = format!("`{}` {}", prefix, text);
    let html = format!("<code>{}</code> {}", escape_html(prefix), escape_html(text));
    (plain, html)
}
/// Build the Matrix display name from a node's long/short names.
///
/// Appends the trimmed short name in parentheses unless it is absent,
/// blank, or identical to the long name.
fn display_name_for_node(node: &PotatoNode) -> String {
    let trimmed = node.short_name.as_deref().map(str::trim);
    match trimmed {
        Some(short) if !short.is_empty() && short != node.long_name => {
            format!("{} ({})", node.long_name, short)
        }
        _ => node.long_name.clone(),
    }
}
/// Minimal HTML escaping for Matrix formatted_body payloads.
///
/// Escapes the five characters with special meaning in HTML text and
/// attribute contexts; everything else is passed through unchanged.
fn escape_html(input: &str) -> String {
    input
        .chars()
        .fold(String::with_capacity(input.len()), |mut out, ch| {
            match ch {
                '&' => out.push_str("&amp;"),
                '<' => out.push_str("&lt;"),
                '>' => out.push_str("&gt;"),
                '"' => out.push_str("&quot;"),
                '\'' => out.push_str("&#39;"),
                other => out.push(other),
            }
            out
        })
}
#[cfg(test)]
mod tests {
use super::*;
@@ -161,19 +339,67 @@ mod tests {
from_id: "!abcd1234".to_string(),
to_id: "^all".to_string(),
channel: 1,
portnum: "TEXT_MESSAGE_APP".to_string(),
portnum: Some("TEXT_MESSAGE_APP".to_string()),
text: "Ping".to_string(),
rssi: -100,
hop_limit: 1,
rssi: Some(-100),
hop_limit: Some(1),
lora_freq: 868,
modem_preset: "MediumFast".to_string(),
channel_name: "TEST".to_string(),
snr: 0.0,
snr: Some(0.0),
reply_id: None,
node_id: "!abcd1234".to_string(),
}
}
/// Build a minimal `PotatoNode` fixture with only the naming fields set;
/// all telemetry/position fields are left empty.
fn sample_node(short_name: Option<&str>, long_name: &str) -> PotatoNode {
    PotatoNode {
        node_id: "!abcd1234".to_string(),
        short_name: short_name.map(str::to_string),
        long_name: long_name.to_string(),
        role: None,
        hw_model: None,
        last_heard: None,
        first_heard: None,
        latitude: None,
        longitude: None,
        altitude: None,
    }
}
#[test]
// CamelCase presets collapse to their uppercase initials.
fn modem_preset_short_handles_camelcase() {
    assert_eq!(modem_preset_short("LongFast"), "LF");
    assert_eq!(modem_preset_short("MediumFast"), "MF");
}
#[test]
// The plain body keeps raw text; only the HTML body is escaped.
fn format_message_bodies_escape_html() {
    let (body, formatted) = format_message_bodies("[868][LF]", "Hello <&>");
    assert_eq!(body, "`[868][LF]` Hello <&>");
    assert_eq!(formatted, "<code>[868][LF]</code> Hello &lt;&amp;&gt;");
}
#[test]
// Both quote styles must be escaped for safe HTML attribute contexts.
fn escape_html_escapes_quotes() {
    assert_eq!(escape_html("a\"b'c"), "a&quot;b&#39;c");
}
#[test]
// A distinct short name is appended in parentheses after the long name.
fn display_name_for_node_includes_short_when_present() {
    let node = sample_node(Some("TN"), "Test Node");
    assert_eq!(display_name_for_node(&node), "Test Node (TN)");
}
#[test]
// Blank or redundant short names fall back to the long name alone.
fn display_name_for_node_ignores_empty_or_duplicate_short() {
    let empty_short = sample_node(Some(""), "Test Node");
    assert_eq!(display_name_for_node(&empty_short), "Test Node");
    let duplicate_short = sample_node(Some("Test Node"), "Test Node");
    assert_eq!(display_name_for_node(&duplicate_short), "Test Node");
}
#[test]
fn bridge_state_initially_forwards_all() {
let state = BridgeState::default();
@@ -183,38 +409,72 @@ mod tests {
}
#[test]
fn bridge_state_tracks_highest_id_and_skips_older() {
fn bridge_state_tracks_latest_rx_time_and_skips_older() {
let mut state = BridgeState::default();
let m1 = sample_msg(10);
let m2 = sample_msg(20);
let m3 = sample_msg(15);
let m1 = PotatoMessage { rx_time: 10, ..m1 };
let m2 = PotatoMessage { rx_time: 20, ..m2 };
let m3 = PotatoMessage { rx_time: 15, ..m3 };
// First message, should forward
assert!(state.should_forward(&m1));
state.update_with(&m1);
assert_eq!(state.last_message_id, Some(10));
assert_eq!(state.last_rx_time, Some(10));
// Second message, higher id, should forward
assert!(state.should_forward(&m2));
state.update_with(&m2);
assert_eq!(state.last_message_id, Some(20));
assert_eq!(state.last_rx_time, Some(20));
// Third message, lower than last, should NOT forward
assert!(!state.should_forward(&m3));
// state remains unchanged
assert_eq!(state.last_message_id, Some(20));
assert_eq!(state.last_rx_time, Some(20));
}
#[test]
fn bridge_state_update_is_monotonic() {
let mut state = BridgeState {
last_message_id: Some(50),
fn bridge_state_uses_legacy_id_filter_when_rx_time_missing() {
let state = BridgeState {
last_message_id: Some(10),
last_rx_time: None,
last_rx_time_ids: vec![],
last_checked_at: None,
};
let m = sample_msg(40);
let older = sample_msg(9);
let newer = sample_msg(11);
state.update_with(&m); // id is lower than current
// last_message_id must stay at 50
assert_eq!(state.last_message_id, Some(50));
assert!(!state.should_forward(&older));
assert!(state.should_forward(&newer));
}
#[test]
// Messages sharing one rx_time are de-duplicated by id, not dropped wholesale.
fn bridge_state_dedupes_same_timestamp() {
    let mut state = BridgeState::default();
    let m1 = PotatoMessage {
        rx_time: 100,
        ..sample_msg(10)
    };
    let m2 = PotatoMessage {
        rx_time: 100,
        ..sample_msg(9)
    };
    let dup = PotatoMessage {
        rx_time: 100,
        ..sample_msg(10)
    };

    assert!(state.should_forward(&m1));
    state.update_with(&m1);
    // A different id at the same timestamp is still new.
    assert!(state.should_forward(&m2));
    state.update_with(&m2);
    // An already-seen id at the same timestamp is filtered out.
    assert!(!state.should_forward(&dup));
    assert_eq!(state.last_rx_time, Some(100));
    assert_eq!(state.last_rx_time_ids, vec![10, 9]);
}
#[test]
@@ -225,11 +485,17 @@ mod tests {
let state = BridgeState {
last_message_id: Some(12345),
last_rx_time: Some(99),
last_rx_time_ids: vec![123],
last_checked_at: Some(77),
};
state.save(path_str).unwrap();
let loaded_state = BridgeState::load(path_str).unwrap();
assert_eq!(loaded_state.last_message_id, Some(12345));
assert_eq!(loaded_state.last_rx_time, Some(99));
assert_eq!(loaded_state.last_rx_time_ids, vec![123]);
assert_eq!(loaded_state.last_checked_at, None);
}
#[test]
@@ -240,6 +506,226 @@ mod tests {
let state = BridgeState::load(path_str).unwrap();
assert_eq!(state.last_message_id, None);
assert_eq!(state.last_rx_time, None);
assert!(state.last_rx_time_ids.is_empty());
}
#[test]
// An empty state file is treated like a missing one: fresh defaults.
fn bridge_state_load_empty_file() {
    let tmp_dir = tempfile::tempdir().unwrap();
    let file_path = tmp_dir.path().join("empty.json");
    let path_str = file_path.to_str().unwrap();
    fs::write(path_str, "").unwrap();
    let state = BridgeState::load(path_str).unwrap();
    assert_eq!(state.last_message_id, None);
    assert_eq!(state.last_rx_time, None);
    assert!(state.last_rx_time_ids.is_empty());
    assert_eq!(state.last_checked_at, None);
}
#[test]
// Loading a legacy state migrates last_checked_at into last_rx_time and
// leaves the migrated legacy field cleared.
fn bridge_state_migrates_legacy_checkpoint() {
    let tmp_dir = tempfile::tempdir().unwrap();
    let file_path = tmp_dir.path().join("legacy_state.json");
    let path_str = file_path.to_str().unwrap();
    fs::write(
        path_str,
        r#"{"last_message_id":42,"last_checked_at":1710000000}"#,
    )
    .unwrap();
    let state = BridgeState::load(path_str).unwrap();
    assert_eq!(state.last_message_id, Some(42));
    assert_eq!(state.last_rx_time, Some(1_710_000_000));
    assert!(state.last_rx_time_ids.is_empty());
}
#[test]
// Without a last_message_id the bridge must do a full (unbounded) fetch,
// even if an rx_time is somehow present.
fn fetch_params_respects_missing_last_message_id() {
    let state = BridgeState {
        last_message_id: None,
        last_rx_time: Some(123),
        last_rx_time_ids: vec![],
        last_checked_at: None,
    };
    let params = build_fetch_params(&state);
    assert_eq!(params.limit, None);
    assert_eq!(params.since, None);
}
#[test]
// With both id and rx_time checkpoints present, fetch incrementally via `since`.
fn fetch_params_uses_since_when_safe() {
    let state = BridgeState {
        last_message_id: Some(1),
        last_rx_time: Some(123),
        last_rx_time_ids: vec![],
        last_checked_at: None,
    };
    let params = build_fetch_params(&state);
    assert_eq!(params.limit, None);
    assert_eq!(params.since, Some(123));
}
#[test]
// An id checkpoint without rx_time falls back to a small bounded window.
fn fetch_params_defaults_to_small_window() {
    let state = BridgeState {
        last_message_id: Some(1),
        last_rx_time: None,
        last_rx_time_ids: vec![],
        last_checked_at: None,
    };
    let params = build_fetch_params(&state);
    assert_eq!(params.limit, Some(10));
    assert_eq!(params.since, None);
}
#[test]
// Smoke test: logging a default state must not panic.
fn log_state_update_emits_info() {
    let state = BridgeState::default();
    log_state_update(&state);
}
#[test]
// persist_state round-trips the state through the JSON file on disk.
fn persist_state_writes_file() {
    let tmp_dir = tempfile::tempdir().unwrap();
    let file_path = tmp_dir.path().join("state.json");
    let path_str = file_path.to_str().unwrap();
    let state = BridgeState {
        last_message_id: Some(42),
        last_rx_time: Some(123),
        last_rx_time_ids: vec![42],
        last_checked_at: None,
    };
    persist_state(&state, path_str);
    let loaded = BridgeState::load(path_str).unwrap();
    assert_eq!(loaded.last_message_id, Some(42));
}
#[test]
// persist_state must swallow (and log) write errors instead of panicking.
fn persist_state_logs_on_error() {
    let tmp_dir = tempfile::tempdir().unwrap();
    let dir_path = tmp_dir.path().to_str().unwrap();
    let state = BridgeState::default();
    // Writing to a directory path should trigger the error branch.
    persist_state(&state, dir_path);
}
#[tokio::test]
// The listener task starts on port 0 (any free port) and can be aborted cleanly.
async fn spawn_synapse_listener_starts_task() {
    let addr = SocketAddr::from(([127, 0, 0, 1], 0));
    let handle = spawn_synapse_listener(addr, "HS_TOKEN".to_string());
    tokio::time::sleep(Duration::from_millis(10)).await;
    handle.abort();
}
#[tokio::test]
// Occupying the port first forces the spawned listener down its error path;
// the task must finish (logging the error) rather than panic or hang.
async fn spawn_synapse_listener_logs_error_on_bind_failure() {
    let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
    let addr = listener.local_addr().unwrap();
    let handle = spawn_synapse_listener(addr, "HS_TOKEN".to_string());
    let _ = handle.await;
}
#[tokio::test]
// An empty message batch must leave both the in-memory and on-disk state untouched.
async fn poll_once_leaves_state_unchanged_without_messages() {
    let tmp_dir = tempfile::tempdir().unwrap();
    let state_path = tmp_dir.path().join("state.json");
    let state_str = state_path.to_str().unwrap();
    let mut server = mockito::Server::new_async().await;
    // PotatoMesh API returns an empty list.
    let mock_msgs = server
        .mock("GET", "/api/messages")
        .match_query(mockito::Matcher::Any)
        .with_status(200)
        .with_header("content-type", "application/json")
        .with_body("[]")
        .create();
    let http_client = reqwest::Client::new();
    let potatomesh_cfg = PotatomeshConfig {
        base_url: server.url(),
        poll_interval_secs: 1,
    };
    let matrix_cfg = MatrixConfig {
        homeserver: server.url(),
        as_token: "AS_TOKEN".to_string(),
        hs_token: "HS_TOKEN".to_string(),
        server_name: "example.org".to_string(),
        room_id: "!roomid:example.org".to_string(),
    };
    let potato = PotatoClient::new(http_client.clone(), potatomesh_cfg);
    let matrix = MatrixAppserviceClient::new(http_client, matrix_cfg);
    let mut state = BridgeState {
        last_message_id: Some(1),
        last_rx_time: Some(100),
        last_rx_time_ids: vec![1],
        last_checked_at: None,
    };
    poll_once(&potato, &matrix, &mut state, state_str).await;
    mock_msgs.assert();
    // No new data means state remains unchanged and is not persisted.
    assert_eq!(state.last_rx_time, Some(100));
    assert_eq!(state.last_rx_time_ids, vec![1]);
    assert!(!state_path.exists());
}
#[tokio::test]
// Non-text ports are not bridged, but they still advance and persist the checkpoint.
async fn poll_once_persists_state_for_non_text_messages() {
    let tmp_dir = tempfile::tempdir().unwrap();
    let state_path = tmp_dir.path().join("state.json");
    let state_str = state_path.to_str().unwrap();
    let mut server = mockito::Server::new_async().await;
    // A single POSITION_APP message — must never be forwarded to Matrix.
    let mock_msgs = server
        .mock("GET", "/api/messages")
        .match_query(mockito::Matcher::Any)
        .with_status(200)
        .with_header("content-type", "application/json")
        .with_body(
            r#"[{"id":1,"rx_time":100,"rx_iso":"2025-11-27T00:00:00Z","from_id":"!abcd1234","to_id":"^all","channel":1,"portnum":"POSITION_APP","text":"","rssi":-100,"hop_limit":1,"lora_freq":868,"modem_preset":"MediumFast","channel_name":"TEST","snr":0.0,"node_id":"!abcd1234"}]"#,
        )
        .create();
    let http_client = reqwest::Client::new();
    let potatomesh_cfg = PotatomeshConfig {
        base_url: server.url(),
        poll_interval_secs: 1,
    };
    let matrix_cfg = MatrixConfig {
        homeserver: server.url(),
        as_token: "AS_TOKEN".to_string(),
        hs_token: "HS_TOKEN".to_string(),
        server_name: "example.org".to_string(),
        room_id: "!roomid:example.org".to_string(),
    };
    let potato = PotatoClient::new(http_client.clone(), potatomesh_cfg);
    let matrix = MatrixAppserviceClient::new(http_client, matrix_cfg);
    let mut state = BridgeState::default();
    poll_once(&potato, &matrix, &mut state, state_str).await;
    mock_msgs.assert();
    // The checkpoint was written to disk even though nothing was bridged.
    assert!(state_path.exists());
    let loaded = BridgeState::load(state_str).unwrap();
    assert_eq!(loaded.last_message_id, Some(1));
    assert_eq!(loaded.last_rx_time, Some(100));
    assert_eq!(loaded.last_rx_time_ids, vec![1]);
}
#[tokio::test]
@@ -253,16 +739,19 @@ mod tests {
let matrix_cfg = MatrixConfig {
homeserver: server.url(),
as_token: "AS_TOKEN".to_string(),
hs_token: "HS_TOKEN".to_string(),
server_name: "example.org".to_string(),
room_id: "!roomid:example.org".to_string(),
};
let node_id = "abcd1234";
let user_id = format!("@{}:{}", node_id, matrix_cfg.server_name);
let user_id = format!("@potato_{}:{}", node_id, matrix_cfg.server_name);
let encoded_user = urlencoding::encode(&user_id);
let room_id = matrix_cfg.room_id.clone();
let encoded_room = urlencoding::encode(&room_id);
let mock_get_node = server
.mock("GET", "/nodes/abcd1234")
.mock("GET", "/api/nodes/abcd1234")
.with_status(200)
.with_header("content-type", "application/json")
.with_body(r#"{"node_id": "!abcd1234", "long_name": "Test Node", "short_name": "TN"}"#)
@@ -270,7 +759,18 @@ mod tests {
let mock_register = server
.mock("POST", "/_matrix/client/v3/register")
.match_query("kind=user&access_token=AS_TOKEN")
.match_query("kind=user")
.match_header("authorization", "Bearer AS_TOKEN")
.with_status(200)
.create();
let mock_join = server
.mock(
"POST",
format!("/_matrix/client/v3/rooms/{}/join", encoded_room).as_str(),
)
.match_query(format!("user_id={}", encoded_user).as_str())
.match_header("authorization", "Bearer AS_TOKEN")
.with_status(200)
.create();
@@ -279,14 +779,16 @@ mod tests {
"PUT",
format!("/_matrix/client/v3/profile/{}/displayname", encoded_user).as_str(),
)
.match_query(format!("user_id={}&access_token=AS_TOKEN", encoded_user).as_str())
.match_query(format!("user_id={}", encoded_user).as_str())
.match_header("authorization", "Bearer AS_TOKEN")
.match_body(mockito::Matcher::PartialJson(serde_json::json!({
"displayname": "Test Node (TN)"
})))
.with_status(200)
.create();
let http_client = reqwest::Client::new();
let matrix_client = MatrixAppserviceClient::new(http_client.clone(), matrix_cfg);
let room_id = &matrix_client.cfg.room_id;
let encoded_room = urlencoding::encode(room_id);
let txn_id = matrix_client
.txn_counter
.load(std::sync::atomic::Ordering::SeqCst);
@@ -300,7 +802,14 @@ mod tests {
)
.as_str(),
)
.match_query(format!("user_id={}&access_token=AS_TOKEN", encoded_user).as_str())
.match_query(format!("user_id={}", encoded_user).as_str())
.match_header("authorization", "Bearer AS_TOKEN")
.match_body(mockito::Matcher::PartialJson(serde_json::json!({
"msgtype": "m.text",
"body": "`[868][MF][TEST]` Ping",
"format": "org.matrix.custom.html",
"formatted_body": "<code>[868][MF][TEST]</code> Ping",
})))
.with_status(200)
.create();
@@ -313,6 +822,7 @@ mod tests {
assert!(result.is_ok());
mock_get_node.assert();
mock_register.assert();
mock_join.assert();
mock_display_name.assert();
mock_send.assert();
+199 -65
View File
@@ -1,3 +1,17 @@
// Copyright © 2025-26 l5yth & contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::Serialize;
use std::sync::{
atomic::{AtomicU64, Ordering},
@@ -27,9 +41,24 @@ impl MatrixAppserviceClient {
}
}
/// Convert a node_id like "!deadbeef" into Matrix localpart "deadbeef".
/// Basic liveness check against the homeserver.
///
/// Calls `/_matrix/client/versions` (no auth header is attached) and
/// returns an error for any non-success HTTP status.
pub async fn health_check(&self) -> anyhow::Result<()> {
    let url = format!("{}/_matrix/client/versions", self.cfg.homeserver);
    let resp = self.http.get(&url).send().await?;
    if resp.status().is_success() {
        tracing::info!("Matrix homeserver healthy at {}", self.cfg.homeserver);
        Ok(())
    } else {
        Err(anyhow::anyhow!(
            "Matrix homeserver versions check failed with status {}",
            resp.status()
        ))
    }
}
/// Convert a node_id like "!deadbeef" into Matrix localpart "potato_deadbeef".
pub fn localpart_from_node_id(node_id: &str) -> String {
node_id.trim_start_matches('!').to_string()
format!("potato_{}", node_id.trim_start_matches('!'))
}
/// Build a full Matrix user_id from localpart.
@@ -37,10 +66,6 @@ impl MatrixAppserviceClient {
format!("@{}:{}", localpart, self.cfg.server_name)
}
fn auth_query(&self) -> String {
format!("access_token={}", urlencoding::encode(&self.cfg.as_token))
}
/// Ensure the puppet user exists (register via appservice registration).
pub async fn ensure_user_registered(&self, localpart: &str) -> anyhow::Result<()> {
#[derive(Serialize)]
@@ -51,9 +76,8 @@ impl MatrixAppserviceClient {
}
let url = format!(
"{}/_matrix/client/v3/register?kind=user&{}",
self.cfg.homeserver,
self.auth_query()
"{}/_matrix/client/v3/register?kind=user",
self.cfg.homeserver
);
let body = RegisterReq {
@@ -61,7 +85,13 @@ impl MatrixAppserviceClient {
username: localpart,
};
let resp = self.http.post(&url).json(&body).send().await?;
let resp = self
.http
.post(&url)
.bearer_auth(&self.cfg.as_token)
.json(&body)
.send()
.await?;
if resp.status().is_success() {
Ok(())
} else {
@@ -80,18 +110,21 @@ impl MatrixAppserviceClient {
let encoded_user = urlencoding::encode(user_id);
let url = format!(
"{}/_matrix/client/v3/profile/{}/displayname?user_id={}&{}",
self.cfg.homeserver,
encoded_user,
encoded_user,
self.auth_query()
"{}/_matrix/client/v3/profile/{}/displayname?user_id={}",
self.cfg.homeserver, encoded_user, encoded_user
);
let body = DisplayNameReq {
displayname: display_name,
};
let resp = self.http.put(&url).json(&body).send().await?;
let resp = self
.http
.put(&url)
.bearer_auth(&self.cfg.as_token)
.json(&body)
.send()
.await?;
if resp.status().is_success() {
Ok(())
} else {
@@ -105,12 +138,53 @@ impl MatrixAppserviceClient {
}
}
/// Send a plain text message into the configured room as puppet user_id.
pub async fn send_text_message_as(&self, user_id: &str, body_text: &str) -> anyhow::Result<()> {
/// Ensure the puppet user is joined to the configured room.
///
/// Sends the room-join request authenticated with the appservice token,
/// passing the puppet's `user_id` as a query parameter so the join is
/// performed as that user. Non-success responses become errors carrying
/// the status and a snippet of the response body.
pub async fn ensure_user_joined_room(&self, user_id: &str) -> anyhow::Result<()> {
    // Empty JSON object body for the join endpoint.
    #[derive(Serialize)]
    struct JoinReq {}
    let encoded_room = urlencoding::encode(&self.cfg.room_id);
    let encoded_user = urlencoding::encode(user_id);
    let url = format!(
        "{}/_matrix/client/v3/rooms/{}/join?user_id={}",
        self.cfg.homeserver, encoded_room, encoded_user
    );
    let resp = self
        .http
        .post(&url)
        .bearer_auth(&self.cfg.as_token)
        .json(&JoinReq {})
        .send()
        .await?;
    if resp.status().is_success() {
        Ok(())
    } else {
        let status = resp.status();
        let body_snip = resp.text().await.unwrap_or_default();
        Err(anyhow::anyhow!(
            "Matrix join failed for {} in {} with status {} ({})",
            user_id,
            self.cfg.room_id,
            status,
            body_snip
        ))
    }
}
/// Send a text message with HTML formatting into the configured room as puppet user_id.
pub async fn send_formatted_message_as(
&self,
user_id: &str,
body_text: &str,
formatted_body: &str,
) -> anyhow::Result<()> {
#[derive(Serialize)]
struct MsgContent<'a> {
msgtype: &'a str,
body: &'a str,
format: &'a str,
formatted_body: &'a str,
}
let txn_id = self.txn_counter.fetch_add(1, Ordering::SeqCst);
@@ -118,35 +192,36 @@ impl MatrixAppserviceClient {
let encoded_user = urlencoding::encode(user_id);
let url = format!(
"{}/_matrix/client/v3/rooms/{}/send/m.room.message/{}?user_id={}&{}",
self.cfg.homeserver,
encoded_room,
txn_id,
encoded_user,
self.auth_query()
"{}/_matrix/client/v3/rooms/{}/send/m.room.message/{}?user_id={}",
self.cfg.homeserver, encoded_room, txn_id, encoded_user
);
let content = MsgContent {
msgtype: "m.text",
body: body_text,
format: "org.matrix.custom.html",
formatted_body,
};
let resp = self.http.put(&url).json(&content).send().await?;
let resp = self
.http
.put(&url)
.bearer_auth(&self.cfg.as_token)
.json(&content)
.send()
.await?;
if !resp.status().is_success() {
let status = resp.status();
// optional: pull a short body snippet for debugging
let body_snip = resp.text().await.unwrap_or_default();
// Log for observability
tracing::warn!(
"Failed to send message as {}: status {}, body: {}",
"Failed to send formatted message as {}: status {}, body: {}",
user_id,
status,
body_snip
);
// Propagate an error so callers know this message was NOT delivered
return Err(anyhow::anyhow!(
"Matrix send failed for {} with status {}",
user_id,
@@ -166,6 +241,7 @@ mod tests {
MatrixConfig {
homeserver: "https://matrix.example.org".to_string(),
as_token: "AS_TOKEN".to_string(),
hs_token: "HS_TOKEN".to_string(),
server_name: "example.org".to_string(),
room_id: "!roomid:example.org".to_string(),
}
@@ -175,11 +251,11 @@ mod tests {
fn localpart_strips_bang_correctly() {
assert_eq!(
MatrixAppserviceClient::localpart_from_node_id("!deadbeef"),
"deadbeef"
"potato_deadbeef"
);
assert_eq!(
MatrixAppserviceClient::localpart_from_node_id("cafebabe"),
"cafebabe"
"potato_cafebabe"
);
}
@@ -188,18 +264,42 @@ mod tests {
let http = reqwest::Client::builder().build().unwrap();
let client = MatrixAppserviceClient::new(http, dummy_cfg());
let uid = client.user_id("deadbeef");
assert_eq!(uid, "@deadbeef:example.org");
let uid = client.user_id("potato_deadbeef");
assert_eq!(uid, "@potato_deadbeef:example.org");
}
#[test]
fn auth_query_contains_access_token() {
let http = reqwest::Client::builder().build().unwrap();
let client = MatrixAppserviceClient::new(http, dummy_cfg());
#[tokio::test]
async fn health_check_success() {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("GET", "/_matrix/client/versions")
.with_status(200)
.create();
let q = client.auth_query();
assert!(q.starts_with("access_token="));
assert!(q.contains("AS_TOKEN"));
let mut cfg = dummy_cfg();
cfg.homeserver = server.url();
let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
let result = client.health_check().await;
mock.assert();
assert!(result.is_ok());
}
#[tokio::test]
// A 500 from /versions must surface as a health-check error.
async fn health_check_failure() {
    let mut server = mockito::Server::new_async().await;
    let mock = server
        .mock("GET", "/_matrix/client/versions")
        .with_status(500)
        .create();
    let mut cfg = dummy_cfg();
    cfg.homeserver = server.url();
    let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
    let result = client.health_check().await;
    mock.assert();
    assert!(result.is_err());
}
#[test]
@@ -217,7 +317,8 @@ mod tests {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("POST", "/_matrix/client/v3/register")
.match_query("kind=user&access_token=AS_TOKEN")
.match_query("kind=user")
.match_header("authorization", "Bearer AS_TOKEN")
.with_status(200)
.create();
@@ -235,7 +336,8 @@ mod tests {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("POST", "/_matrix/client/v3/register")
.match_query("kind=user&access_token=AS_TOKEN")
.match_query("kind=user")
.match_header("authorization", "Bearer AS_TOKEN")
.with_status(400) // M_USER_IN_USE
.create();
@@ -253,12 +355,13 @@ mod tests {
let mut server = mockito::Server::new_async().await;
let user_id = "@test:example.org";
let encoded_user = urlencoding::encode(user_id);
let query = format!("user_id={}&access_token=AS_TOKEN", encoded_user);
let query = format!("user_id={}", encoded_user);
let path = format!("/_matrix/client/v3/profile/{}/displayname", encoded_user);
let mock = server
.mock("PUT", path.as_str())
.match_query(query.as_str())
.match_header("authorization", "Bearer AS_TOKEN")
.with_status(200)
.create();
@@ -276,12 +379,13 @@ mod tests {
let mut server = mockito::Server::new_async().await;
let user_id = "@test:example.org";
let encoded_user = urlencoding::encode(user_id);
let query = format!("user_id={}&access_token=AS_TOKEN", encoded_user);
let query = format!("user_id={}", encoded_user);
let path = format!("/_matrix/client/v3/profile/{}/displayname", encoded_user);
let mock = server
.mock("PUT", path.as_str())
.match_query(query.as_str())
.match_header("authorization", "Bearer AS_TOKEN")
.with_status(500)
.create();
@@ -295,40 +399,61 @@ mod tests {
}
#[tokio::test]
async fn test_send_text_message_as_success() {
async fn test_ensure_user_joined_room_success() {
let mut server = mockito::Server::new_async().await;
let user_id = "@test:example.org";
let room_id = "!roomid:example.org";
let encoded_user = urlencoding::encode(user_id);
let encoded_room = urlencoding::encode(room_id);
let client = {
let mut cfg = dummy_cfg();
cfg.homeserver = server.url();
cfg.room_id = room_id.to_string();
MatrixAppserviceClient::new(reqwest::Client::new(), cfg)
};
let txn_id = client.txn_counter.load(Ordering::SeqCst);
let query = format!("user_id={}&access_token=AS_TOKEN", encoded_user);
let path = format!(
"/_matrix/client/v3/rooms/{}/send/m.room.message/{}",
encoded_room, txn_id
);
let query = format!("user_id={}", encoded_user);
let path = format!("/_matrix/client/v3/rooms/{}/join", encoded_room);
let mock = server
.mock("PUT", path.as_str())
.mock("POST", path.as_str())
.match_query(query.as_str())
.match_header("authorization", "Bearer AS_TOKEN")
.with_status(200)
.create();
let result = client.send_text_message_as(user_id, "hello").await;
let mut cfg = dummy_cfg();
cfg.homeserver = server.url();
cfg.room_id = room_id.to_string();
let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
let result = client.ensure_user_joined_room(user_id).await;
mock.assert();
assert!(result.is_ok());
}
#[tokio::test]
async fn test_send_text_message_as_fail() {
async fn test_ensure_user_joined_room_fail() {
let mut server = mockito::Server::new_async().await;
let user_id = "@test:example.org";
let room_id = "!roomid:example.org";
let encoded_user = urlencoding::encode(user_id);
let encoded_room = urlencoding::encode(room_id);
let query = format!("user_id={}", encoded_user);
let path = format!("/_matrix/client/v3/rooms/{}/join", encoded_room);
let mock = server
.mock("POST", path.as_str())
.match_query(query.as_str())
.match_header("authorization", "Bearer AS_TOKEN")
.with_status(403)
.create();
let mut cfg = dummy_cfg();
cfg.homeserver = server.url();
cfg.room_id = room_id.to_string();
let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
let result = client.ensure_user_joined_room(user_id).await;
mock.assert();
assert!(result.is_err());
}
#[tokio::test]
async fn test_send_formatted_message_as_success() {
let mut server = mockito::Server::new_async().await;
let user_id = "@test:example.org";
let room_id = "!roomid:example.org";
@@ -342,7 +467,7 @@ mod tests {
MatrixAppserviceClient::new(reqwest::Client::new(), cfg)
};
let txn_id = client.txn_counter.load(Ordering::SeqCst);
let query = format!("user_id={}&access_token=AS_TOKEN", encoded_user);
let query = format!("user_id={}", encoded_user);
let path = format!(
"/_matrix/client/v3/rooms/{}/send/m.room.message/{}",
encoded_room, txn_id
@@ -351,12 +476,21 @@ mod tests {
let mock = server
.mock("PUT", path.as_str())
.match_query(query.as_str())
.with_status(500)
.match_header("authorization", "Bearer AS_TOKEN")
.match_body(mockito::Matcher::PartialJson(serde_json::json!({
"msgtype": "m.text",
"body": "`[meta]` hello",
"format": "org.matrix.custom.html",
"formatted_body": "<code>[meta]</code> hello",
})))
.with_status(200)
.create();
let result = client.send_text_message_as(user_id, "hello").await;
let result = client
.send_formatted_message_as(user_id, "`[meta]` hello", "<code>[meta]</code> hello")
.await;
mock.assert();
assert!(result.is_err());
assert!(result.is_ok());
}
}
+289
View File
@@ -0,0 +1,289 @@
// Copyright © 2025-26 l5yth & contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use axum::{
extract::{Path, Query, State},
http::{header::AUTHORIZATION, HeaderMap, StatusCode},
response::IntoResponse,
routing::put,
Json, Router,
};
use serde_json::Value;
use std::net::SocketAddr;
use tracing::info;
/// Shared router state holding the homeserver token used to
/// authenticate inbound Synapse transaction callbacks.
#[derive(Clone)]
struct SynapseState {
    // Token Synapse presents on each callback (the `hs_token` from the
    // appservice registration).
    hs_token: String,
}
/// Query-string parameters for legacy `?access_token=` authentication.
#[derive(serde::Deserialize)]
struct AuthQuery {
    // Optional: modern callers send the token via headers instead, and
    // the handler only consults this when no header token is present.
    access_token: Option<String>,
}
/// Pull access tokens from supported auth headers.
///
/// Checks the `Authorization` header for a `Bearer` scheme first — the
/// scheme is matched case-insensitively per RFC 7235 (the original code
/// only accepted the literal prefixes "Bearer " and "bearer ", wrongly
/// rejecting variants such as "BEARER ") — then falls back to the custom
/// `x-access-token` header. Returns the trimmed token, or `None` when
/// neither header carries one.
fn extract_access_token(headers: &HeaderMap) -> Option<String> {
    if let Some(value) = headers.get(AUTHORIZATION) {
        if let Ok(raw) = value.to_str() {
            // Split off the 7-byte "bearer " prefix; `get` yields `None`
            // instead of panicking if the split lands mid-character.
            if let (Some(scheme), Some(token)) = (raw.get(..7), raw.get(7..)) {
                if scheme.eq_ignore_ascii_case("bearer ") {
                    return Some(token.trim().to_string());
                }
            }
        }
    }
    if let Some(value) = headers.get("x-access-token") {
        if let Ok(raw) = value.to_str() {
            return Some(raw.trim().to_string());
        }
    }
    None
}
/// Compare tokens in constant time to avoid timing leakage.
///
/// Folds the XOR of every byte position (reading a zero for positions
/// past either string's end) together with the XOR of the two lengths
/// into one accumulator, so the amount of work depends only on the
/// longer input — never on where the first mismatch occurs.
fn constant_time_eq(a: &str, b: &str) -> bool {
    let lhs = a.as_bytes();
    let rhs = b.as_bytes();
    let mut acc = (lhs.len() ^ rhs.len()) as u8;
    for pos in 0..lhs.len().max(rhs.len()) {
        let x = lhs.get(pos).copied().unwrap_or(0);
        let y = rhs.get(pos).copied().unwrap_or(0);
        acc |= x ^ y;
    }
    acc == 0
}
/// Captures inbound Synapse transaction payloads for logging.
#[derive(Debug)]
struct SynapseResponse {
    // Transaction id taken from the request path.
    txn_id: String,
    // Raw JSON body exactly as received; logged verbatim by the handler.
    payload: Value,
}
/// Build the router that handles Synapse appservice transactions.
///
/// Registers a single `PUT /_matrix/appservice/v1/transactions/:txn_id`
/// route and attaches the shared [`SynapseState`] so the handler can
/// verify the homeserver token.
fn build_router(state: SynapseState) -> Router {
    Router::new()
        .route(
            "/_matrix/appservice/v1/transactions/:txn_id",
            put(handle_transaction),
        )
        .with_state(state)
}
/// Handle inbound transaction callbacks from Synapse.
///
/// Authenticates the request against the configured `hs_token`: a token
/// found in the auth headers takes precedence, and only when no header
/// token is present is the legacy `?access_token=` query parameter
/// checked — a header token that is present but wrong is rejected even
/// if the query token would have matched. Responds `401` with an empty
/// JSON object on auth failure, otherwise logs the payload and responds
/// `200` with an empty JSON object.
async fn handle_transaction(
    Path(txn_id): Path<String>,
    State(state): State<SynapseState>,
    Query(auth): Query<AuthQuery>,
    headers: HeaderMap,
    Json(payload): Json<Value>,
) -> impl IntoResponse {
    let header_token = extract_access_token(&headers);
    // Both branches compare via constant_time_eq to avoid timing leakage.
    let token_matches = if let Some(token) = header_token.as_deref() {
        constant_time_eq(token, &state.hs_token)
    } else {
        auth.access_token
            .as_deref()
            .is_some_and(|token| constant_time_eq(token, &state.hs_token))
    };
    if !token_matches {
        return (StatusCode::UNAUTHORIZED, Json(serde_json::json!({})));
    }
    let response = SynapseResponse { txn_id, payload };
    // NOTE(review): this logs the full event payload at info level, which
    // may include message content — confirm that is acceptable.
    info!(
        "Status response: SynapseResponse {{ txn_id: {}, payload: {:?} }}",
        response.txn_id, response.payload
    );
    (StatusCode::OK, Json(serde_json::json!({})))
}
/// Listen for Synapse callbacks on the configured address.
///
/// Binds a TCP listener on `addr` and serves the transaction router
/// until the task is cancelled or the server errors. Returns an error
/// if binding fails (e.g. the address is already in use) or if serving
/// fails.
pub async fn run_synapse_listener(addr: SocketAddr, hs_token: String) -> anyhow::Result<()> {
    let app = build_router(SynapseState { hs_token });
    let listener = tokio::net::TcpListener::bind(addr).await?;
    info!("Synapse listener bound on {}", addr);
    axum::serve(listener, app).await?;
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use axum::body::Body;
    use axum::http::Request;
    use tokio::time::{sleep, Duration};
    use tower::ServiceExt;

    // Happy path: a correct Bearer token yields 200 and an empty JSON body.
    #[tokio::test]
    async fn transactions_endpoint_accepts_payloads() {
        let app = build_router(SynapseState {
            hs_token: "HS_TOKEN".to_string(),
        });
        let payload = serde_json::json!({
            "events": [],
            "txn_id": "123"
        });
        let response = app
            .oneshot(
                Request::builder()
                    .method("PUT")
                    .uri("/_matrix/appservice/v1/transactions/123")
                    .header("authorization", "Bearer HS_TOKEN")
                    .header("content-type", "application/json")
                    .body(Body::from(payload.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(response.status(), StatusCode::OK);
        let body = axum::body::to_bytes(response.into_body(), usize::MAX)
            .await
            .unwrap();
        assert_eq!(body.as_ref(), b"{}");
    }

    // No token anywhere (header or query) must be rejected with 401.
    #[tokio::test]
    async fn transactions_endpoint_rejects_missing_token() {
        let app = build_router(SynapseState {
            hs_token: "HS_TOKEN".to_string(),
        });
        let payload = serde_json::json!({
            "events": [],
            "txn_id": "123"
        });
        let response = app
            .oneshot(
                Request::builder()
                    .method("PUT")
                    .uri("/_matrix/appservice/v1/transactions/123")
                    .header("content-type", "application/json")
                    .body(Body::from(payload.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(response.status(), StatusCode::UNAUTHORIZED);
        let body = axum::body::to_bytes(response.into_body(), usize::MAX)
            .await
            .unwrap();
        assert_eq!(body.as_ref(), b"{}");
    }

    // A present-but-wrong Bearer token must be rejected with 401.
    #[tokio::test]
    async fn transactions_endpoint_rejects_wrong_token() {
        let app = build_router(SynapseState {
            hs_token: "HS_TOKEN".to_string(),
        });
        let payload = serde_json::json!({
            "events": [],
            "txn_id": "123"
        });
        let response = app
            .oneshot(
                Request::builder()
                    .method("PUT")
                    .uri("/_matrix/appservice/v1/transactions/123")
                    .header("authorization", "Bearer NOPE")
                    .header("content-type", "application/json")
                    .body(Body::from(payload.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(response.status(), StatusCode::UNAUTHORIZED);
        let body = axum::body::to_bytes(response.into_body(), usize::MAX)
            .await
            .unwrap();
        assert_eq!(body.as_ref(), b"{}");
    }

    // With no header token, the legacy ?access_token= query parameter is
    // honoured for backwards compatibility.
    #[tokio::test]
    async fn transactions_endpoint_accepts_legacy_query_token() {
        let app = build_router(SynapseState {
            hs_token: "HS_TOKEN".to_string(),
        });
        let payload = serde_json::json!({
            "events": [],
            "txn_id": "125"
        });
        let response = app
            .oneshot(
                Request::builder()
                    .method("PUT")
                    .uri("/_matrix/appservice/v1/transactions/125?access_token=HS_TOKEN")
                    .header("content-type", "application/json")
                    .body(Body::from(payload.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(response.status(), StatusCode::OK);
    }

    // The custom x-access-token header is accepted as an alternative to
    // the Authorization header.
    #[tokio::test]
    async fn transactions_endpoint_accepts_x_access_token_header() {
        let app = build_router(SynapseState {
            hs_token: "HS_TOKEN".to_string(),
        });
        let payload = serde_json::json!({
            "events": [],
            "txn_id": "126"
        });
        let response = app
            .oneshot(
                Request::builder()
                    .method("PUT")
                    .uri("/_matrix/appservice/v1/transactions/126")
                    .header("x-access-token", "HS_TOKEN")
                    .header("content-type", "application/json")
                    .body(Body::from(payload.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();
        assert_eq!(response.status(), StatusCode::OK);
    }

    // Smoke test: listener binds on an ephemeral port and the task can be
    // aborted without hanging. (No assertion — absence of panic is the check.)
    #[tokio::test]
    async fn run_synapse_listener_starts_and_can_abort() {
        let addr = SocketAddr::from(([127, 0, 0, 1], 0));
        let handle =
            tokio::spawn(async move { run_synapse_listener(addr, "HS_TOKEN".to_string()).await });
        sleep(Duration::from_millis(10)).await;
        handle.abort();
    }

    // Binding a port that is already held must surface as an error.
    #[tokio::test]
    async fn run_synapse_listener_returns_error_on_bind_failure() {
        let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        let result = run_synapse_listener(addr, "HS_TOKEN".to_string()).await;
        assert!(result.is_err());
    }
}
+222 -24
View File
@@ -1,3 +1,17 @@
// Copyright © 2025-26 l5yth & contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::Deserialize;
use std::collections::HashMap;
use std::sync::Arc;
@@ -14,19 +28,29 @@ pub struct PotatoMessage {
pub from_id: String,
pub to_id: String,
pub channel: u8,
pub portnum: String,
#[serde(default)]
pub portnum: Option<String>,
pub text: String,
pub rssi: i16,
pub hop_limit: u8,
#[serde(default)]
pub rssi: Option<i16>,
#[serde(default)]
pub hop_limit: Option<u8>,
pub lora_freq: u32,
pub modem_preset: String,
pub channel_name: String,
pub snr: f32,
#[serde(default)]
pub snr: Option<f32>,
#[serde(default)]
pub reply_id: Option<u64>,
pub node_id: String,
}
#[derive(Debug, Default, Clone)]
pub struct FetchParams {
pub limit: Option<u32>,
pub since: Option<u64>,
}
#[allow(dead_code)]
#[derive(Debug, Deserialize, Clone)]
pub struct PotatoNode {
@@ -67,22 +91,55 @@ impl PotatoClient {
}
}
/// Build the API root; accept either a bare domain or one already ending in `/api`.
fn api_base(&self) -> String {
let trimmed = self.cfg.base_url.trim_end_matches('/');
if trimmed.ends_with("/api") {
trimmed.to_string()
} else {
format!("{}/api", trimmed)
}
}
fn messages_url(&self) -> String {
format!("{}/messages", self.cfg.base_url)
format!("{}/messages", self.api_base())
}
fn node_url(&self, hex_id: &str) -> String {
// e.g. https://potatomesh.net/api/nodes/67fc83cb
format!("{}/nodes/{}", self.cfg.base_url, hex_id)
format!("{}/nodes/{}", self.api_base(), hex_id)
}
pub async fn fetch_messages(&self) -> anyhow::Result<Vec<PotatoMessage>> {
let resp = self
.http
.get(self.messages_url())
.send()
.await?
.error_for_status()?;
/// Basic liveness check against the PotatoMesh API.
pub async fn health_check(&self) -> anyhow::Result<()> {
let base = self
.cfg
.base_url
.trim_end_matches('/')
.trim_end_matches("/api");
let url = format!("{}/version", base);
let resp = self.http.get(&url).send().await?;
if resp.status().is_success() {
tracing::info!("PotatoMesh API healthy at {}", self.cfg.base_url);
Ok(())
} else {
Err(anyhow::anyhow!(
"PotatoMesh health check failed with status {}",
resp.status()
))
}
}
pub async fn fetch_messages(&self, params: FetchParams) -> anyhow::Result<Vec<PotatoMessage>> {
let mut req = self.http.get(self.messages_url());
if let Some(limit) = params.limit {
req = req.query(&[("limit", limit)]);
}
if let Some(since) = params.since {
req = req.query(&[("since", since)]);
}
let resp = req.send().await?.error_for_status()?;
let msgs: Vec<PotatoMessage> = resp.json().await?;
Ok(msgs)
@@ -146,9 +203,38 @@ mod tests {
assert_eq!(m.id, 2947676906);
assert_eq!(m.from_id, "!da6556d4");
assert_eq!(m.node_id, "!06871773");
assert_eq!(m.portnum, "TEXT_MESSAGE_APP");
assert_eq!(m.portnum.as_deref(), Some("TEXT_MESSAGE_APP"));
assert_eq!(m.lora_freq, 868);
assert!((m.snr - (-9.0)).abs() < f32::EPSILON);
assert!((m.snr.unwrap() - (-9.0)).abs() < f32::EPSILON);
}
#[test]
fn deserialize_message_with_missing_optional_fields() {
let json = r#"
[
{
"id": 1,
"rx_time": 0,
"rx_iso": "2025-11-27T11:03:56Z",
"from_id": "!abcd1234",
"to_id": "^all",
"channel": 1,
"text": "Ping",
"lora_freq": 868,
"modem_preset": "MediumFast",
"channel_name": "TEST",
"node_id": "!abcd1234"
}
]
"#;
let msgs: Vec<PotatoMessage> = serde_json::from_str(json).expect("valid message json");
assert_eq!(msgs.len(), 1);
let m = &msgs[0];
assert!(m.portnum.is_none());
assert!(m.rssi.is_none());
assert!(m.hop_limit.is_none());
assert!(m.snr.is_none());
}
#[test]
@@ -206,7 +292,29 @@ mod tests {
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
assert_eq!(client.messages_url(), "http://localhost:8080/messages");
assert_eq!(client.messages_url(), "http://localhost:8080/api/messages");
}
#[test]
fn test_messages_url_with_trailing_slash() {
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
base_url: "http://localhost:8080/".to_string(),
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
assert_eq!(client.messages_url(), "http://localhost:8080/api/messages");
}
#[test]
fn test_messages_url_with_existing_api_suffix() {
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
base_url: "http://localhost:8080/api/".to_string(),
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
assert_eq!(client.messages_url(), "http://localhost:8080/api/messages");
}
#[test]
@@ -219,7 +327,7 @@ mod tests {
let client = PotatoClient::new(http_client, config);
assert_eq!(
client.node_url("!1234"),
"http://localhost:8080/nodes/!1234"
"http://localhost:8080/api/nodes/!1234"
);
}
@@ -227,7 +335,8 @@ mod tests {
async fn test_fetch_messages_success() {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("GET", "/messages")
.mock("GET", "/api/messages")
.match_query(mockito::Matcher::Any) // allow optional query params
.with_status(200)
.with_header("content-type", "application/json")
.with_body(
@@ -251,7 +360,7 @@ mod tests {
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
let result = client.fetch_messages().await;
let result = client.fetch_messages(FetchParams::default()).await;
mock.assert();
assert!(result.is_ok());
@@ -261,9 +370,9 @@ mod tests {
}
#[tokio::test]
async fn test_fetch_messages_error() {
async fn test_health_check_success() {
let mut server = mockito::Server::new_async().await;
let mock = server.mock("GET", "/messages").with_status(500).create();
let mock = server.mock("GET", "/version").with_status(200).create();
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
@@ -271,12 +380,97 @@ mod tests {
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
let result = client.fetch_messages().await;
let result = client.health_check().await;
mock.assert();
assert!(result.is_ok());
}
#[tokio::test]
async fn test_health_check_strips_api_suffix() {
let mut server = mockito::Server::new_async().await;
let mock = server.mock("GET", "/version").with_status(200).create();
let http_client = reqwest::Client::new();
let mut base = server.url();
base.push_str("/api");
let config = PotatomeshConfig {
base_url: base,
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
let result = client.health_check().await;
mock.assert();
assert!(result.is_ok());
}
#[tokio::test]
async fn test_health_check_failure() {
let mut server = mockito::Server::new_async().await;
let mock = server.mock("GET", "/version").with_status(500).create();
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
base_url: server.url(),
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
let result = client.health_check().await;
mock.assert();
assert!(result.is_err());
}
#[tokio::test]
async fn test_fetch_messages_error() {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("GET", "/api/messages")
.match_query(mockito::Matcher::Any)
.with_status(500)
.create();
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
base_url: server.url(),
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
let result = client.fetch_messages(FetchParams::default()).await;
mock.assert();
assert!(result.is_err());
}
#[tokio::test]
async fn test_fetch_messages_with_limit_and_since() {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("GET", "/api/messages")
.match_query("limit=10&since=123")
.with_status(200)
.with_header("content-type", "application/json")
.with_body("[]")
.create();
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
base_url: server.url(),
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
let params = FetchParams {
limit: Some(10),
since: Some(123),
};
let result = client.fetch_messages(params).await;
mock.assert();
assert!(result.is_ok());
assert!(result.unwrap().is_empty());
}
#[tokio::test]
async fn test_get_node_cache_hit() {
let http_client = reqwest::Client::new();
@@ -313,7 +507,8 @@ mod tests {
async fn test_get_node_cache_miss() {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("GET", "/nodes/1234")
.mock("GET", "/api/nodes/1234")
.match_query(mockito::Matcher::Any)
.with_status(200)
.with_header("content-type", "application/json")
.with_body(
@@ -348,7 +543,10 @@ mod tests {
#[tokio::test]
async fn test_get_node_error() {
let mut server = mockito::Server::new_async().await;
let mock = server.mock("GET", "/nodes/1234").with_status(500).create();
let mock = server
.mock("GET", "/api/nodes/1234")
.with_status(500)
.create();
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
BIN
View File
Binary file not shown.

After

Width:  |  Height:  |  Size: 62 KiB

+71
View File
@@ -0,0 +1,71 @@
# Copyright © 2025-26 l5yth & contributors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "base64"
require "meshtastic"
require "openssl"

# One-off script: decrypt a single captured Meshtastic channel message
# with a known PSK and verify the channel hash against the packet header.
channel_name = "BerlinMesh"

# === Inputs from your packet ===
cipher_b64 = "Q1R7tgI5yXzMXu/3"
psk_b64 = "Nmh7EooP2Tsc+7pvPwXLcEDDuYhk+fBo2GLnbA1Y1sg="
packet_id = 3_915_687_257
from_id = "!9e95cf60"
channel = 35

# === Decode key and ciphertext ===
key = Base64.decode64(psk_b64) # 32 bytes -> AES-256
ciphertext = Base64.decode64(cipher_b64)

# === Derive numeric node id from Meshtastic-style string ===
hex_str = from_id.sub(/^!/, "") # "9e95cf60"
from_node = hex_str.to_i(16) # 0x9e95cf60

# === Build nonce exactly like Meshtastic CryptoEngine ===
# Little-endian 64-bit packet ID + little-endian 32-bit node ID + 4 zero bytes
nonce = [packet_id].pack("Q<") # uint64, little-endian
nonce += [from_node].pack("L<") # uint32, little-endian
nonce += "\x00" * 4 # extraNonce == 0 for PSK channel msgs
raise "Nonce must be 16 bytes" unless nonce.bytesize == 16
raise "Key must be 32 bytes" unless key.bytesize == 32

# === AES-256-CTR decrypt ===
cipher = OpenSSL::Cipher.new("aes-256-ctr")
cipher.decrypt
cipher.key = key
cipher.iv = nonce # CTR mode: the 16-byte nonce doubles as the IV/counter block
plaintext = cipher.update(ciphertext) + cipher.final

# At this point `plaintext` is the raw Meshtastic protobuf payload
# NOTE(review): bytes.pack("C*") reproduces the same bytes (in ASCII-8BIT);
# presumably here only to normalise the string encoding before decoding.
plaintext = plaintext.bytes.pack("C*")
data = Meshtastic::Data.decode(plaintext)
msg = data.payload.dup.force_encoding("UTF-8")
puts msg
# Gets channel number from name and psk
#
# XOR-folds every byte of the channel name and every byte of the decoded
# PSK, combines the two folds with XOR and masks to a single byte —
# mirroring how Meshtastic derives the on-air channel hash.
def channel_hash(name, psk_b64)
  fold = ->(str) { str.each_byte.inject(0) { |acc, byte| acc ^ byte } }
  name_fold = fold.call(name.b)
  psk_fold = fold.call(Base64.decode64(psk_b64))
  (name_fold ^ psk_fold) & 0xFF
end
# Sanity check: the recomputed hash should equal the packet's channel byte.
channel_h = channel_hash(channel_name, psk_b64)
puts channel_h
puts channel == channel_h
+491
View File
@@ -0,0 +1,491 @@
hash,name
0,Mesh1
1,DEMO
1,Downlink1
1,NightNet
1,Sideband1
2,CommsNet
2,Mesh3
2,PulseNet
3,LightNet
3,Mesh2
3,WestStar
3,WolfMesh
4,Mesh5
4,OPERATIONS
4,Rescue1
4,SignalFire
5,Base2
5,DeltaNet
5,Mesh4
5,MeshMunich
6,Base1
7,MeshTest
7,Rescue2
7,ZuluMesh
8,CourierNet
8,Fire2
8,Grid2
8,LongFast
8,RescueTeam
9,AlphaNet
9,MeshGrid
10,TestBerlin
10,WaWi
11,Fire1
11,Grid1
12,FoxNet
12,MeshRuhr
12,RadioNet
13,Signal1
13,Zone1
14,BetaBerlin
14,Signal2
14,TangoNet
14,Zone2
15,BerlinMesh
15,LongSlow
15,MeshBerlin
15,Zone3
16,CQ
16,EchoMesh
16,Freq2
16,KiloMesh
16,Node2
16,PhoenixNet
16,Repeater2
17,FoxtrotNet
17,Node3
18,LoRa
19,Freq1
19,HarmonyNet
19,Node1
19,RavenNet
19,Repeater1
20,NomadNet
20,SENSOR
20,TEST
20,test
21,BravoNet
21,EastStar
21,MeshCollective
21,SunNet
22,Node4
22,Uplink1
23,EagleNet
23,MeshHessen
23,Node5
24,MediumSlow
24,Router1
25,Checkpoint1
25,HAMNet
26,Checkpoint2
26,GhostNet
27,HQ
27,Router2
31,DemoBerlin
31,FieldNet
31,MediumFast
32,Clinic
32,Convoy
32,Daylight
32,Town
33,Callisto
33,CQ1
33,Daybreak
33,Demo
33,East
33,LoRaMesh
33,Mist
34,CQ2
34,Freq
34,Gold
34,Link
34,Repeater
35,Aquila
35,Doctor
35,Echo
35,Kilo
35,Public
35,Wyvern
36,District
36,Hessen
36,Io
36,LoRaTest
36,Operations
36,Shadow
36,Unit
37,Campfire
37,City
37,Outsider
37,Sync
38,Beacon
38,Collective
38,Harbor
38,Lion
38,Meteor
39,Firebird
39,Fireteam
39,Quasar
39,Snow
39,Universe
39,Uplink
40,Checkpoint
40,Galaxy
40,Jaguar
40,Sunset
40,Zeta
41,Hinterland
41,HQ2
41,Main
41,Meshtastic
41,Router
41,Valley
41,Wander
41,Wolfpack
42,HQ1
42,Lizard
42,Packet
42,Sahara
42,Tunnel
43,Anaconda
43,Basalt
43,Blackout
43,Crow
43,Dusk
43,Falcon
43,Lima
43,Müggelberg
44,Arctic
44,Backup
44,Bronze
44,Corvus
44,Cosmos
44,LoRaBerlin
44,Neukölln
44,Safari
45,Breeze
45,Burrow
45,Gale
45,Saturn
46,Border
46,Nest
47,Borealis
47,Mars
47,Path
47,Ranger
48,Beat
48,Berg
48,Beta
48,Downlink
48,Hive
48,Rhythm
48,Saxony
48,Sideband
48,Wolf
49,Asteroid
49,Carbon
49,Mesh
50,Blizzard
50,Runner
51,Callsign
51,Carpet
51,Desert
51,Dragon
51,Friedrichshain
51,Help
51,Nebula
51,Safe
52,Amazon
52,Fireline
52,Haze
52,LoRaHessen
52,Platinum
52,Sensor
52,Test
52,Zulu
53,Nord
53,Rescue
53,Secure
53,Silver
54,Bear
54,Hospital
54,Munich
54,Python
54,Rain
54,Wind
54,Wolves
55,Base
55,Bolt
55,Hawk
55,Mirage
55,Nightwatch
55,Obsidian
55,Rock
55,Victor
55,West
56,Aurora
56,Dune
56,Iron
56,Lava
56,Nomads
57,Copper
57,Core
57,Spectrum
57,Summit
58,Colony
58,Fire
58,Ganymede
58,Grid
58,Kraken
58,Road
58,Solstice
58,Tundra
59,911
59,Forest
59,Pack
60,Berlin
60,Chat
60,Sierra
60,Signal
60,Wald
60,Zone
61,Alpine
61,Bridge
61,Camp
61,Dortmund
61,Frontier
61,Jungle
61,Peak
62,Burner
62,Dawn
62,Europa
62,Midnight
62,Nightshift
62,Prenzlauer
62,Safety
62,Sector
62,Wanderer
63,Distress
63,Kiez
63,Ruhr
63,Team
64,Epsilon
64,Field
64,Granite
64,Orbit
64,Trail
64,Whisper
65,Central
65,Cologne
65,Layer
65,Relay
65,Runners
65,Stone
65,Tempo
66,Polar
66,Woods
67,Highway
67,Kreuzberg
67,Leopard
67,Metro
67,Omega
67,Phantom
68,Hamburg
68,Hydra
68,Medic
68,Titan
69,Command
69,Control
69,Gamma
69,Ghost
69,Mercury
69,Oasis
70,Diamond
70,Ham
70,HAM
70,Leipzig
70,Paramedic
70,Savanna
71,Frankfurt
71,Gecko
71,Jupiter
71,Sensors
71,SENSORS
71,Sunrise
72,Chameleon
72,Eagle
72,Hilltop
72,Teufelsberg
73,Firefly
73,Steel
74,Bravo
74,Caravan
74,Ost
74,Süd
75,Emergency
75,EMERGENCY
75,Nomad
75,Watch
76,Alert
76,Bavaria
76,Fog
76,Harmony
76,Raven
77,Admin
77,ADMIN
77,Den
77,Ice
77,LoRaNet
77,North
77,SOS
77,Sos
77,Wanderers
78,Foxtrot
78,Med
78,Ops
79,Flock
79,Phoenix
79,PRIVATE
79,Private
79,Signals
79,Tiger
80,Commune
80,Freedom
80,Pluto
80,Snake
80,Squad
80,Stuttgart
81,Grassland
81,Tango
81,Union
82,Comet
82,Flash
82,Lightning
83,Cloud
83,Equinox
83,Firewatch
83,Fox
83,Radio
83,Shelter
84,Cheetah
84,General
84,Outpost
84,Volcano
85,Glacier
85,Storm
86,Alpha
86,Owl
86,Panther
86,Prairie
86,Thunder
87,Courier
87,Nexus
87,South
88,Ash
88,River
88,Syndicate
89,Amateur
89,Astro
89,Avalanche
89,Bonfire
89,Draco
89,Griffin
89,Nightfall
89,Shade
89,Venus
90,Charlie
90,Delta
90,Stratum
90,Viper
91,Bison
91,Tal
92,Network
92,Scout
93,Comms
93,Fluss
93,Group
93,Hub
93,Pulse
93,Smoke
94,Frost
94,Rover
94,Village
95,Cobra
95,Liberty
95,Ridge
97,DarkNet
97,NightshiftNet
97,Radio2
97,Shelter2
98,CampNet
98,Radio1
98,Shelter1
98,TangoMesh
99,BaseAlpha
99,BerlinNet
99,SouthStar
100,CourierMesh
100,Storm1
101,Courier2
101,GridNet
101,OpsCenter
102,Courier1
103,Storm2
104,HawkNet
105,BearNet
105,StarNet
107,emergency
107,ZuluNet
108,Comms1
108,DragonNet
108,Hub1
109,admin
109,NightMesh
110,MeshNet
111,BaseCharlie
111,Comms2
111,GridSouth
111,Hub2
111,MeshNetwork
111,WolfNet
112,Layer1
112,Relay1
112,ShortFast
113,OpsRoom
114,Layer3
114,MeshCologne
115,Layer2
115,Relay2
115,SOSBerlin
116,Command1
116,Control1
116,CrowNet
116,MeshFrankfurt
117,EmergencyBerlin
117,GridNorth
117,MeshLeipzig
117,PacketNet
119,Command2
119,Control2
119,MeshHamburg
120,NomadMesh
121,NorthStar
121,Watch2
122,CommandRoom
122,ControlRoom
122,SyncNet
122,Watch1
123,PacketRadio
123,ShadowNet
124,EchoNet
124,KiloNet
124,Med2
124,Ops2
125,FoxtrotMesh
125,RepeaterHub
126,MoonNet
127,BaseBravo
127,Med1
127,Ops1
127,WolfDen
1 hash name
2 0 Mesh1
3 1 DEMO
4 1 Downlink1
5 1 NightNet
6 1 Sideband1
7 2 CommsNet
8 2 Mesh3
9 2 PulseNet
10 3 LightNet
11 3 Mesh2
12 3 WestStar
13 3 WolfMesh
14 4 Mesh5
15 4 OPERATIONS
16 4 Rescue1
17 4 SignalFire
18 5 Base2
19 5 DeltaNet
20 5 Mesh4
21 5 MeshMunich
22 6 Base1
23 7 MeshTest
24 7 Rescue2
25 7 ZuluMesh
26 8 CourierNet
27 8 Fire2
28 8 Grid2
29 8 LongFast
30 8 RescueTeam
31 9 AlphaNet
32 9 MeshGrid
33 10 TestBerlin
34 10 WaWi
35 11 Fire1
36 11 Grid1
37 12 FoxNet
38 12 MeshRuhr
39 12 RadioNet
40 13 Signal1
41 13 Zone1
42 14 BetaBerlin
43 14 Signal2
44 14 TangoNet
45 14 Zone2
46 15 BerlinMesh
47 15 LongSlow
48 15 MeshBerlin
49 15 Zone3
50 16 CQ
51 16 EchoMesh
52 16 Freq2
53 16 KiloMesh
54 16 Node2
55 16 PhoenixNet
56 16 Repeater2
57 17 FoxtrotNet
58 17 Node3
59 18 LoRa
60 19 Freq1
61 19 HarmonyNet
62 19 Node1
63 19 RavenNet
64 19 Repeater1
65 20 NomadNet
66 20 SENSOR
67 20 TEST
68 20 test
69 21 BravoNet
70 21 EastStar
71 21 MeshCollective
72 21 SunNet
73 22 Node4
74 22 Uplink1
75 23 EagleNet
76 23 MeshHessen
77 23 Node5
78 24 MediumSlow
79 24 Router1
80 25 Checkpoint1
81 25 HAMNet
82 26 Checkpoint2
83 26 GhostNet
84 27 HQ
85 27 Router2
86 31 DemoBerlin
87 31 FieldNet
88 31 MediumFast
89 32 Clinic
90 32 Convoy
91 32 Daylight
92 32 Town
93 33 Callisto
94 33 CQ1
95 33 Daybreak
96 33 Demo
97 33 East
98 33 LoRaMesh
99 33 Mist
100 34 CQ2
101 34 Freq
102 34 Gold
103 34 Link
104 34 Repeater
105 35 Aquila
106 35 Doctor
107 35 Echo
108 35 Kilo
109 35 Public
110 35 Wyvern
111 36 District
112 36 Hessen
113 36 Io
114 36 LoRaTest
115 36 Operations
116 36 Shadow
117 36 Unit
118 37 Campfire
119 37 City
120 37 Outsider
121 37 Sync
122 38 Beacon
123 38 Collective
124 38 Harbor
125 38 Lion
126 38 Meteor
127 39 Firebird
128 39 Fireteam
129 39 Quasar
130 39 Snow
131 39 Universe
132 39 Uplink
133 40 Checkpoint
134 40 Galaxy
135 40 Jaguar
136 40 Sunset
137 40 Zeta
138 41 Hinterland
139 41 HQ2
140 41 Main
141 41 Meshtastic
142 41 Router
143 41 Valley
144 41 Wander
145 41 Wolfpack
146 42 HQ1
147 42 Lizard
148 42 Packet
149 42 Sahara
150 42 Tunnel
151 43 Anaconda
152 43 Basalt
153 43 Blackout
154 43 Crow
155 43 Dusk
156 43 Falcon
157 43 Lima
158 43 Müggelberg
159 44 Arctic
160 44 Backup
161 44 Bronze
162 44 Corvus
163 44 Cosmos
164 44 LoRaBerlin
165 44 Neukölln
166 44 Safari
167 45 Breeze
168 45 Burrow
169 45 Gale
170 45 Saturn
171 46 Border
172 46 Nest
173 47 Borealis
174 47 Mars
175 47 Path
176 47 Ranger
177 48 Beat
178 48 Berg
179 48 Beta
180 48 Downlink
181 48 Hive
182 48 Rhythm
183 48 Saxony
184 48 Sideband
185 48 Wolf
186 49 Asteroid
187 49 Carbon
188 49 Mesh
189 50 Blizzard
190 50 Runner
191 51 Callsign
192 51 Carpet
193 51 Desert
194 51 Dragon
195 51 Friedrichshain
196 51 Help
197 51 Nebula
198 51 Safe
199 52 Amazon
200 52 Fireline
201 52 Haze
202 52 LoRaHessen
203 52 Platinum
204 52 Sensor
205 52 Test
206 52 Zulu
207 53 Nord
208 53 Rescue
209 53 Secure
210 53 Silver
211 54 Bear
212 54 Hospital
213 54 Munich
214 54 Python
215 54 Rain
216 54 Wind
217 54 Wolves
218 55 Base
219 55 Bolt
220 55 Hawk
221 55 Mirage
222 55 Nightwatch
223 55 Obsidian
224 55 Rock
225 55 Victor
226 55 West
227 56 Aurora
228 56 Dune
229 56 Iron
230 56 Lava
231 56 Nomads
232 57 Copper
233 57 Core
234 57 Spectrum
235 57 Summit
236 58 Colony
237 58 Fire
238 58 Ganymede
239 58 Grid
240 58 Kraken
241 58 Road
242 58 Solstice
243 58 Tundra
244 59 911
245 59 Forest
246 59 Pack
247 60 Berlin
248 60 Chat
249 60 Sierra
250 60 Signal
251 60 Wald
252 60 Zone
253 61 Alpine
254 61 Bridge
255 61 Camp
256 61 Dortmund
257 61 Frontier
258 61 Jungle
259 61 Peak
260 62 Burner
261 62 Dawn
262 62 Europa
263 62 Midnight
264 62 Nightshift
265 62 Prenzlauer
266 62 Safety
267 62 Sector
268 62 Wanderer
269 63 Distress
270 63 Kiez
271 63 Ruhr
272 63 Team
273 64 Epsilon
274 64 Field
275 64 Granite
276 64 Orbit
277 64 Trail
278 64 Whisper
279 65 Central
280 65 Cologne
281 65 Layer
282 65 Relay
283 65 Runners
284 65 Stone
285 65 Tempo
286 66 Polar
287 66 Woods
288 67 Highway
289 67 Kreuzberg
290 67 Leopard
291 67 Metro
292 67 Omega
293 67 Phantom
294 68 Hamburg
295 68 Hydra
296 68 Medic
297 68 Titan
298 69 Command
299 69 Control
300 69 Gamma
301 69 Ghost
302 69 Mercury
303 69 Oasis
304 70 Diamond
305 70 Ham
306 70 HAM
307 70 Leipzig
308 70 Paramedic
309 70 Savanna
310 71 Frankfurt
311 71 Gecko
312 71 Jupiter
313 71 Sensors
314 71 SENSORS
315 71 Sunrise
316 72 Chameleon
317 72 Eagle
318 72 Hilltop
319 72 Teufelsberg
320 73 Firefly
321 73 Steel
322 74 Bravo
323 74 Caravan
324 74 Ost
325 74 Süd
326 75 Emergency
327 75 EMERGENCY
328 75 Nomad
329 75 Watch
330 76 Alert
331 76 Bavaria
332 76 Fog
333 76 Harmony
334 76 Raven
335 77 Admin
336 77 ADMIN
337 77 Den
338 77 Ice
339 77 LoRaNet
340 77 North
341 77 SOS
342 77 Sos
343 77 Wanderers
344 78 Foxtrot
345 78 Med
346 78 Ops
347 79 Flock
348 79 Phoenix
349 79 PRIVATE
350 79 Private
351 79 Signals
352 79 Tiger
353 80 Commune
354 80 Freedom
355 80 Pluto
356 80 Snake
357 80 Squad
358 80 Stuttgart
359 81 Grassland
360 81 Tango
361 81 Union
362 82 Comet
363 82 Flash
364 82 Lightning
365 83 Cloud
366 83 Equinox
367 83 Firewatch
368 83 Fox
369 83 Radio
370 83 Shelter
371 84 Cheetah
372 84 General
373 84 Outpost
374 84 Volcano
375 85 Glacier
376 85 Storm
377 86 Alpha
378 86 Owl
379 86 Panther
380 86 Prairie
381 86 Thunder
382 87 Courier
383 87 Nexus
384 87 South
385 88 Ash
386 88 River
387 88 Syndicate
388 89 Amateur
389 89 Astro
390 89 Avalanche
391 89 Bonfire
392 89 Draco
393 89 Griffin
394 89 Nightfall
395 89 Shade
396 89 Venus
397 90 Charlie
398 90 Delta
399 90 Stratum
400 90 Viper
401 91 Bison
402 91 Tal
403 92 Network
404 92 Scout
405 93 Comms
406 93 Fluss
407 93 Group
408 93 Hub
409 93 Pulse
410 93 Smoke
411 94 Frost
412 94 Rover
413 94 Village
414 95 Cobra
415 95 Liberty
416 95 Ridge
417 97 DarkNet
418 97 NightshiftNet
419 97 Radio2
420 97 Shelter2
421 98 CampNet
422 98 Radio1
423 98 Shelter1
424 98 TangoMesh
425 99 BaseAlpha
426 99 BerlinNet
427 99 SouthStar
428 100 CourierMesh
429 100 Storm1
430 101 Courier2
431 101 GridNet
432 101 OpsCenter
433 102 Courier1
434 103 Storm2
435 104 HawkNet
436 105 BearNet
437 105 StarNet
438 107 emergency
439 107 ZuluNet
440 108 Comms1
441 108 DragonNet
442 108 Hub1
443 109 admin
444 109 NightMesh
445 110 MeshNet
446 111 BaseCharlie
447 111 Comms2
448 111 GridSouth
449 111 Hub2
450 111 MeshNetwork
451 111 WolfNet
452 112 Layer1
453 112 Relay1
454 112 ShortFast
455 113 OpsRoom
456 114 Layer3
457 114 MeshCologne
458 115 Layer2
459 115 Relay2
460 115 SOSBerlin
461 116 Command1
462 116 Control1
463 116 CrowNet
464 116 MeshFrankfurt
465 117 EmergencyBerlin
466 117 GridNorth
467 117 MeshLeipzig
468 117 PacketNet
469 119 Command2
470 119 Control2
471 119 MeshHamburg
472 120 NomadMesh
473 121 NorthStar
474 121 Watch2
475 122 CommandRoom
476 122 ControlRoom
477 122 SyncNet
478 122 Watch1
479 123 PacketRadio
480 123 ShadowNet
481 124 EchoNet
482 124 KiloNet
483 124 Med2
484 124 Ops2
485 125 FoxtrotMesh
486 125 RepeaterHub
487 126 MoonNet
488 127 BaseBravo
489 127 Med1
490 127 Ops1
491 127 WolfDen
+736
View File
@@ -0,0 +1,736 @@
{
"59": [
"911",
"Forest",
"Pack"
],
"77": [
"Admin",
"ADMIN",
"Den",
"Ice",
"LoRaNet",
"North",
"SOS",
"Sos",
"Wanderers"
],
"109": [
"admin",
"NightMesh"
],
"76": [
"Alert",
"Bavaria",
"Fog",
"Harmony",
"Raven"
],
"86": [
"Alpha",
"Owl",
"Panther",
"Prairie",
"Thunder"
],
"9": [
"AlphaNet",
"MeshGrid"
],
"61": [
"Alpine",
"Bridge",
"Camp",
"Dortmund",
"Frontier",
"Jungle",
"Peak"
],
"89": [
"Amateur",
"Astro",
"Avalanche",
"Bonfire",
"Draco",
"Griffin",
"Nightfall",
"Shade",
"Venus"
],
"52": [
"Amazon",
"Fireline",
"Haze",
"LoRaHessen",
"Platinum",
"Sensor",
"Test",
"Zulu"
],
"43": [
"Anaconda",
"Basalt",
"Blackout",
"Crow",
"Dusk",
"Falcon",
"Lima",
"Müggelberg"
],
"35": [
"Aquila",
"Doctor",
"Echo",
"Kilo",
"Public",
"Wyvern"
],
"44": [
"Arctic",
"Backup",
"Bronze",
"Corvus",
"Cosmos",
"LoRaBerlin",
"Neukölln",
"Safari"
],
"88": [
"Ash",
"River",
"Syndicate"
],
"49": [
"Asteroid",
"Carbon",
"Mesh"
],
"56": [
"Aurora",
"Dune",
"Iron",
"Lava",
"Nomads"
],
"55": [
"Base",
"Bolt",
"Hawk",
"Mirage",
"Nightwatch",
"Obsidian",
"Rock",
"Victor",
"West"
],
"6": [
"Base1"
],
"5": [
"Base2",
"DeltaNet",
"Mesh4",
"MeshMunich"
],
"99": [
"BaseAlpha",
"BerlinNet",
"SouthStar"
],
"127": [
"BaseBravo",
"Med1",
"Ops1",
"WolfDen"
],
"111": [
"BaseCharlie",
"Comms2",
"GridSouth",
"Hub2",
"MeshNetwork",
"WolfNet"
],
"38": [
"Beacon",
"Collective",
"Harbor",
"Lion",
"Meteor"
],
"54": [
"Bear",
"Hospital",
"Munich",
"Python",
"Rain",
"Wind",
"Wolves"
],
"105": [
"BearNet",
"StarNet"
],
"48": [
"Beat",
"Berg",
"Beta",
"Downlink",
"Hive",
"Rhythm",
"Saxony",
"Sideband",
"Wolf"
],
"60": [
"Berlin",
"Chat",
"Sierra",
"Signal",
"Wald",
"Zone"
],
"15": [
"BerlinMesh",
"LongSlow",
"MeshBerlin",
"Zone3"
],
"14": [
"BetaBerlin",
"Signal2",
"TangoNet",
"Zone2"
],
"91": [
"Bison",
"Tal"
],
"50": [
"Blizzard",
"Runner"
],
"46": [
"Border",
"Nest"
],
"47": [
"Borealis",
"Mars",
"Path",
"Ranger"
],
"74": [
"Bravo",
"Caravan",
"Ost",
"Süd"
],
"21": [
"BravoNet",
"EastStar",
"MeshCollective",
"SunNet"
],
"45": [
"Breeze",
"Burrow",
"Gale",
"Saturn"
],
"62": [
"Burner",
"Dawn",
"Europa",
"Midnight",
"Nightshift",
"Prenzlauer",
"Safety",
"Sector",
"Wanderer"
],
"33": [
"Callisto",
"CQ1",
"Daybreak",
"Demo",
"East",
"LoRaMesh",
"Mist"
],
"51": [
"Callsign",
"Carpet",
"Desert",
"Dragon",
"Friedrichshain",
"Help",
"Nebula",
"Safe"
],
"37": [
"Campfire",
"City",
"Outsider",
"Sync"
],
"98": [
"CampNet",
"Radio1",
"Shelter1",
"TangoMesh"
],
"65": [
"Central",
"Cologne",
"Layer",
"Relay",
"Runners",
"Stone",
"Tempo"
],
"72": [
"Chameleon",
"Eagle",
"Hilltop",
"Teufelsberg"
],
"90": [
"Charlie",
"Delta",
"Stratum",
"Viper"
],
"40": [
"Checkpoint",
"Galaxy",
"Jaguar",
"Sunset",
"Zeta"
],
"25": [
"Checkpoint1",
"HAMNet"
],
"26": [
"Checkpoint2",
"GhostNet"
],
"84": [
"Cheetah",
"General",
"Outpost",
"Volcano"
],
"32": [
"Clinic",
"Convoy",
"Daylight",
"Town"
],
"83": [
"Cloud",
"Equinox",
"Firewatch",
"Fox",
"Radio",
"Shelter"
],
"95": [
"Cobra",
"Liberty",
"Ridge"
],
"58": [
"Colony",
"Fire",
"Ganymede",
"Grid",
"Kraken",
"Road",
"Solstice",
"Tundra"
],
"82": [
"Comet",
"Flash",
"Lightning"
],
"69": [
"Command",
"Control",
"Gamma",
"Ghost",
"Mercury",
"Oasis"
],
"116": [
"Command1",
"Control1",
"CrowNet",
"MeshFrankfurt"
],
"119": [
"Command2",
"Control2",
"MeshHamburg"
],
"122": [
"CommandRoom",
"ControlRoom",
"SyncNet",
"Watch1"
],
"93": [
"Comms",
"Fluss",
"Group",
"Hub",
"Pulse",
"Smoke"
],
"108": [
"Comms1",
"DragonNet",
"Hub1"
],
"2": [
"CommsNet",
"Mesh3",
"PulseNet"
],
"80": [
"Commune",
"Freedom",
"Pluto",
"Snake",
"Squad",
"Stuttgart"
],
"57": [
"Copper",
"Core",
"Spectrum",
"Summit"
],
"87": [
"Courier",
"Nexus",
"South"
],
"102": [
"Courier1"
],
"101": [
"Courier2",
"GridNet",
"OpsCenter"
],
"100": [
"CourierMesh",
"Storm1"
],
"8": [
"CourierNet",
"Fire2",
"Grid2",
"LongFast",
"RescueTeam"
],
"16": [
"CQ",
"EchoMesh",
"Freq2",
"KiloMesh",
"Node2",
"PhoenixNet",
"Repeater2"
],
"34": [
"CQ2",
"Freq",
"Gold",
"Link",
"Repeater"
],
"97": [
"DarkNet",
"NightshiftNet",
"Radio2",
"Shelter2"
],
"1": [
"DEMO",
"Downlink1",
"NightNet",
"Sideband1"
],
"31": [
"DemoBerlin",
"FieldNet",
"MediumFast"
],
"70": [
"Diamond",
"Ham",
"HAM",
"Leipzig",
"Paramedic",
"Savanna"
],
"63": [
"Distress",
"Kiez",
"Ruhr",
"Team"
],
"36": [
"District",
"Hessen",
"Io",
"LoRaTest",
"Operations",
"Shadow",
"Unit"
],
"23": [
"EagleNet",
"MeshHessen",
"Node5"
],
"124": [
"EchoNet",
"KiloNet",
"Med2",
"Ops2"
],
"75": [
"Emergency",
"EMERGENCY",
"Nomad",
"Watch"
],
"107": [
"emergency",
"ZuluNet"
],
"117": [
"EmergencyBerlin",
"GridNorth",
"MeshLeipzig",
"PacketNet"
],
"64": [
"Epsilon",
"Field",
"Granite",
"Orbit",
"Trail",
"Whisper"
],
"11": [
"Fire1",
"Grid1"
],
"39": [
"Firebird",
"Fireteam",
"Quasar",
"Snow",
"Universe",
"Uplink"
],
"73": [
"Firefly",
"Steel"
],
"79": [
"Flock",
"Phoenix",
"PRIVATE",
"Private",
"Signals",
"Tiger"
],
"12": [
"FoxNet",
"MeshRuhr",
"RadioNet"
],
"78": [
"Foxtrot",
"Med",
"Ops"
],
"125": [
"FoxtrotMesh",
"RepeaterHub"
],
"17": [
"FoxtrotNet",
"Node3"
],
"71": [
"Frankfurt",
"Gecko",
"Jupiter",
"Sensors",
"SENSORS",
"Sunrise"
],
"19": [
"Freq1",
"HarmonyNet",
"Node1",
"RavenNet",
"Repeater1"
],
"94": [
"Frost",
"Rover",
"Village"
],
"85": [
"Glacier",
"Storm"
],
"81": [
"Grassland",
"Tango",
"Union"
],
"68": [
"Hamburg",
"Hydra",
"Medic",
"Titan"
],
"104": [
"HawkNet"
],
"67": [
"Highway",
"Kreuzberg",
"Leopard",
"Metro",
"Omega",
"Phantom"
],
"41": [
"Hinterland",
"HQ2",
"Main",
"Meshtastic",
"Router",
"Valley",
"Wander",
"Wolfpack"
],
"27": [
"HQ",
"Router2"
],
"42": [
"HQ1",
"Lizard",
"Packet",
"Sahara",
"Tunnel"
],
"112": [
"Layer1",
"Relay1",
"ShortFast"
],
"115": [
"Layer2",
"Relay2",
"SOSBerlin"
],
"114": [
"Layer3",
"MeshCologne"
],
"3": [
"LightNet",
"Mesh2",
"WestStar",
"WolfMesh"
],
"18": [
"LoRa"
],
"24": [
"MediumSlow",
"Router1"
],
"0": [
"Mesh1"
],
"4": [
"Mesh5",
"OPERATIONS",
"Rescue1",
"SignalFire"
],
"110": [
"MeshNet"
],
"7": [
"MeshTest",
"Rescue2",
"ZuluMesh"
],
"126": [
"MoonNet"
],
"92": [
"Network",
"Scout"
],
"22": [
"Node4",
"Uplink1"
],
"120": [
"NomadMesh"
],
"20": [
"NomadNet",
"SENSOR",
"TEST",
"test"
],
"53": [
"Nord",
"Rescue",
"Secure",
"Silver"
],
"121": [
"NorthStar",
"Watch2"
],
"113": [
"OpsRoom"
],
"123": [
"PacketRadio",
"ShadowNet"
],
"66": [
"Polar",
"Woods"
],
"13": [
"Signal1",
"Zone1"
],
"103": [
"Storm2"
],
"10": [
"TestBerlin",
"WaWi"
]
}
+134
View File
@@ -0,0 +1,134 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
# Copyright © 2025-26 l5yth & contributors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
require "base64"
require "json"
require "csv"
# --- CONFIG --------------------------------------------------------
# The PSK you want. Here: public mesh, "AQ==" (0x01).
# Override with the PSK_B64 environment variable (base64-encoded key).
PSK_B64 = ENV.fetch("PSK_B64", "AQ==")
# 1000 potential channel candidate names for rainbow indices.
CANDIDATE_NAMES = %w[
911 Admin ADMIN admin Alert Alpha AlphaNet Alpine Amateur Amazon Anaconda Aquila Arctic Ash Asteroid Astro Aurora Avalanche Backup Basalt Base Base1 Base2 BaseAlpha BaseBravo BaseCharlie Bavaria Beacon Bear BearNet Beat Berg Berlin BerlinMesh BerlinNet Beta BetaBerlin Bison Blackout Blizzard Bolt Bonfire Border Borealis Bravo BravoNet Breeze Bridge Bronze Burner Burrow Callisto Callsign Camp Campfire CampNet Caravan Carbon Carpet Central Chameleon Charlie Chat Checkpoint Checkpoint1 Checkpoint2 Cheetah City Clinic Cloud Cobra Collective Cologne Colony Comet Command Command1 Command2 CommandRoom Comms Comms1 Comms2 CommsNet Commune Control Control1 Control2 ControlRoom Convoy Copper Core Corvus Cosmos Courier Courier1 Courier2 CourierMesh CourierNet CQ CQ1 CQ2 Crow CrowNet DarkNet Dawn Daybreak Daylight Delta DeltaNet Demo DEMO DemoBerlin Den Desert Diamond Distress District Doctor Dortmund Downlink Downlink1 Draco Dragon DragonNet Dune Dusk Eagle EagleNet East EastStar Echo EchoMesh EchoNet Emergency emergency EMERGENCY EmergencyBerlin Epsilon Equinox Europa Falcon Field FieldNet Fire Fire1 Fire2 Firebird Firefly Fireline Fireteam Firewatch Flash Flock Fluss Fog Forest Fox FoxNet Foxtrot FoxtrotMesh FoxtrotNet Frankfurt Freedom Freq Freq1 Freq2 Friedrichshain Frontier Frost Galaxy Gale Gamma Ganymede Gecko General Ghost GhostNet Glacier Gold Granite Grassland Grid Grid1 Grid2 GridNet GridNorth GridSouth Griffin Group Ham HAM Hamburg HAMNet Harbor Harmony HarmonyNet Hawk HawkNet Haze Help Hessen Highway Hilltop Hinterland Hive Hospital HQ HQ1 HQ2 Hub Hub1 Hub2 Hydra Ice Io Iron Jaguar Jungle Jupiter Kiez Kilo KiloMesh KiloNet Kraken Kreuzberg Lava Layer Layer1 Layer2 Layer3 Leipzig Leopard Liberty LightNet Lightning Lima Link Lion Lizard LongFast LongSlow LoRa LoRaBerlin LoRaHessen LoRaMesh LoRaNet LoRaTest Main Mars Med Med1 Med2 Medic MediumFast MediumSlow Mercury Mesh Mesh1 Mesh2 Mesh3 Mesh4 Mesh5 MeshBerlin MeshCollective MeshCologne MeshFrankfurt MeshGrid 
MeshHamburg MeshHessen MeshLeipzig MeshMunich MeshNet MeshNetwork MeshRuhr Meshtastic MeshTest Meteor Metro Midnight Mirage Mist MoonNet Munich Müggelberg Nebula Nest Network Neukölln Nexus Nightfall NightMesh NightNet Nightshift NightshiftNet Nightwatch Node1 Node2 Node3 Node4 Node5 Nomad NomadMesh NomadNet Nomads Nord North NorthStar Oasis Obsidian Omega Operations OPERATIONS Ops Ops1 Ops2 OpsCenter OpsRoom Orbit Ost Outpost Outsider Owl Pack Packet PacketNet PacketRadio Panther Paramedic Path Peak Phantom Phoenix PhoenixNet Platinum Pluto Polar Prairie Prenzlauer PRIVATE Private Public Pulse PulseNet Python Quasar Radio Radio1 Radio2 RadioNet Rain Ranger Raven RavenNet Relay Relay1 Relay2 Repeater Repeater1 Repeater2 RepeaterHub Rescue Rescue1 Rescue2 RescueTeam Rhythm Ridge River Road Rock Router Router1 Router2 Rover Ruhr Runner Runners Safari Safe Safety Sahara Saturn Savanna Saxony Scout Sector Secure Sensor SENSOR Sensors SENSORS Shade Shadow ShadowNet Shelter Shelter1 Shelter2 ShortFast Sideband Sideband1 Sierra Signal Signal1 Signal2 SignalFire Signals Silver Smoke Snake Snow Solstice SOS Sos SOSBerlin South SouthStar Spectrum Squad StarNet Steel Stone Storm Storm1 Storm2 Stratum Stuttgart Summit SunNet Sunrise Sunset Sync SyncNet Syndicate Süd Tal Tango TangoMesh TangoNet Team Tempo Test TEST test TestBerlin Teufelsberg Thunder Tiger Titan Town Trail Tundra Tunnel Union Unit Universe Uplink Uplink1 Valley Venus Victor Village Viper Volcano Wald Wander Wanderer Wanderers Watch Watch1 Watch2 WaWi West WestStar Whisper Wind Wolf WolfDen WolfMesh WolfNet Wolfpack Wolves Woods Wyvern Zeta Zone Zone1 Zone2 Zone3 Zulu ZuluMesh ZuluNet
]
# Output filenames (overridable via the CSV_OUT / JSON_OUT environment variables).
CSV_OUT = ENV.fetch("CSV_OUT", "rainbow.csv")
JSON_OUT = ENV.fetch("JSON_OUT", "rainbow.json")
# --- HASH FUNCTION -------------------------------------------------
# Fold a String (or an Array of byte values) into a single XOR checksum
# byte in the range 0..255. An empty input yields 0.
def xor_bytes(str_or_bytes)
  byte_list = str_or_bytes.is_a?(String) ? str_or_bytes.bytes : str_or_bytes
  checksum = 0
  byte_list.each { |byte| checksum = (checksum ^ byte) & 0xFF }
  checksum
end
# Expand a base64-encoded PSK into the effective key bytes used for the
# channel hash.
#
# Semantics mirrored from the original implementation:
#   * empty / nil  -> "" (no encryption; xor contribution is 0)
#   * one byte     -> well-known key alias (1 = default AES128, 2 = AES256)
#   * 2..15 bytes  -> zero-padded to 16 (AES128)
#   * 16 bytes     -> used as-is
#   * 17..31 bytes -> zero-padded to 32 (AES256)
#   * 32 bytes     -> used as-is
#   * longer       -> error
def expanded_key(psk_b64)
  raw = Base64.decode64(psk_b64 || "")
  size = raw.bytesize

  return "".b if size.zero?

  if size == 1
    alias_index = raw.bytes.first
    well_known = {
      1 => [
        0xD4, 0xF1, 0xBB, 0x3A, 0x20, 0x29, 0x07, 0x59,
        0xF0, 0xBC, 0xFF, 0xAB, 0xCF, 0x4E, 0x69, 0x01,
      ].pack("C*"),
      2 => [
        0x38, 0x4B, 0xBC, 0xC0, 0x1D, 0xC0, 0x22, 0xD1,
        0x81, 0xBF, 0x36, 0xB8, 0x61, 0x21, 0xE1, 0xFB,
        0x96, 0xB7, 0x2E, 0x55, 0xBF, 0x74, 0x22, 0x7E,
        0x9D, 0x6A, 0xFB, 0x48, 0xD6, 0x4C, 0xB1, 0xA1,
      ].pack("C*"),
    }
    return well_known.fetch(alias_index) { raise "Unknown PSK alias #{alias_index}" }
  end

  return raw if size == 16 || size == 32

  if size < 16
    (raw.bytes + [0] * (16 - size)).pack("C*")
  elsif size < 32
    (raw.bytes + [0] * (32 - size)).pack("C*")
  else
    raise "PSK too long (#{size} bytes)"
  end
end
# Compute the one-byte Meshtastic channel hash for a channel name and PSK:
# XOR-checksum of the name bytes combined with the XOR-checksum of the
# expanded key bytes.
def channel_hash(name, psk_b64)
  key_bytes = expanded_key(psk_b64)
  (xor_bytes(name.b) ^ xor_bytes(key_bytes)) & 0xFF
end
# --- BUILD RAINBOW TABLE -------------------------------------------
psk_b64 = PSK_B64
puts "Using PSK_B64=#{psk_b64.inspect}"
# Map each one-byte channel hash to every candidate name that produces it.
hash_to_names = Hash.new { |h, k| h[k] = [] }
CANDIDATE_NAMES.each do |name|
  hash_to_names[channel_hash(name, psk_b64)] << name
end
# --- WRITE CSV (hash,name) -----------------------------------------
CSV.open(CSV_OUT, "w") do |csv|
  csv << %w[hash name]
  # Emit rows grouped by ascending hash for stable, diff-friendly output.
  hash_to_names.keys.sort.each do |h|
    hash_to_names[h].each { |name| csv << [h, name] }
  end
end
puts "Wrote CSV rainbow table to #{CSV_OUT}"
# --- WRITE JSON ({hash: [names...]}) -------------------------------
json_hash = hash_to_names.transform_keys(&:to_s)
File.write(JSON_OUT, JSON.pretty_generate(json_hash))
puts "Wrote JSON rainbow table to #{JSON_OUT}"
# --- OPTIONAL: interactive query -----------------------------------
if ARGV.first == "query"
  target = Integer(ARGV[1] || raise("Usage: #{File.basename($0)} query <hash>"))
  # Use fetch with a default: indexing with [] would invoke the Hash's
  # default block and insert an empty bucket into the table as a side effect.
  names = hash_to_names.fetch(target, [])
  if names.empty?
    puts "No names for hash #{target}"
  else
    puts "Names for hash #{target}:"
    names.each { |n| puts " - #{n}" }
  end
else
  puts "Run again with: #{File.basename($0)} query <hash> # to inspect a specific hash"
end
+437
View File
@@ -0,0 +1,437 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for :mod:`data.mesh_ingestor.daemon`."""
from __future__ import annotations
import sys
import threading
import types
from pathlib import Path
from typing import Any
import pytest
# Make the repository root importable so ``data.mesh_ingestor`` resolves even
# when the tests are launched from an arbitrary working directory.
REPO_ROOT = Path(__file__).resolve().parents[1]
if str(REPO_ROOT) not in sys.path:
    sys.path.insert(0, str(REPO_ROOT))
from data.mesh_ingestor import daemon
class FakeEvent:
"""Test double for :class:`threading.Event` that can auto-set itself."""
instances: list["FakeEvent"] = []
def __init__(self, *, auto_set_on_wait: bool = False):
self._is_set = False
self._auto_set_on_wait = auto_set_on_wait
self.wait_calls: list[Any] = []
FakeEvent.instances.append(self)
def set(self) -> None:
"""Mark the event as set."""
self._is_set = True
def is_set(self) -> bool:
"""Return whether the event is currently set."""
return self._is_set
def wait(self, timeout: float | None = None) -> bool:
"""Record waits and optionally auto-set the flag."""
self.wait_calls.append(timeout)
if self._auto_set_on_wait:
self._is_set = True
return self._is_set
class AutoSetEvent(FakeEvent):
    """:class:`FakeEvent` variant whose flag flips on every ``wait`` call."""

    def __init__(self) -> None:
        # All behavior lives in FakeEvent; only the auto-set switch differs.
        super().__init__(auto_set_on_wait=True)
@pytest.fixture(autouse=True)
def reset_fake_events():
    """Keep the shared :class:`FakeEvent` registry empty around every test."""
    del FakeEvent.instances[:]
    yield
    del FakeEvent.instances[:]
def test_event_wait_default_detection(monkeypatch):
    """``_event_wait_allows_default_timeout`` matches defaulted signatures."""
    # The real threading.Event.wait defaults its timeout, so detection is True.
    assert daemon._event_wait_allows_default_timeout() is True

    class _RequiredTimeoutEvent:
        def wait(self, timeout):  # type: ignore[override]
            return bool(timeout)

    stub_threading = types.SimpleNamespace(Event=_RequiredTimeoutEvent)
    monkeypatch.setattr(daemon, "threading", stub_threading)
    # With no default for ``timeout`` the helper must report False.
    assert daemon._event_wait_allows_default_timeout() is False
def test_subscribe_receive_topics(monkeypatch):
    """Subscribing to receive topics returns the exact topic list."""
    seen_topics: list[str] = []
    fake_pub = types.SimpleNamespace(
        subscribe=lambda _handler, topic: seen_topics.append(topic)
    )
    monkeypatch.setattr(daemon, "pub", fake_pub)
    expected = list(daemon._RECEIVE_TOPICS)
    assert daemon._subscribe_receive_topics() == expected
    assert seen_topics == expected
def test_node_items_snapshot_handles_mutation(monkeypatch):
    """Snapshots tolerate temporary runtime errors while iterating."""

    class AlwaysFailingItems(dict):
        # Truthy even when empty so the snapshot helper attempts iteration.
        def __bool__(self):
            return True

        def items(self):  # type: ignore[override]
            raise RuntimeError("dictionary changed size during iteration")

    # Avoid real sleeping between snapshot retries.
    monkeypatch.setattr(daemon.time, "sleep", lambda _: None)

    assert daemon._node_items_snapshot({"a": 1}) == [("a", 1)]
    assert daemon._node_items_snapshot(AlwaysFailingItems(), retries=1) is None

    class FailOnceMapping:
        """Mapping whose first iteration attempt raises, then succeeds."""

        def __init__(self):
            self.calls = 0
            self._data = {"x": 10, "y": 20}

        def __iter__(self):
            self.calls += 1
            if self.calls == 1:
                raise RuntimeError("dictionary changed size during iteration")
            return iter(self._data)

        def __getitem__(self, key):
            return self._data[key]

    flaky = FailOnceMapping()
    assert daemon._node_items_snapshot(flaky, retries=2) == [("x", 10), ("y", 20)]
def test_close_interface_respects_timeout(monkeypatch):
    """Long-running close calls emit a timeout debug log."""
    debug_kwargs = []
    monkeypatch.setattr(daemon.config, "_CLOSE_TIMEOUT_SECS", 0.01)
    monkeypatch.setattr(
        daemon.config, "_debug_log", lambda *args, **kwargs: debug_kwargs.append(kwargs)
    )
    gate = threading.Event()

    class SlowInterface:
        def close(self):
            # Outlive the 0.01s close timeout configured above.
            gate.wait(timeout=0.1)

    daemon._close_interface(SlowInterface())
    assert any("timeout_seconds" in kwargs for kwargs in debug_kwargs)
def test_close_interface_immediate_path(monkeypatch):
    """A zero timeout calls ``close`` inline without threading."""
    monkeypatch.setattr(daemon.config, "_CLOSE_TIMEOUT_SECS", 0)
    close_calls = []

    class ImmediateInterface:
        def close(self):
            close_calls.append(True)

    daemon._close_interface(ImmediateInterface())
    assert close_calls == [True]
def test_ble_interface_detection():
    """Detect BLE module names reliably."""

    class BleLike:
        __module__ = "meshtastic.ble_interface"

    class SerialLike:
        __module__ = "meshtastic.serial"

    assert daemon._is_ble_interface(BleLike()) is True
    assert daemon._is_ble_interface(SerialLike()) is False
    assert daemon._is_ble_interface(None) is False
def test_process_ingestor_heartbeat_with_extracted_host(monkeypatch):
    """Host id extraction triggers heartbeat announcement flag updates."""
    # Mutable cells observed by the lambdas below: current host id, every
    # ingestor id registered, and the ``force`` flag of each queued heartbeat.
    host_ids: list[str | None] = [None]
    ingestor_ids: list[str | None] = []
    queued: list[bool] = []
    monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: host_ids[0])
    monkeypatch.setattr(
        daemon.interfaces, "_extract_host_node_id", lambda iface: "!abcd"
    )
    monkeypatch.setattr(
        daemon.handlers,
        "register_host_node_id",
        lambda node: host_ids.__setitem__(0, node),
    )
    monkeypatch.setattr(daemon.ingestors, "set_ingestor_node_id", ingestor_ids.append)
    monkeypatch.setattr(
        daemon.ingestors,
        "queue_ingestor_heartbeat",
        lambda force: queued.append(force) or True,
    )
    # First pass: no host id known yet and no announcement sent, so the
    # extracted id must be registered and a forced heartbeat queued.
    assert (
        daemon._process_ingestor_heartbeat(object(), ingestor_announcement_sent=False)
        is True
    )
    assert host_ids[0] == "!abcd"
    assert ingestor_ids[-1] == "!abcd"
    assert queued[-1] is True
    # Second pass: host id already known and announcement already sent, so
    # the heartbeat is queued without the force flag.
    monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: "!abcd")
    monkeypatch.setattr(
        daemon.ingestors,
        "queue_ingestor_heartbeat",
        lambda force: queued.append(force) or False,
    )
    assert (
        daemon._process_ingestor_heartbeat(object(), ingestor_announcement_sent=True)
        is True
    )
    assert queued[-1] is False
def test_connected_state_branches(monkeypatch):
    """Connection state resolves across multiple attribute forms."""
    # A genuine threading.Event with is_set() -> True yields a definite True.
    real_event = threading.Event()
    real_event.set()
    assert daemon._connected_state(real_event) is True

    class ReturnsFalseWhenCalled:
        def __call__(self):
            return False

    assert daemon._connected_state(ReturnsFalseWhenCalled()) is False

    class UnboolableCandidate:
        def __bool__(self):
            raise RuntimeError("cannot bool")

    # Candidates that cannot be interpreted resolve to None (unknown).
    assert daemon._connected_state(UnboolableCandidate()) is None

    class BrokenIsSet:
        def is_set(self):
            raise RuntimeError("broken")

    assert daemon._connected_state(BrokenIsSet()) is None
def _configure_common_defaults(
    monkeypatch, *, energy_saving: bool = False, inactivity: float = 0.0
):
    """Set fast configuration defaults shared by daemon integration tests."""
    # All delays and timeouts collapse to zero so the main loop finishes in
    # a single iteration during tests.
    overrides = {
        "SNAPSHOT_SECS": 0,
        "_RECONNECT_INITIAL_DELAY_SECS": 0,
        "_RECONNECT_MAX_DELAY_SECS": 0,
        "_CLOSE_TIMEOUT_SECS": 0,
        "ENERGY_SAVING": energy_saving,
        "_ENERGY_ONLINE_DURATION_SECS": 0 if energy_saving else 0.0,
        "_ENERGY_SLEEP_SECS": 0.0,
        "_INGESTOR_HEARTBEAT_SECS": 0,
        "_INACTIVITY_RECONNECT_SECS": inactivity,
        "CONNECTION": "serial0",
    }
    for attr, value in overrides.items():
        monkeypatch.setattr(daemon.config, attr, value)
class DummyInterface:
    """Lightweight mesh interface stand-in used for daemon integration tests."""

    def __init__(self, *, nodes=None, is_connected=True, client_present=True):
        # Default node table mirrors a single known peer.
        self.nodes = {"!node": {"id": 1}} if nodes is None else nodes
        self.isConnected = is_connected
        self.client = object() if client_present else None

    def close(self):
        """No-op close; the daemon only requires the attribute to exist."""
        return None
def test_main_happy_path(monkeypatch):
    """The main loop processes snapshots and heartbeats once before stopping."""
    _configure_common_defaults(monkeypatch)
    # AutoSetEvent makes the stop event trip on its first wait, so main()
    # runs exactly one loop iteration.
    monkeypatch.setattr(
        daemon,
        "threading",
        types.SimpleNamespace(
            Event=AutoSetEvent,
            current_thread=threading.current_thread,
            main_thread=threading.main_thread,
        ),
    )
    monkeypatch.setattr(
        daemon, "pub", types.SimpleNamespace(subscribe=lambda *_args, **_kwargs: None)
    )
    monkeypatch.setattr(
        daemon.interfaces,
        "_create_serial_interface",
        lambda candidate: (DummyInterface(), candidate),
    )
    monkeypatch.setattr(daemon.interfaces, "_ensure_radio_metadata", lambda iface: None)
    monkeypatch.setattr(
        daemon.interfaces, "_ensure_channel_metadata", lambda iface: None
    )
    monkeypatch.setattr(
        daemon.interfaces, "_extract_host_node_id", lambda iface: "!host"
    )
    # Capture the host id the loop registers from the extracted value.
    host_id = {"value": None}
    monkeypatch.setattr(
        daemon.handlers,
        "register_host_node_id",
        lambda node: host_id.__setitem__("value", node),
    )
    monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: host_id["value"])
    monkeypatch.setattr(daemon.handlers, "upsert_node", lambda *_args, **_kwargs: None)
    monkeypatch.setattr(daemon.handlers, "last_packet_monotonic", lambda: None)
    # Record the ``force`` flag of every queued heartbeat.
    heartbeats: list[bool] = []
    monkeypatch.setattr(
        daemon.ingestors, "set_ingestor_node_id", lambda *_args, **_kwargs: None
    )
    monkeypatch.setattr(
        daemon.ingestors,
        "queue_ingestor_heartbeat",
        lambda force: heartbeats.append(force) or True,
    )
    daemon.main()
    assert heartbeats
    assert host_id["value"] == "!host"
    # The first FakeEvent created is the loop's stop event; it must end set.
    assert FakeEvent.instances and FakeEvent.instances[0].is_set() is True
def test_main_energy_saving_disconnect(monkeypatch):
    """Energy saving mode disconnects and sleeps when deadlines expire."""
    # Zeroed online/sleep durations make the energy-saving deadline expire
    # immediately on the first iteration.
    _configure_common_defaults(monkeypatch, energy_saving=True)
    monkeypatch.setattr(
        daemon,
        "threading",
        types.SimpleNamespace(
            Event=AutoSetEvent,
            current_thread=threading.current_thread,
            main_thread=threading.main_thread,
        ),
    )
    monkeypatch.setattr(
        daemon, "pub", types.SimpleNamespace(subscribe=lambda *_args, **_kwargs: None)
    )
    monkeypatch.setattr(
        daemon.interfaces,
        "_create_serial_interface",
        lambda candidate: (DummyInterface(), candidate),
    )
    monkeypatch.setattr(daemon.interfaces, "_ensure_radio_metadata", lambda iface: None)
    monkeypatch.setattr(
        daemon.interfaces, "_ensure_channel_metadata", lambda iface: None
    )
    monkeypatch.setattr(
        daemon.interfaces, "_extract_host_node_id", lambda iface: "!host"
    )
    monkeypatch.setattr(
        daemon.handlers, "register_host_node_id", lambda *_args, **_kwargs: None
    )
    monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: "!host")
    monkeypatch.setattr(daemon.handlers, "upsert_node", lambda *_args, **_kwargs: None)
    monkeypatch.setattr(daemon.handlers, "last_packet_monotonic", lambda: None)
    monkeypatch.setattr(
        daemon.ingestors, "set_ingestor_node_id", lambda *_args, **_kwargs: None
    )
    monkeypatch.setattr(
        daemon.ingestors, "queue_ingestor_heartbeat", lambda *_args, **_kwargs: True
    )
    daemon.main()
    # The loop must terminate cleanly with its stop event set.
    assert FakeEvent.instances and FakeEvent.instances[0].is_set() is True
def test_main_inactivity_reconnect(monkeypatch):
    """Inactivity triggers reconnect attempts and respects stop events."""
    # A 0.5s inactivity threshold combined with the stubbed monotonic clock
    # below forces the loop to see the connection as stale.
    _configure_common_defaults(monkeypatch, inactivity=0.5)
    monkeypatch.setattr(
        daemon,
        "threading",
        types.SimpleNamespace(
            Event=AutoSetEvent,
            current_thread=threading.current_thread,
            main_thread=threading.main_thread,
        ),
    )
    monkeypatch.setattr(
        daemon, "pub", types.SimpleNamespace(subscribe=lambda *_args, **_kwargs: None)
    )
    # First connection attempt yields a disconnected interface, the second a
    # connected one, exercising the reconnect path.
    interface_cycle = iter(
        [DummyInterface(is_connected=False), DummyInterface(is_connected=True)]
    )
    monkeypatch.setattr(
        daemon.interfaces,
        "_create_serial_interface",
        lambda candidate: (next(interface_cycle), candidate),
    )
    monkeypatch.setattr(daemon.interfaces, "_ensure_radio_metadata", lambda iface: None)
    monkeypatch.setattr(
        daemon.interfaces, "_ensure_channel_metadata", lambda iface: None
    )
    monkeypatch.setattr(
        daemon.interfaces, "_extract_host_node_id", lambda iface: "!host"
    )
    monkeypatch.setattr(
        daemon.handlers, "register_host_node_id", lambda *_args, **_kwargs: None
    )
    monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: "!host")
    monkeypatch.setattr(daemon.handlers, "upsert_node", lambda *_args, **_kwargs: None)
    # Advancing fake clock: each reading is one second later, so the last
    # packet (at 0.0) quickly exceeds the 0.5s inactivity window.
    monotonic_calls = iter([0.0, 1.0, 2.0, 3.0, 4.0])
    monkeypatch.setattr(daemon.time, "monotonic", lambda: next(monotonic_calls))
    monkeypatch.setattr(daemon.handlers, "last_packet_monotonic", lambda: 0.0)
    monkeypatch.setattr(
        daemon.ingestors, "set_ingestor_node_id", lambda *_args, **_kwargs: None
    )
    monkeypatch.setattr(
        daemon.ingestors, "queue_ingestor_heartbeat", lambda *_args, **_kwargs: True
    )
    daemon.main()
    assert any(event.is_set() for event in FakeEvent.instances)
+183
View File
@@ -0,0 +1,183 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import base64
import io
import json
import sys
from meshtastic.protobuf import mesh_pb2
from meshtastic.protobuf import telemetry_pb2
from data.mesh_ingestor import decode_payload
def run_main_with_input(payload: dict) -> tuple[int, dict]:
    """Run ``decode_payload.main`` with ``payload`` serialized as JSON stdin.

    Returns the exit status and the JSON document printed to stdout (an
    empty dict when nothing was printed).
    """
    fake_stdin = io.StringIO(json.dumps(payload))
    fake_stdout = io.StringIO()
    saved_streams = (sys.stdin, sys.stdout)
    sys.stdin, sys.stdout = fake_stdin, fake_stdout
    try:
        status = decode_payload.main()
    finally:
        # Restore the real streams even if main() raises.
        sys.stdin, sys.stdout = saved_streams
    printed = fake_stdout.getvalue() or "{}"
    return status, json.loads(printed)
def test_decode_payload_position_success():
    """A Position protobuf on port 3 decodes into a POSITION_APP dict."""
    position = mesh_pb2.Position(
        latitude_i=525598720,
        longitude_i=136577024,
        altitude=11,
        precision_bits=13,
    )
    encoded = base64.b64encode(position.SerializeToString()).decode("ascii")
    decoded = decode_payload._decode_payload(3, encoded)
    assert decoded["type"] == "POSITION_APP"
    payload = decoded["payload"]
    assert payload["latitude_i"] == 525598720
    assert payload["longitude_i"] == 136577024
    assert payload["altitude"] == 11
def test_decode_payload_rejects_invalid_payload():
    """Undecodable input is reported as an invalid-payload error."""
    result = decode_payload._decode_payload(3, "not-base64")
    # A startswith check implies the substring check the test previously
    # duplicated, so a single assertion pins the error prefix.
    assert result["error"].startswith("invalid-payload")
def test_decode_payload_rejects_unsupported_port():
    """Port numbers absent from PORTNUM_MAP yield an unsupported-port error."""
    encoded = base64.b64encode(b"ok").decode("ascii")
    result = decode_payload._decode_payload(999, encoded)
    assert result["error"] == "unsupported-port"
    assert result["portnum"] == 999
def test_main_handles_invalid_json():
    """main() exits with status 1 and an invalid-json error on bad stdin.

    The run_main_with_input helper cannot be reused here because it
    serializes a dict; this test needs raw, malformed stdin text.
    """
    fake_in, fake_out = io.StringIO("nope"), io.StringIO()
    saved_in, saved_out = sys.stdin, sys.stdout
    sys.stdin, sys.stdout = fake_in, fake_out
    try:
        status = decode_payload.main()
    finally:
        sys.stdin, sys.stdout = saved_in, saved_out
    result = json.loads(fake_out.getvalue())
    assert status == 1
    assert result["error"].startswith("invalid-json")
def test_main_requires_portnum():
    """Omitting portnum yields a missing-portnum error and status 1."""
    payload = {"payload_b64": base64.b64encode(b"ok").decode("ascii")}
    status, result = run_main_with_input(payload)
    assert (status, result["error"]) == (1, "missing-portnum")
def test_main_requires_integer_portnum():
    """A string-typed portnum is treated the same as a missing one."""
    payload = {
        "portnum": "3",
        "payload_b64": base64.b64encode(b"ok").decode("ascii"),
    }
    status, result = run_main_with_input(payload)
    assert (status, result["error"]) == (1, "missing-portnum")
def test_main_requires_payload():
    """Omitting payload_b64 yields a missing-payload error and status 1."""
    status, result = run_main_with_input({"portnum": 3})
    assert (status, result["error"]) == (1, "missing-payload")
def test_main_requires_string_payload():
    """A non-string payload_b64 is treated the same as a missing one."""
    status, result = run_main_with_input({"portnum": 3, "payload_b64": 123})
    assert (status, result["error"]) == (1, "missing-payload")
def test_main_success_position_payload():
    """A well-formed position payload round-trips through main()."""
    proto = mesh_pb2.Position(latitude_i=525598720, longitude_i=136577024)
    encoded = base64.b64encode(proto.SerializeToString()).decode("ascii")
    status, result = run_main_with_input({"portnum": 3, "payload_b64": encoded})
    assert status == 0
    assert result["type"] == "POSITION_APP"
    assert result["payload"]["latitude_i"] == 525598720
def test_decode_payload_handles_parse_failure():
    """A protobuf ParseFromString failure surfaces as a decode-failed error."""

    class BrokenMessage:
        def ParseFromString(self, _payload):
            raise ValueError("boom")

    decode_payload.PORTNUM_MAP[99] = ("BROKEN", BrokenMessage)
    try:
        payload_b64 = base64.b64encode(b"\x00").decode("ascii")
        result = decode_payload._decode_payload(99, payload_b64)
        assert result["error"].startswith("decode-failed")
        assert result["type"] == "BROKEN"
    finally:
        # Always undo the PORTNUM_MAP patch, even when an assertion fails,
        # so a failure here cannot leak the fake entry into later tests.
        decode_payload.PORTNUM_MAP.pop(99, None)
def test_main_entrypoint_executes():
    """Running the module as __main__ exits cleanly with status 0."""
    import runpy

    payload = {"portnum": 3, "payload_b64": base64.b64encode(b"").decode("ascii")}
    stdin = io.StringIO(json.dumps(payload))
    stdout = io.StringIO()
    original_stdin = sys.stdin
    original_stdout = sys.stdout
    exit_code = None
    try:
        sys.stdin = stdin
        sys.stdout = stdout
        try:
            runpy.run_module("data.mesh_ingestor.decode_payload", run_name="__main__")
        except SystemExit as exc:
            exit_code = exc.code
    finally:
        sys.stdin = original_stdin
        sys.stdout = original_stdout
    # The previous version only asserted inside the except branch, so a
    # module that never called sys.exit() would pass silently. Require the
    # SystemExit to have actually happened with status 0.
    assert exit_code == 0, "expected the entrypoint to call sys.exit(0)"
def test_decode_payload_telemetry_success():
    """A TELEMETRY_APP payload decodes and preserves its timestamp."""
    proto = telemetry_pb2.Telemetry(time=123)
    encoded = base64.b64encode(proto.SerializeToString()).decode("ascii")
    decoded = decode_payload._decode_payload(67, encoded)
    assert decoded["type"] == "TELEMETRY_APP"
    assert decoded["payload"]["time"] == 123
+484
View File
@@ -20,6 +20,7 @@ import re
import sys
import threading
import types
import time
"""End-to-end tests covering the mesh ingestion package."""
@@ -214,6 +215,9 @@ def mesh_module(monkeypatch):
if attr in module.__dict__:
delattr(module, attr)
module.channels._reset_channel_cache()
module.ingestors.STATE.start_time = int(time.time())
module.ingestors.STATE.last_heartbeat = None
module.ingestors.STATE.node_id = None
yield module
@@ -281,6 +285,59 @@ def test_instance_domain_infers_scheme_for_hostnames(mesh_module, monkeypatch):
mesh_module.INSTANCE = mesh_module.config.INSTANCE
def test_parse_channel_names_applies_allowlist(mesh_module):
"""Ensure allowlists reuse the shared channel parser."""
mesh = mesh_module
previous_allowed = mesh.ALLOWED_CHANNELS
try:
parsed = mesh.config._parse_channel_names(" Primary ,Chat ,primary , Ops ")
mesh.ALLOWED_CHANNELS = parsed
assert parsed == ("Primary", "Chat", "Ops")
assert mesh.channels.allowed_channel_names() == ("Primary", "Chat", "Ops")
assert mesh.channels.is_allowed_channel("chat")
assert mesh.channels.is_allowed_channel(" ops ")
assert not mesh.channels.is_allowed_channel("unknown")
assert not mesh.channels.is_allowed_channel(None)
assert mesh.config._parse_channel_names("") == ()
finally:
mesh.ALLOWED_CHANNELS = previous_allowed
def test_allowed_channel_defaults_allow_all(mesh_module):
"""Ensure unset allowlists do not block any channels."""
mesh = mesh_module
previous_allowed = mesh.ALLOWED_CHANNELS
try:
mesh.ALLOWED_CHANNELS = ()
assert mesh.channels.is_allowed_channel("Any")
finally:
mesh.ALLOWED_CHANNELS = previous_allowed
def test_parse_hidden_channels_deduplicates_names(mesh_module):
"""Ensure hidden channel parsing strips blanks and deduplicates."""
mesh = mesh_module
previous_hidden = mesh.HIDDEN_CHANNELS
try:
parsed = mesh.config._parse_hidden_channels(" Chat , ,Secret ,chat")
mesh.HIDDEN_CHANNELS = parsed
assert parsed == ("Chat", "Secret")
assert mesh.channels.hidden_channel_names() == ("Chat", "Secret")
assert mesh.channels.is_hidden_channel(" chat ")
assert not mesh.channels.is_hidden_channel("unknown")
assert mesh.config._parse_hidden_channels("") == ()
finally:
mesh.HIDDEN_CHANNELS = previous_hidden
def test_subscribe_receive_topics_covers_all_handlers(mesh_module, monkeypatch):
mesh = mesh_module
daemon_mod = sys.modules["data.mesh_ingestor.daemon"]
@@ -1872,6 +1929,110 @@ def test_store_packet_dict_allows_primary_channel_broadcast(mesh_module, monkeyp
assert priority == mesh._MESSAGE_POST_PRIORITY
def test_store_packet_dict_accepts_routing_app_messages(mesh_module, monkeypatch):
"""Ensure routing app payloads are treated as message posts."""
mesh = mesh_module
captured = []
monkeypatch.setattr(
mesh,
"_queue_post_json",
lambda path, payload, *, priority: captured.append((path, payload, priority)),
)
packet = {
"id": 333,
"rxTime": 999,
"fromId": "!node",
"toId": "^all",
"channel": 0,
"decoded": {"payload": "GAA=", "portnum": "ROUTING_APP"},
}
mesh.store_packet_dict(packet)
assert captured, "Expected routing packet to be stored"
path, payload, priority = captured[0]
assert path == "/api/messages"
assert payload["portnum"] == "ROUTING_APP"
assert payload["text"] == "GAA="
assert payload["channel"] == 0
assert payload["encrypted"] is None
assert priority == mesh._MESSAGE_POST_PRIORITY
def test_store_packet_dict_serializes_routing_payloads(mesh_module, monkeypatch):
"""Ensure routing payloads are serialized when text is absent."""
mesh = mesh_module
captured = []
monkeypatch.setattr(
mesh,
"_queue_post_json",
lambda path, payload, *, priority: captured.append((path, payload, priority)),
)
packet = {
"id": 334,
"rxTime": 1000,
"fromId": "!node",
"toId": "^all",
"channel": 0,
"decoded": {
"payload": b"\x01\x02",
"portnum": "ROUTING_APP",
},
}
mesh.store_packet_dict(packet)
assert captured, "Expected routing packet to be stored"
_, payload, _ = captured[0]
assert payload["text"] == "AQI="
captured.clear()
packet["decoded"]["payload"] = {"kind": "ack"}
mesh.store_packet_dict(packet)
assert captured, "Expected routing packet to be stored"
_, payload, _ = captured[0]
assert payload["text"] == '{"kind": "ack"}'
captured.clear()
packet["decoded"]["portnum"] = 7
packet["decoded"]["payload"] = b"\x00"
packet["decoded"]["routing"] = {"errorReason": "NONE"}
mesh.store_packet_dict(packet)
assert captured, "Expected numeric routing packet to be stored"
_, payload, _ = captured[0]
assert payload["text"] == "AA=="
def test_portnum_candidates_reads_enum_values(mesh_module, monkeypatch):
"""Ensure portnum candidates include enum and constants when available."""
mesh = mesh_module
module_name = "meshtastic.portnums_pb2"
class DummyPortNum:
@staticmethod
def Value(name):
if name == "ROUTING_APP":
return 7
raise KeyError(name)
dummy_module = types.SimpleNamespace(PortNum=DummyPortNum, ROUTING_APP=8)
monkeypatch.setitem(sys.modules, module_name, dummy_module)
candidates = mesh.handlers._portnum_candidates("ROUTING_APP")
assert 7 in candidates
assert 8 in candidates
def test_store_packet_dict_appends_channel_name(mesh_module, monkeypatch, capsys):
mesh = mesh_module
mesh.channels._reset_channel_cache()
@@ -1932,6 +2093,146 @@ def test_store_packet_dict_appends_channel_name(mesh_module, monkeypatch, capsys
assert "channel_display='Chat'" in log_output
def test_store_packet_dict_skips_hidden_channel(mesh_module, monkeypatch, capsys):
mesh = mesh_module
mesh.channels._reset_channel_cache()
mesh.config.MODEM_PRESET = None
class DummyInterface:
def __init__(self) -> None:
self.localNode = SimpleNamespace(
channels=[
SimpleNamespace(
role=1,
settings=SimpleNamespace(name="Primary"),
),
SimpleNamespace(
role=2,
index=5,
settings=SimpleNamespace(name="Chat"),
),
]
)
def waitForConfig(self):
return None
mesh.channels.capture_from_interface(DummyInterface())
capsys.readouterr()
captured: list[tuple[str, dict, int]] = []
ignored: list[str] = []
monkeypatch.setattr(
mesh,
"_queue_post_json",
lambda path, payload, *, priority: captured.append((path, payload, priority)),
)
monkeypatch.setattr(
mesh.handlers,
"_record_ignored_packet",
lambda packet, *, reason: ignored.append(reason),
)
previous_debug = mesh.config.DEBUG
previous_hidden = mesh.HIDDEN_CHANNELS
previous_allowed = mesh.ALLOWED_CHANNELS
mesh.config.DEBUG = True
mesh.DEBUG = True
mesh.ALLOWED_CHANNELS = ("Chat",)
mesh.HIDDEN_CHANNELS = ("Chat",)
try:
packet = {
"id": "999",
"rxTime": 24_680,
"from": "!sender",
"to": "^all",
"channel": 5,
"decoded": {"text": "hidden msg", "portnum": 1},
}
mesh.store_packet_dict(packet)
assert captured == []
assert ignored == ["hidden-channel"]
assert "Ignored packet on hidden channel" in capsys.readouterr().out
finally:
mesh.HIDDEN_CHANNELS = previous_hidden
mesh.ALLOWED_CHANNELS = previous_allowed
mesh.config.DEBUG = previous_debug
mesh.DEBUG = previous_debug
def test_store_packet_dict_skips_disallowed_channel(mesh_module, monkeypatch, capsys):
mesh = mesh_module
mesh.channels._reset_channel_cache()
mesh.config.MODEM_PRESET = None
class DummyInterface:
def __init__(self) -> None:
self.localNode = SimpleNamespace(
channels=[
SimpleNamespace(
role=1,
settings=SimpleNamespace(name="Primary"),
),
SimpleNamespace(
role=2,
index=5,
settings=SimpleNamespace(name="Chat"),
),
]
)
def waitForConfig(self):
return None
mesh.channels.capture_from_interface(DummyInterface())
capsys.readouterr()
captured: list[tuple[str, dict, int]] = []
ignored: list[str] = []
monkeypatch.setattr(
mesh,
"_queue_post_json",
lambda path, payload, *, priority: captured.append((path, payload, priority)),
)
monkeypatch.setattr(
mesh.handlers,
"_record_ignored_packet",
lambda packet, *, reason: ignored.append(reason),
)
previous_debug = mesh.config.DEBUG
previous_allowed = mesh.ALLOWED_CHANNELS
previous_hidden = mesh.HIDDEN_CHANNELS
mesh.config.DEBUG = True
mesh.DEBUG = True
mesh.ALLOWED_CHANNELS = ("Primary",)
mesh.HIDDEN_CHANNELS = ()
try:
packet = {
"id": "1001",
"rxTime": 25_680,
"from": "!sender",
"to": "^all",
"channel": 5,
"decoded": {"text": "disallowed msg", "portnum": 1},
}
mesh.store_packet_dict(packet)
assert captured == []
assert ignored == ["disallowed-channel"]
assert "Ignored packet on disallowed channel" in capsys.readouterr().out
finally:
mesh.ALLOWED_CHANNELS = previous_allowed
mesh.HIDDEN_CHANNELS = previous_hidden
mesh.config.DEBUG = previous_debug
mesh.DEBUG = previous_debug
def test_store_packet_dict_includes_encrypted_payload(mesh_module, monkeypatch):
mesh = mesh_module
captured = []
@@ -2443,6 +2744,62 @@ def test_parse_ble_target_rejects_invalid_values(mesh_module):
assert mesh._parse_ble_target("zz:zz:zz:zz:zz:zz") is None
def test_parse_ble_target_accepts_mac_addresses(mesh_module):
"""Test that _parse_ble_target accepts valid MAC address format (Linux/Windows)."""
mesh = mesh_module
# Valid MAC addresses should be accepted and normalized to uppercase
assert mesh._parse_ble_target("ED:4D:9E:95:CF:60") == "ED:4D:9E:95:CF:60"
assert mesh._parse_ble_target("ed:4d:9e:95:cf:60") == "ED:4D:9E:95:CF:60"
assert mesh._parse_ble_target("AA:BB:CC:DD:EE:FF") == "AA:BB:CC:DD:EE:FF"
assert mesh._parse_ble_target("00:11:22:33:44:55") == "00:11:22:33:44:55"
# With whitespace
assert mesh._parse_ble_target(" ED:4D:9E:95:CF:60 ") == "ED:4D:9E:95:CF:60"
# Invalid MAC addresses should be rejected
assert mesh._parse_ble_target("ED:4D:9E:95:CF") is None # Too short
assert mesh._parse_ble_target("ED:4D:9E:95:CF:60:AB") is None # Too long
assert mesh._parse_ble_target("GG:HH:II:JJ:KK:LL") is None # Invalid hex
def test_parse_ble_target_accepts_uuids(mesh_module):
"""Test that _parse_ble_target accepts valid UUID format (macOS)."""
mesh = mesh_module
# Valid UUIDs should be accepted and normalized to uppercase
assert (
mesh._parse_ble_target("C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E")
== "C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E"
)
assert (
mesh._parse_ble_target("c0aea92f-045e-9b82-c9a6-a1fd822b3a9e")
== "C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E"
)
assert (
mesh._parse_ble_target("12345678-1234-5678-9ABC-DEF012345678")
== "12345678-1234-5678-9ABC-DEF012345678"
)
# With whitespace
assert (
mesh._parse_ble_target(" C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E ")
== "C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E"
)
# Invalid UUIDs should be rejected
assert mesh._parse_ble_target("C0AEA92F-045E-9B82-C9A6") is None # Too short
assert (
mesh._parse_ble_target("C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E-EXTRA") is None
) # Too long
assert (
mesh._parse_ble_target("GGGGGGGG-GGGG-GGGG-GGGG-GGGGGGGGGGGG") is None
) # Invalid hex
assert (
mesh._parse_ble_target("C0AEA92F:045E:9B82:C9A6:A1FD822B3A9E") is None
) # Wrong separator
def test_parse_network_target_additional_cases(mesh_module):
mesh = mesh_module
@@ -2575,6 +2932,133 @@ def test_queue_post_json_skips_when_active(mesh_module, monkeypatch):
mesh._clear_post_queue()
def test_process_ingestor_heartbeat_updates_flag(mesh_module, monkeypatch):
mesh = mesh_module
mesh.ingestors.STATE.last_heartbeat = None
mesh.ingestors.STATE.node_id = None
mesh.handlers.register_host_node_id(None)
recorded = {"force": None, "count": 0}
def fake_queue_ingestor_heartbeat(*, force):
recorded["force"] = force
recorded["count"] += 1
return True
monkeypatch.setattr(
mesh.ingestors, "queue_ingestor_heartbeat", fake_queue_ingestor_heartbeat
)
class DummyIface:
def __init__(self):
self.myNodeNum = 0xCAFEBABE
updated = mesh._process_ingestor_heartbeat(
DummyIface(), ingestor_announcement_sent=False
)
assert updated is True
assert recorded["force"] is True
assert recorded["count"] == 1
assert mesh.handlers.host_node_id() == "!cafebabe"
def test_process_ingestor_heartbeat_skips_without_host(mesh_module, monkeypatch):
mesh = mesh_module
mesh.handlers.register_host_node_id(None)
mesh.ingestors.STATE.node_id = None
mesh.ingestors.STATE.last_heartbeat = None
monkeypatch.setattr(mesh.ingestors, "queue_ingestor_heartbeat", lambda **_: False)
updated = mesh._process_ingestor_heartbeat(None, ingestor_announcement_sent=False)
assert updated is False
assert mesh.ingestors.STATE.node_id is None
assert mesh.ingestors.STATE.last_heartbeat is None
def test_ingestor_heartbeat_respects_interval_override(mesh_module, monkeypatch):
mesh = mesh_module
mesh.ingestors.STATE.start_time = 100
mesh.ingestors.STATE.last_heartbeat = 1_000
mesh.ingestors.STATE.node_id = "!abcd0001"
mesh._INGESTOR_HEARTBEAT_SECS = 10_000
monkeypatch.setattr(mesh.ingestors.time, "time", lambda: 2_000)
sent = mesh.ingestors.queue_ingestor_heartbeat()
assert sent is False
assert mesh.ingestors.STATE.last_heartbeat == 1_000
def test_setting_ingestor_attr_propagates(mesh_module):
mesh = mesh_module
mesh._INGESTOR_HEARTBEAT_SECS = 123
assert mesh.config._INGESTOR_HEARTBEAT_SECS == 123
def test_queue_ingestor_heartbeat_requires_node_id(mesh_module, monkeypatch):
mesh = mesh_module
captured = []
monkeypatch.setattr(
mesh.queue,
"_queue_post_json",
lambda path, payload, *, priority, send=None: captured.append(
(path, payload, priority)
),
)
mesh.ingestors.STATE.node_id = None
mesh.ingestors.STATE.last_heartbeat = None
queued = mesh.ingestors.queue_ingestor_heartbeat(force=True)
assert queued is False
assert captured == []
def test_queue_ingestor_heartbeat_enqueues_and_throttles(mesh_module, monkeypatch):
mesh = mesh_module
captured = []
monkeypatch.setattr(
mesh.queue,
"_queue_post_json",
lambda path, payload, *, priority, send=None: captured.append(
(path, payload, priority)
),
)
mesh.ingestors.STATE.start_time = 1_700_000_000
mesh.ingestors.STATE.last_heartbeat = None
mesh.ingestors.STATE.node_id = None
mesh.config.LORA_FREQ = 915
mesh.config.MODEM_PRESET = "LongFast"
mesh.ingestors.set_ingestor_node_id("!CAFEBABE")
first = mesh.ingestors.queue_ingestor_heartbeat(force=True)
second = mesh.ingestors.queue_ingestor_heartbeat()
assert first is True
assert second is False
assert len(captured) == 1
path, payload, priority = captured[0]
assert path == "/api/ingestors"
assert payload["node_id"] == "!cafebabe"
assert payload["start_time"] == 1_700_000_000
assert payload["last_seen_time"] >= payload["start_time"]
assert payload["version"] == mesh.VERSION
assert payload["lora_freq"] == 915
assert payload["modem_preset"] == "LongFast"
assert priority == mesh.queue._INGESTOR_POST_PRIORITY
def test_mesh_version_export_matches_package(mesh_module):
import data
mesh = mesh_module
assert mesh.VERSION == data.VERSION
def test_node_to_dict_handles_proto_fallback(mesh_module, monkeypatch):
mesh = mesh_module
+11
View File
@@ -23,6 +23,9 @@ ENV BUNDLE_FORCE_RUBY_PLATFORM=true
# Install build dependencies and SQLite3
RUN apk add --no-cache \
build-base \
python3 \
py3-pip \
py3-virtualenv \
sqlite-dev \
linux-headers \
pkgconfig
@@ -38,11 +41,16 @@ RUN bundle config set --local force_ruby_platform true && \
bundle config set --local without 'development test' && \
bundle install --jobs=4 --retry=3
# Install Meshtastic decoder dependencies in a dedicated venv
RUN python3 -m venv /opt/meshtastic-venv && \
/opt/meshtastic-venv/bin/pip install --no-cache-dir meshtastic protobuf
# Production stage
FROM ruby:3.3-alpine AS production
# Install runtime dependencies
RUN apk add --no-cache \
python3 \
sqlite \
tzdata \
curl
@@ -56,6 +64,7 @@ WORKDIR /app
# Copy installed gems from builder stage
COPY --from=builder /usr/local/bundle /usr/local/bundle
COPY --from=builder /opt/meshtastic-venv /opt/meshtastic-venv
# Copy application code (excluding the Dockerfile which is not required at runtime)
COPY --chown=potatomesh:potatomesh web/app.rb ./
@@ -70,6 +79,7 @@ COPY --chown=potatomesh:potatomesh web/scripts ./scripts
# Copy SQL schema files from data directory
COPY --chown=potatomesh:potatomesh data/*.sql /data/
COPY --chown=potatomesh:potatomesh data/mesh_ingestor/decode_payload.py /app/data/mesh_ingestor/decode_payload.py
# Create data and configuration directories with correct ownership
RUN mkdir -p /app/.local/share/potato-mesh \
@@ -85,6 +95,7 @@ EXPOSE 41447
# Default environment variables (can be overridden by host)
ENV RACK_ENV=production \
APP_ENV=production \
MESHTASTIC_PYTHON=/opt/meshtastic-venv/bin/python \
XDG_DATA_HOME=/app/.local/share \
XDG_CONFIG_HOME=/app/.config \
SITE_NAME="PotatoMesh Demo" \
+6
View File
@@ -49,6 +49,12 @@ require_relative "application/worker_pool"
require_relative "application/federation"
require_relative "application/prometheus"
require_relative "application/queries"
require_relative "application/meshtastic/channel_names"
require_relative "application/meshtastic/channel_hash"
require_relative "application/meshtastic/protobuf"
require_relative "application/meshtastic/rainbow_table"
require_relative "application/meshtastic/cipher"
require_relative "application/meshtastic/payload_decoder"
require_relative "application/data_processing"
require_relative "application/filesystem"
require_relative "application/instances"
@@ -110,11 +110,20 @@ module PotatoMesh
["!#{canonical_hex}", parsed, short_id]
end
# Check whether a node reference addresses the mesh broadcast target.
#
# @param node_ref [Object] node identifier, optionally a "!"-prefixed hex string.
# @param fallback_num [Integer, nil] numeric identifier consulted first.
# @return [Boolean] true when either form equals 0xFFFFFFFF.
def broadcast_node_ref?(node_ref, fallback_num = nil)
  return true if fallback_num == 0xFFFFFFFF

  ref = string_or_nil(node_ref)
  return false if ref.nil?

  ref.delete_prefix("!").strip.casecmp?("ffffffff")
end
def ensure_unknown_node(db, node_ref, fallback_num = nil, heard_time: nil)
parts = canonical_node_parts(node_ref, fallback_num)
return unless parts
node_id, node_num, short_id = parts
return if broadcast_node_ref?(node_id, node_num)
existing = db.get_first_value(
"SELECT 1 FROM nodes WHERE node_id = ? LIMIT 1",
@@ -151,14 +160,25 @@ module PotatoMesh
inserted
end
def touch_node_last_seen(db, node_ref, fallback_num = nil, rx_time: nil, source: nil)
def touch_node_last_seen(
db,
node_ref,
fallback_num = nil,
rx_time: nil,
source: nil,
lora_freq: nil,
modem_preset: nil
)
timestamp = coerce_integer(rx_time)
return unless timestamp
node_id = nil
parts = canonical_node_parts(node_ref, fallback_num)
node_id, = parts if parts
if parts
node_id, node_num = parts
return if broadcast_node_ref?(node_id, node_num)
end
unless node_id
trimmed = string_or_nil(node_ref)
@@ -170,17 +190,22 @@ module PotatoMesh
end
end
return if broadcast_node_ref?(node_id, fallback_num)
return unless node_id
lora_freq = coerce_integer(lora_freq)
modem_preset = string_or_nil(modem_preset)
updated = false
with_busy_retry do
db.execute <<~SQL, [timestamp, timestamp, timestamp, node_id]
db.execute <<~SQL, [timestamp, timestamp, timestamp, lora_freq, modem_preset, node_id]
UPDATE nodes
SET last_heard = CASE
WHEN COALESCE(last_heard, 0) >= ? THEN last_heard
ELSE ?
END,
first_heard = COALESCE(first_heard, ?)
first_heard = COALESCE(first_heard, ?),
lora_freq = COALESCE(?, lora_freq),
modem_preset = COALESCE(?, modem_preset)
WHERE node_id = ?
SQL
updated ||= db.changes.positive?
@@ -193,12 +218,74 @@ module PotatoMesh
node_id: node_id,
timestamp: timestamp,
source: source || :unknown,
lora_freq: lora_freq,
modem_preset: modem_preset,
)
end
updated
end
# Insert or update an ingestor heartbeat payload.
#
# Accepts both snake_case and camelCase keys from the collector, clamps the
# reported timestamps into a sane window, and upserts the row keyed by the
# canonical node id. Timestamps only ever move forward on conflict.
#
# @param db [SQLite3::Database] open database handle.
# @param payload [Hash] ingestor payload from the collector.
# @return [Boolean] true when persistence succeeded.
def upsert_ingestor(db, payload)
return false unless payload.is_a?(Hash)
# Canonicalise the node reference; reject payloads without a usable id.
parts = canonical_node_parts(payload["node_id"] || payload["id"])
return false unless parts
node_id, = parts
now = Time.now.to_i
start_time = coerce_integer(payload["start_time"] || payload["startTime"]) || now
last_seen_time =
coerce_integer(payload["last_seen_time"] || payload["lastSeenTime"]) || start_time
# Clamp order matters: negatives -> 0, future values -> now, and only then
# force last_seen_time to be no earlier than start_time.
start_time = 0 if start_time.negative?
last_seen_time = 0 if last_seen_time.negative?
start_time = now if start_time > now
last_seen_time = now if last_seen_time > now
last_seen_time = start_time if last_seen_time < start_time
# A version string is mandatory; heartbeats without one are dropped.
version = string_or_nil(payload["version"] || payload["ingestorVersion"])
return false unless version
lora_freq = coerce_integer(payload["lora_freq"])
modem_preset = string_or_nil(payload["modem_preset"])
with_busy_retry do
db.execute <<~SQL, [node_id, start_time, last_seen_time, version, lora_freq, modem_preset]
INSERT INTO ingestors(node_id, start_time, last_seen_time, version, lora_freq, modem_preset)
VALUES(?,?,?,?,?,?)
ON CONFLICT(node_id) DO UPDATE SET
start_time = CASE
WHEN excluded.start_time > ingestors.start_time THEN excluded.start_time
ELSE ingestors.start_time
END,
last_seen_time = CASE
WHEN excluded.last_seen_time > ingestors.last_seen_time THEN excluded.last_seen_time
ELSE ingestors.last_seen_time
END,
version = COALESCE(excluded.version, ingestors.version),
lora_freq = COALESCE(excluded.lora_freq, ingestors.lora_freq),
modem_preset = COALESCE(excluded.modem_preset, ingestors.modem_preset)
SQL
end
true
rescue SQLite3::SQLException => e
# Persistence failures are logged but non-fatal to the caller.
warn_log(
"Failed to upsert ingestor record",
context: "data_processing.ingestors",
node_id: node_id,
error_class: e.class.name,
error_message: e.message,
)
false
end
def upsert_node(db, node_id, n)
user = n["user"] || {}
met = n["deviceMetrics"] || {}
@@ -417,20 +504,37 @@ module PotatoMesh
rx_iso ||= Time.at(rx_time).utc.iso8601
raw_node_id = payload["node_id"] || payload["from_id"] || payload["from"]
node_id = string_or_nil(raw_node_id)
node_id = "!#{node_id.delete_prefix("!").downcase}" if node_id&.start_with?("!")
raw_node_num = coerce_integer(payload["node_num"]) || coerce_integer(payload["num"])
node_id ||= format("!%08x", raw_node_num & 0xFFFFFFFF) if node_id.nil? && raw_node_num
payload_for_num = payload.is_a?(Hash) ? payload.dup : {}
payload_for_num["num"] ||= raw_node_num if raw_node_num
node_num = resolve_node_num(node_id, payload_for_num)
node_num ||= raw_node_num
canonical = normalize_node_id(db, node_id || node_num)
node_id = canonical if canonical
canonical_parts = canonical_node_parts(raw_node_id, raw_node_num)
if canonical_parts
node_id, node_num, = canonical_parts
else
node_id = string_or_nil(raw_node_id)
node_id = "!#{node_id.delete_prefix("!").downcase}" if node_id&.start_with?("!")
node_id ||= format("!%08x", raw_node_num & 0xFFFFFFFF) if node_id.nil? && raw_node_num
payload_for_num = payload.is_a?(Hash) ? payload.dup : {}
payload_for_num["num"] ||= raw_node_num if raw_node_num
node_num = resolve_node_num(node_id, payload_for_num)
node_num ||= raw_node_num
canonical = normalize_node_id(db, node_id || node_num)
node_id = canonical if canonical
end
lora_freq = coerce_integer(payload["lora_freq"] || payload["loraFrequency"])
modem_preset = string_or_nil(payload["modem_preset"] || payload["modemPreset"])
ensure_unknown_node(db, node_id || node_num, node_num, heard_time: rx_time)
touch_node_last_seen(db, node_id || node_num, node_num, rx_time: rx_time, source: :position)
touch_node_last_seen(
db,
node_id || node_num,
node_num,
rx_time: rx_time,
source: :position,
lora_freq: lora_freq,
modem_preset: modem_preset,
)
to_id = string_or_nil(payload["to_id"] || payload["to"])
@@ -674,7 +778,15 @@ module PotatoMesh
end
end
def update_node_from_telemetry(db, node_id, node_num, rx_time, metrics = {})
def update_node_from_telemetry(
db,
node_id,
node_num,
rx_time,
metrics = {},
lora_freq: nil,
modem_preset: nil
)
num = coerce_integer(node_num)
id = string_or_nil(node_id)
if id&.start_with?("!")
@@ -684,7 +796,15 @@ module PotatoMesh
return unless id
ensure_unknown_node(db, id, num, heard_time: rx_time)
touch_node_last_seen(db, id, num, rx_time: rx_time, source: :telemetry)
touch_node_last_seen(
db,
id,
num,
rx_time: rx_time,
source: :telemetry,
lora_freq: lora_freq,
modem_preset: modem_preset,
)
battery = coerce_float(metrics[:battery_level] || metrics["battery_level"])
voltage = coerce_float(metrics[:voltage] || metrics["voltage"])
@@ -828,17 +948,23 @@ module PotatoMesh
rx_iso ||= Time.at(rx_time).utc.iso8601
raw_node_id = payload["node_id"] || payload["from_id"] || payload["from"]
node_id = string_or_nil(raw_node_id)
node_id = "!#{node_id.delete_prefix("!").downcase}" if node_id&.start_with?("!")
raw_node_num = coerce_integer(payload["node_num"]) || coerce_integer(payload["num"])
payload_for_num = payload.dup
payload_for_num["num"] ||= raw_node_num if raw_node_num
node_num = resolve_node_num(node_id, payload_for_num)
node_num ||= raw_node_num
canonical_parts = canonical_node_parts(raw_node_id, raw_node_num)
if canonical_parts
node_id, node_num, = canonical_parts
else
node_id = string_or_nil(raw_node_id)
node_id = "!#{node_id.delete_prefix("!").downcase}" if node_id&.start_with?("!")
canonical = normalize_node_id(db, node_id || node_num)
node_id = canonical if canonical
payload_for_num = payload.dup
payload_for_num["num"] ||= raw_node_num if raw_node_num
node_num = resolve_node_num(node_id, payload_for_num)
node_num ||= raw_node_num
canonical = normalize_node_id(db, node_id || node_num)
node_id = canonical if canonical
end
from_id = string_or_nil(payload["from_id"]) || node_id
to_id = string_or_nil(payload["to_id"] || payload["to"])
@@ -853,6 +979,8 @@ module PotatoMesh
rssi = coerce_integer(payload["rssi"])
bitfield = coerce_integer(payload["bitfield"])
payload_b64 = string_or_nil(payload["payload_b64"] || payload["payload"])
lora_freq = coerce_integer(payload["lora_freq"] || payload["loraFrequency"])
modem_preset = string_or_nil(payload["modem_preset"] || payload["modemPreset"])
telemetry_section = normalize_json_object(payload["telemetry"])
device_metrics = normalize_json_object(payload["device_metrics"] || payload["deviceMetrics"])
@@ -1235,13 +1363,21 @@ module PotatoMesh
SQL
end
update_node_from_telemetry(db, node_id, node_num, rx_time, {
battery_level: battery_level,
voltage: voltage,
channel_utilization: channel_utilization,
air_util_tx: air_util_tx,
uptime_seconds: uptime_seconds,
})
update_node_from_telemetry(
db,
node_id,
node_num,
rx_time,
{
battery_level: battery_level,
voltage: voltage,
channel_utilization: channel_utilization,
air_util_tx: air_util_tx,
uptime_seconds: uptime_seconds,
},
lora_freq: lora_freq,
modem_preset: modem_preset,
)
end
# Persist a traceroute observation and its hop path.
@@ -1312,6 +1448,59 @@ module PotatoMesh
end
end
# Attempt to decrypt an encrypted Meshtastic message payload.
#
# @param message [Hash] message payload supplied by the ingestor.
# @param packet_id [Integer] message packet identifier.
# @param from_id [String, nil] canonical node identifier when available.
# @param from_num [Integer, nil] numeric node identifier when available.
# @param channel_index [Integer, nil] channel hash index.
# @return [Hash, nil] decrypted payload metadata when parsing succeeds.
def decrypt_meshtastic_message(message, packet_id, from_id, from_num, channel_index)
return nil unless message.is_a?(Hash)
cipher_b64 = string_or_nil(message["encrypted"])
return nil unless cipher_b64
# NOTE(review): test-environment probing (env vars / defined?(RSpec)) in
# production code is a smell; it skips decryption in tests unless a PSK is
# explicitly configured. Consider injecting this policy instead.
if (ENV["RACK_ENV"] == "test" || ENV["APP_ENV"] == "test" || defined?(RSpec)) &&
ENV["MESHTASTIC_PSK_B64"].nil?
return nil
end
# The cipher nonce derivation needs a numeric sender id; fall back to
# parsing it out of the canonical "!hex" identifier.
node_num = coerce_integer(from_num)
if node_num.nil?
parts = canonical_node_parts(from_id)
node_num = parts[1] if parts
end
return nil unless node_num
psk_b64 = PotatoMesh::Config.meshtastic_psk_b64
data = PotatoMesh::App::Meshtastic::Cipher.decrypt_data(
cipher_b64: cipher_b64,
packet_id: packet_id,
from_id: from_id,
from_num: node_num,
psk_b64: psk_b64,
)
return nil unless data
# Best-effort channel name lookup: take the first rainbow-table candidate
# for this channel hash, if any.
channel_name = nil
if channel_index.is_a?(Integer)
candidates = PotatoMesh::App::Meshtastic::RainbowTable.channel_names_for(
channel_index,
psk_b64: psk_b64,
)
channel_name = candidates.first if candidates.any?
end
{
text: data[:text],
portnum: data[:portnum],
payload: data[:payload],
channel_name: channel_name,
decryption_confidence: data[:decryption_confidence],
}
end
def insert_message(db, message)
return unless message.is_a?(Hash)
@@ -1342,6 +1531,14 @@ module PotatoMesh
from_id = canonical_from_id
end
end
if from_id && !from_id.start_with?("^")
canonical_parts = canonical_node_parts(from_id, message["from_num"])
if canonical_parts && !from_id.start_with?("!")
from_id = canonical_parts[0]
message["from_num"] ||= canonical_parts[1]
end
end
sender_present = !from_id.nil? || !coerce_integer(message["from_num"]).nil? || !trimmed_from_id.nil?
raw_to_id = message["to_id"]
raw_to_id = message["to"] if raw_to_id.nil? || raw_to_id.to_s.strip.empty?
@@ -1355,17 +1552,60 @@ module PotatoMesh
to_id = canonical_to_id
end
end
if to_id && !to_id.start_with?("^")
canonical_parts = canonical_node_parts(to_id, message["to_num"])
if canonical_parts && !to_id.start_with?("!")
to_id = canonical_parts[0]
message["to_num"] ||= canonical_parts[1]
end
end
encrypted = string_or_nil(message["encrypted"])
text = message["text"]
portnum = message["portnum"]
clear_encrypted = false
channel_index = coerce_integer(message["channel"] || message["channel_index"] || message["channelIndex"])
ensure_unknown_node(db, from_id || raw_from_id, message["from_num"], heard_time: rx_time)
touch_node_last_seen(
db,
from_id || raw_from_id || message["from_num"],
message["from_num"],
rx_time: rx_time,
source: :message,
)
decrypted_payload = nil
decrypted_text = nil
decrypted_portnum = nil
decrypted_flag = false
decryption_confidence = nil
if encrypted && (text.nil? || text.to_s.strip.empty?)
decrypted_data = decrypt_meshtastic_message(
message,
msg_id,
from_id,
message["from_num"],
channel_index,
)
if decrypted_data
decrypted_payload = decrypted_data
decrypted_portnum = decrypted_data[:portnum]
if decrypted_data[:text]
text = decrypted_data[:text]
decrypted_text = text
clear_encrypted = true
encrypted = nil
message["text"] = text
message["channel_name"] ||= decrypted_data[:channel_name]
decrypted_flag = true
decryption_confidence = decrypted_data[:decryption_confidence] || 0.0
if portnum.nil? && decrypted_portnum
portnum = decrypted_portnum
message["portnum"] = portnum
end
end
end
end
if encrypted && (text.nil? || text.to_s.strip.empty?)
portnum = nil
message.delete("portnum")
end
lora_freq = coerce_integer(message["lora_freq"] || message["loraFrequency"])
modem_preset = string_or_nil(message["modem_preset"] || message["modemPreset"])
@@ -1380,8 +1620,8 @@ module PotatoMesh
from_id,
to_id,
message["channel"],
message["portnum"],
message["text"],
portnum,
text,
encrypted,
message["snr"],
message["rssi"],
@@ -1391,19 +1631,28 @@ module PotatoMesh
channel_name,
reply_id,
emoji,
decrypted_flag ? 1 : 0,
decryption_confidence,
]
with_busy_retry do
existing = db.get_first_row(
"SELECT from_id, to_id, encrypted, lora_freq, modem_preset, channel_name, reply_id, emoji FROM messages WHERE id = ?",
"SELECT from_id, to_id, text, encrypted, lora_freq, modem_preset, channel_name, reply_id, emoji, portnum, decrypted, decryption_confidence FROM messages WHERE id = ?",
[msg_id],
)
if existing
updates = {}
existing_text = existing.is_a?(Hash) ? existing["text"] : existing[2]
existing_text_str = existing_text&.to_s
existing_has_text = existing_text_str && !existing_text_str.strip.empty?
existing_from = existing.is_a?(Hash) ? existing["from_id"] : existing[0]
existing_from_str = existing_from&.to_s
return if !sender_present && (existing_from_str.nil? || existing_from_str.strip.empty?)
existing_encrypted = existing.is_a?(Hash) ? existing["encrypted"] : existing[3]
existing_encrypted_str = existing_encrypted&.to_s
decrypted_precedence = text && (clear_encrypted || (existing_encrypted_str && !existing_encrypted_str.strip.empty?))
if from_id
existing_from = existing.is_a?(Hash) ? existing["from_id"] : existing[0]
existing_from_str = existing_from&.to_s
should_update = existing_from_str.nil? || existing_from_str.strip.empty?
should_update ||= existing_from != from_id
updates["from_id"] = from_id if should_update
@@ -1417,21 +1666,53 @@ module PotatoMesh
updates["to_id"] = to_id if should_update
end
if encrypted
existing_encrypted = existing.is_a?(Hash) ? existing["encrypted"] : existing[2]
existing_encrypted_str = existing_encrypted&.to_s
if clear_encrypted || (decrypted_precedence && existing_encrypted_str && !existing_encrypted_str.strip.empty?)
updates["encrypted"] = nil if existing_encrypted
elsif encrypted && !existing_has_text
should_update = existing_encrypted_str.nil? || existing_encrypted_str.strip.empty?
should_update ||= existing_encrypted != encrypted
updates["encrypted"] = encrypted if should_update
end
if text
should_update = existing_text_str.nil? || existing_text_str.strip.empty?
should_update ||= existing_text != text
updates["text"] = text if should_update
end
if decrypted_precedence
updates["channel"] = message["channel"] if message.key?("channel")
updates["snr"] = message["snr"] if message.key?("snr")
updates["rssi"] = message["rssi"] if message.key?("rssi")
updates["hop_limit"] = message["hop_limit"] if message.key?("hop_limit")
updates["lora_freq"] = lora_freq unless lora_freq.nil?
updates["modem_preset"] = modem_preset if modem_preset
updates["channel_name"] = channel_name if channel_name
updates["rx_time"] = rx_time if rx_time
updates["rx_iso"] = rx_iso if rx_iso
end
if clear_encrypted
updates["decrypted"] = 1
updates["decryption_confidence"] = decryption_confidence
end
if portnum
existing_portnum = existing.is_a?(Hash) ? existing["portnum"] : existing[9]
existing_portnum_str = existing_portnum&.to_s
should_update = existing_portnum_str.nil? || existing_portnum_str.strip.empty?
should_update ||= existing_portnum != portnum
should_update ||= decrypted_precedence
updates["portnum"] = portnum if should_update
end
unless lora_freq.nil?
existing_lora = existing.is_a?(Hash) ? existing["lora_freq"] : existing[3]
existing_lora = existing.is_a?(Hash) ? existing["lora_freq"] : existing[4]
updates["lora_freq"] = lora_freq if existing_lora != lora_freq
end
if modem_preset
existing_preset = existing.is_a?(Hash) ? existing["modem_preset"] : existing[4]
existing_preset = existing.is_a?(Hash) ? existing["modem_preset"] : existing[5]
existing_preset_str = existing_preset&.to_s
should_update = existing_preset_str.nil? || existing_preset_str.strip.empty?
should_update ||= existing_preset != modem_preset
@@ -1439,7 +1720,7 @@ module PotatoMesh
end
if channel_name
existing_channel = existing.is_a?(Hash) ? existing["channel_name"] : existing[5]
existing_channel = existing.is_a?(Hash) ? existing["channel_name"] : existing[6]
existing_channel_str = existing_channel&.to_s
should_update = existing_channel_str.nil? || existing_channel_str.strip.empty?
should_update ||= existing_channel != channel_name
@@ -1447,12 +1728,12 @@ module PotatoMesh
end
unless reply_id.nil?
existing_reply = existing.is_a?(Hash) ? existing["reply_id"] : existing[6]
existing_reply = existing.is_a?(Hash) ? existing["reply_id"] : existing[7]
updates["reply_id"] = reply_id if existing_reply != reply_id
end
if emoji
existing_emoji = existing.is_a?(Hash) ? existing["emoji"] : existing[7]
existing_emoji = existing.is_a?(Hash) ? existing["emoji"] : existing[8]
existing_emoji_str = existing_emoji&.to_s
should_update = existing_emoji_str.nil? || existing_emoji_str.strip.empty?
should_update ||= existing_emoji != emoji
@@ -1468,17 +1749,48 @@ module PotatoMesh
begin
db.execute <<~SQL, row
INSERT INTO messages(id,rx_time,rx_iso,from_id,to_id,channel,portnum,text,encrypted,snr,rssi,hop_limit,lora_freq,modem_preset,channel_name,reply_id,emoji)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
INSERT INTO messages(id,rx_time,rx_iso,from_id,to_id,channel,portnum,text,encrypted,snr,rssi,hop_limit,lora_freq,modem_preset,channel_name,reply_id,emoji,decrypted,decryption_confidence)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
SQL
rescue SQLite3::ConstraintException
existing_row = db.get_first_row(
"SELECT text, encrypted FROM messages WHERE id = ?",
[msg_id],
)
existing_text = existing_row.is_a?(Hash) ? existing_row["text"] : existing_row&.[](0)
existing_text_str = existing_text&.to_s
allow_encrypted_update = existing_text_str.nil? || existing_text_str.strip.empty?
existing_encrypted = existing_row.is_a?(Hash) ? existing_row["encrypted"] : existing_row&.[](1)
existing_encrypted_str = existing_encrypted&.to_s
decrypted_precedence = text && (clear_encrypted || (existing_encrypted_str && !existing_encrypted_str.strip.empty?))
fallback_updates = {}
fallback_updates["from_id"] = from_id if from_id
fallback_updates["to_id"] = to_id if to_id
fallback_updates["encrypted"] = encrypted if encrypted
fallback_updates["lora_freq"] = lora_freq unless lora_freq.nil?
fallback_updates["modem_preset"] = modem_preset if modem_preset
fallback_updates["channel_name"] = channel_name if channel_name
fallback_updates["text"] = text if text
fallback_updates["encrypted"] = encrypted if encrypted && allow_encrypted_update
fallback_updates["encrypted"] = nil if clear_encrypted
fallback_updates["portnum"] = portnum if portnum
if clear_encrypted
fallback_updates["decrypted"] = 1
fallback_updates["decryption_confidence"] = decryption_confidence
end
if decrypted_precedence
fallback_updates["channel"] = message["channel"] if message.key?("channel")
fallback_updates["snr"] = message["snr"] if message.key?("snr")
fallback_updates["rssi"] = message["rssi"] if message.key?("rssi")
fallback_updates["hop_limit"] = message["hop_limit"] if message.key?("hop_limit")
fallback_updates["portnum"] = portnum if portnum
fallback_updates["lora_freq"] = lora_freq unless lora_freq.nil?
fallback_updates["modem_preset"] = modem_preset if modem_preset
fallback_updates["channel_name"] = channel_name if channel_name
fallback_updates["rx_time"] = rx_time if rx_time
fallback_updates["rx_iso"] = rx_iso if rx_iso
else
fallback_updates["lora_freq"] = lora_freq unless lora_freq.nil?
fallback_updates["modem_preset"] = modem_preset if modem_preset
fallback_updates["channel_name"] = channel_name if channel_name
end
fallback_updates["reply_id"] = reply_id unless reply_id.nil?
fallback_updates["emoji"] = emoji if emoji
unless fallback_updates.empty?
@@ -1488,6 +1800,213 @@ module PotatoMesh
end
end
end
if clear_encrypted && decrypted_text
debug_log(
"Stored decrypted text message",
context: "data_processing.insert_message",
message_id: msg_id,
channel: message["channel"],
channel_name: message["channel_name"],
portnum: portnum,
)
end
stored_decrypted = nil
if decrypted_payload
stored_decrypted = store_decrypted_payload(
db,
message,
msg_id,
decrypted_payload,
rx_time: rx_time,
rx_iso: rx_iso,
from_id: from_id,
to_id: to_id,
channel: message["channel"],
portnum: portnum || decrypted_portnum,
hop_limit: message["hop_limit"],
snr: message["snr"],
rssi: message["rssi"],
)
end
if stored_decrypted && encrypted
with_busy_retry do
db.execute("UPDATE messages SET encrypted = NULL WHERE id = ?", [msg_id])
end
debug_log(
"Cleared encrypted payload after decoding",
context: "data_processing.insert_message",
message_id: msg_id,
portnum: portnum || decrypted_portnum,
)
end
should_touch_message = !stored_decrypted || decrypted_text
if should_touch_message
ensure_unknown_node(db, from_id || raw_from_id, message["from_num"], heard_time: rx_time)
touch_node_last_seen(
db,
from_id || raw_from_id || message["from_num"],
message["from_num"],
rx_time: rx_time,
source: :message,
lora_freq: lora_freq,
modem_preset: modem_preset,
)
ensure_unknown_node(db, to_id || raw_to_id, message["to_num"], heard_time: rx_time) if to_id || raw_to_id
if to_id || raw_to_id || message.key?("to_num")
touch_node_last_seen(
db,
to_id || raw_to_id || message["to_num"],
message["to_num"],
rx_time: rx_time,
source: :message,
lora_freq: lora_freq,
modem_preset: modem_preset,
)
end
end
end
# Decode a decrypted Meshtastic payload and persist it in the matching
# domain-specific table (positions, telemetry, neighbors, or traces).
#
# Protobuf decoding is delegated to PayloadDecoder; only the allow-listed
# port numbers 3, 67, 70 and 71 are handled (presumably position,
# telemetry, traceroute and neighbor-info apps — the port-to-type mapping
# lives in PayloadDecoder; confirm there). All other ports are ignored.
#
# @param db [SQLite3::Database] open database handle.
# @param message [Hash] original message payload.
# @param packet_id [Integer] packet identifier for the message.
# @param decrypted [Hash] decrypted payload metadata; reads :payload
#   (raw bytes) and optionally :portnum.
# @param rx_time [Integer] receive time.
# @param rx_iso [String] ISO 8601 receive timestamp.
# @param from_id [String, nil] canonical sender identifier.
# @param to_id [String, nil] destination identifier.
# @param channel [Integer, nil] channel index.
# @param portnum [Object, nil] port number identifier.
# @param hop_limit [Integer, nil] hop limit value.
# @param snr [Numeric, nil] signal-to-noise ratio.
# @param rssi [Integer, nil] RSSI value.
# @return [Boolean] true when the payload was decoded and stored, false
#   when it was missing, unsupported, or failed to decode.
def store_decrypted_payload(
  db,
  message,
  packet_id,
  decrypted,
  rx_time:,
  rx_iso:,
  from_id:,
  to_id:,
  channel:,
  portnum:,
  hop_limit:,
  snr:,
  rssi:
)
  payload_bytes = decrypted[:payload]
  return false unless payload_bytes
  # Explicit portnum wins over the one recovered during decryption.
  portnum_value = coerce_integer(portnum || decrypted[:portnum])
  return false unless portnum_value
  payload_b64 = Base64.strict_encode64(payload_bytes)
  supported_ports = [3, 67, 70, 71]
  return false unless supported_ports.include?(portnum_value)
  decoded = PotatoMesh::App::Meshtastic::PayloadDecoder.decode(
    portnum: portnum_value,
    payload_b64: payload_b64,
  )
  return false unless decoded.is_a?(Hash)
  return false unless decoded["payload"].is_a?(Hash)
  # Envelope metadata shared by every domain-specific insert below.
  common_payload = {
    "id" => packet_id,
    "packet_id" => packet_id,
    "rx_time" => rx_time,
    "rx_iso" => rx_iso,
    "from_id" => from_id,
    "to_id" => to_id,
    "channel" => channel,
    "portnum" => portnum_value.to_s,
    "hop_limit" => hop_limit,
    "snr" => snr,
    "rssi" => rssi,
    "lora_freq" => coerce_integer(message["lora_freq"] || message["loraFrequency"]),
    "modem_preset" => string_or_nil(message["modem_preset"] || message["modemPreset"]),
    "payload_b64" => payload_b64,
  }
  case decoded["type"]
  when "POSITION_APP"
    payload = common_payload.merge("position" => decoded["payload"])
    insert_position(db, payload)
    debug_log(
      "Stored decrypted position payload",
      context: "data_processing.store_decrypted_payload",
      message_id: packet_id,
      portnum: portnum_value,
    )
    true
  when "TELEMETRY_APP"
    payload = common_payload.merge("telemetry" => decoded["payload"])
    insert_telemetry(db, payload)
    debug_log(
      "Stored decrypted telemetry payload",
      context: "data_processing.store_decrypted_payload",
      message_id: packet_id,
      portnum: portnum_value,
    )
    true
  when "NEIGHBORINFO_APP"
    neighbor_payload = decoded["payload"]
    neighbors = neighbor_payload["neighbors"]
    neighbors = [] unless neighbors.is_a?(Array)
    # Normalise the heterogeneous neighbor key spellings into the schema
    # expected by insert_neighbors; non-hash entries are dropped.
    normalized_neighbors = neighbors.map do |neighbor|
      next unless neighbor.is_a?(Hash)
      {
        "neighbor_id" => neighbor["node_id"] || neighbor["nodeId"] || neighbor["id"],
        "snr" => neighbor["snr"],
        "rx_time" => neighbor["last_rx_time"],
      }.compact
    end.compact
    # An empty neighbor list carries no information worth storing.
    return false if normalized_neighbors.empty?
    payload = common_payload.merge(
      "node_id" => neighbor_payload["node_id"] || from_id,
      "neighbors" => normalized_neighbors,
      "node_broadcast_interval_secs" => neighbor_payload["node_broadcast_interval_secs"],
      "last_sent_by_id" => neighbor_payload["last_sent_by_id"],
    )
    insert_neighbors(db, payload)
    debug_log(
      "Stored decrypted neighbor payload",
      context: "data_processing.store_decrypted_payload",
      message_id: packet_id,
      portnum: portnum_value,
    )
    true
  when "TRACEROUTE_APP"
    route = decoded["payload"]["route"]
    route_back = decoded["payload"]["route_back"]
    # Prefer the forward route; fall back to the return route when only
    # that direction was recorded.
    hops = route.is_a?(Array) ? route : route_back.is_a?(Array) ? route_back : []
    dest = hops.last if hops.is_a?(Array) && !hops.empty?
    src_num = coerce_integer(message["from_num"]) || resolve_node_num(from_id, message)
    payload = common_payload.merge(
      "src" => src_num,
      "dest" => dest,
      "hops" => hops,
    )
    insert_trace(db, payload)
    debug_log(
      "Stored decrypted traceroute payload",
      context: "data_processing.store_decrypted_payload",
      message_id: packet_id,
      portnum: portnum_value,
    )
    true
  else
    # Unknown decoded type: nothing persisted.
    false
  end
end
def normalize_node_id(db, node_ref)
+31 -3
View File
@@ -81,10 +81,10 @@ module PotatoMesh
return false unless File.exist?(PotatoMesh::Config.db_path)
db = open_database(readonly: true)
required = %w[nodes messages positions telemetry neighbors instances traces trace_hops]
required = %w[nodes messages positions telemetry neighbors instances traces trace_hops ingestors]
tables =
db.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages','positions','telemetry','neighbors','instances','traces','trace_hops')",
"SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages','positions','telemetry','neighbors','instances','traces','trace_hops','ingestors')",
).flatten
(required - tables).empty?
rescue SQLite3::Exception
@@ -99,7 +99,7 @@ module PotatoMesh
def init_db
FileUtils.mkdir_p(File.dirname(PotatoMesh::Config.db_path))
db = open_database
%w[nodes messages positions telemetry neighbors instances traces].each do |schema|
%w[nodes messages positions telemetry neighbors instances traces ingestors].each do |schema|
sql_file = File.expand_path("../../../../data/#{schema}.sql", __dir__)
db.execute_batch(File.read(sql_file))
end
@@ -150,6 +150,16 @@ module PotatoMesh
message_columns << "emoji"
end
unless message_columns.include?("decrypted")
db.execute("ALTER TABLE messages ADD COLUMN decrypted INTEGER NOT NULL DEFAULT 0")
message_columns << "decrypted"
end
unless message_columns.include?("decryption_confidence")
db.execute("ALTER TABLE messages ADD COLUMN decryption_confidence REAL")
message_columns << "decryption_confidence"
end
reply_index_exists =
db.get_first_value(
"SELECT COUNT(*) FROM sqlite_master WHERE type='index' AND name='idx_messages_reply_id'",
@@ -197,6 +207,24 @@ module PotatoMesh
traces_schema = File.expand_path("../../../../data/traces.sql", __dir__)
db.execute_batch(File.read(traces_schema))
end
ingestor_tables =
db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='ingestors'").flatten
if ingestor_tables.empty?
ingestors_schema = File.expand_path("../../../../data/ingestors.sql", __dir__)
db.execute_batch(File.read(ingestors_schema))
else
ingestor_columns = db.execute("PRAGMA table_info(ingestors)").map { |row| row[1] }
unless ingestor_columns.include?("version")
db.execute("ALTER TABLE ingestors ADD COLUMN version TEXT")
end
unless ingestor_columns.include?("lora_freq")
db.execute("ALTER TABLE ingestors ADD COLUMN lora_freq INTEGER")
end
unless ingestor_columns.include?("modem_preset")
db.execute("ALTER TABLE ingestors ADD COLUMN modem_preset TEXT")
end
end
rescue SQLite3::SQLException, Errno::ENOENT => e
warn_log(
"Failed to apply schema upgrade",
@@ -177,6 +177,7 @@ module PotatoMesh
pool = PotatoMesh::App::WorkerPool.new(
size: PotatoMesh::Config.federation_worker_pool_size,
max_queue: PotatoMesh::Config.federation_worker_queue_capacity,
task_timeout: PotatoMesh::Config.federation_task_timeout_seconds,
name: "potato-mesh-fed",
)
@@ -442,6 +443,8 @@ module PotatoMesh
end
end
thread.name = "potato-mesh-federation" if thread.respond_to?(:name=)
# Allow shutdown even if the announcement loop is still sleeping.
thread.daemon = true if thread.respond_to?(:daemon=)
set(:federation_thread, thread)
thread
end
@@ -474,6 +477,8 @@ module PotatoMesh
end
thread.name = "potato-mesh-federation-initial" if thread.respond_to?(:name=)
thread.report_on_exception = false if thread.respond_to?(:report_on_exception=)
# Avoid blocking process shutdown during delayed startup announcements.
thread.daemon = true if thread.respond_to?(:daemon=)
set(:initial_federation_thread, thread)
thread
end
@@ -806,7 +811,7 @@ module PotatoMesh
attributes[:is_private] = false if attributes[:is_private].nil?
nodes_since_path = "/api/nodes?since=#{recent_cutoff}"
nodes_since_path = "/api/nodes?since=#{recent_cutoff}&limit=1000"
nodes_since_window, nodes_since_metadata = fetch_instance_json(attributes[:domain], nodes_since_path)
if nodes_since_window.is_a?(Array)
attributes[:nodes_count] = nodes_since_window.length
@@ -20,6 +20,8 @@ module PotatoMesh
# its intended consumers to ensure consistent behaviour across the Sinatra
# application.
module Helpers
ANNOUNCEMENT_URL_PATTERN = %r{\bhttps?://[^\s<]+}i.freeze
# Fetch an application level constant exposed by {PotatoMesh::Application}.
#
# @param name [Symbol] constant identifier to retrieve.
@@ -92,6 +94,47 @@ module PotatoMesh
PotatoMesh::Sanitizer.sanitized_site_name
end
# Retrieve the configured announcement banner copy.
#
# Thin delegate that keeps the sanitisation rules centralised in
# PotatoMesh::Sanitizer so every consumer sees identical output.
#
# @return [String, nil] sanitised announcement or nil when unset.
def sanitized_announcement
  PotatoMesh::Sanitizer.sanitized_announcement
end
# Render the announcement copy with safe outbound links.
#
# @return [String, nil] escaped HTML snippet or nil when unset.
def announcement_html
  announcement = sanitized_announcement
  return nil unless announcement
  rendered = []
  cursor = 0
  # Walk every URL match in order; Regexp.last_match carries the offsets
  # for the current iteration of scan.
  announcement.scan(ANNOUNCEMENT_URL_PATTERN) do
    match = Regexp.last_match
    next unless match
    url_start = match.begin(0)
    # Escape the plain-text run preceding this URL, if any.
    if url_start > cursor
      rendered << Rack::Utils.escape_html(announcement[cursor...url_start])
    end
    escaped_url = Rack::Utils.escape_html(match[0])
    rendered << %(<a href="#{escaped_url}" target="_blank" rel="noopener noreferrer">#{escaped_url}</a>)
    cursor = match.end(0)
  end
  # Escape whatever trails the final URL.
  if cursor < announcement.length
    rendered << Rack::Utils.escape_html(announcement[cursor..])
  end
  rendered.join
end
# Retrieve the configured channel.
#
# @return [String] sanitised channel identifier.
@@ -0,0 +1,102 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
require "base64"
module PotatoMesh
  module App
    module Meshtastic
      # Compute Meshtastic channel hashes from a name and pre-shared key.
      module ChannelHash
        module_function

        # Default key material for the single-byte PSK aliases
        # (1 => 128-bit default key, 2 => 256-bit default key).
        DEFAULT_PSK_ALIAS_KEYS = {
          1 => ["d4f1bb3a20290759f0bcffabcf4e6901"].pack("H*"),
          2 => ["384bbcc01dc022d181bf36b86121e1fb96b72e55bf74227e9d6afb48d64cb1a1"].pack("H*"),
        }.freeze

        # Calculate the Meshtastic channel hash for the given name and PSK.
        #
        # The hash is the XOR-fold of the name bytes combined with the
        # XOR-fold of the expanded key bytes, reduced to a single byte.
        #
        # @param name [String] channel name candidate.
        # @param psk_b64 [String, nil] base64-encoded PSK or PSK alias.
        # @return [Integer, nil] channel hash byte or nil when inputs are invalid.
        def channel_hash(name, psk_b64)
          return nil unless name
          key = expanded_key(psk_b64)
          return nil unless key
          (xor_bytes(name.b) ^ xor_bytes(key)) & 0xFF
        end

        # Expand the provided PSK into a valid AES key length.
        #
        # Single-byte inputs are treated as aliases for the well-known
        # default keys; other lengths are zero-padded up to 16 or 32 bytes.
        #
        # @param psk_b64 [String, nil] base64 PSK value.
        # @return [String, nil] expanded key bytes or nil when invalid.
        def expanded_key(psk_b64)
          material = Base64.decode64(psk_b64.to_s)
          size = material.bytesize
          if size.zero?
            "".b
          elsif size == 1
            default_key_for_alias(material.bytes.first)
          elsif size == 16 || size == 32
            material
          elsif size < 16
            pad_key(material, 16)
          elsif size < 32
            pad_key(material, 32)
            # anything longer than 32 bytes falls through to nil
          end
        end

        # Zero-pad key material up to the requested byte length.
        #
        # @param material [String] raw key bytes.
        # @param target [Integer] desired key length in bytes.
        # @return [String] padded key bytes.
        def pad_key(material, target)
          (material.bytes + [0] * (target - material.bytesize)).pack("C*")
        end

        # Map PSK alias bytes to their default key material.
        #
        # @param alias_index [Integer, nil] alias identifier for the PSK.
        # @return [String, nil] key bytes (copy) or nil when unknown.
        def default_key_for_alias(alias_index)
          return nil if alias_index.nil?
          DEFAULT_PSK_ALIAS_KEYS[alias_index]&.dup
        end

        # XOR all bytes in the given string or byte array.
        #
        # @param value [String, Array<Integer>] input byte sequence.
        # @return [Integer] XOR of all bytes, masked to one byte.
        def xor_bytes(value)
          sequence = value.is_a?(String) ? value.bytes : value
          folded = 0
          sequence.each { |byte| folded = (folded ^ byte) & 0xFF }
          folded
        end
      end
    end
  end
end
@@ -0,0 +1,28 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
  module App
    module Meshtastic
      # Canonical list of candidate channel names used to build rainbow tables.
      #
      # The candidates mix Meshtastic defaults (LongFast, MediumSlow, …),
      # common English/radio-jargon names, and German/Berlin-area community
      # names. They are presumably hashed (see ChannelHash) and compared
      # against observed channel hashes to guess a channel's name —
      # confirm against the caller.
      module ChannelNames
        # Frozen, deduplicated word list; order carries no meaning.
        CHANNEL_NAME_CANDIDATES = %w[
          911 Admin ADMIN admin Alert Alpha AlphaNet Alpine Amateur Amazon Anaconda Aquila Arctic Ash Asteroid Astro Aurora Avalanche Backup Basalt Base Base1 Base2 BaseAlpha BaseBravo BaseCharlie Bavaria Beacon Bear BearNet Beat Berg Berlin BerlinMesh BerlinNet Beta BetaBerlin Bison Blackout Blizzard Bolt Bonfire Border Borealis Bravo BravoNet Breeze Bridge Bronze Burner Burrow Callisto Callsign Camp Campfire CampNet Caravan Carbon Carpet Central Chameleon Charlie Chat Checkpoint Checkpoint1 Checkpoint2 Cheetah City Clinic Cloud Cobra Collective Cologne Colony Comet Command Command1 Command2 CommandRoom Comms Comms1 Comms2 CommsNet Commune Control Control1 Control2 ControlRoom Convoy Copper Core Corvus Cosmos Courier Courier1 Courier2 CourierMesh CourierNet CQ CQ1 CQ2 Crow CrowNet DarkNet Dawn Daybreak Daylight Delta DeltaNet Demo DEMO DemoBerlin Den Desert Diamond Distress District Doctor Dortmund Downlink Downlink1 Draco Dragon DragonNet Dune Dusk Eagle EagleNet East EastStar Echo EchoMesh EchoNet Emergency emergency EMERGENCY EmergencyBerlin Epsilon Equinox Europa Falcon Field FieldNet Fire Fire1 Fire2 Firebird Firefly Fireline Fireteam Firewatch Flash Flock Fluss Fog Forest Fox FoxNet Foxtrot FoxtrotMesh FoxtrotNet Frankfurt Freedom Freq Freq1 Freq2 Friedrichshain Frontier Frost Galaxy Gale Gamma Ganymede Gecko General Ghost GhostNet Glacier Gold Granite Grassland Grid Grid1 Grid2 GridNet GridNorth GridSouth Griffin Group Ham HAM Hamburg HAMNet Harbor Harmony HarmonyNet Hawk HawkNet Haze Help Hessen Highway Hilltop Hinterland Hive Hospital HQ HQ1 HQ2 Hub Hub1 Hub2 Hydra Ice Io Iron Jaguar Jungle Jupiter Kiez Kilo KiloMesh KiloNet Kraken Kreuzberg Lava Layer Layer1 Layer2 Layer3 Leipzig Leopard Liberty LightNet Lightning Lima Link Lion Lizard LongFast LongSlow LoRa LoRaBerlin LoRaHessen LoRaMesh LoRaNet LoRaTest Main Mars Med Med1 Med2 Medic MediumFast MediumSlow Mercury Mesh Mesh1 Mesh2 Mesh3 Mesh4 Mesh5 MeshBerlin MeshCollective MeshCologne MeshFrankfurt MeshGrid 
          MeshHamburg MeshHessen MeshLeipzig MeshMunich MeshNet MeshNetwork MeshRuhr Meshtastic MeshTest Meteor Metro Midnight Mirage Mist MoonNet Munich Müggelberg Nebula Nest Network Neukölln Nexus Nightfall NightMesh NightNet Nightshift NightshiftNet Nightwatch Node1 Node2 Node3 Node4 Node5 Nomad NomadMesh NomadNet Nomads Nord North NorthStar Oasis Obsidian Omega Operations OPERATIONS Ops Ops1 Ops2 OpsCenter OpsRoom Orbit Ost Outpost Outsider Owl Pack Packet PacketNet PacketRadio Panther Paramedic Path Peak Phantom Phoenix PhoenixNet Platinum Pluto Polar Prairie Prenzlauer PRIVATE Private Public PUBLIC Pulse PulseNet Python Quasar Radio Radio1 Radio2 RadioNet Rain Ranger Raven RavenNet Relay Relay1 Relay2 Repeater Repeater1 Repeater2 RepeaterHub Rescue Rescue1 Rescue2 RescueTeam Rhythm Ridge River Road Rock Router Router1 Router2 Rover Ruhr Runner Runners Safari Safe Safety Sahara Saturn Savanna Saxony Scout Sector Secure Sensor SENSOR Sensors SENSORS Shade Shadow ShadowNet Shelter Shelter1 Shelter2 ShortFast Sideband Sideband1 Sierra Signal Signal1 Signal2 SignalFire Signals Silver Smoke Snake Snow Solstice SOS Sos SOSBerlin South SouthStar Spectrum Squad StarNet Steel Stone Storm Storm1 Storm2 Stratum Stuttgart Summit SunNet Sunrise Sunset Sync SyncNet Syndicate Süd Tal Tango TangoMesh TangoNet Team Tempo Test TEST test TestBerlin Teufelsberg Thunder Tiger Titan Town Trail Tundra Tunnel Union Unit Universe Uplink Uplink1 Valley Venus Victor Village Viper Volcano Wald Wander Wanderer Wanderers Watch Watch1 Watch2 WaWi West WestStar Whisper Wind Wolf WolfDen WolfMesh WolfNet Wolfpack Wolves Woods Wyvern Zeta Zone Zone1 Zone2 Zone3 Zulu ZuluMesh ZuluNet
        ].freeze
      end
    end
  end
end
@@ -0,0 +1,213 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
require "base64"
require "openssl"
require_relative "channel_hash"
require_relative "protobuf"
module PotatoMesh
  module App
    module Meshtastic
      # Decrypt Meshtastic payloads with AES-CTR using Meshtastic nonce rules.
      module Cipher
        module_function

        DEFAULT_PSK_B64 = "AQ=="
        TEXT_MESSAGE_PORTNUM = 1
        # Number of characters required for full confidence scoring.
        CONFIDENCE_LENGTH_TARGET = 8.0

        # Decrypt an encrypted Meshtastic payload into UTF-8 text.
        #
        # @param cipher_b64 [String] base64-encoded encrypted payload.
        # @param packet_id [Integer] packet identifier used for the nonce.
        # @param from_id [String, nil] Meshtastic node identifier (e.g. "!9e95cf60").
        # @param from_num [Integer, nil] numeric node identifier override.
        # @param psk_b64 [String, nil] base64 PSK or alias.
        # @return [String, nil] decrypted text or nil when decryption fails.
        def decrypt_text(cipher_b64:, packet_id:, from_id: nil, from_num: nil, psk_b64: DEFAULT_PSK_B64)
          result = decrypt_data(
            cipher_b64: cipher_b64,
            packet_id: packet_id,
            from_id: from_id,
            from_num: from_num,
            psk_b64: psk_b64,
          )
          result ? result[:text] : nil
        end

        # Decrypt the Meshtastic data protobuf payload.
        #
        # @param cipher_b64 [String] base64-encoded encrypted payload.
        # @param packet_id [Integer] packet identifier used for the nonce.
        # @param from_id [String, nil] Meshtastic node identifier.
        # @param from_num [Integer, nil] numeric node identifier override.
        # @param psk_b64 [String, nil] base64 PSK or alias.
        # @return [Hash, nil] decrypted data payload details or nil when decryption fails.
        def decrypt_data(cipher_b64:, packet_id:, from_id: nil, from_num: nil, psk_b64: DEFAULT_PSK_B64)
          encrypted_bytes = Base64.strict_decode64(cipher_b64)
          aes_key = ChannelHash.expanded_key(psk_b64)
          return nil unless aes_key && [16, 32].include?(aes_key.bytesize)
          packet_value = normalize_packet_id(packet_id)
          return nil unless packet_value
          sender_value = normalize_node_num(from_id, from_num)
          return nil unless sender_value
          plaintext = decrypt_aes_ctr(encrypted_bytes, aes_key, build_nonce(packet_value, sender_value))
          return nil unless plaintext
          parsed = Protobuf.parse_data(plaintext)
          return nil unless parsed
          text_value = nil
          confidence = nil
          # Only text-message payloads are promoted to UTF-8 text; anything
          # else is returned as raw payload bytes.
          if parsed[:portnum] == TEXT_MESSAGE_PORTNUM
            candidate = parsed[:payload].dup.force_encoding("UTF-8")
            if !candidate.empty? && candidate.valid_encoding?
              text_value = candidate
              confidence = text_confidence(candidate)
            end
          end
          {
            portnum: parsed[:portnum],
            payload: parsed[:payload],
            text: text_value,
            decryption_confidence: confidence,
          }
        rescue ArgumentError, OpenSSL::Cipher::CipherError
          # Malformed base64 or cipher setup failure means "not decryptable".
          nil
        end

        # Decrypt the Meshtastic data protobuf payload bytes.
        #
        # @param cipher_b64 [String] base64-encoded encrypted payload.
        # @param packet_id [Integer] packet identifier used for the nonce.
        # @param from_id [String, nil] Meshtastic node identifier.
        # @param from_num [Integer, nil] numeric node identifier override.
        # @param psk_b64 [String, nil] base64 PSK or alias.
        # @return [String, nil] payload bytes or nil when decryption fails.
        def decrypt_payload_bytes(cipher_b64:, packet_id:, from_id: nil, from_num: nil, psk_b64: DEFAULT_PSK_B64)
          result = decrypt_data(
            cipher_b64: cipher_b64,
            packet_id: packet_id,
            from_id: from_id,
            from_num: from_num,
            psk_b64: psk_b64,
          )
          result ? result[:payload] : nil
        end

        # Build the Meshtastic AES nonce from packet and node identifiers.
        #
        # Layout: 8-byte little-endian packet id, 4-byte little-endian node
        # number, 4 zero bytes — 16 bytes total.
        #
        # @param packet_id [Integer] packet identifier.
        # @param from_num [Integer] numeric node identifier.
        # @return [String] 16-byte nonce.
        def build_nonce(packet_id, from_num)
          [packet_id, from_num, 0].pack("Q<L<L<")
        end

        # Decrypt data using AES-CTR with the derived nonce.
        #
        # @param ciphertext [String] encrypted payload bytes.
        # @param key [String] expanded AES key bytes (16 or 32 bytes).
        # @param nonce [String] 16-byte nonce.
        # @return [String] decrypted plaintext bytes.
        def decrypt_aes_ctr(ciphertext, key, nonce)
          engine = OpenSSL::Cipher.new(key.bytesize == 16 ? "aes-128-ctr" : "aes-256-ctr")
          engine.decrypt
          engine.key = key
          engine.iv = nonce
          engine.update(ciphertext) + engine.final
        end

        # Normalise the packet identifier into a non-negative integer.
        #
        # @param packet_id [Integer, Numeric, #to_s, nil] packet identifier.
        # @return [Integer, nil] validated packet id or nil when invalid.
        def normalize_packet_id(packet_id)
          case packet_id
          when nil
            nil
          when Integer
            packet_id >= 0 ? packet_id : nil
          when Numeric
            packet_id.negative? ? nil : packet_id.to_i
          else
            return nil unless packet_id.respond_to?(:to_s)
            candidate = packet_id.to_s.strip
            candidate.match?(/\A\d+\z/) ? candidate.to_i(10) : nil
          end
        end

        # Score the plausibility of decrypted text content.
        #
        # Combines a length score (full marks at CONFIDENCE_LENGTH_TARGET
        # characters), the ratio of printable characters, and a penalty for
        # control/surrogate characters.
        #
        # @param text [String] decrypted text candidate.
        # @return [Float] confidence score between 0.0 and 1.0.
        def text_confidence(text)
          return 0.0 unless text.is_a?(String) && !text.empty?
          length = text.length.to_f
          length_score = [length / CONFIDENCE_LENGTH_TARGET, 1.0].min
          control_ratio = text.scan(/[\p{Cc}\p{Cs}]/).length / length
          printable_ratio = text.scan(/[\p{L}\p{N}\p{P}\p{S}\p{Zs}\t\n\r]/).length / length
          (length_score * printable_ratio * (1.0 - control_ratio)).clamp(0.0, 1.0)
        end

        # Resolve the node number from any of the supported identifiers.
        #
        # Accepts a numeric override or a Meshtastic id string ("!hex",
        # "0xhex" or bare hex); results are masked to 32 bits.
        #
        # @param from_id [String, nil] Meshtastic node identifier.
        # @param from_num [Integer, Numeric, nil] numeric node identifier override.
        # @return [Integer, nil] node number or nil when invalid.
        def normalize_node_num(from_id, from_num)
          case from_num
          when Integer
            return from_num & 0xFFFFFFFF
          when Numeric
            return from_num.to_i & 0xFFFFFFFF
          end
          return nil unless from_id
          candidate = from_id.to_s.strip
          return nil if candidate.empty?
          hex_digits = candidate.delete_prefix("!")
          hex_digits = hex_digits[2..] if hex_digits.start_with?("0x", "0X")
          hex_digits.match?(/\A\h+\z/) ? hex_digits.to_i(16) & 0xFFFFFFFF : nil
        end
      end
    end
  end
end
@@ -0,0 +1,120 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
require "json"
require "open3"
module PotatoMesh
  module App
    module Meshtastic
      # Decode Meshtastic protobuf payloads via the Python helper script.
      #
      # The actual protobuf decoding is delegated to a helper script run
      # under a Python interpreter; this module locates the interpreter and
      # script, shuttles JSON over stdin/stdout, and validates the result.
      module PayloadDecoder
        module_function

        # Environment variable that overrides the Python interpreter path.
        PYTHON_ENV_KEY = "MESHTASTIC_PYTHON"
        # Default interpreter location inside the bundled virtualenv.
        DEFAULT_PYTHON_RELATIVE = File.join("data", ".venv", "bin", "python")
        # Default location of the payload decoder helper script.
        DEFAULT_DECODER_RELATIVE = File.join("data", "mesh_ingestor", "decode_payload.py")
        # Interpreter names probed on PATH when no explicit path is available.
        FALLBACK_PYTHON_NAMES = ["python3", "python"].freeze

        # Decode a protobuf payload using the Meshtastic helper.
        #
        # Returns nil on any failure: missing interpreter or script, helper
        # exit failure, non-hash or malformed JSON output, or a response
        # carrying an "error" key.
        #
        # @param portnum [Integer] Meshtastic port number.
        # @param payload_b64 [String] base64-encoded payload bytes.
        # @return [Hash, nil] decoded payload hash or nil when decoding fails.
        def decode(portnum:, payload_b64:)
          return nil unless portnum && payload_b64
          decoder_path = decoder_script_path
          python_path = python_executable_path
          return nil unless decoder_path && python_path
          input = JSON.generate({ portnum: portnum, payload_b64: payload_b64 })
          stdout, stderr, status = Open3.capture3(python_path, decoder_path, stdin_data: input)
          return nil unless status.success?
          parsed = JSON.parse(stdout)
          return nil unless parsed.is_a?(Hash)
          # Treat an "error" key in the helper's response as a failed decode.
          return nil if parsed["error"]
          parsed
        rescue JSON::ParserError
          nil
        rescue Errno::ENOENT
          nil
        rescue ArgumentError
          nil
        end

        # Resolve the configured Python executable for Meshtastic decoding.
        #
        # Resolution order: MESHTASTIC_PYTHON env var, the bundled
        # virtualenv interpreter, then python3/python found on PATH.
        #
        # @return [String, nil] python path or nil when missing.
        def python_executable_path
          configured = ENV[PYTHON_ENV_KEY]
          return configured if configured && !configured.strip.empty?
          candidate = File.expand_path(DEFAULT_PYTHON_RELATIVE, repo_root)
          return candidate if File.exist?(candidate)
          FALLBACK_PYTHON_NAMES.each do |name|
            found = find_executable(name)
            return found if found
          end
          nil
        end

        # Resolve the Meshtastic payload decoder script path.
        #
        # Checks the repository root first, then the web application root.
        #
        # @return [String, nil] script path or nil when missing.
        def decoder_script_path
          repo_candidate = File.expand_path(DEFAULT_DECODER_RELATIVE, repo_root)
          return repo_candidate if File.exist?(repo_candidate)
          web_candidate = File.expand_path(DEFAULT_DECODER_RELATIVE, web_root)
          return web_candidate if File.exist?(web_candidate)
          nil
        end

        # Resolve the repository root directory from the application config.
        #
        # @return [String] absolute path to the repository root.
        def repo_root
          PotatoMesh::Config.repo_root
        end

        # Resolve the web application root directory from the application config.
        #
        # @return [String] absolute path to the web root.
        def web_root
          PotatoMesh::Config.web_root
        end

        # Locate an executable in PATH without invoking a subshell.
        #
        # @param name [String] executable name to resolve.
        # @return [String, nil] full path when found.
        def find_executable(name)
          ENV.fetch("PATH", "").split(File::PATH_SEPARATOR).each do |path|
            candidate = File.join(path, name)
            return candidate if File.file?(candidate) && File.executable?(candidate)
          end
          nil
        end
        private_class_method :find_executable
      end
    end
  end
end
@@ -0,0 +1,140 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
  module App
    module Meshtastic
      # Minimal protobuf helpers for extracting payload bytes from Meshtastic data.
      #
      # Implements just enough of the protobuf wire format (varints plus the
      # four wire types used by Meshtastic Data messages) to pull fields out
      # of raw bytes without a protobuf library dependency.
      module Protobuf
        module_function

        WIRE_TYPE_VARINT = 0
        WIRE_TYPE_64BIT = 1
        WIRE_TYPE_LENGTH_DELIMITED = 2
        WIRE_TYPE_32BIT = 5
        DATA_PORTNUM_FIELD = 1
        DATA_PAYLOAD_FIELD = 2

        # Extract a length-delimited field from a protobuf message.
        #
        # @param payload [String] raw protobuf-encoded bytes.
        # @param field_number [Integer] field to extract.
        # @return [String, nil] field bytes or nil when absent/invalid.
        def extract_field_bytes(payload, field_number)
          return nil unless payload && field_number
          stream = payload.bytes
          cursor = 0
          while cursor < stream.length
            tag, cursor = read_varint(stream, cursor)
            return nil unless tag
            case tag & 0x7
            when WIRE_TYPE_VARINT
              # Skip the value; only the cursor advance matters here.
              _value, cursor = read_varint(stream, cursor)
              return nil unless cursor
            when WIRE_TYPE_64BIT
              cursor += 8
            when WIRE_TYPE_LENGTH_DELIMITED
              size, cursor = read_varint(stream, cursor)
              return nil unless size
              return nil if cursor + size > stream.length
              chunk = stream[cursor, size].pack("C*")
              cursor += size
              return chunk if (tag >> 3) == field_number
            when WIRE_TYPE_32BIT
              cursor += 4
            else
              # Unknown wire type: the remainder of the stream is unparseable.
              return nil
            end
          end
          nil
        end

        # Parse a Meshtastic Data message for the port number and payload.
        #
        # @param payload [String] raw protobuf-encoded bytes.
        # @return [Hash, nil] parsed port number and payload bytes, or nil
        #   when either field is missing or the stream is malformed.
        def parse_data(payload)
          return nil unless payload
          stream = payload.bytes
          cursor = 0
          portnum = nil
          body = nil
          while cursor < stream.length
            tag, cursor = read_varint(stream, cursor)
            return nil unless tag
            field = tag >> 3
            case tag & 0x7
            when WIRE_TYPE_VARINT
              number, cursor = read_varint(stream, cursor)
              return nil unless number
              portnum = number if field == DATA_PORTNUM_FIELD
            when WIRE_TYPE_64BIT
              cursor += 8
            when WIRE_TYPE_LENGTH_DELIMITED
              size, cursor = read_varint(stream, cursor)
              return nil unless size
              return nil if cursor + size > stream.length
              chunk = stream[cursor, size].pack("C*")
              cursor += size
              body = chunk if field == DATA_PAYLOAD_FIELD
            when WIRE_TYPE_32BIT
              cursor += 4
            else
              return nil
            end
          end
          portnum && body ? { portnum: portnum, payload: body } : nil
        end

        # Read a protobuf varint from a byte array.
        #
        # Accepts at most ten continuation bytes (shift capped at 63).
        #
        # @param bytes [Array<Integer>] byte stream.
        # @param index [Integer] read offset.
        # @return [Array(Integer, Integer), nil] value and new index or nil when invalid.
        def read_varint(bytes, index)
          result = 0
          shift = 0
          while index < bytes.length
            current = bytes[index]
            index += 1
            result |= (current & 0x7F) << shift
            return [result, index] if (current & 0x80).zero?
            shift += 7
            return nil if shift > 63
          end
          nil
        end
      end
    end
  end
end
@@ -0,0 +1,68 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
require_relative "channel_hash"
require_relative "channel_names"
module PotatoMesh
  module App
    module Meshtastic
      # Resolve candidate channel names for a hashed channel index.
      module RainbowTable
        module_function

        # Cache of PSK key => rainbow table so repeated lookups avoid rehashing.
        @tables = {}

        # Lookup candidate channel names for a hashed channel index.
        #
        # @param index [Integer, nil] channel hash byte.
        # @param psk_b64 [String, nil] base64 PSK or alias.
        # @return [Array<String>] list of candidate names.
        def channel_names_for(index, psk_b64:)
          return [] unless index.is_a?(Integer)
          table_for(psk_b64)[index] || []
        end

        # Build or retrieve the cached rainbow table for the given PSK.
        #
        # @param psk_b64 [String, nil] base64 PSK or alias.
        # @return [Hash{Integer=>Array<String>}] mapping of hash bytes to names.
        def table_for(psk_b64)
          key = psk_b64.to_s
          @tables[key] ||= build_table(psk_b64)
        end

        # Build a hash-to-name mapping for the provided PSK.
        #
        # Returns a plain hash (no default proc) so lookups of unknown
        # indexes do not insert empty entries into the cached table.
        #
        # @param psk_b64 [String, nil] base64 PSK or alias.
        # @return [Hash{Integer=>Array<String>}] mapping of hash bytes to names.
        def build_table(psk_b64)
          mapping = {}
          ChannelNames::CHANNEL_NAME_CANDIDATES.each do |name|
            hash = ChannelHash.channel_hash(name, psk_b64)
            next unless hash
            (mapping[hash] ||= []) << name
          end
          mapping
        end
      end
    end
  end
end
+138 -17
View File
@@ -116,6 +116,17 @@ module PotatoMesh
coerced
end
# Normalise a caller-supplied timestamp for API pagination windows.
#
# @param since [Object] requested lower bound expressed as seconds since the epoch.
# @param floor [Integer] minimum allowable timestamp used to clamp the value.
# @return [Integer] non-negative timestamp greater than or equal to +floor+.
def normalize_since_threshold(since, floor: 0)
  candidate = coerce_integer(since)
  candidate = 0 if candidate.nil? || candidate.negative?
  candidate > floor ? candidate : floor
end
def node_reference_tokens(node_ref)
parts = canonical_node_parts(node_ref)
canonical_id, numeric_id = parts ? parts[0, 2] : [nil, nil]
@@ -198,12 +209,20 @@ module PotatoMesh
["(#{clauses.join(" OR ")})", params]
end
def query_nodes(limit, node_ref: nil)
# Fetch node state optionally scoped by identifier and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to narrow results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window for collections.
# @return [Array<Hash>] compacted node rows suitable for API responses.
def query_nodes(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
now = Time.now.to_i
min_last_heard = now - PotatoMesh::Config.week_seconds
since_floor = node_ref ? 0 : min_last_heard
since_threshold = normalize_since_threshold(since, floor: since_floor)
params = []
where_clauses = []
@@ -214,7 +233,7 @@ module PotatoMesh
params.concat(clause.last)
else
where_clauses << "last_heard >= ?"
params << min_last_heard
params << since_threshold
end
if private_mode?
@@ -242,7 +261,7 @@ module PotatoMesh
.map { |value| coerce_integer(value) }
.compact
.max
last_candidate && last_candidate >= min_last_heard
last_candidate && last_candidate >= since_threshold
end
rows.each do |r|
r["role"] ||= "CLIENT"
@@ -262,6 +281,47 @@ module PotatoMesh
db&.close
end
# Fetch ingestor heartbeats with optional freshness filtering.
#
# @param limit [Integer] maximum number of ingestors to return.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window for collections.
# @return [Array<Hash>] compacted ingestor rows suitable for API responses.
def query_ingestors(limit, since: 0)
  limit = coerce_query_limit(limit)
  db = open_database(readonly: true)
  db.results_as_hash = true
  now = Time.now.to_i
  # Only ingestors heard within the rolling retention window are returned;
  # a caller-supplied `since` can narrow (never widen) that window.
  cutoff = now - PotatoMesh::Config.week_seconds
  since_threshold = normalize_since_threshold(since, floor: cutoff)
  sql = <<~SQL
    SELECT node_id, start_time, last_seen_time, version, lora_freq, modem_preset
    FROM ingestors
    WHERE last_seen_time >= ?
    ORDER BY last_seen_time DESC
    LIMIT ?
  SQL
  rows = db.execute(sql, [since_threshold, limit])
  rows.each do |row|
    # results_as_hash also exposes positional integer keys; drop duplicates.
    row.delete_if { |key, _| key.is_a?(Integer) }
    start_time = coerce_integer(row["start_time"])
    last_seen_time = coerce_integer(row["last_seen_time"])
    # Clamp future timestamps to "now" so reported times never lead the clock,
    # then ensure last_seen never precedes start.
    start_time = now if start_time && start_time > now
    last_seen_time = now if last_seen_time && last_seen_time > now
    if start_time && last_seen_time && last_seen_time < start_time
      last_seen_time = start_time
    end
    row["start_time"] = start_time
    row["last_seen_time"] = last_seen_time
    row["start_time_iso"] = Time.at(start_time).utc.iso8601 if start_time
    row["last_seen_iso"] = Time.at(last_seen_time).utc.iso8601 if last_seen_time
  end
  rows.map { |row| compact_api_row(row) }
ensure
  db&.close
end
# Fetch chat messages with optional filtering.
#
# @param limit [Integer] maximum number of rows to return.
@@ -271,8 +331,7 @@ module PotatoMesh
# @return [Array<Hash>] compacted message rows safe for API responses.
def query_messages(limit, node_ref: nil, include_encrypted: false, since: 0)
limit = coerce_query_limit(limit)
since_threshold = coerce_integer(since)
since_threshold = 0 if since_threshold.nil? || since_threshold.negative?
since_threshold = normalize_since_threshold(since, floor: 0)
db = open_database(readonly: true)
db.results_as_hash = true
params = []
@@ -298,7 +357,7 @@ module PotatoMesh
SELECT m.id, m.rx_time, m.rx_iso, m.from_id, m.to_id, m.channel,
m.portnum, m.text, m.encrypted, m.rssi, m.hop_limit,
m.lora_freq, m.modem_preset, m.channel_name, m.snr,
m.reply_id, m.emoji
m.reply_id, m.emoji, m.decrypted, m.decryption_confidence
FROM messages m
SQL
sql += " WHERE #{where_clauses.join(" AND ")}\n"
@@ -312,6 +371,30 @@ module PotatoMesh
r.delete_if { |key, _| key.is_a?(Integer) }
r["reply_id"] = coerce_integer(r["reply_id"]) if r.key?("reply_id")
r["emoji"] = string_or_nil(r["emoji"]) if r.key?("emoji")
if string_or_nil(r["encrypted"])
r.delete("portnum")
end
if r.key?("decrypted")
decrypted_raw = r["decrypted"]
decrypted = case decrypted_raw
when true, false
decrypted_raw
when Integer
!decrypted_raw.zero?
when String
trimmed = decrypted_raw.strip
!trimmed.empty? && trimmed != "0" && trimmed.casecmp("false") != 0
else
!!decrypted_raw
end
r["decrypted"] = decrypted
r.delete("decryption_confidence") unless decrypted
end
if r.key?("decryption_confidence") && !r["decryption_confidence"].nil?
r["decryption_confidence"] = r["decryption_confidence"].to_f
end
if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.strip.empty?)
raw = db.execute("SELECT * FROM messages WHERE id = ?", [r["id"]]).first
debug_log(
@@ -350,7 +433,13 @@ module PotatoMesh
db&.close
end
def query_positions(limit, node_ref: nil)
# Fetch positions optionally scoped by node and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to scope results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted position rows suitable for API responses.
def query_positions(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
@@ -358,8 +447,10 @@ module PotatoMesh
where_clauses = []
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
since_floor = node_ref ? 0 : min_rx_time
since_threshold = normalize_since_threshold(since, floor: since_floor)
where_clauses << "COALESCE(rx_time, position_time, 0) >= ?"
params << min_rx_time
params << since_threshold
if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
@@ -401,7 +492,13 @@ module PotatoMesh
db&.close
end
def query_neighbors(limit, node_ref: nil)
# Fetch neighbor relationships optionally scoped by node and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to scope results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window for collections.
# @return [Array<Hash>] compacted neighbor rows suitable for API responses.
def query_neighbors(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
@@ -409,8 +506,10 @@ module PotatoMesh
where_clauses = []
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
since_floor = node_ref ? 0 : min_rx_time
since_threshold = normalize_since_threshold(since, floor: since_floor)
where_clauses << "COALESCE(rx_time, 0) >= ?"
params << min_rx_time
params << since_threshold
if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["node_id", "neighbor_id"])
@@ -441,7 +540,13 @@ module PotatoMesh
db&.close
end
def query_telemetry(limit, node_ref: nil)
# Fetch telemetry packets optionally scoped by node and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to scope results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window for collections.
# @return [Array<Hash>] compacted telemetry rows suitable for API responses.
def query_telemetry(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
@@ -449,8 +554,10 @@ module PotatoMesh
where_clauses = []
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
since_floor = node_ref ? 0 : min_rx_time
since_threshold = normalize_since_threshold(since, floor: since_floor)
where_clauses << "COALESCE(rx_time, telemetry_time, 0) >= ?"
params << min_rx_time
params << since_threshold
if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
@@ -520,7 +627,13 @@ module PotatoMesh
db&.close
end
def query_telemetry_buckets(window_seconds:, bucket_seconds:)
# Aggregate telemetry metrics into time buckets.
#
# @param window_seconds [Integer] duration expressed in seconds to include in the query.
# @param bucket_seconds [Integer] size of each aggregation bucket in seconds.
# @param since [Integer] unix timestamp threshold applied in addition to the requested window.
# @return [Array<Hash>] aggregated telemetry metrics grouped by bucket start time.
def query_telemetry_buckets(window_seconds:, bucket_seconds:, since: 0)
window = coerce_integer(window_seconds) || DEFAULT_TELEMETRY_WINDOW_SECONDS
window = DEFAULT_TELEMETRY_WINDOW_SECONDS if window <= 0
bucket = coerce_integer(bucket_seconds) || DEFAULT_TELEMETRY_BUCKET_SECONDS
@@ -530,6 +643,7 @@ module PotatoMesh
db.results_as_hash = true
now = Time.now.to_i
min_timestamp = now - window
since_threshold = normalize_since_threshold(since, floor: min_timestamp)
bucket_expression = "((COALESCE(rx_time, telemetry_time) / ?) * ?)"
select_clauses = [
"#{bucket_expression} AS bucket_start",
@@ -555,7 +669,7 @@ module PotatoMesh
ORDER BY bucket_start ASC
LIMIT ?
SQL
params = [bucket, bucket, min_timestamp, MAX_QUERY_LIMIT]
params = [bucket, bucket, since_threshold, MAX_QUERY_LIMIT]
rows = db.execute(sql, params)
rows.map do |row|
bucket_start = coerce_integer(row["bucket_start"])
@@ -635,16 +749,23 @@ module PotatoMesh
column
end
def query_traces(limit, node_ref: nil)
# Fetch trace records optionally scoped by node and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to scope results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted trace rows suitable for API responses.
def query_traces(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
params = []
where_clauses = []
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
min_rx_time = now - PotatoMesh::Config.trace_neighbor_window_seconds
since_threshold = normalize_since_threshold(since, floor: min_rx_time)
where_clauses << "COALESCE(rx_time, 0) >= ?"
params << min_rx_time
params << since_threshold
if node_ref
tokens = node_reference_tokens(node_ref)
+21 -11
View File
@@ -64,7 +64,7 @@ module PotatoMesh
app.get "/api/nodes" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_nodes(limit).to_json
query_nodes(limit, since: params["since"]).to_json
end
app.get "/api/nodes/:id" do
@@ -72,11 +72,17 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
rows = query_nodes(limit, node_ref: node_ref)
rows = query_nodes(limit, node_ref: node_ref, since: params["since"])
halt 404, { error: "not found" }.to_json if rows.empty?
rows.first.to_json
end
app.get "/api/ingestors" do
content_type :json
limit = coerce_query_limit(params["limit"])
query_ingestors(limit, since: params["since"]).to_json
end
app.get "/api/messages" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
@@ -105,7 +111,7 @@ module PotatoMesh
app.get "/api/positions" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_positions(limit).to_json
query_positions(limit, since: params["since"]).to_json
end
app.get "/api/positions/:id" do
@@ -113,13 +119,13 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_positions(limit, node_ref: node_ref).to_json
query_positions(limit, node_ref: node_ref, since: params["since"]).to_json
end
app.get "/api/neighbors" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_neighbors(limit).to_json
query_neighbors(limit, since: params["since"]).to_json
end
app.get "/api/neighbors/:id" do
@@ -127,13 +133,13 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_neighbors(limit, node_ref: node_ref).to_json
query_neighbors(limit, node_ref: node_ref, since: params["since"]).to_json
end
app.get "/api/telemetry" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_telemetry(limit).to_json
query_telemetry(limit, since: params["since"]).to_json
end
app.get "/api/telemetry/aggregated" do
@@ -164,7 +170,11 @@ module PotatoMesh
halt 400, { error: "bucketSeconds too small for requested window" }.to_json
end
query_telemetry_buckets(window_seconds: window_seconds, bucket_seconds: bucket_seconds).to_json
query_telemetry_buckets(
window_seconds: window_seconds,
bucket_seconds: bucket_seconds,
since: params["since"],
).to_json
end
app.get "/api/telemetry/:id" do
@@ -172,13 +182,13 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_telemetry(limit, node_ref: node_ref).to_json
query_telemetry(limit, node_ref: node_ref, since: params["since"]).to_json
end
app.get "/api/traces" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_traces(limit).to_json
query_traces(limit, since: params["since"]).to_json
end
app.get "/api/traces/:id" do
@@ -186,7 +196,7 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_traces(limit, node_ref: node_ref).to_json
query_traces(limit, node_ref: node_ref, since: params["since"]).to_json
end
app.get "/api/instances" do
@@ -65,6 +65,25 @@ module PotatoMesh
db&.close
end
# Authenticated endpoint accepting ingestor heartbeat upserts as JSON.
app.post "/api/ingestors" do
  require_token!
  content_type :json
  begin
    payload = JSON.parse(read_json_body)
  rescue JSON::ParserError
    halt 400, { error: "invalid JSON" }.to_json
  end
  unless payload.is_a?(Hash)
    halt 400, { error: "invalid payload" }.to_json
  end
  db = open_database
  stored = upsert_ingestor(db, payload)
  # A falsy result from upsert_ingestor is reported as an invalid payload.
  halt 400, { error: "invalid payload" }.to_json unless stored
  { status: "ok" }.to_json
ensure
  # halt raises, so the database handle is closed on every exit path.
  db&.close
end
app.post "/api/instances" do
content_type :json
begin
@@ -113,6 +132,7 @@ module PotatoMesh
raw_private = payload.key?("isPrivate") ? payload["isPrivate"] : payload["is_private"]
is_private = coerce_boolean(raw_private)
signature = string_or_nil(payload["signature"])
contact_link = string_or_nil(payload["contactLink"])
attributes = {
id: id,
@@ -126,6 +146,7 @@ module PotatoMesh
longitude: longitude,
last_update_time: last_update_time,
is_private: is_private,
contact_link: contact_link,
}
if [attributes[:id], attributes[:domain], attributes[:pubkey], signature, attributes[:last_update_time]].any?(&:nil?)
@@ -138,6 +159,10 @@ module PotatoMesh
end
signature_valid = verify_instance_signature(attributes, signature, attributes[:pubkey])
if !signature_valid && contact_link
stripped_attributes = attributes.merge(contact_link: nil)
signature_valid = verify_instance_signature(stripped_attributes, signature, attributes[:pubkey])
end
# Some remote peers sign payloads using a canonicalised lowercase
# domain while still sending a mixed-case domain. Retry signature
# verification with the original casing when the first attempt
@@ -145,6 +170,10 @@ module PotatoMesh
if !signature_valid && raw_domain && normalized_domain && raw_domain.casecmp?(normalized_domain) && raw_domain != normalized_domain
alternate_attributes = attributes.merge(domain: raw_domain)
signature_valid = verify_instance_signature(alternate_attributes, signature, attributes[:pubkey])
if !signature_valid && contact_link
stripped_alternate = alternate_attributes.merge(contact_link: nil)
signature_valid = verify_instance_signature(stripped_alternate, signature, attributes[:pubkey])
end
end
unless signature_valid
+29 -3
View File
@@ -14,6 +14,8 @@
# frozen_string_literal: true
require "timeout"
module PotatoMesh
module App
# WorkerPool executes submitted blocks using a bounded set of Ruby threads.
@@ -124,8 +126,9 @@ module PotatoMesh
#
# @param size [Integer] number of worker threads to spawn.
# @param max_queue [Integer, nil] optional upper bound on queued jobs.
# @param task_timeout [Numeric, nil] optional per-task execution timeout.
# @param name [String] prefix assigned to worker thread names.
def initialize(size:, max_queue: nil, name: "worker-pool")
def initialize(size:, max_queue: nil, task_timeout: nil, name: "worker-pool")
raise ArgumentError, "size must be positive" unless size.is_a?(Integer) && size.positive?
@name = name
@@ -133,6 +136,7 @@ module PotatoMesh
@threads = []
@stopped = false
@mutex = Mutex.new
@task_timeout = normalize_task_timeout(task_timeout)
spawn_workers(size)
end
@@ -192,23 +196,45 @@ module PotatoMesh
worker = Thread.new do
Thread.current.name = "#{@name}-#{index}" if Thread.current.respond_to?(:name=)
Thread.current.report_on_exception = false if Thread.current.respond_to?(:report_on_exception=)
# Daemon threads allow the process to exit even if a job is stuck.
Thread.current.daemon = true if Thread.current.respond_to?(:daemon=)
loop do
task, block = @queue.pop
break if task.equal?(STOP_SIGNAL)
begin
result = block.call
result = if @task_timeout
Timeout.timeout(@task_timeout, TaskTimeoutError, "task exceeded timeout") do
block.call
end
else
block.call
end
task.fulfill(result)
rescue StandardError => e
task.reject(e)
end
end
end
@threads << worker
end
end
# Normalize the per-task timeout into a positive float value.
#
# @param task_timeout [Numeric, nil] candidate timeout value.
# @return [Float, nil] positive timeout in seconds or nil when disabled.
def normalize_task_timeout(task_timeout)
  # Float(..., exception: false) returns nil for nil/unconvertible input
  # instead of raising, avoiding rescue-based control flow.
  value = Float(task_timeout, exception: false)
  value&.positive? ? value : nil
end
end
end
end
+25 -2
View File
@@ -32,6 +32,7 @@ module PotatoMesh
DEFAULT_MAP_CENTER = "#{DEFAULT_MAP_CENTER_LAT},#{DEFAULT_MAP_CENTER_LON}"
DEFAULT_CHANNEL = "#LongFast"
DEFAULT_FREQUENCY = "915MHz"
DEFAULT_MESHTASTIC_PSK_B64 = "AQ=="
DEFAULT_CONTACT_LINK = "#potatomesh:dod.ngo"
DEFAULT_MAX_DISTANCE_KM = 42.0
DEFAULT_REMOTE_INSTANCE_CONNECT_TIMEOUT = 15
@@ -42,6 +43,7 @@ module PotatoMesh
DEFAULT_FEDERATION_WORKER_QUEUE_CAPACITY = 128
DEFAULT_FEDERATION_TASK_TIMEOUT_SECONDS = 120
DEFAULT_INITIAL_FEDERATION_DELAY_SECONDS = 2
DEFAULT_FEDERATION_SEED_DOMAINS = %w[potatomesh.net potatomesh.jmrp.io mesh.qrp.ro].freeze
# Retrieve the configured API token used for authenticated requests.
#
@@ -157,6 +159,13 @@ module PotatoMesh
7 * 24 * 60 * 60
end
# Rolling retention window in seconds for trace and neighbor API queries.
#
# @return [Integer] seconds in twenty-eight days (28 × 86,400).
def trace_neighbor_window_seconds
  28 * 86_400
end
# Default upper bound for accepted JSON payload sizes.
#
# @return [Integer] byte ceiling for HTTP request bodies.
@@ -175,7 +184,7 @@ module PotatoMesh
#
# @return [String] semantic version identifier.
def version_fallback
"0.5.7"
"0.5.10"
end
# Default refresh interval for frontend polling routines.
@@ -409,7 +418,7 @@ module PotatoMesh
#
# @return [Array<String>] list of default seed domains.
def federation_seed_domains
["potatomesh.net"].freeze
DEFAULT_FEDERATION_SEED_DOMAINS
end
# Determine how often we broadcast federation announcements.
@@ -436,6 +445,13 @@ module PotatoMesh
fetch_string("SITE_NAME", "PotatoMesh Demo")
end
# Retrieve the configured announcement banner copy.
#
# Reads the ANNOUNCEMENT setting; there is no default, so unset
# configurations yield nil.
#
# @return [String, nil] announcement string when configured, otherwise nil.
def announcement
  fetch_string("ANNOUNCEMENT", nil)
end
# Retrieve the default radio channel label.
#
# @return [String] channel name from configuration.
@@ -450,6 +466,13 @@ module PotatoMesh
fetch_string("FREQUENCY", DEFAULT_FREQUENCY)
end
# Retrieve the Meshtastic PSK used for decrypting channel messages.
#
# Falls back to DEFAULT_MESHTASTIC_PSK_B64 ("AQ==") when unset —
# presumably the single-byte default-key alias; confirm against the
# ingestor's PSK handling before relying on it.
#
# @return [String] base64-encoded PSK or alias.
def meshtastic_psk_b64
  fetch_string("MESHTASTIC_PSK_B64", DEFAULT_MESHTASTIC_PSK_B64)
end
# Parse the configured map centre coordinates.
#
# @return [Hash{Symbol=>Float}] latitude and longitude in decimal degrees.
+8
View File
@@ -199,6 +199,14 @@ module PotatoMesh
sanitized_string(Config.site_name)
end
# Retrieve the configured announcement banner copy and normalise blank values to nil.
#
# @return [String, nil] announcement copy or +nil+ when blank.
def sanitized_announcement
  copy = sanitized_string(Config.announcement)
  return nil if copy.empty?
  copy
end
# Retrieve the configured channel as a cleaned string.
#
# @return [String] trimmed configuration value.
+2 -2
View File
@@ -1,12 +1,12 @@
{
"name": "potato-mesh",
"version": "0.5.7",
"version": "0.5.10",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "potato-mesh",
"version": "0.5.7",
"version": "0.5.10",
"devDependencies": {
"istanbul-lib-coverage": "^3.2.2",
"istanbul-lib-report": "^3.0.1",
+1 -1
View File
@@ -1,6 +1,6 @@
{
"name": "potato-mesh",
"version": "0.5.7",
"version": "0.5.10",
"type": "module",
"private": true,
"scripts": {
@@ -113,11 +113,9 @@ test('buildChatTabModel returns sorted nodes and channel buckets', () => {
assert.deepEqual(secondaryChannel.entries.map(entry => entry.message.id), ['iso-ts', 'recent-alt']);
});
test('buildChatTabModel always includes channel zero bucket', () => {
test('buildChatTabModel skips channel buckets when there are no messages', () => {
const model = buildChatTabModel({ nodes: [], messages: [], nowSeconds: NOW, windowSeconds: WINDOW });
assert.equal(model.channels.length, 1);
assert.equal(model.channels[0].index, 0);
assert.equal(model.channels[0].entries.length, 0);
assert.equal(model.channels.length, 0);
});
test('buildChatTabModel falls back to numeric label when no metadata provided', () => {
@@ -174,14 +172,13 @@ test('buildChatTabModel includes telemetry, position, and neighbor events', () =
windowSeconds: WINDOW
});
assert.deepEqual(model.logEntries.map(entry => entry.type), [
CHAT_LOG_ENTRY_TYPES.NODE_NEW,
CHAT_LOG_ENTRY_TYPES.NODE_INFO,
CHAT_LOG_ENTRY_TYPES.TELEMETRY,
CHAT_LOG_ENTRY_TYPES.POSITION,
CHAT_LOG_ENTRY_TYPES.NEIGHBOR,
CHAT_LOG_ENTRY_TYPES.TRACE
]);
const types = model.logEntries.map(entry => entry.type);
assert.equal(types[0], CHAT_LOG_ENTRY_TYPES.NODE_NEW);
assert.ok(types.includes(CHAT_LOG_ENTRY_TYPES.NODE_INFO));
assert.ok(types.includes(CHAT_LOG_ENTRY_TYPES.TELEMETRY));
assert.ok(types.includes(CHAT_LOG_ENTRY_TYPES.POSITION));
assert.ok(types.includes(CHAT_LOG_ENTRY_TYPES.NEIGHBOR));
assert.ok(types.includes(CHAT_LOG_ENTRY_TYPES.TRACE));
assert.equal(model.logEntries[0].nodeId, nodeId);
const neighborEntry = model.logEntries.find(entry => entry.type === CHAT_LOG_ENTRY_TYPES.NEIGHBOR);
assert.ok(neighborEntry);
@@ -27,6 +27,9 @@ test('federation map centers on configured coordinates and follows theme filters
const mapEl = createElement('div', 'map');
registerElement('map', mapEl);
const mapPanel = createElement('div', 'mapPanel');
mapPanel.dataset.legendCollapsed = 'true';
registerElement('mapPanel', mapPanel);
const statusEl = createElement('div', 'status');
registerElement('status', statusEl);
const tableEl = createElement('table', 'instances');
@@ -408,15 +411,192 @@ test('federation table sorting, contact rendering, and legend creation', async (
assert.deepEqual(mapSetViewCalls[0], [[0, 0], 3]);
assert.equal(mapFitBoundsCalls[0][0].length, 3);
assert.equal(legendContainers.length, 1);
const legend = legendContainers[0];
assert.ok(legend.className.includes('legend'));
assert.equal(legendContainers.length, 2);
const legend = legendContainers.find(container => container.className.includes('legend--instances'));
assert.ok(legend);
assert.ok(legend.className.includes('legend-hidden'));
const legendHeader = legend.children.find(child => child.className === 'legend-header');
const legendTitle = legendHeader && Array.isArray(legendHeader.children)
? legendHeader.children.find(child => child.className === 'legend-title')
: null;
assert.ok(legendTitle);
assert.equal(legendTitle.textContent, 'Active nodes');
const legendToggle = legendContainers.find(container => container.className.includes('legend-toggle'));
assert.ok(legendToggle);
} finally {
cleanup();
}
});
test('federation legend toggle respects media query changes', async () => {
const env = createDomEnvironment({ includeBody: true, bodyHasDarkClass: false });
const { document, createElement, registerElement, cleanup } = env;
const mapEl = createElement('div', 'map');
registerElement('map', mapEl);
const mapPanel = createElement('div', 'mapPanel');
mapPanel.setAttribute('data-legend-collapsed', 'false');
registerElement('mapPanel', mapPanel);
const statusEl = createElement('div', 'status');
registerElement('status', statusEl);
const tableEl = createElement('table', 'instances');
const tbodyEl = createElement('tbody');
registerElement('instances', tableEl);
tableEl.appendChild(tbodyEl);
const configPayload = {
mapCenter: { lat: 0, lon: 0 },
mapZoom: 3,
tileFilters: { light: 'none', dark: 'invert(1)' }
};
const configEl = createElement('div');
configEl.setAttribute('data-app-config', JSON.stringify(configPayload));
document.querySelector = selector => {
if (selector === '[data-app-config]') return configEl;
if (selector === '#instances tbody') return tbodyEl;
return null;
};
let mediaQueryHandler = null;
window.matchMedia = () => ({
matches: false,
addListener(handler) {
mediaQueryHandler = handler;
}
});
const legendContainers = [];
const legendButtons = [];
const DomUtil = {
create(tag, className, parent) {
const classSet = new Set(className ? className.split(/\s+/).filter(Boolean) : []);
const el = {
tagName: tag,
className,
classList: {
toggle(name, force) {
const shouldAdd = typeof force === 'boolean' ? force : !classSet.has(name);
if (shouldAdd) {
classSet.add(name);
} else {
classSet.delete(name);
}
el.className = Array.from(classSet).join(' ');
}
},
children: [],
style: {},
textContent: '',
attributes: new Map(),
setAttribute(name, value) {
this.attributes.set(name, String(value));
},
appendChild(child) {
this.children.push(child);
return child;
},
addEventListener(event, handler) {
if (event === 'click') {
this._clickHandler = handler;
}
},
querySelector() {
return null;
}
};
if (parent && parent.appendChild) parent.appendChild(el);
if (className && className.includes('legend-toggle-button')) {
legendButtons.push(el);
}
return el;
}
};
const controlStub = () => {
const ctrl = {
onAdd: null,
container: null,
addTo(map) {
this.container = this.onAdd ? this.onAdd(map) : null;
legendContainers.push(this.container);
return this;
},
getContainer() {
return this.container;
}
};
return ctrl;
};
const markersLayer = {
addLayer() {
return null;
},
addTo() {
return this;
}
};
const leafletStub = {
map() {
return {
setView() {},
on() {},
fitBounds() {}
};
},
tileLayer() {
return {
addTo() {
return this;
},
getContainer() {
return null;
},
on() {}
};
},
layerGroup() {
return markersLayer;
},
circleMarker() {
return {
bindPopup() {
return this;
}
};
},
control: controlStub,
DomUtil,
DomEvent: {
disableClickPropagation() {},
disableScrollPropagation() {}
}
};
const fetchImpl = async () => ({
ok: true,
json: async () => []
});
try {
await initializeFederationPage({ config: configPayload, fetchImpl, leaflet: leafletStub });
const legend = legendContainers.find(container => container.className.includes('legend--instances'));
assert.ok(legend);
assert.ok(!legend.className.includes('legend-hidden'));
assert.equal(legendButtons.length, 1);
legendButtons[0]._clickHandler?.({ preventDefault() {}, stopPropagation() {} });
assert.ok(legend.className.includes('legend-hidden'));
if (mediaQueryHandler) {
mediaQueryHandler({ matches: false });
assert.ok(!legend.className.includes('legend-hidden'));
}
} finally {
cleanup();
}
@@ -20,7 +20,7 @@ import { createDomEnvironment } from './dom-environment.js';
import { buildInstanceUrl, initializeInstanceSelector, __test__ } from '../instance-selector.js';
const { resolveInstanceLabel } = __test__;
const { resolveInstanceLabel, updateFederationNavCount } = __test__;
function setupSelectElement(document) {
const select = document.createElement('select');
@@ -90,10 +90,29 @@ test('resolveInstanceLabel falls back to the domain when the name is missing', (
test('buildInstanceUrl normalises domains into navigable HTTPS URLs', () => {
assert.equal(buildInstanceUrl('mesh.example'), 'https://mesh.example');
assert.equal(buildInstanceUrl(' https://mesh.example '), 'https://mesh.example');
assert.equal(buildInstanceUrl('https://mesh.example/path?query#fragment'), 'https://mesh.example');
assert.equal(buildInstanceUrl('javascript:alert(1)'), null);
assert.equal(buildInstanceUrl('ftp://mesh.example'), null);
assert.equal(buildInstanceUrl('mesh.example:8080'), 'https://mesh.example:8080');
assert.equal(buildInstanceUrl('mesh.example<script>'), null);
assert.equal(buildInstanceUrl(''), null);
assert.equal(buildInstanceUrl(null), null);
});
test('buildInstanceUrl rejects malformed HTTP URLs safely', () => {
const originalWarn = console.warn;
const warnings = [];
console.warn = message => warnings.push(message);
try {
assert.equal(buildInstanceUrl('http://[::1'), null);
assert.equal(buildInstanceUrl('https://bad host.example'), null);
assert.ok(warnings.length >= 1);
} finally {
console.warn = originalWarn;
}
});
test('initializeInstanceSelector populates options alphabetically and selects the configured domain', async () => {
const env = createDomEnvironment();
const select = setupSelectElement(env.document);
@@ -172,3 +191,65 @@ test('initializeInstanceSelector navigates to the chosen instance domain', async
env.cleanup();
}
});
test('initializeInstanceSelector updates federation navigation labels with instance count', async () => {
const env = createDomEnvironment();
const select = setupSelectElement(env.document);
const navLink = env.document.createElement('a');
navLink.classList.add('js-federation-nav');
navLink.textContent = 'Federation';
env.document.body.appendChild(navLink);
const fetchImpl = async () => ({
ok: true,
async json() {
return [{ domain: 'alpha.mesh' }, { domain: 'beta.mesh' }];
}
});
try {
await initializeInstanceSelector({
selectElement: select,
fetchImpl,
windowObject: env.window,
documentObject: env.document
});
assert.equal(navLink.textContent, 'Federation (2)');
} finally {
env.cleanup();
}
});
test('updateFederationNavCount prefers stored labels and normalizes counts', () => {
const env = createDomEnvironment();
const navLink = env.document.createElement('a');
navLink.classList.add('js-federation-nav');
navLink.textContent = 'Federation';
navLink.dataset.federationLabel = 'Community';
env.document.body.appendChild(navLink);
try {
updateFederationNavCount({ documentObject: env.document, count: -3 });
assert.equal(navLink.textContent, 'Community (0)');
} finally {
env.cleanup();
}
});
test('updateFederationNavCount falls back to existing link text when no dataset label', () => {
const env = createDomEnvironment();
const navLink = env.document.createElement('a');
navLink.classList.add('js-federation-nav');
navLink.textContent = 'Federation (9)';
env.document.body.appendChild(navLink);
try {
updateFederationNavCount({ documentObject: env.document, count: 4 });
assert.equal(navLink.textContent, 'Federation (4)');
} finally {
env.cleanup();
}
});
@@ -0,0 +1,41 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { resolveLegendVisibility } from '../map-legend-visibility.js';
// An explicit default collapse always wins, regardless of viewport width.
test('resolveLegendVisibility hides when a default collapse is requested', () => {
  assert.equal(resolveLegendVisibility({ defaultCollapsed: true, mediaQueryMatches: false }), false);
  assert.equal(resolveLegendVisibility({ defaultCollapsed: true, mediaQueryMatches: true }), false);
});
// Dashboard and map view modes force the legend off even when not collapsed by default.
test('resolveLegendVisibility hides for dashboard and map views', () => {
  assert.equal(
    resolveLegendVisibility({ defaultCollapsed: false, mediaQueryMatches: false, viewMode: 'dashboard' }),
    false
  );
  assert.equal(
    resolveLegendVisibility({ defaultCollapsed: false, mediaQueryMatches: false, viewMode: 'map' }),
    false
  );
});
// With no forcing inputs, visibility is the inverse of the (narrow-viewport) media query match.
test('resolveLegendVisibility follows the media query when not forced', () => {
  assert.equal(resolveLegendVisibility({ defaultCollapsed: false, mediaQueryMatches: false }), true);
  assert.equal(resolveLegendVisibility({ defaultCollapsed: false, mediaQueryMatches: true }), false);
});
@@ -0,0 +1,455 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { __test__, initializeMobileMenu } from '../mobile-menu.js';
const { createMobileMenuController, resolveFocusableElements } = __test__;
function createClassList() {
const values = new Set();
return {
add(...names) {
names.forEach(name => values.add(name));
},
remove(...names) {
names.forEach(name => values.delete(name));
},
contains(name) {
return values.has(name);
}
};
}
/**
 * Build a lightweight DOM element stub for the mobile-menu tests.
 *
 * Records attributes in a Map, event handlers keyed by event type (one
 * handler per type), and tracks the last appended/inserted child so tests
 * can assert on DOM mutations without a real document.
 *
 * @param {string} [tagName='div'] Tag name; stored uppercased like real DOM nodes.
 * @param {string} [initialId=''] Optional id attribute set at construction time.
 * @returns {Object} element stub.
 */
function createElement(tagName = 'div', initialId = '') {
  const handlers = new Map();
  const attrs = new Map();
  if (initialId) attrs.set('id', String(initialId));
  const el = {
    tagName: tagName.toUpperCase(),
    attributes: attrs,
    classList: createClassList(),
    dataset: {},
    hidden: false,
    parentNode: null,
    nextSibling: null
  };
  el.setAttribute = (name, value) => {
    attrs.set(name, String(value));
  };
  // Real DOM returns null (not undefined) for absent attributes.
  el.getAttribute = name => (attrs.has(name) ? attrs.get(name) : null);
  el.addEventListener = (event, handler) => {
    handlers.set(event, handler);
  };
  el.dispatchEvent = event => {
    const type = typeof event === 'string' ? event : event?.type;
    const handler = handlers.get(type);
    if (handler) {
      handler(event);
    }
  };
  el.appendChild = node => {
    el.lastAppended = node;
    return node;
  };
  el.insertBefore = (node, sibling) => {
    el.lastInserted = { node, nextSibling: sibling };
    return node;
  };
  // Mirrors document.activeElement tracking on the (stubbed) global document.
  el.focus = () => {
    globalThis.document.activeElement = el;
  };
  el.querySelector = () => null;
  el.querySelectorAll = () => [];
  return el;
}
/**
 * Install a document stub on globalThis and return helpers to drive it.
 *
 * The stub exposes a body element, an activeElement slot, and a
 * getElementById backed by a Map registry the caller populates. The
 * returned cleanup restores whatever document was previously installed.
 *
 * @returns {{documentStub: Object, registry: Map, cleanup: Function}} stub handles.
 */
function createDomStub() {
  const previousDocument = globalThis.document;
  const registry = new Map();
  const documentStub = {
    body: createElement('body'),
    activeElement: null,
    querySelectorAll: () => [],
    // Match real DOM: unknown ids resolve to null, never undefined.
    getElementById: id => registry.get(id) || null
  };
  globalThis.document = documentStub;
  const cleanup = () => {
    globalThis.document = previousDocument;
  };
  return { documentStub, registry, cleanup };
}
function createWindowStub(matches = true) {
const listeners = new Map();
const mediaListeners = new Map();
return {
matchMedia() {
return {
matches,
addEventListener(event, handler) {
mediaListeners.set(event, handler);
}
};
},
addEventListener(event, handler) {
listeners.set(event, handler);
},
dispatchEvent(event) {
const key = typeof event === 'string' ? event : event?.type;
const handler = listeners.get(key);
if (handler) {
handler(event);
}
},
dispatchMediaChange() {
const handler = mediaListeners.get('change');
if (handler) {
handler();
}
}
};
}
function createWindowStubWithListener(matches = true) {
const listeners = new Map();
let mediaHandler = null;
return {
matchMedia() {
return {
matches,
addListener(handler) {
mediaHandler = handler;
}
};
},
addEventListener(event, handler) {
listeners.set(event, handler);
},
dispatchMediaChange() {
if (mediaHandler) {
mediaHandler();
}
}
};
}
// Happy path: clicking the toggle opens the menu, flips aria-expanded, and
// marks the body; clicking a nav link or the close button closes it again.
test('mobile menu toggles open state and aria-expanded', () => {
  const { documentStub, registry, cleanup } = createDomStub();
  const windowStub = createWindowStub(true);
  const menuToggle = createElement('button');
  const menu = createElement('div');
  const menuPanel = createElement('div');
  const closeButton = createElement('button');
  const navLink = createElement('a');
  menu.hidden = true;
  menuPanel.classList.add('mobile-menu__panel');
  // Wire the selectors the controller queries against the stub elements.
  menu.querySelector = selector => {
    if (selector === '.mobile-menu__panel') return menuPanel;
    return null;
  };
  menu.querySelectorAll = selector => {
    if (selector === '[data-mobile-menu-close]') return [closeButton];
    if (selector === 'a') return [navLink];
    return [];
  };
  menuPanel.querySelectorAll = () => [closeButton, navLink];
  registry.set('mobileMenuToggle', menuToggle);
  registry.set('mobileMenu', menu);
  try {
    const controller = createMobileMenuController({
      documentObject: documentStub,
      windowObject: windowStub
    });
    controller.initialize();
    windowStub.dispatchMediaChange();
    menuToggle.dispatchEvent({ type: 'click', preventDefault() {} });
    assert.equal(menu.hidden, false);
    assert.equal(menuToggle.getAttribute('aria-expanded'), 'true');
    assert.equal(documentStub.body.classList.contains('menu-open'), true);
    navLink.dispatchEvent({ type: 'click' });
    assert.equal(menu.hidden, true);
    // Closing an already-closed menu must be a safe no-op.
    closeButton.dispatchEvent({ type: 'click' });
    assert.equal(menu.hidden, true);
    assert.equal(menuToggle.getAttribute('aria-expanded'), 'false');
  } finally {
    cleanup();
  }
});
// The menu must close on Escape (but not on other keys) and whenever the
// route changes via hashchange or popstate.
test('mobile menu closes on escape and route changes', () => {
  const { documentStub, registry, cleanup } = createDomStub();
  const windowStub = createWindowStub(true);
  const menuToggle = createElement('button');
  const menu = createElement('div');
  const menuPanel = createElement('div');
  const closeButton = createElement('button');
  menu.hidden = true;
  menuPanel.classList.add('mobile-menu__panel');
  menu.querySelector = selector => {
    if (selector === '.mobile-menu__panel') return menuPanel;
    return null;
  };
  menu.querySelectorAll = selector => {
    if (selector === '[data-mobile-menu-close]') return [closeButton];
    return [];
  };
  menuPanel.querySelectorAll = () => [closeButton];
  registry.set('mobileMenuToggle', menuToggle);
  registry.set('mobileMenu', menu);
  try {
    const controller = createMobileMenuController({
      documentObject: documentStub,
      windowObject: windowStub
    });
    controller.initialize();
    // Escape while already closed must not throw or change state.
    menuPanel.dispatchEvent({ type: 'keydown', key: 'Escape', preventDefault() {} });
    assert.equal(menu.hidden, true);
    menuToggle.dispatchEvent({ type: 'click', preventDefault() {} });
    assert.equal(menu.hidden, false);
    // Non-Escape keys leave the menu open.
    menuPanel.dispatchEvent({ type: 'keydown', key: 'ArrowDown' });
    assert.equal(menu.hidden, false);
    menuPanel.dispatchEvent({ type: 'keydown', key: 'Escape', preventDefault() {} });
    assert.equal(menu.hidden, true);
    menuToggle.dispatchEvent({ type: 'click', preventDefault() {} });
    windowStub.dispatchEvent({ type: 'hashchange' });
    assert.equal(menu.hidden, true);
    menuToggle.dispatchEvent({ type: 'click', preventDefault() {} });
    windowStub.dispatchEvent({ type: 'popstate' });
    assert.equal(menu.hidden, true);
  } finally {
    cleanup();
  }
});
// Tab from the last focusable wraps to the first, and Shift+Tab from the
// first wraps back to the last, keeping keyboard focus inside the panel.
test('mobile menu traps focus within the panel', () => {
  const { documentStub, registry, cleanup } = createDomStub();
  const windowStub = createWindowStub(true);
  const menuToggle = createElement('button');
  const menu = createElement('div');
  const menuPanel = createElement('div');
  const firstLink = createElement('a');
  const lastButton = createElement('button');
  menuPanel.classList.add('mobile-menu__panel');
  menuPanel.querySelectorAll = () => [firstLink, lastButton];
  menu.querySelector = selector => {
    if (selector === '.mobile-menu__panel') return menuPanel;
    return null;
  };
  menu.querySelectorAll = () => [];
  registry.set('mobileMenuToggle', menuToggle);
  registry.set('mobileMenu', menu);
  try {
    const controller = createMobileMenuController({
      documentObject: documentStub,
      windowObject: windowStub
    });
    controller.initialize();
    menuToggle.dispatchEvent({ type: 'click', preventDefault() {} });
    documentStub.activeElement = lastButton;
    menuPanel.dispatchEvent({ type: 'keydown', key: 'Tab', preventDefault() {}, shiftKey: false });
    assert.equal(documentStub.activeElement, firstLink);
    documentStub.activeElement = firstLink;
    menuPanel.dispatchEvent({ type: 'keydown', key: 'Tab', preventDefault() {}, shiftKey: true });
    assert.equal(documentStub.activeElement, lastButton);
  } finally {
    cleanup();
  }
});
// Nodes with aria-hidden="true" and nodes lacking a getAttribute method are
// excluded; only genuinely focusable elements survive the filter.
test('resolveFocusableElements filters out aria-hidden nodes', () => {
  const hiddenButton = createElement('button');
  hiddenButton.getAttribute = name => (name === 'aria-hidden' ? 'true' : null);
  const openLink = createElement('a');
  const bareNode = { tagName: 'DIV' };
  const container = {
    querySelectorAll() {
      return [hiddenButton, bareNode, openLink];
    }
  };
  const focusables = resolveFocusableElements(container);
  assert.equal(focusables.length, 1);
  assert.equal(focusables[0], openLink);
});
// Null containers and containers without querySelectorAll yield empty lists.
test('resolveFocusableElements handles empty containers', () => {
  assert.deepEqual(resolveFocusableElements(null), []);
  assert.deepEqual(resolveFocusableElements({}), []);
});
// With no focusable children, opening focuses the panel itself; closing
// restores focus to whatever element was active before the menu opened.
test('mobile menu focuses the panel when no focusables exist', () => {
  const { documentStub, registry, cleanup } = createDomStub();
  const windowStub = createWindowStub(true);
  const menuToggle = createElement('button');
  const menu = createElement('div');
  const menuPanel = createElement('div');
  const lastActive = createElement('button');
  menuPanel.classList.add('mobile-menu__panel');
  menuPanel.querySelectorAll = () => [];
  menu.querySelector = selector => {
    if (selector === '.mobile-menu__panel') return menuPanel;
    return null;
  };
  menu.querySelectorAll = () => [];
  registry.set('mobileMenuToggle', menuToggle);
  registry.set('mobileMenu', menu);
  documentStub.activeElement = lastActive;
  try {
    const controller = createMobileMenuController({
      documentObject: documentStub,
      windowObject: windowStub
    });
    controller.initialize();
    menuToggle.dispatchEvent({ type: 'click', preventDefault() {} });
    assert.equal(documentStub.activeElement, menuPanel);
    menuToggle.dispatchEvent({ type: 'click', preventDefault() {} });
    assert.equal(documentStub.activeElement, lastActive);
  } finally {
    cleanup();
  }
});
// When matchMedia results only expose the deprecated addListener API, the
// controller must still register and survive a media change dispatch.
test('mobile menu registers legacy media query listeners', () => {
  const { documentStub, registry, cleanup } = createDomStub();
  const windowStub = createWindowStubWithListener(true);
  const menuToggle = createElement('button');
  const menu = createElement('div');
  const menuPanel = createElement('div');
  menuPanel.classList.add('mobile-menu__panel');
  menu.querySelector = selector => {
    if (selector === '.mobile-menu__panel') return menuPanel;
    return null;
  };
  menu.querySelectorAll = () => [];
  registry.set('mobileMenuToggle', menuToggle);
  registry.set('mobileMenu', menu);
  try {
    const controller = createMobileMenuController({
      documentObject: documentStub,
      windowObject: windowStub
    });
    controller.initialize();
    windowStub.dispatchMediaChange();
    assert.equal(menuToggle.getAttribute('aria-expanded'), 'false');
  } finally {
    cleanup();
  }
});
// Missing toggle/menu elements must not crash: all controller methods are
// callable no-ops and the body class is never applied.
test('mobile menu safely no-ops without required nodes', () => {
  const { documentStub, cleanup } = createDomStub();
  const windowStub = createWindowStub(true);
  try {
    const controller = createMobileMenuController({
      documentObject: documentStub,
      windowObject: windowStub
    });
    controller.initialize();
    controller.openMenu();
    controller.closeMenu();
    controller.syncLayout();
    assert.equal(documentStub.body.classList.contains('menu-open'), false);
  } finally {
    cleanup();
  }
});
// The public entry point wraps createMobileMenuController and hands back a
// controller object exposing the imperative API (spot-checked via openMenu).
test('initializeMobileMenu returns a controller', () => {
  const { documentStub, registry, cleanup } = createDomStub();
  const windowStub = createWindowStub(true);
  const menuToggle = createElement('button');
  const menu = createElement('div');
  const menuPanel = createElement('div');
  menuPanel.classList.add('mobile-menu__panel');
  menu.querySelector = selector => {
    if (selector === '.mobile-menu__panel') return menuPanel;
    return null;
  };
  menu.querySelectorAll = () => [];
  registry.set('mobileMenuToggle', menuToggle);
  registry.set('mobileMenu', menu);
  try {
    const controller = initializeMobileMenu({
      documentObject: documentStub,
      windowObject: windowStub
    });
    assert.equal(typeof controller.openMenu, 'function');
  } finally {
    cleanup();
  }
});
@@ -405,6 +405,77 @@ test('renderTelemetryCharts renders condensed scatter charts when telemetry exis
assert.equal(html.includes('node-detail__chart-point'), true);
});
test('renderTelemetryCharts expands upper bounds when overflow metrics exceed defaults', () => {
const nowMs = Date.UTC(2025, 0, 8, 12, 0, 0);
const nowSeconds = Math.floor(nowMs / 1000);
const node = {
rawSources: {
telemetry: {
snapshots: [
{
rx_time: nowSeconds - 120,
device_metrics: {
battery_level: 90,
voltage: 7.2,
current: 3.6,
channel_utilization: 45,
air_util_tx: 18,
},
environment_metrics: {
temperature: 45,
relative_humidity: 48,
barometric_pressure: 1250,
gas_resistance: 1200,
iaq: 650,
},
},
],
},
},
};
const html = renderTelemetryCharts(node, { nowMs });
assert.match(html, />7\.2<\/text>/);
assert.match(html, />3\.6<\/text>/);
assert.match(html, />45<\/text>/);
assert.match(html, />650<\/text>/);
assert.match(html, />1100<\/text>/);
});
test('renderTelemetryCharts keeps default bounds when metrics stay within limits', () => {
const nowMs = Date.UTC(2025, 0, 8, 12, 0, 0);
const nowSeconds = Math.floor(nowMs / 1000);
const node = {
rawSources: {
telemetry: {
snapshots: [
{
rx_time: nowSeconds - 180,
device_metrics: {
battery_level: 70,
voltage: 4.5,
current: 1.5,
channel_utilization: 35,
air_util_tx: 15,
},
environment_metrics: {
temperature: 25,
relative_humidity: 50,
barometric_pressure: 1015,
gas_resistance: 1500,
iaq: 200,
},
},
],
},
},
};
const html = renderTelemetryCharts(node, { nowMs });
assert.match(html, />6\.0<\/text>/);
assert.match(html, />3\.0<\/text>/);
assert.match(html, />40<\/text>/);
assert.match(html, />500<\/text>/);
});
test('renderNodeDetailHtml composes the table, neighbors, and messages', () => {
const html = renderNodeDetailHtml(
{
@@ -875,13 +946,19 @@ test('initializeNodeDetailPage reports an error when refresh fails', async () =>
throw new Error('boom');
};
const renderShortHtml = short => `<span>${short}</span>`;
const result = await initializeNodeDetailPage({
document: documentStub,
refreshImpl,
renderShortHtml,
});
assert.equal(result, false);
assert.equal(element.innerHTML.includes('Failed to load'), true);
const originalError = console.error;
console.error = () => {};
try {
const result = await initializeNodeDetailPage({
document: documentStub,
refreshImpl,
renderShortHtml,
});
assert.equal(result, false);
assert.equal(element.innerHTML.includes('Failed to load'), true);
} finally {
console.error = originalError;
}
});
test('initializeNodeDetailPage handles missing reference payloads', async () => {
+25 -21
View File
@@ -65,7 +65,8 @@ function resolveSnapshotList(entry) {
* Build a data model describing the content for chat tabs.
*
* Entries outside the recent activity window, encrypted messages, and
* channels above {@link MAX_CHANNEL_INDEX} are filtered out.
* channels above {@link MAX_CHANNEL_INDEX} are filtered out. Channel
* buckets are only created when messages are present for that channel.
*
* @param {{
* nodes?: Array<Object>,
@@ -102,11 +103,29 @@ export function buildChatTabModel({
const logEntries = [];
const channelBuckets = new Map();
const primaryChannelEnvLabel = normalisePrimaryChannelEnvLabel(primaryChannelFallbackLabel);
const nodeById = new Map();
const nodeByNum = new Map();
const nodeInfoKeys = new Set();
const buildNodeInfoKey = (nodeId, nodeNum, ts) => `${nodeId ?? ''}:${nodeNum ?? ''}:${ts ?? ''}`;
const recordNodeInfoEntry = (ts, nodeId, nodeNum) => {
if (ts == null) return;
const key = buildNodeInfoKey(nodeId, nodeNum, ts);
if (nodeInfoKeys.has(key)) return;
const node = nodeId && nodeById.has(nodeId)
? nodeById.get(nodeId)
: (nodeNum != null && nodeByNum.has(nodeNum) ? nodeByNum.get(nodeNum) : null);
if (!node) return;
nodeInfoKeys.add(key);
logEntries.push({ ts, type: CHAT_LOG_ENTRY_TYPES.NODE_INFO, node, nodeId, nodeNum });
};
for (const node of nodes || []) {
if (!node) continue;
const nodeId = normaliseNodeId(node);
const nodeNum = normaliseNodeNum(node);
if (nodeId) nodeById.set(nodeId, node);
if (nodeNum != null) nodeByNum.set(nodeNum, node);
const firstTs = resolveTimestampSeconds(node.first_heard ?? node.firstHeard, node.first_heard_iso ?? node.firstHeardIso);
if (firstTs != null && firstTs >= cutoff) {
logEntries.push({ ts: firstTs, type: CHAT_LOG_ENTRY_TYPES.NODE_NEW, node, nodeId, nodeNum });
@@ -114,6 +133,7 @@ export function buildChatTabModel({
const lastTs = resolveTimestampSeconds(node.last_heard ?? node.lastHeard, node.last_seen_iso ?? node.lastSeenIso);
if (lastTs != null && lastTs >= cutoff) {
logEntries.push({ ts: lastTs, type: CHAT_LOG_ENTRY_TYPES.NODE_INFO, node, nodeId, nodeNum });
nodeInfoKeys.add(buildNodeInfoKey(nodeId, nodeNum, lastTs));
}
}
@@ -129,6 +149,7 @@ export function buildChatTabModel({
const nodeId = normaliseNodeId(snapshot);
const nodeNum = normaliseNodeNum(snapshot);
logEntries.push({ ts, type: CHAT_LOG_ENTRY_TYPES.TELEMETRY, telemetry: snapshot, nodeId, nodeNum });
recordNodeInfoEntry(ts, nodeId, nodeNum);
}
}
@@ -144,6 +165,7 @@ export function buildChatTabModel({
const nodeId = normaliseNodeId(snapshot);
const nodeNum = normaliseNodeNum(snapshot);
logEntries.push({ ts, type: CHAT_LOG_ENTRY_TYPES.POSITION, position: snapshot, nodeId, nodeNum });
recordNodeInfoEntry(ts, nodeId, nodeNum);
}
}
@@ -157,6 +179,7 @@ export function buildChatTabModel({
const nodeNum = normaliseNodeNum(snapshot);
const neighborId = normaliseNeighborId(snapshot);
logEntries.push({ ts, type: CHAT_LOG_ENTRY_TYPES.NEIGHBOR, neighbor: snapshot, nodeId, nodeNum, neighborId });
recordNodeInfoEntry(ts, nodeId, nodeNum);
}
}
@@ -186,6 +209,7 @@ export function buildChatTabModel({
nodeId: firstHop.id ?? null,
nodeNum: firstHop.num ?? null
});
recordNodeInfoEntry(ts, firstHop.id ?? null, firstHop.num ?? null);
}
const encryptedLogEntries = [];
@@ -287,26 +311,6 @@ export function buildChatTabModel({
logEntries.sort((a, b) => a.ts - b.ts);
let hasPrimaryBucket = false;
for (const bucket of channelBuckets.values()) {
if (bucket.index === 0) {
hasPrimaryBucket = true;
break;
}
}
if (!hasPrimaryBucket) {
const bucketKey = '0';
channelBuckets.set(bucketKey, {
key: bucketKey,
id: buildChannelTabId(bucketKey),
index: 0,
label: '0',
entries: [],
labelPriority: CHANNEL_LABEL_PRIORITY.INDEX,
isPrimaryFallback: true
});
}
const channels = Array.from(channelBuckets.values()).sort((a, b) => {
if (a.index !== b.index) {
return a.index - b.index;
+115
View File
@@ -15,6 +15,7 @@
*/
import { readAppConfig } from './config.js';
import { resolveLegendVisibility } from './map-legend-visibility.js';
import { mergeConfig } from './settings.js';
import { roleColors } from './role-helpers.js';
@@ -204,6 +205,31 @@ function hasNumberValue(value) {
return toFiniteNumber(value) != null;
}
/**
* Toggle the legend hidden class on a container element.
*
* @param {HTMLElement|{ classList?: { toggle?: Function }, className?: string }} container Legend container.
* @param {boolean} hidden Whether the legend should be hidden.
* @returns {void}
*/
function toggleLegendHiddenClass(container, hidden) {
if (!container) return;
if (container.classList && typeof container.classList.toggle === 'function') {
container.classList.toggle('legend-hidden', hidden);
return;
}
if (typeof container.className === 'string') {
const classes = container.className.split(/\s+/).filter(Boolean);
const hasHidden = classes.includes('legend-hidden');
if (hidden && !hasHidden) {
classes.push('legend-hidden');
} else if (!hidden && hasHidden) {
classes.splice(classes.indexOf('legend-hidden'), 1);
}
container.className = classes.join(' ');
}
}
const TILE_LAYER_URL = 'https://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png';
/**
@@ -223,6 +249,7 @@ export async function initializeFederationPage(options = {}) {
const fetchImpl = options.fetchImpl || fetch;
const leaflet = options.leaflet || (typeof window !== 'undefined' ? window.L : null);
const mapContainer = document.getElementById('map');
const mapPanel = document.getElementById('mapPanel');
const tableEl = document.getElementById('instances');
const tableBody = document.querySelector('#instances tbody');
const statusEl = document.getElementById('status');
@@ -239,6 +266,13 @@ export async function initializeFederationPage(options = {}) {
let map = null;
let markersLayer = null;
let tileLayer = null;
let legendContainer = null;
let legendToggleButton = null;
let legendVisible = true;
const legendCollapsedValue = mapPanel ? mapPanel.getAttribute('data-legend-collapsed') : null;
const legendDefaultCollapsed = legendCollapsedValue == null
? true
: legendCollapsedValue.trim() !== 'false';
const tableSorters = {
name: { getValue: inst => inst.name ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
domain: { getValue: inst => inst.domain ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
@@ -357,6 +391,37 @@ export async function initializeFederationPage(options = {}) {
syncSortIndicators();
};
/**
* Update the pressed state of the legend visibility toggle button.
*
* @returns {void}
*/
const updateLegendToggleState = () => {
if (!legendToggleButton) return;
const baseLabel = legendVisible ? 'Hide map legend' : 'Show map legend';
const baseText = legendVisible ? 'Hide legend' : 'Show legend';
legendToggleButton.setAttribute('aria-pressed', legendVisible ? 'true' : 'false');
legendToggleButton.setAttribute('aria-label', baseLabel);
legendToggleButton.textContent = baseText;
};
/**
* Show or hide the map legend component and keep the toggle button in sync.
*
* Updates the captured `legendVisible` closure flag, toggles the hidden CSS
* class on the legend container, and mirrors the state via `aria-hidden`.
*
* @param {boolean} visible Whether the legend should be displayed.
* @returns {void}
*/
const setLegendVisibility = visible => {
legendVisible = Boolean(visible);
if (legendContainer) {
toggleLegendHiddenClass(legendContainer, !legendVisible);
// Some test doubles lack setAttribute; guard before calling it.
if (typeof legendContainer.setAttribute === 'function') {
legendContainer.setAttribute('aria-hidden', legendVisible ? 'false' : 'true');
}
}
updateLegendToggleState();
};
/**
* Wire up click and keyboard handlers for sortable headers.
*
@@ -483,6 +548,15 @@ export async function initializeFederationPage(options = {}) {
const canRenderLegend =
typeof leaflet.control === 'function' && leaflet.DomUtil && typeof leaflet.DomUtil.create === 'function';
if (canRenderLegend) {
const legendMediaQuery = typeof window !== 'undefined' && window.matchMedia
? window.matchMedia('(max-width: 1024px)')
: null;
const initialLegendVisible = resolveLegendVisibility({
defaultCollapsed: legendDefaultCollapsed,
mediaQueryMatches: legendMediaQuery ? legendMediaQuery.matches : false
});
legendVisible = initialLegendVisible;
const legendStops = NODE_COUNT_COLOR_STOPS.map((stop, index) => {
const lower = index === 0 ? 0 : NODE_COUNT_COLOR_STOPS[index - 1].limit;
const upper = stop.limit - 1;
@@ -495,7 +569,11 @@ export async function initializeFederationPage(options = {}) {
const legend = leaflet.control({ position: 'bottomright' });
legend.onAdd = function onAdd() {
const container = leaflet.DomUtil.create('div', 'legend legend--instances');
container.id = 'federationLegend';
container.setAttribute('aria-label', 'Active nodes legend');
container.setAttribute('role', 'region');
container.setAttribute('aria-hidden', initialLegendVisible ? 'false' : 'true');
toggleLegendHiddenClass(container, !initialLegendVisible);
const header = leaflet.DomUtil.create('div', 'legend-header', container);
const title = leaflet.DomUtil.create('span', 'legend-title', header);
title.textContent = 'Active nodes';
@@ -508,9 +586,46 @@ export async function initializeFederationPage(options = {}) {
const label = leaflet.DomUtil.create('span', 'legend-label', item);
label.textContent = stop.label;
});
legendContainer = container;
return container;
};
legend.addTo(map);
const legendToggleControl = leaflet.control({ position: 'bottomright' });
legendToggleControl.onAdd = function onAdd() {
const container = leaflet.DomUtil.create('div', 'leaflet-control legend-toggle');
const button = leaflet.DomUtil.create('button', 'legend-toggle-button', container);
button.type = 'button';
button.setAttribute('aria-controls', 'federationLegend');
button.addEventListener?.('click', event => {
event.preventDefault();
event.stopPropagation();
setLegendVisibility(!legendVisible);
});
legendToggleButton = button;
updateLegendToggleState();
if (leaflet.DomEvent && typeof leaflet.DomEvent.disableClickPropagation === 'function') {
leaflet.DomEvent.disableClickPropagation(container);
}
if (leaflet.DomEvent && typeof leaflet.DomEvent.disableScrollPropagation === 'function') {
leaflet.DomEvent.disableScrollPropagation(container);
}
return container;
};
legendToggleControl.addTo(map);
setLegendVisibility(initialLegendVisible);
if (legendMediaQuery) {
const changeHandler = event => {
if (legendDefaultCollapsed) return;
setLegendVisibility(!event.matches);
};
if (typeof legendMediaQuery.addEventListener === 'function') {
legendMediaQuery.addEventListener('change', changeHandler);
} else if (typeof legendMediaQuery.addListener === 'function') {
legendMediaQuery.addListener(changeHandler);
}
}
}
for (const instance of instances) {
+76 -6
View File
@@ -35,11 +35,58 @@ function resolveInstanceLabel(entry) {
}
/**
* Construct a navigable URL for the provided instance domain.
* Update federation navigation labels with the instance count.
*
* @param {string} domain Instance domain as returned by the federation catalog.
* @returns {string|null} Navigable absolute URL or ``null`` when the domain is empty.
* @param {{
* documentObject?: Document | null,
* count: number
* }} options Configuration for updating the navigation labels.
* @returns {void}
*/
/**
 * Update federation navigation labels with the instance count.
 *
 * Each `.js-federation-nav` link caches its base label in
 * `data-federation-label`, so repeated updates replace the previous
 * "(n)" suffix instead of accumulating.
 *
 * @param {{
 *   documentObject?: Document | null,
 *   count: number
 * }} options Configuration for updating the navigation labels.
 * @returns {void}
 */
function updateFederationNavCount(options) {
  const { documentObject, count } = options;
  const usableCount = typeof count === 'number' && Number.isFinite(count);
  if (!documentObject || !usableCount) {
    return;
  }
  const displayCount = Math.max(0, Math.floor(count));
  // Prefer querying the document itself; fall back to its body for doubles.
  const searchRoot = typeof documentObject.querySelectorAll === 'function'
    ? documentObject
    : documentObject.body;
  if (!searchRoot || typeof searchRoot.querySelectorAll !== 'function') {
    return;
  }
  for (const link of Array.from(searchRoot.querySelectorAll('.js-federation-nav'))) {
    if (!link || typeof link !== 'object') {
      continue;
    }
    const dataset = link.dataset || {};
    let baseLabel = typeof dataset.federationLabel === 'string'
      ? dataset.federationLabel.trim()
      : '';
    if (!baseLabel) {
      // Derive the label from the visible text, dropping any existing "(n)".
      baseLabel = typeof link.textContent === 'string'
        ? link.textContent.split('(')[0].trim()
        : 'Federation';
    }
    if (!baseLabel) {
      baseLabel = 'Federation';
    }
    dataset.federationLabel = baseLabel;
    link.textContent = `${baseLabel} (${displayCount})`;
  }
}
/**
* Construct a navigable URL for the provided instance domain.
*
* The returned URL is guaranteed to use HTTP(S) and a host-only component to avoid
* interpreting arbitrary DOM-controlled text as executable content.
*
* @param {string} domain Instance domain as returned by the federation catalog.
* @returns {string|null} Navigable absolute URL or ``null`` when the domain is empty or unsafe.
*/
export function buildInstanceUrl(domain) {
if (typeof domain !== 'string') {
return null;
@@ -50,8 +97,29 @@ export function buildInstanceUrl(domain) {
return null;
}
if (/^[a-zA-Z][a-zA-Z\d+.-]*:\/\//.test(trimmed)) {
return trimmed;
const allowedHostPattern = /^[a-zA-Z0-9.-]+(?::\d{1,5})?$/;
if (/^https?:\/\//i.test(trimmed)) {
try {
const parsed = new URL(trimmed);
if (!['http:', 'https:'].includes(parsed.protocol)) {
return null;
}
const sanitizedHost = parsed.host.trim();
if (!allowedHostPattern.test(sanitizedHost)) {
return null;
}
return `${parsed.protocol}//${sanitizedHost}`;
} catch (error) {
console.warn('Rejected invalid instance URL', error);
return null;
}
}
if (!allowedHostPattern.test(trimmed)) {
return null;
}
return `https://${trimmed}`;
@@ -142,6 +210,8 @@ export async function initializeInstanceSelector(options) {
return;
}
updateFederationNavCount({ documentObject: doc, count: payload.length });
const sanitizedDomain = typeof instanceDomain === 'string' ? instanceDomain.trim().toLowerCase() : null;
const sortedEntries = payload
@@ -214,4 +284,4 @@ export async function initializeInstanceSelector(options) {
});
}
export const __test__ = { resolveInstanceLabel };
export const __test__ = { resolveInstanceLabel, updateFederationNavCount };
+14 -2
View File
@@ -18,6 +18,7 @@ import { computeBoundingBox, computeBoundsForPoints, haversineDistanceKm } from
import { createMapAutoFitController } from './map-auto-fit-controller.js';
import { resolveAutoFitBoundsConfig } from './map-auto-fit-settings.js';
import { attachNodeInfoRefreshToMarker, overlayToPopupNode } from './map-marker-node-info.js';
import { resolveLegendVisibility } from './map-legend-visibility.js';
import { createMapFocusHandler, DEFAULT_NODE_FOCUS_ZOOM } from './nodes-map-focus.js';
import { enhanceCoordinateCell } from './nodes-coordinate-links.js';
import { createShortInfoOverlayStack } from './short-info-overlay-manager.js';
@@ -43,6 +44,7 @@ import {
formatChatPresetTag
} from './chat-format.js';
import { initializeInstanceSelector } from './instance-selector.js';
import { initializeMobileMenu } from './mobile-menu.js';
import { MESSAGE_LIMIT, normaliseMessageLimit } from './message-limit.js';
import { CHAT_LOG_ENTRY_TYPES, buildChatTabModel, MAX_CHANNEL_INDEX } from './chat-log-tabs.js';
import { renderChatTabs } from './chat-tabs.js';
@@ -116,7 +118,10 @@ export function initializeApp(config) {
: false;
const isDashboardView = bodyClassList ? bodyClassList.contains('view-dashboard') : false;
const isChatView = bodyClassList ? bodyClassList.contains('view-chat') : false;
const isMapView = bodyClassList ? bodyClassList.contains('view-map') : false;
const mapZoomOverride = Number.isFinite(config.mapZoom) ? Number(config.mapZoom) : null;
initializeMobileMenu({ documentObject: document, windowObject: window });
/**
* Column sorter configuration for the node table.
*
@@ -190,7 +195,7 @@ export function initializeApp(config) {
});
const NODE_LIMIT = 1000;
const TRACE_LIMIT = 200;
const TRACE_MAX_AGE_SECONDS = 7 * 24 * 60 * 60;
const TRACE_MAX_AGE_SECONDS = 28 * 24 * 60 * 60;
const SNAPSHOT_LIMIT = SNAPSHOT_WINDOW;
const CHAT_LIMIT = MESSAGE_LIMIT;
const CHAT_RECENT_WINDOW_SECONDS = 7 * 24 * 60 * 60;
@@ -435,6 +440,7 @@ export function initializeApp(config) {
const mapFullscreenToggle = document.getElementById('mapFullscreenToggle');
const fullscreenContainer = mapPanel || mapContainer;
const isFederationView = bodyClassList ? bodyClassList.contains('view-federation') : false;
const legendDefaultCollapsed = mapPanel ? mapPanel.dataset.legendCollapsed === 'true' : false;
let mapStatusEl = null;
let map = null;
let mapCenterLatLng = null;
@@ -1526,8 +1532,14 @@ export function initializeApp(config) {
legendToggleControl.addTo(map);
const legendMediaQuery = window.matchMedia('(max-width: 1024px)');
setLegendVisibility(!legendMediaQuery.matches);
const initialLegendVisible = resolveLegendVisibility({
defaultCollapsed: legendDefaultCollapsed,
mediaQueryMatches: legendMediaQuery.matches,
viewMode: isDashboardView ? 'dashboard' : (isMapView ? 'map' : undefined)
});
setLegendVisibility(initialLegendVisible);
legendMediaQuery.addEventListener('change', event => {
if (legendDefaultCollapsed || isDashboardView || isMapView) return;
setLegendVisibility(!event.matches);
});
} else if (mapContainer && !hasLeaflet) {
@@ -0,0 +1,26 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Resolve the initial visibility of the map legend.
 *
 * The legend is forced closed when the page requests a collapsed default or
 * when rendering the dashboard/map views; otherwise visibility follows the
 * (narrow-viewport) media query: visible only when it does NOT match.
 *
 * @param {{ defaultCollapsed: boolean, mediaQueryMatches: boolean, viewMode?: string }} options
 * @returns {boolean} True when the legend should be visible.
 */
export function resolveLegendVisibility({ defaultCollapsed, mediaQueryMatches, viewMode }) {
  const forcedClosed = Boolean(defaultCollapsed) || viewMode === 'dashboard' || viewMode === 'map';
  return forcedClosed ? false : !mediaQueryMatches;
}
+271
View File
@@ -0,0 +1,271 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Breakpoint below which the slide-in mobile menu replaces the inline nav.
const MOBILE_MENU_MEDIA_QUERY = '(max-width: 900px)';

// Selector covering the standard set of keyboard-focusable elements.
const FOCUSABLE_SELECTOR = [
  'a[href]',
  'button:not([disabled])',
  'input:not([disabled])',
  'select:not([disabled])',
  'textarea:not([disabled])',
  '[tabindex]:not([tabindex="-1"])'
].join(', ');

/**
 * Collect the elements that can receive focus within a container.
 *
 * Elements explicitly marked `aria-hidden="true"` are excluded, as are
 * candidates that do not expose `getAttribute`.
 *
 * @param {?Element} container DOM node hosting focusable descendants.
 * @returns {Array<Element>} Ordered list of focusable elements.
 */
function resolveFocusableElements(container) {
  const canQuery = container && typeof container.querySelectorAll === 'function';
  if (!canQuery) {
    return [];
  }
  const matches = Array.from(container.querySelectorAll(FOCUSABLE_SELECTOR));
  return matches.filter(element => {
    const readable = element && typeof element.getAttribute === 'function';
    return readable && element.getAttribute('aria-hidden') !== 'true';
  });
}
/**
* Build a menu controller for the slide-in mobile navigation: it owns the
* open/closed state, traps Tab focus inside the panel while open, closes on
* Escape and on navigation changes, and restores focus to the trigger.
*
* All DOM lookups happen once at construction time; elements added to the
* menu afterwards are not wired up.
*
* @param {{
* documentObject?: Document,
* windowObject?: Window
* }} [options]
* @returns {{
* initialize: () => void,
* openMenu: () => void,
* closeMenu: () => void,
* syncLayout: () => void
* }}
*/
function createMobileMenuController(options = {}) {
const documentObject = options.documentObject || document;
const windowObject = options.windowObject || window;
const menuToggle = documentObject.getElementById('mobileMenuToggle');
const menu = documentObject.getElementById('mobileMenu');
const menuPanel = menu ? menu.querySelector('.mobile-menu__panel') : null;
// Any element with data-mobile-menu-close dismisses the menu on click.
const closeTriggers = menu ? Array.from(menu.querySelectorAll('[data-mobile-menu-close]')) : [];
const menuLinks = menu ? Array.from(menu.querySelectorAll('a')) : [];
const body = documentObject.body;
// matchMedia may be absent in non-browser test environments.
const mediaQuery = windowObject.matchMedia
? windowObject.matchMedia(MOBILE_MENU_MEDIA_QUERY)
: null;
let isOpen = false;
// Element focused before the menu opened; focus returns here on close.
let lastActive = null;
/**
* Toggle the ``aria-expanded`` state on the menu trigger.
*
* @param {boolean} expanded Whether the menu is open.
* @returns {void}
*/
function setExpandedState(expanded) {
if (!menuToggle || typeof menuToggle.setAttribute !== 'function') {
return;
}
menuToggle.setAttribute('aria-expanded', expanded ? 'true' : 'false');
}
/**
* Responsive layout hook invoked on media-query changes and before the menu
* opens. Currently a deliberate no-op.
*
* NOTE(review): the previous doc claimed this synchronized "meta row
* placement", but no such logic exists in the body — confirm whether the
* placeholder is intentional or leftover from a removed feature.
*
* @returns {void}
*/
function syncLayout() {
return;
}
/**
* Open the slide-in menu and trap focus within the panel.
*
* Records the previously focused element, reveals the menu, marks the body
* with `menu-open`, and moves focus to the first focusable descendant (or
* the panel itself when none exists).
*
* @returns {void}
*/
function openMenu() {
if (!menu || !menuToggle || !menuPanel) {
return;
}
syncLayout();
menu.hidden = false;
menu.classList.add('is-open');
if (body && body.classList) {
body.classList.add('menu-open');
}
setExpandedState(true);
isOpen = true;
lastActive = documentObject.activeElement || null;
const focusables = resolveFocusableElements(menuPanel);
const focusTarget = focusables[0] || menuPanel;
if (focusTarget && typeof focusTarget.focus === 'function') {
focusTarget.focus();
}
}
/**
* Close the menu and restore focus to the element that was active before it
* opened (typically the trigger button).
*
* @returns {void}
*/
function closeMenu() {
if (!menu || !menuToggle) {
return;
}
menu.classList.remove('is-open');
menu.hidden = true;
if (body && body.classList) {
body.classList.remove('menu-open');
}
setExpandedState(false);
isOpen = false;
if (lastActive && typeof lastActive.focus === 'function') {
lastActive.focus();
}
}
/**
* Toggle open or closed based on the trigger interaction.
*
* @param {Event} event Click event originating from the trigger.
* @returns {void}
*/
function handleToggleClick(event) {
if (event && typeof event.preventDefault === 'function') {
event.preventDefault();
}
if (isOpen) {
closeMenu();
} else {
openMenu();
}
}
/**
* Trap tab focus within the menu panel while open; Escape closes the menu.
*
* Shift+Tab from the first focusable wraps to the last, and Tab from the
* last wraps to the first.
*
* @param {KeyboardEvent} event Keydown event from the panel.
* @returns {void}
*/
function handleKeydown(event) {
if (!isOpen || !event) {
return;
}
if (event.key === 'Escape') {
event.preventDefault();
closeMenu();
return;
}
if (event.key !== 'Tab') {
return;
}
// Focusables are re-resolved on every keystroke so dynamic content stays trapped.
const focusables = resolveFocusableElements(menuPanel);
if (!focusables.length) {
return;
}
const first = focusables[0];
const last = focusables[focusables.length - 1];
const active = documentObject.activeElement;
if (event.shiftKey && active === first) {
event.preventDefault();
last.focus();
} else if (!event.shiftKey && active === last) {
event.preventDefault();
first.focus();
}
}
/**
* Close the menu when navigation state changes (hashchange/popstate).
*
* @returns {void}
*/
function handleRouteChange() {
if (isOpen) {
closeMenu();
}
}
/**
* Attach event listeners and sync initial layout. A no-op when either the
* trigger or the menu element is missing from the page.
*
* @returns {void}
*/
function initialize() {
if (!menuToggle || !menu) {
return;
}
menuToggle.addEventListener('click', handleToggleClick);
closeTriggers.forEach(trigger => {
trigger.addEventListener('click', closeMenu);
});
// Following any menu link also dismisses the menu.
menuLinks.forEach(link => {
link.addEventListener('click', closeMenu);
});
if (menuPanel && typeof menuPanel.addEventListener === 'function') {
menuPanel.addEventListener('keydown', handleKeydown);
}
if (mediaQuery) {
// addListener is the legacy fallback for older MediaQueryList implementations.
if (typeof mediaQuery.addEventListener === 'function') {
mediaQuery.addEventListener('change', syncLayout);
} else if (typeof mediaQuery.addListener === 'function') {
mediaQuery.addListener(syncLayout);
}
}
if (windowObject && typeof windowObject.addEventListener === 'function') {
windowObject.addEventListener('hashchange', handleRouteChange);
windowObject.addEventListener('popstate', handleRouteChange);
}
syncLayout();
setExpandedState(false);
}
return {
initialize,
openMenu,
closeMenu,
syncLayout,
};
}
/**
 * Initialize the mobile menu using the live DOM environment.
 *
 * Convenience wrapper that builds a controller, wires its listeners via
 * `initialize()`, and hands the controller back for further interaction.
 *
 * @param {{
 *   documentObject?: Document,
 *   windowObject?: Window
 * }} [options]
 * @returns {{
 *   initialize: () => void,
 *   openMenu: () => void,
 *   closeMenu: () => void,
 *   syncLayout: () => void
 * }}
 */
export function initializeMobileMenu(options = {}) {
  const menuController = createMobileMenuController(options);
  menuController.initialize();
  return menuController;
}
// Internal surface exported solely for unit tests; not part of the public API.
export const __test__ = {
createMobileMenuController,
resolveFocusableElements,
};
+51 -7
View File
@@ -68,6 +68,7 @@ const TELEMETRY_CHART_SPECS = Object.freeze([
max: 6,
ticks: 3,
color: '#9ebcda',
allowUpperOverflow: true,
},
{
id: 'current',
@@ -77,6 +78,7 @@ const TELEMETRY_CHART_SPECS = Object.freeze([
max: 3,
ticks: 3,
color: '#3182bd',
allowUpperOverflow: true,
},
],
series: [
@@ -156,6 +158,7 @@ const TELEMETRY_CHART_SPECS = Object.freeze([
max: 40,
ticks: 4,
color: '#fc8d59',
allowUpperOverflow: true,
},
{
id: 'humidity',
@@ -220,6 +223,7 @@ const TELEMETRY_CHART_SPECS = Object.freeze([
max: 500,
ticks: 5,
color: '#636363',
allowUpperOverflow: true,
},
],
series: [
@@ -1004,6 +1008,31 @@ function buildSeriesPoints(entries, fields, domainStart, domainEnd) {
return points;
}
/**
 * Resolve the effective axis maximum when upper overflow is allowed.
 *
 * When the axis opts in via `allowUpperOverflow: true` and any finite data
 * point on that axis exceeds the configured `max`, the observed maximum is
 * used instead so the chart never clips the data. Otherwise the configured
 * `max` is returned unchanged (or `undefined` for a missing axis).
 *
 * @param {Object} axis Axis descriptor.
 * @param {Array<{axisId: string, points: Array<{timestamp: number, value: number}>}>} seriesEntries Series entries.
 * @returns {number} Effective axis max.
 */
function resolveAxisMax(axis, seriesEntries) {
  if (!axis || axis.allowUpperOverflow !== true) {
    return axis?.max;
  }
  const finiteValues = [];
  for (const entry of seriesEntries) {
    const relevant = entry && entry.axisId === axis.id && Array.isArray(entry.points);
    if (!relevant) {
      continue;
    }
    for (const point of entry.points) {
      if (point && Number.isFinite(point.value)) {
        finiteValues.push(point.value);
      }
    }
  }
  const observedMax = finiteValues.length ? Math.max(...finiteValues) : null;
  const overflows = observedMax != null && Number.isFinite(axis.max) && observedMax > axis.max;
  return overflows ? observedMax : axis.max;
}
/**
* Render a telemetry series as circles plus an optional translucent guide line.
*
@@ -1133,33 +1162,48 @@ function renderTelemetryChart(spec, entries, nowMs, chartOptions = {}) {
const domainEnd = nowMs;
const domainStart = nowMs - windowMs;
const dims = createChartDimensions(spec);
const axisMap = new Map(spec.axes.map(axis => [axis.id, axis]));
const seriesEntries = spec.series
.map(series => {
const axis = axisMap.get(series.axis);
if (!axis) return null;
const points = buildSeriesPoints(entries, series.fields, domainStart, domainEnd);
if (points.length === 0) return null;
return { config: series, axis, points };
return { config: series, axisId: series.axis, points };
})
.filter(entry => entry != null);
if (seriesEntries.length === 0) {
return '';
}
const axesMarkup = spec.axes.map(axis => renderYAxis(axis, dims)).join('');
const adjustedAxes = spec.axes.map(axis => {
const resolvedMax = resolveAxisMax(axis, seriesEntries);
if (resolvedMax != null && resolvedMax !== axis.max) {
return { ...axis, max: resolvedMax };
}
return axis;
});
const axisMap = new Map(adjustedAxes.map(axis => [axis.id, axis]));
const plottedSeries = seriesEntries
.map(series => {
const axis = axisMap.get(series.axisId);
if (!axis) return null;
return { config: series.config, axis, points: series.points };
})
.filter(entry => entry != null);
if (plottedSeries.length === 0) {
return '';
}
const axesMarkup = adjustedAxes.map(axis => renderYAxis(axis, dims)).join('');
const tickBuilder = typeof chartOptions.xAxisTickBuilder === 'function' ? chartOptions.xAxisTickBuilder : buildMidnightTicks;
const tickFormatter = typeof chartOptions.xAxisTickFormatter === 'function' ? chartOptions.xAxisTickFormatter : formatCompactDate;
const ticks = tickBuilder(nowMs, windowMs);
const xAxisMarkup = renderXAxis(dims, domainStart, domainEnd, ticks, { labelFormatter: tickFormatter });
const seriesMarkup = seriesEntries
const seriesMarkup = plottedSeries
.map(series =>
renderTelemetrySeries(series.config, series.points, series.axis, dims, domainStart, domainEnd, {
lineReducer: chartOptions.lineReducer,
}),
)
.join('');
const legendItems = seriesEntries
const legendItems = plottedSeries
.map(series => {
const legendLabel = stringOrNull(series.config.legend) ?? series.config.label;
return `
+244 -11
View File
@@ -30,6 +30,9 @@
--input-border: rgba(12, 15, 18, 0.18);
--input-placeholder: rgba(12, 15, 18, 0.45);
--control-accent: var(--accent);
--announcement-bg: #fff4d6;
--announcement-fg: #7a3f00;
--announcement-border: #f0c05b;
--pad: 16px;
--map-tile-filter-light: grayscale(1) saturate(0) brightness(0.92) contrast(1.05);
--map-tile-filter-dark: grayscale(1) invert(1) brightness(0.9) contrast(1.08);
@@ -59,6 +62,9 @@ body.dark {
--input-border: rgba(230, 235, 240, 0.24);
--input-placeholder: rgba(230, 235, 240, 0.55);
--control-accent: var(--accent);
--announcement-bg: #3b2500;
--announcement-fg: #ffd184;
--announcement-border: #a56a00;
}
html,
@@ -215,25 +221,237 @@ h1 {
.site-header {
display: flex;
flex-wrap: wrap;
align-items: center;
justify-content: space-between;
gap: 16px;
min-height: 56px;
padding: 4px 0;
margin-bottom: 8px;
}
.site-header__left,
.site-header__right {
display: flex;
align-items: center;
gap: 12px;
margin-bottom: 8px;
}
.site-header__left {
flex: 1 1 auto;
min-width: 0;
}
.site-header__right {
flex: 0 0 auto;
margin-left: auto;
}
.announcement-banner {
display: flex;
align-items: center;
justify-content: center;
height: 1.6em;
padding: 0 var(--pad);
border-radius: 999px;
background: var(--announcement-bg);
color: var(--announcement-fg);
border: 1px solid var(--announcement-border);
box-sizing: border-box;
overflow: hidden;
}
.announcement-banner__content {
margin: 0;
line-height: 1.6;
text-align: center;
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.site-title {
display: inline-flex;
align-items: center;
gap: 12px;
min-width: 0;
}
.site-title-text {
min-width: 0;
max-width: 100%;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
}
.site-title img {
width: 52px;
height: 52px;
width: 36px;
height: 36px;
display: block;
border-radius: 12px;
}
.site-nav {
display: flex;
align-items: center;
gap: 8px;
flex-wrap: wrap;
}
.site-nav__link {
display: inline-flex;
align-items: center;
gap: 6px;
padding: 6px 12px;
border-radius: 999px;
color: var(--fg);
text-decoration: none;
border: 1px solid transparent;
font-size: 14px;
}
.site-nav__link:hover {
background: var(--card);
}
.site-nav__link.is-active {
border-color: var(--accent);
color: var(--accent);
background: transparent;
font-weight: 600;
}
.site-nav__link:focus-visible {
outline: 2px solid var(--accent);
outline-offset: 2px;
}
.menu-toggle {
display: none;
}
.menu-toggle:focus-visible {
outline: 2px solid var(--accent);
outline-offset: 2px;
}
.mobile-menu {
position: fixed;
inset: 0;
z-index: 1200;
display: flex;
justify-content: flex-end;
pointer-events: none;
}
.mobile-menu[hidden] {
display: none;
}
.mobile-menu__backdrop {
flex: 1 1 auto;
background: rgba(0, 0, 0, 0.4);
opacity: 0;
transition: opacity 200ms ease;
}
.mobile-menu__panel {
width: min(320px, 86vw);
background: var(--bg2);
color: var(--fg);
padding: 16px;
display: flex;
flex-direction: column;
gap: 16px;
height: 100%;
overflow-y: auto;
transform: translateX(100%);
transition: transform 220ms ease;
box-shadow: -12px 0 32px rgba(0, 0, 0, 0.3);
}
.mobile-menu__header {
display: flex;
align-items: center;
justify-content: space-between;
gap: 12px;
}
.mobile-menu__title {
margin: 0;
font-size: 16px;
}
.mobile-menu__close:focus-visible {
outline: 2px solid var(--accent);
outline-offset: 2px;
}
.mobile-nav {
display: flex;
flex-direction: column;
gap: 8px;
}
.mobile-nav__link {
display: inline-flex;
align-items: center;
padding: 8px 10px;
border-radius: 10px;
color: var(--fg);
text-decoration: none;
border: 1px solid transparent;
}
.mobile-nav__link.is-active {
border-color: var(--accent);
color: var(--accent);
font-weight: 600;
}
.mobile-nav__link:focus-visible {
outline: 2px solid var(--accent);
outline-offset: 2px;
}
.mobile-menu.is-open {
pointer-events: auto;
}
.mobile-menu.is-open .mobile-menu__backdrop {
opacity: 1;
}
.mobile-menu.is-open .mobile-menu__panel {
transform: translateX(0);
}
.menu-open {
overflow: hidden;
}
.section-link {
display: inline-flex;
align-items: center;
gap: 6px;
padding: 6px 10px;
border-radius: 999px;
border: 1px solid var(--line);
color: var(--fg);
text-decoration: none;
font-size: 14px;
}
.section-link:hover {
border-color: var(--accent);
color: var(--accent);
}
.section-link:focus-visible {
outline: 2px solid var(--accent);
outline-offset: 2px;
}
.meta {
color: #555;
margin-bottom: 12px;
@@ -282,11 +500,29 @@ h1 {
@media (max-width: 900px) {
.site-header {
flex-direction: column;
align-items: flex-start;
margin-bottom: 4px;
}
.site-header__left {
flex-wrap: nowrap;
}
.site-header__left--federation {
flex-wrap: wrap;
}
.site-nav {
display: none;
}
.menu-toggle {
display: inline-flex;
}
.instance-selector {
flex: 0 1 auto;
}
.instance-selector,
.instance-select {
width: 100%;
}
@@ -296,6 +532,7 @@ h1 {
}
}
.pill {
display: inline-block;
padding: 2px 8px;
@@ -1694,10 +1931,6 @@ input[type="radio"] {
gap: 12px;
}
.controls--full-screen {
grid-template-columns: minmax(0, 1fr) auto;
}
.controls .filter-input {
width: 100%;
}
+1327 -17
View File
File diff suppressed because it is too large Load Diff
+18
View File
@@ -516,6 +516,24 @@ RSpec.describe PotatoMesh::Config do
end
end
# Verifies ANNOUNCEMENT environment variable handling: unset or
# whitespace-only values normalise to nil, real text is trimmed.
describe ".announcement" do
it "returns nil when unset or blank" do
within_env("ANNOUNCEMENT" => nil) do
expect(described_class.announcement).to be_nil
end
# Whitespace-only values behave the same as an unset variable.
within_env("ANNOUNCEMENT" => " \t ") do
expect(described_class.announcement).to be_nil
end
end
it "returns the trimmed announcement text" do
within_env("ANNOUNCEMENT" => " Next Meetup ") do
expect(described_class.announcement).to eq("Next Meetup")
end
end
end
describe ".debug?" do
it "reflects the DEBUG environment variable" do
within_env("DEBUG" => "1") do
+14
View File
@@ -166,6 +166,20 @@ RSpec.describe PotatoMesh::App::Database do
expect(telemetry_columns).to include("rx_time", "battery_level")
end
# Simulates a legacy database whose messages table predates the decryption
# metadata, then verifies the schema upgrade adds the missing columns.
it "adds decryption metadata columns to existing messages tables" do
SQLite3::Database.new(PotatoMesh::Config.db_path) do |db|
db.execute("CREATE TABLE nodes(node_id TEXT)")
db.execute("CREATE TABLE messages(id INTEGER PRIMARY KEY)")
end
# Precondition: the freshly created table must lack the new columns.
expect(column_names_for("messages")).not_to include("decrypted", "decryption_confidence")
harness_class.ensure_schema_upgrades
message_columns = column_names_for("messages")
expect(message_columns).to include("decrypted", "decryption_confidence")
end
it "creates trace tables when absent" do
SQLite3::Database.new(PotatoMesh::Config.db_path) do |db|
db.execute("CREATE TABLE nodes(node_id TEXT)")
+33
View File
@@ -321,6 +321,39 @@ RSpec.describe PotatoMesh::App::Federation do
expect(visited).not_to include(attributes_list[1][:domain], attributes_list[2][:domain])
expect(federation_helpers.debug_messages).to include(a_string_including("crawl limit"))
end
# Freezes Time.now and stubs the fetch layer so the crawl is deterministic,
# then asserts each remote instance is polled with the widened
# ?since=<cutoff>&limit=1000 node query and its nodes_count is populated.
it "requests an expanded recent node window when counting remote activity" do
now = Time.at(1_700_000_000)
allow(Time).to receive(:now).and_return(now)
allow(PotatoMesh::Config).to receive(:remote_instance_max_node_age).and_return(900)
recent_cutoff = now.to_i - 900
# Map [host, path] pairs to canned [payload, kind] responses for the stub below.
mapping = { [seed_domain, "/api/instances"] => [payload_entries, :instances] }
attributes_list.each_with_index do |attributes, index|
mapping[[attributes[:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"]] = [node_payload, :nodes]
mapping[[attributes[:domain], "/api/nodes"]] = [node_payload, :nodes]
mapping[[attributes[:domain], "/api/instances"]] = [[], :instances]
allow(federation_helpers).to receive(:remote_instance_attributes_from_payload).with(payload_entries[index]).and_return([attributes, "signature-#{index}", nil])
end
# Record every fetched [host, path] so the assertion can inspect the crawl.
captured_paths = []
allow(federation_helpers).to receive(:fetch_instance_json) do |host, path|
captured_paths << [host, path]
mapping.fetch([host, path]) { [nil, []] }
end
allow(federation_helpers).to receive(:verify_instance_signature).and_return(true)
allow(federation_helpers).to receive(:validate_remote_nodes).and_return([true, nil])
allow(federation_helpers).to receive(:upsert_instance_record)
federation_helpers.ingest_known_instances_from!(db, seed_domain)
expect(captured_paths).to include(
[attributes_list[0][:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"],
[attributes_list[1][:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"],
[attributes_list[2][:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"],
)
expect(attributes_list.map { |attrs| attrs[:nodes_count] }).to all(eq(node_payload.length))
end
end
describe ".upsert_instance_record" do
+206
View File
@@ -0,0 +1,206 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
require "spec_helper"
require "json"
require "time"
RSpec.describe "Ingestor endpoints" do
let(:app) { Sinatra::Application }
let(:api_token) { "secret-token" }
let(:auth_headers) do
{
"CONTENT_TYPE" => "application/json",
"HTTP_AUTHORIZATION" => "Bearer #{api_token}",
}
end
before do
@original_token = ENV["API_TOKEN"]
ENV["API_TOKEN"] = api_token
clear_ingestors_table
end
after do
ENV["API_TOKEN"] = @original_token
clear_ingestors_table
end
# Delete all ingestor rows (and compact the database file) so every
# example starts from an empty table.
def clear_ingestors_table
with_db do |db|
db.execute("DELETE FROM ingestors")
db.execute("VACUUM")
end
end
# Open the application database with the configured busy timeout and
# foreign keys enabled, yield the handle, and always close it afterwards.
#
# @param readonly [Boolean] open the database read-only when true
def with_db(readonly: false)
db = PotatoMesh::Application.open_database(readonly: readonly)
db.busy_timeout = PotatoMesh::Config.db_busy_timeout_ms
db.execute("PRAGMA foreign_keys = ON")
yield db
ensure
db&.close
end
def ingestor_payload(overrides = {})
now = Time.now.to_i
{
node_id: "!abc12345",
start_time: now - 120,
last_seen_time: now - 60,
version: "0.5.10",
lora_freq: 915,
modem_preset: "LongFast",
}.merge(overrides)
end
describe "POST /api/ingestors" do
it "requires a bearer token" do
post "/api/ingestors", ingestor_payload.to_json, { "CONTENT_TYPE" => "application/json" }
expect(last_response.status).to eq(403)
end
it "upserts ingestor state without regressing start time" do
payload = ingestor_payload
post "/api/ingestors", payload.to_json, auth_headers
expect(last_response.status).to eq(200)
newer_last_seen = payload[:last_seen_time] + 3_600
older_start = payload[:start_time] - 500
post "/api/ingestors",
payload.merge(last_seen_time: newer_last_seen, start_time: older_start).to_json,
auth_headers
expect(last_response.status).to eq(200)
with_db(readonly: true) do |db|
row = db.get_first_row(
"SELECT node_id, start_time, last_seen_time, version, lora_freq, modem_preset FROM ingestors WHERE node_id = ?",
[payload[:node_id]],
)
expect(row[0]).to eq(payload[:node_id])
expect(row[1]).to eq(payload[:start_time])
expect(row[2]).to be >= payload[:last_seen_time]
expect(row[2]).to be <= Time.now.to_i
expect(row[3]).to eq(payload[:version])
expect(row[4]).to eq(payload[:lora_freq])
expect(row[5]).to eq(payload[:modem_preset])
end
end
it "rejects payloads missing required fields" do
post "/api/ingestors", { node_id: "!abcd0001" }.to_json, auth_headers
expect(last_response.status).to eq(400)
end
it "rejects invalid JSON" do
post "/api/ingestors", "{", auth_headers
expect(last_response.status).to eq(400)
end
it "rejects payloads missing version" do
post "/api/ingestors", ingestor_payload(version: nil).to_json, auth_headers
expect(last_response.status).to eq(400)
end
it "rejects non-object payloads" do
post "/api/ingestors", [].to_json, auth_headers
expect(last_response.status).to eq(400)
end
end
describe "GET /api/ingestors" do
it "returns recent ingestors and omits stale rows" do
now = Time.now.to_i
with_db do |db|
db.execute(
"INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
["!fresh000", now - 100, now - 10, "0.5.10"],
)
db.execute(
"INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
["!stale000", now - (9 * 24 * 60 * 60), now - (9 * 24 * 60 * 60), "0.5.6"],
)
db.execute(
"INSERT INTO ingestors(node_id, start_time, last_seen_time, version, lora_freq, modem_preset) VALUES(?,?,?,?,?,?)",
["!rich000", now - 200, now - 100, "0.5.10", 915, "MediumFast"],
)
end
get "/api/ingestors"
expect(last_response.status).to eq(200)
payload = JSON.parse(last_response.body)
expect(payload).to all(include("node_id", "start_time", "last_seen_time", "version"))
node_ids = payload.map { |entry| entry["node_id"] }
expect(node_ids).to include("!fresh000")
expect(node_ids).not_to include("!stale000")
rich = payload.find { |row| row["node_id"] == "!rich000" }
expect(rich["lora_freq"]).to eq(915)
expect(rich["modem_preset"]).to eq("MediumFast")
expect(rich["start_time_iso"]).to be_a(String)
expect(rich["last_seen_iso"]).to be_a(String)
end
it "filters ingestors using the since parameter" do
frozen_time = Time.at(1_700_000_000)
allow(Time).to receive(:now).and_return(frozen_time)
now = frozen_time.to_i
recent_cutoff = now - 120
with_db do |db|
db.execute(
"INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
["!old-ingestor", now - 600, now - 300, "0.5.5"],
)
db.execute(
"INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
["!new-ingestor", now - 60, now - 30, "0.5.10"],
)
end
get "/api/ingestors?since=#{recent_cutoff}"
expect(last_response).to be_ok
payload = JSON.parse(last_response.body)
expect(payload.map { |entry| entry["node_id"] }).to eq(["!new-ingestor"])
end
end
describe "schema migrations" do
it "creates the ingestors table with frequency and modem columns" do
tmp_db = File.join(SPEC_TMPDIR, "ingestor-migrate.db")
FileUtils.rm_f(tmp_db)
original = PotatoMesh::Config.db_path
allow(PotatoMesh::Config).to receive(:db_path).and_return(tmp_db)
begin
PotatoMesh::Application.init_db
with_db(readonly: true) do |db|
columns = db.execute("PRAGMA table_info(ingestors)").map { |row| row[1] }
expect(columns).to include("lora_freq", "modem_preset", "version")
end
ensure
allow(PotatoMesh::Config).to receive(:db_path).and_return(original)
end
end
end
end

Some files were not shown because too many files have changed in this diff. Show More