forked from iarv/potato-mesh
Compare commits
58 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| bb7a09cb6f | |||
| fed8b9e124 | |||
| 60e734086f | |||
| c3181e9bd5 | |||
| f4fa487b2d | |||
| e0237108c6 | |||
| d7a636251d | |||
| 108573b100 | |||
| 36f55e6b79 | |||
| b4dd72e7eb | |||
| f5f2e977a1 | |||
| e9a0dc0d59 | |||
| d75c395514 | |||
| b08f951780 | |||
| 955431ac18 | |||
| 7f40abf92a | |||
| c157fd481b | |||
| a6fc7145bc | |||
| ca05cbb2c5 | |||
| 5c79572c4d | |||
| 6fd8e5ad12 | |||
| 09fbc32e48 | |||
| 4591d5acd6 | |||
| 6c711f80b4 | |||
| e61e701240 | |||
| 42f4e80a26 | |||
| 4dc03f33ca | |||
| 5572c6cd12 | |||
| 4f7e66de82 | |||
| c1898037c0 | |||
| efc5f64279 | |||
| 636a203254 | |||
| 2e78fa7a3a | |||
| e74f985630 | |||
| e4facd7f26 | |||
| f533362f8a | |||
| 175a8f368f | |||
| 872bcbd529 | |||
| 8811f71e53 | |||
| fec649a159 | |||
| 9e3f481401 | |||
| 1a497864a7 | |||
| 06fb90513f | |||
| b5eecb1ec1 | |||
| 0e211aebdd | |||
| 96b62d7e14 | |||
| baf6ffff0b | |||
| 135de0863c | |||
| 074a61baac | |||
| 209cc948bf | |||
| cc108f2f49 | |||
| 844204f64d | |||
| 88f699f4ec | |||
| d1b9196f47 | |||
| 8181fc8e03 | |||
| 5be2ac417a | |||
| 6acb1c833c | |||
| 2bd69415c1 |
@@ -36,6 +36,8 @@ jobs:
|
||||
include:
|
||||
- language: python
|
||||
build-mode: none
|
||||
- language: rust
|
||||
build-mode: none
|
||||
- language: ruby
|
||||
build-mode: none
|
||||
- language: javascript-typescript
|
||||
|
||||
@@ -43,7 +43,7 @@ jobs:
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
service: [web, ingestor]
|
||||
service: [web, ingestor, matrix-bridge]
|
||||
architecture:
|
||||
- { name: linux-amd64, platform: linux/amd64, label: "Linux x86_64", os: linux, architecture: amd64 }
|
||||
- { name: linux-arm64, platform: linux/arm64, label: "Linux ARM64", os: linux, architecture: arm64 }
|
||||
@@ -109,8 +109,8 @@ jobs:
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: .
|
||||
file: ./${{ matrix.service == 'web' && 'web/Dockerfile' || 'data/Dockerfile' }}
|
||||
target: production
|
||||
file: ${{ matrix.service == 'web' && './web/Dockerfile' || matrix.service == 'ingestor' && './data/Dockerfile' || './matrix/Dockerfile' }}
|
||||
target: ${{ matrix.service == 'matrix-bridge' && 'runtime' || 'production' }}
|
||||
platforms: ${{ matrix.architecture.platform }}
|
||||
push: true
|
||||
tags: |
|
||||
@@ -119,12 +119,12 @@ jobs:
|
||||
${{ steps.tagging.outputs.include_latest == 'true' && format('{0}/{1}-{2}-{3}:latest', env.REGISTRY, env.IMAGE_PREFIX, matrix.service, matrix.architecture.name) || '' }}
|
||||
labels: |
|
||||
org.opencontainers.image.source=https://github.com/${{ github.repository }}
|
||||
org.opencontainers.image.description=PotatoMesh ${{ matrix.service == 'web' && 'Web Application' || 'Python Ingestor' }} for ${{ matrix.architecture.label }}
|
||||
org.opencontainers.image.description=PotatoMesh ${{ matrix.service == 'web' && 'Web Application' || matrix.service == 'ingestor' && 'Python Ingestor' || 'Matrix Bridge' }} for ${{ matrix.architecture.label }}
|
||||
org.opencontainers.image.licenses=Apache-2.0
|
||||
org.opencontainers.image.version=${{ steps.version.outputs.version }}
|
||||
org.opencontainers.image.created=${{ github.event.head_commit.timestamp }}
|
||||
org.opencontainers.image.revision=${{ github.sha }}
|
||||
org.opencontainers.image.title=PotatoMesh ${{ matrix.service == 'web' && 'Web' || 'Ingestor' }} (${{ matrix.architecture.label }})
|
||||
org.opencontainers.image.title=PotatoMesh ${{ matrix.service == 'web' && 'Web' || matrix.service == 'ingestor' && 'Ingestor' || 'Matrix Bridge' }} (${{ matrix.architecture.label }})
|
||||
org.opencontainers.image.vendor=PotatoMesh
|
||||
org.opencontainers.image.architecture=${{ matrix.architecture.architecture }}
|
||||
org.opencontainers.image.os=${{ matrix.architecture.os }}
|
||||
@@ -208,6 +208,19 @@ jobs:
|
||||
VERSION=${GITHUB_REF#refs/tags/v}
|
||||
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Determine tagging strategy
|
||||
id: tagging
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.version }}"
|
||||
|
||||
if echo "$VERSION" | grep -E -- '-(rc|beta|alpha|dev)'; then
|
||||
INCLUDE_LATEST=false
|
||||
else
|
||||
INCLUDE_LATEST=true
|
||||
fi
|
||||
|
||||
echo "include_latest=$INCLUDE_LATEST" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Publish release summary
|
||||
run: |
|
||||
echo "## 🚀 PotatoMesh Images Published to GHCR" >> $GITHUB_STEP_SUMMARY
|
||||
@@ -234,4 +247,13 @@ jobs:
|
||||
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-armv7:latest\` - Linux ARMv7" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
fi
|
||||
|
||||
# Matrix bridge images
|
||||
echo "### 🧩 Matrix Bridge" >> $GITHUB_STEP_SUMMARY
|
||||
if [ "${{ steps.tagging.outputs.include_latest }}" = "true" ]; then
|
||||
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-matrix-bridge-linux-amd64:latest\` - Linux x86_64" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-matrix-bridge-linux-arm64:latest\` - Linux ARM64" >> $GITHUB_STEP_SUMMARY
|
||||
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-matrix-bridge-linux-armv7:latest\` - Linux ARMv7" >> $GITHUB_STEP_SUMMARY
|
||||
echo "" >> $GITHUB_STEP_SUMMARY
|
||||
fi
|
||||
|
||||
|
||||
@@ -19,6 +19,10 @@ on:
|
||||
branches: [ "main" ]
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
paths:
|
||||
- '.github/**'
|
||||
- 'web/**'
|
||||
- 'tests/**'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -47,6 +51,7 @@ jobs:
|
||||
files: web/reports/javascript-coverage.json
|
||||
flags: frontend
|
||||
name: frontend
|
||||
fail_ci_if_error: false
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
- name: Upload test results to Codecov
|
||||
|
||||
@@ -19,6 +19,10 @@ on:
|
||||
branches: [ "main" ]
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
paths:
|
||||
- '.github/**'
|
||||
- 'app/**'
|
||||
- 'tests/**'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -63,5 +67,6 @@ jobs:
|
||||
files: coverage/lcov.info
|
||||
flags: flutter-mobile
|
||||
name: flutter-mobile
|
||||
fail_ci_if_error: false
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
|
||||
@@ -0,0 +1,35 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: Nix
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
|
||||
jobs:
|
||||
flake-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Install Nix
|
||||
uses: cachix/install-nix-action@v30
|
||||
with:
|
||||
extra_nix_config: |
|
||||
experimental-features = nix-command flakes
|
||||
- name: Run flake checks
|
||||
run: nix flake check
|
||||
@@ -19,6 +19,10 @@ on:
|
||||
branches: [ "main" ]
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
paths:
|
||||
- '.github/**'
|
||||
- 'data/**'
|
||||
- 'tests/**'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -47,6 +51,7 @@ jobs:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: reports/python-coverage.xml
|
||||
flags: python-ingestor
|
||||
fail_ci_if_error: false
|
||||
name: python-ingestor
|
||||
env:
|
||||
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
|
||||
|
||||
@@ -19,6 +19,10 @@ on:
|
||||
branches: [ "main" ]
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
paths:
|
||||
- '.github/**'
|
||||
- 'web/**'
|
||||
- 'tests/**'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -31,7 +35,7 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
ruby-version: ['3.3', '3.4']
|
||||
ruby-version: ['3.4', '4.0']
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
|
||||
@@ -0,0 +1,78 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: Rust
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "main" ]
|
||||
pull_request:
|
||||
branches: [ "main" ]
|
||||
paths:
|
||||
- '.github/**'
|
||||
- 'matrix/**'
|
||||
- 'tests/**'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
matrix:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
- name: Cache Cargo registry
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
./matrix/target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.toml', '**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
- name: Show rustc version
|
||||
run: rustc --version
|
||||
- name: Install llvm-tools-preview component
|
||||
run: rustup component add llvm-tools-preview --toolchain stable
|
||||
- name: Install cargo-llvm-cov
|
||||
working-directory: ./matrix
|
||||
run: cargo install cargo-llvm-cov --locked
|
||||
- name: Check formatting
|
||||
working-directory: ./matrix
|
||||
run: cargo fmt --all -- --check
|
||||
- name: Clippy lint
|
||||
working-directory: ./matrix
|
||||
run: cargo clippy --all-targets --all-features -- -D warnings
|
||||
- name: Build
|
||||
working-directory: ./matrix
|
||||
run: cargo build --all --all-features
|
||||
- name: Test
|
||||
working-directory: ./matrix
|
||||
run: cargo test --all --all-features --verbose
|
||||
- name: Run tests with coverage
|
||||
working-directory: ./matrix
|
||||
run: |
|
||||
cargo llvm-cov --all-features --workspace --lcov --output-path coverage.lcov
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v5
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: ./matrix/coverage.lcov
|
||||
flags: matrix-bridge
|
||||
name: matrix-bridge
|
||||
fail_ci_if_error: false
|
||||
@@ -17,11 +17,15 @@ The repository splits runtime and ingestion logic. `web/` holds the Sinatra dash
|
||||
|
||||
`data/` hosts the Python Meshtastic ingestor plus migrations and CLI scripts. API fixtures and end-to-end harnesses live in `tests/`. Dockerfiles and compose files support containerized workflows.
|
||||
|
||||
`matrix/` contains the Rust Matrix bridge; build with `cargo build --release` or `docker build -f matrix/Dockerfile .`, and keep bridge config under `matrix/Config.toml` when running locally.
|
||||
|
||||
## Build, Test, and Development Commands
|
||||
Run dependency installs inside `web/`: `bundle install` for gems and `npm ci` for JavaScript tooling. Start the app with `cd web && API_TOKEN=dev ./app.sh` for local work or `bundle exec rackup -p 41447` when integrating elsewhere.
|
||||
|
||||
Prep ingestion with `python -m venv .venv && pip install -r data/requirements.txt`; `./data/mesh.sh` streams from live radios. `docker-compose -f docker-compose.dev.yml up` brings up the full stack.
|
||||
|
||||
Container images publish via `.github/workflows/docker.yml` as `potato-mesh-{service}-linux-$arch` (`web`, `ingestor`, `matrix-bridge`), using the Dockerfiles in `web/`, `data/`, and `matrix/`.
|
||||
|
||||
## Coding Style & Naming Conventions
|
||||
Use two-space indentation for Ruby and keep `# frozen_string_literal: true` at the top of new files. Keep Ruby classes/modules in `CamelCase`, filenames in `snake_case.rb`, and feature specs in `*_spec.rb`.
|
||||
|
||||
|
||||
+101
@@ -1,5 +1,106 @@
|
||||
# CHANGELOG
|
||||
|
||||
## v0.5.9
|
||||
|
||||
* Matrix: listen for synapse on port 41448 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/607>
|
||||
* Web: collapse federation map ledgend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/604>
|
||||
* Web: fix stale node queries by @l5yth in <https://github.com/l5yth/potato-mesh/pull/603>
|
||||
* Matrix: move short name to display name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/602>
|
||||
* Ci: update ruby to 4 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/601>
|
||||
* Web: display traces of last 28 days if available by @l5yth in <https://github.com/l5yth/potato-mesh/pull/599>
|
||||
* Web: establish menu structure by @l5yth in <https://github.com/l5yth/potato-mesh/pull/597>
|
||||
* Matrix: fixed the text-message checkpoint regression by @l5yth in <https://github.com/l5yth/potato-mesh/pull/595>
|
||||
* Matrix: cache seen messages by rx_time not id by @l5yth in <https://github.com/l5yth/potato-mesh/pull/594>
|
||||
* Web: hide the default '0' tab when not active by @l5yth in <https://github.com/l5yth/potato-mesh/pull/593>
|
||||
* Matrix: fix empty bridge state json by @l5yth in <https://github.com/l5yth/potato-mesh/pull/592>
|
||||
* Web: allow certain charts to overflow upper bounds by @l5yth in <https://github.com/l5yth/potato-mesh/pull/585>
|
||||
* Ingestor: support ROUTING_APP messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/584>
|
||||
* Ci: run nix flake check on ci by @l5yth in <https://github.com/l5yth/potato-mesh/pull/583>
|
||||
* Web: hide legend by default by @l5yth in <https://github.com/l5yth/potato-mesh/pull/582>
|
||||
* Nix flake by @benjajaja in <https://github.com/l5yth/potato-mesh/pull/577>
|
||||
* Support BLE UUID format for macOS Bluetooth devices by @apo-mak in <https://github.com/l5yth/potato-mesh/pull/575>
|
||||
* Web: add mesh.qrp.ro as seed node by @l5yth in <https://github.com/l5yth/potato-mesh/pull/573>
|
||||
* Web: ensure unknown nodes for messages and traces by @l5yth in <https://github.com/l5yth/potato-mesh/pull/572>
|
||||
* Chore: bump version to 0.5.9 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/569>
|
||||
|
||||
## v0.5.8
|
||||
|
||||
* Web: add secondary seed node jmrp.io by @l5yth in <https://github.com/l5yth/potato-mesh/pull/568>
|
||||
* Data: implement whitelist for ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/567>
|
||||
* Web: add ?since= parameter to all apis by @l5yth in <https://github.com/l5yth/potato-mesh/pull/566>
|
||||
* Matrix: fix docker build by @l5yth in <https://github.com/l5yth/potato-mesh/pull/565>
|
||||
* Matrix: fix docker build by @l5yth in <https://github.com/l5yth/potato-mesh/pull/564>
|
||||
* Web: fix federation signature validation and create fallback by @l5yth in <https://github.com/l5yth/potato-mesh/pull/563>
|
||||
* Chore: update readme by @l5yth in <https://github.com/l5yth/potato-mesh/pull/561>
|
||||
* Matrix: add docker file for bridge by @l5yth in <https://github.com/l5yth/potato-mesh/pull/556>
|
||||
* Matrix: add health checks to startup by @l5yth in <https://github.com/l5yth/potato-mesh/pull/555>
|
||||
* Matrix: omit the api part in base url by @l5yth in <https://github.com/l5yth/potato-mesh/pull/554>
|
||||
* App: add utility coverage tests for main.dart by @l5yth in <https://github.com/l5yth/potato-mesh/pull/552>
|
||||
* Data: add thorough daemon unit tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/553>
|
||||
* Chore: bump version to 0.5.8 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/551>
|
||||
|
||||
## v0.5.7
|
||||
|
||||
* Data: track ingestors heartbeat by @l5yth in <https://github.com/l5yth/potato-mesh/pull/549>
|
||||
* Harden instance selector navigation URLs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/550>
|
||||
* Data: hide channels that have been flag for ignoring by @l5yth in <https://github.com/l5yth/potato-mesh/pull/548>
|
||||
* Web: fix limit when counting remote nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/547>
|
||||
* Web: improve instances map and table view by @l5yth in <https://github.com/l5yth/potato-mesh/pull/546>
|
||||
* Web: fix traces submission with optional fields on udp by @l5yth in <https://github.com/l5yth/potato-mesh/pull/545>
|
||||
* Chore: bump version to 0.5.7 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/542>
|
||||
* Handle zero telemetry aggregates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/538>
|
||||
* Web: fix telemetry api to return current in amperes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/541>
|
||||
* Web: fix traces rendering by @l5yth in <https://github.com/l5yth/potato-mesh/pull/535>
|
||||
* Normalize numeric node roles to canonical labels by @l5yth in <https://github.com/l5yth/potato-mesh/pull/539>
|
||||
* Use INSTANCE_DOMAIN env for ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/536>
|
||||
* Web: further refine the federation page by @l5yth in <https://github.com/l5yth/potato-mesh/pull/534>
|
||||
* Add Federation Map by @apo-mak in <https://github.com/l5yth/potato-mesh/pull/532>
|
||||
* Add contact link to the instance data by @apo-mak in <https://github.com/l5yth/potato-mesh/pull/533>
|
||||
* Matrix: create potato-matrix-bridge by @l5yth in <https://github.com/l5yth/potato-mesh/pull/528>
|
||||
|
||||
## v0.5.6
|
||||
|
||||
* Web: display sats in view by @l5yth in <https://github.com/l5yth/potato-mesh/pull/523>
|
||||
* Web: display air quality in separate chart by @l5yth in <https://github.com/l5yth/potato-mesh/pull/521>
|
||||
* Ci: Add macOS and Ubuntu builds to Flutter workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/519>
|
||||
* Web: add current to charts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/520>
|
||||
* App: fix notification icon by @l5yth in <https://github.com/l5yth/potato-mesh/pull/518>
|
||||
* Spec: update test fixtures by @l5yth in <https://github.com/l5yth/potato-mesh/pull/517>
|
||||
* App: generate proper icons by @l5yth in <https://github.com/l5yth/potato-mesh/pull/516>
|
||||
* Web: fix favicon by @l5yth in <https://github.com/l5yth/potato-mesh/pull/515>
|
||||
* Web: add ?since= parameter to api/messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/512>
|
||||
* App: implement notifications by @l5yth in <https://github.com/l5yth/potato-mesh/pull/511>
|
||||
* App: add theme selector by @l5yth in <https://github.com/l5yth/potato-mesh/pull/507>
|
||||
* App: further harden refresh logic and prefer local first by @l5yth in <https://github.com/l5yth/potato-mesh/pull/506>
|
||||
* Ci: fix app artifacts for tags by @l5yth in <https://github.com/l5yth/potato-mesh/pull/504>
|
||||
* Ci: build app artifacts for tags by @l5yth in <https://github.com/l5yth/potato-mesh/pull/503>
|
||||
* App: add persistance by @l5yth in <https://github.com/l5yth/potato-mesh/pull/501>
|
||||
* App: instance and chat mvp by @l5yth in <https://github.com/l5yth/potato-mesh/pull/498>
|
||||
* App: add instance selector to settings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/497>
|
||||
* App: add scaffholding gitignore by @l5yth in <https://github.com/l5yth/potato-mesh/pull/496>
|
||||
* Handle reaction app packets without reply id by @l5yth in <https://github.com/l5yth/potato-mesh/pull/495>
|
||||
* Render reaction multiplier counts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/494>
|
||||
* Add comprehensive tests for Flutter reader by @l5yth in <https://github.com/l5yth/potato-mesh/pull/491>
|
||||
* Map numeric role ids to canonical Meshtastic roles by @l5yth in <https://github.com/l5yth/potato-mesh/pull/489>
|
||||
* Update node detail hydration for traces by @l5yth in <https://github.com/l5yth/potato-mesh/pull/490>
|
||||
* Add mobile Flutter CI workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/488>
|
||||
* Align OCI labels in docker workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/487>
|
||||
* Add Meshtastic reader Flutter app by @l5yth in <https://github.com/l5yth/potato-mesh/pull/483>
|
||||
* Handle pre-release Docker tagging by @l5yth in <https://github.com/l5yth/potato-mesh/pull/486>
|
||||
* Web: remove range from charts labels by @l5yth in <https://github.com/l5yth/potato-mesh/pull/485>
|
||||
* Floor override frequencies to MHz integers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/476>
|
||||
* Prevent message ids from being treated as node identifiers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/475>
|
||||
* Fix 1 after emojis in reply. by @Alexkurd in <https://github.com/l5yth/potato-mesh/pull/464>
|
||||
* Add frequency and preset to node table by @l5yth in <https://github.com/l5yth/potato-mesh/pull/472>
|
||||
* Subscribe to traceroute app pubsub topic by @l5yth in <https://github.com/l5yth/potato-mesh/pull/471>
|
||||
* Aggregate telemetry over the last 7 days by @l5yth in <https://github.com/l5yth/potato-mesh/pull/470>
|
||||
* Address missing id field ingestor bug by @l5yth in <https://github.com/l5yth/potato-mesh/pull/469>
|
||||
* Merge secondary channels by name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/468>
|
||||
* Rate limit host device telemetry by @l5yth in <https://github.com/l5yth/potato-mesh/pull/467>
|
||||
* Add traceroutes to frontend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/466>
|
||||
* Feat: implement traceroute app packet handling across the stack by @l5yth in <https://github.com/l5yth/potato-mesh/pull/463>
|
||||
* Bump version and update changelog by @l5yth in <https://github.com/l5yth/potato-mesh/pull/462>
|
||||
|
||||
## v0.5.5
|
||||
|
||||
* Added comprehensive helper unit tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/457>
|
||||
|
||||
@@ -53,13 +53,16 @@ Additional environment variables are optional:
|
||||
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom (disables the auto-fit checkbox when set). |
|
||||
| `MAX_DISTANCE` | `42` | Maximum relationship distance (km) before edges are hidden. |
|
||||
| `DEBUG` | `0` | Enables verbose logging across services when set to `1`. |
|
||||
| `ALLOWED_CHANNELS` | _unset_ | Comma-separated channel names the ingestor accepts; other channels are skipped before hidden filters. |
|
||||
| `HIDDEN_CHANNELS` | _unset_ | Comma-separated channel names the ingestor skips when forwarding packets. |
|
||||
| `FEDERATION` | `1` | Controls whether the instance announces itself and crawls peers (`1`) or stays isolated (`0`). |
|
||||
| `PRIVATE` | `0` | Restricts public visibility and disables chat/message endpoints when set to `1`. |
|
||||
| `CONNECTION` | `/dev/ttyACM0` | Serial device, TCP endpoint, or Bluetooth target used by the ingestor to reach the radio. |
|
||||
|
||||
The ingestor also respects supporting variables such as `POTATOMESH_INSTANCE`
|
||||
(defaults to `http://web:41447`) for remote posting and `CHANNEL_INDEX` when
|
||||
selecting a LoRa channel on serial or Bluetooth connections.
|
||||
The ingestor posts to the URL configured via `INSTANCE_DOMAIN` (defaulting to
|
||||
`http://web:41447` in the provided compose file) and still accepts
|
||||
`POTATOMESH_INSTANCE` as a legacy alias when the primary variable is unset. Use
|
||||
`CHANNEL_INDEX` to select a LoRa channel on serial or Bluetooth connections.
|
||||
|
||||
## Docker Compose file
|
||||
|
||||
|
||||
@@ -7,13 +7,20 @@
|
||||
[](https://github.com/l5yth/potato-mesh/issues)
|
||||
[](https://matrix.to/#/#potatomesh:dod.ngo)
|
||||
|
||||
A federated Meshtastic-powered node dashboard for your local community. _No MQTT clutter, just local LoRa aether._
|
||||
A federated, Meshtastic-powered node dashboard for your local community.
|
||||
_No MQTT clutter, just local LoRa aether._
|
||||
|
||||
* Web app with chat window and map view showing nodes, neighbors, telemetry, and messages.
|
||||
* API to POST (authenticated) and to GET nodes and messages.
|
||||
* Shows new node notifications (first seen) in chat.
|
||||
* Web dashboard with chat window and map view showing nodes, positions, neighbors,
|
||||
trace routes, telemetry, and messages.
|
||||
* API to POST (authenticated) and to GET nodes, messages, and telemetry.
|
||||
* Shows new node notifications (first seen) and telemetry logs in chat.
|
||||
* Allows searching and filtering for nodes in map and table view.
|
||||
* Federated: _automatically_ forms a federation with other communities running
|
||||
Potato Mesh!
|
||||
* Supplemental Python ingestor to feed the POST APIs of the Web app with data remotely.
|
||||
* Supports multiple ingestors per instance.
|
||||
* Matrix bridge that posts Meshtastic messages to a defined matrix channel (no
|
||||
radio required).
|
||||
* Mobile app to _read_ messages on your local aether (no radio required).
|
||||
|
||||
Live demo for Berlin #MediumFast: [potatomesh.net](https://potatomesh.net)
|
||||
@@ -58,6 +65,7 @@ RACK_ENV="production" \
|
||||
APP_ENV="production" \
|
||||
API_TOKEN="SuperSecureTokenReally" \
|
||||
INSTANCE_DOMAIN="https://potatomesh.net" \
|
||||
MAP_CENTER="53.55,13.42" \
|
||||
exec ruby app.rb -p 41447 -o 0.0.0.0
|
||||
```
|
||||
|
||||
@@ -68,6 +76,7 @@ exec ruby app.rb -p 41447 -o 0.0.0.0
|
||||
* Provide a strong `API_TOKEN` value to authorize POST requests against the API.
|
||||
* Configure `INSTANCE_DOMAIN` with the public URL of your deployment so vanity
|
||||
links and generated metadata resolve correctly.
|
||||
* Don't forget to set a `MAP_CENTER` to point to your local region.
|
||||
|
||||
The web app can be configured with environment variables (defaults shown):
|
||||
|
||||
@@ -79,10 +88,13 @@ The web app can be configured with environment variables (defaults shown):
|
||||
| `CHANNEL` | `"#LongFast"` | Default channel name displayed in the UI. |
|
||||
| `FREQUENCY` | `"915MHz"` | Default frequency description displayed in the UI. |
|
||||
| `CONTACT_LINK` | `"#potatomesh:dod.ngo"` | Chat link or Matrix alias rendered in the footer and overlays. |
|
||||
| `ANNOUNCEMENT` | _unset_ | Optional announcement banner text rendered above the header on every page. |
|
||||
| `MAP_CENTER` | `38.761944,-27.090833` | Latitude and longitude that centre the map on load. |
|
||||
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom applied on first load; disables auto-fit when provided. |
|
||||
| `MAX_DISTANCE` | `42` | Maximum distance (km) before node relationships are hidden on the map. |
|
||||
| `DEBUG` | `0` | Set to `1` for verbose logging in the web and ingestor services. |
|
||||
| `ALLOWED_CHANNELS` | _unset_ | Comma-separated channel names the ingestor accepts; when set, all other channels are skipped before hidden filters. |
|
||||
| `HIDDEN_CHANNELS` | _unset_ | Comma-separated channel names the ingestor will ignore when forwarding packets. |
|
||||
| `FEDERATION` | `1` | Set to `1` to announce your instance and crawl peers, or `0` to disable federation. Private mode overrides this. |
|
||||
| `PRIVATE` | `0` | Set to `1` to hide the chat UI, disable message APIs, and exclude hidden clients from public listings. |
|
||||
|
||||
@@ -133,7 +145,9 @@ The web app contains an API:
|
||||
* GET `/api/messages?limit=100&encrypted=false&since=0` - returns the latest 100 messages newer than the provided unix timestamp (defaults to `since=0` to return full history; disabled when `PRIVATE=1`)
|
||||
* GET `/api/telemetry?limit=100` - returns the latest 100 telemetry records
|
||||
* GET `/api/neighbors?limit=100` - returns the latest 100 neighbor tuples
|
||||
* GET `/api/traces?limit=100` - returns the latest 100 trace-routes caught
|
||||
* GET `/api/instances` - returns known potato-mesh instances in other locations
|
||||
* GET `/api/ingestors` - returns active potato-mesh python ingestors that feed data
|
||||
* GET `/metrics` - metrics for the Prometheus endpoint
|
||||
* GET `/version` - information about the potato-mesh instance
|
||||
* POST `/api/nodes` - upserts nodes provided as JSON object mapping node ids to node data (requires `Authorization: Bearer <API_TOKEN>`)
|
||||
@@ -141,6 +155,7 @@ The web app contains an API:
|
||||
* POST `/api/messages` - appends messages provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`; disabled when `PRIVATE=1`)
|
||||
* POST `/api/telemetry` - appends telemetry provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
|
||||
* POST `/api/neighbors` - appends neighbor tuples provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
|
||||
* POST `/api/traces` - appends captured trace routes provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
|
||||
|
||||
The `API_TOKEN` environment variable must be set to a non-empty value and match the token supplied in the `Authorization` header for `POST` requests.
|
||||
|
||||
@@ -176,7 +191,7 @@ to the configured potato-mesh instance.
|
||||
Check out `mesh.sh` ingestor script in the `./data` directory.
|
||||
|
||||
```bash
|
||||
POTATOMESH_INSTANCE=http://127.0.0.1:41447 API_TOKEN=1eb140fd-cab4-40be-b862-41c607762246 CONNECTION=/dev/ttyACM0 DEBUG=1 ./mesh.sh
|
||||
INSTANCE_DOMAIN=http://127.0.0.1:41447 API_TOKEN=1eb140fd-cab4-40be-b862-41c607762246 CONNECTION=/dev/ttyACM0 DEBUG=1 ./mesh.sh
|
||||
[2025-02-20T12:34:56.789012Z] [potato-mesh] [info] channel=0 context=daemon.main port='41447' target='http://127.0.0.1' Mesh daemon starting
|
||||
[...]
|
||||
[2025-02-20T12:34:57.012345Z] [potato-mesh] [debug] context=handlers.upsert_node node_id=!849b7154 short_name='7154' long_name='7154' Queued node upsert payload
|
||||
@@ -184,12 +199,56 @@ POTATOMESH_INSTANCE=http://127.0.0.1:41447 API_TOKEN=1eb140fd-cab4-40be-b862-41c
|
||||
[2025-02-20T12:34:58.001122Z] [potato-mesh] [debug] context=handlers.store_packet_dict channel=0 from_id='!9ee71c38' payload='Guten Morgen!' to_id='^all' Queued message payload
|
||||
```
|
||||
|
||||
Run the script with `POTATOMESH_INSTANCE` and `API_TOKEN` to keep updating
|
||||
Run the script with `INSTANCE_DOMAIN` and `API_TOKEN` to keep updating
|
||||
node records and parsing new incoming messages. Enable debug output with `DEBUG=1`,
|
||||
specify the connection target with `CONNECTION` (default `/dev/ttyACM0`) or set it to
|
||||
an IP address (for example `192.168.1.20:4403`) to use the Meshtastic TCP
|
||||
interface. `CONNECTION` also accepts Bluetooth device addresses (e.g.,
|
||||
`ED:4D:9E:95:CF:60`) and the script attempts a BLE connection if available.
|
||||
interface. `CONNECTION` also accepts Bluetooth device addresses in MAC format (e.g.,
|
||||
`ED:4D:9E:95:CF:60`) or UUID format for macOS (e.g., `C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E`)
|
||||
and the script attempts a BLE connection if available. To keep
|
||||
ingestion limited, set `ALLOWED_CHANNELS` to a comma-separated whitelist (for
|
||||
example `ALLOWED_CHANNELS="Chat,Ops"`); packets on other channels are discarded.
|
||||
Use `HIDDEN_CHANNELS` to block specific channels from the web UI even when they
|
||||
appear in the allowlist.
|
||||
|
||||
## Nix
|
||||
|
||||
For the dev shell, run:
|
||||
|
||||
```bash
|
||||
nix develop
|
||||
```
|
||||
|
||||
The shell provides Ruby plus the Python ingestor dependencies (including `meshtastic`
|
||||
and `protobuf`). To sanity-check that the ingestor starts, run `python -m data.mesh`
|
||||
with the usual environment variables (`INSTANCE_DOMAIN`, `API_TOKEN`, `CONNECTION`).
|
||||
|
||||
To run the packaged apps directly:
|
||||
|
||||
```bash
|
||||
nix run .#web
|
||||
nix run .#ingestor
|
||||
```
|
||||
|
||||
Minimal NixOS module snippet:
|
||||
|
||||
```nix
|
||||
services.potato-mesh = {
|
||||
enable = true;
|
||||
apiTokenFile = config.sops.secrets.potato-mesh-api-token.path;
|
||||
dataDir = "/var/lib/potato-mesh";
|
||||
port = 41447;
|
||||
instanceDomain = "https://mesh.me";
|
||||
siteName = "Nix Mesh";
|
||||
contactLink = "homeserver.mx";
|
||||
mapCenter = "28.96,-13.56";
|
||||
frequency = "868MHz";
|
||||
ingestor = {
|
||||
enable = true;
|
||||
connection = "192.168.X.Y:4403";
|
||||
};
|
||||
};
|
||||
```
|
||||
|
||||
## Docker
|
||||
|
||||
@@ -199,12 +258,21 @@ Docker images are published on Github for each release:
|
||||
docker pull ghcr.io/l5yth/potato-mesh/web:latest # newest release
|
||||
docker pull ghcr.io/l5yth/potato-mesh/web:v0.5.5 # pinned historical release
|
||||
docker pull ghcr.io/l5yth/potato-mesh/ingestor:latest
|
||||
docker pull ghcr.io/l5yth/potato-mesh/matrix-bridge:latest
|
||||
```
|
||||
|
||||
Feel free to run the [configure.sh](./configure.sh) script to set up your
|
||||
environment. See the [Docker guide](DOCKER.md) for more details and custom
|
||||
deployment instructions.
|
||||
|
||||
## Matrix Bridge
|
||||
|
||||
A matrix bridge is currently being worked on. It requests messages from a configured
|
||||
potato-mesh instance and forwards it to a specified matrix channel; see
|
||||
[matrix/README.md](./matrix/README.md).
|
||||
|
||||

|
||||
|
||||
## Mobile App
|
||||
|
||||
A mobile _reader_ app is currently being worked on. Stay tuned for releases and updates.
|
||||
|
||||
@@ -1,3 +1,18 @@
|
||||
/*
|
||||
* Copyright © 2025-26 l5yth & contributors
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
plugins {
|
||||
id("com.android.application")
|
||||
id("kotlin-android")
|
||||
|
||||
@@ -1,3 +1,16 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package net.potatomesh.reader
|
||||
|
||||
import io.flutter.embedding.android.FlutterActivity
|
||||
|
||||
@@ -1,3 +1,18 @@
|
||||
/*
|
||||
* Copyright © 2025-26 l5yth & contributors
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
allprojects {
|
||||
repositories {
|
||||
google()
|
||||
|
||||
@@ -1,3 +1,18 @@
|
||||
/*
|
||||
* Copyright © 2025-26 l5yth & contributors
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
pluginManagement {
|
||||
val flutterSdkPath =
|
||||
run {
|
||||
|
||||
+13
-1
@@ -1,5 +1,18 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
export GIT_TAG="$(git describe --tags --abbrev=0)"
|
||||
export GIT_COMMITS="$(git rev-list --count ${GIT_TAG}..HEAD)"
|
||||
export GIT_SHA="$(git rev-parse --short=9 HEAD)"
|
||||
@@ -12,4 +25,3 @@ flutter run \
|
||||
--dart-define=GIT_SHA="${GIT_SHA}" \
|
||||
--dart-define=GIT_DIRTY="${GIT_DIRTY}" \
|
||||
--device-id 38151FDJH00D4C
|
||||
|
||||
|
||||
@@ -15,11 +15,11 @@
|
||||
<key>CFBundlePackageType</key>
|
||||
<string>FMWK</string>
|
||||
<key>CFBundleShortVersionString</key>
|
||||
<string>1.0</string>
|
||||
<string>0.5.10</string>
|
||||
<key>CFBundleSignature</key>
|
||||
<string>????</string>
|
||||
<key>CFBundleVersion</key>
|
||||
<string>1.0</string>
|
||||
<string>0.5.10</string>
|
||||
<key>MinimumOSVersion</key>
|
||||
<string>14.0</string>
|
||||
</dict>
|
||||
|
||||
@@ -1,3 +1,16 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import Flutter
|
||||
import UIKit
|
||||
|
||||
|
||||
@@ -1 +1,14 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#import "GeneratedPluginRegistrant.h"
|
||||
|
||||
@@ -1,3 +1,16 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import Flutter
|
||||
import UIKit
|
||||
import XCTest
|
||||
|
||||
+1
-1
@@ -1,7 +1,7 @@
|
||||
name: potato_mesh_reader
|
||||
description: Meshtastic Reader — read-only view for PotatoMesh messages.
|
||||
publish_to: "none"
|
||||
version: 0.5.6
|
||||
version: 0.5.10
|
||||
|
||||
environment:
|
||||
sdk: ">=3.4.0 <4.0.0"
|
||||
|
||||
+13
-1
@@ -1,5 +1,18 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
export GIT_TAG="$(git describe --tags --abbrev=0)"
|
||||
@@ -27,4 +40,3 @@ fi
|
||||
export APK_DIR="build/app/outputs/flutter-apk"
|
||||
mv -v "${APK_DIR}/app-release.apk" "${APK_DIR}/potatomesh-reader-android-${TAG_NAME}.apk"
|
||||
(cd "${APK_DIR}" && sha256sum "potatomesh-reader-android-${TAG_NAME}.apk" > "potatomesh-reader-android-${TAG_NAME}.apk.sha256sum")
|
||||
|
||||
|
||||
@@ -0,0 +1,128 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
import 'package:flutter/material.dart';
|
||||
import 'package:flutter_test/flutter_test.dart';
|
||||
import 'package:potato_mesh_reader/main.dart';
|
||||
|
||||
void main() {
|
||||
TestWidgetsFlutterBinding.ensureInitialized();
|
||||
|
||||
test('BootstrapProgress renders stage, counts, and detail', () {
|
||||
const progress = BootstrapProgress(
|
||||
stage: 'Downloading',
|
||||
current: 2,
|
||||
total: 5,
|
||||
detail: 'instances',
|
||||
);
|
||||
|
||||
expect(progress.label, 'Downloading 2/5 • instances');
|
||||
|
||||
const fallback = BootstrapProgress(stage: 'Starting');
|
||||
expect(fallback.label, 'Starting');
|
||||
});
|
||||
|
||||
test('InstanceVersion summary prefers populated fields', () {
|
||||
const populated = InstanceVersion(
|
||||
name: 'BerlinMesh',
|
||||
channel: '#MediumFast',
|
||||
frequency: '868MHz',
|
||||
instanceDomain: 'potatomesh.net',
|
||||
);
|
||||
expect(populated.summary, 'BerlinMesh · #MediumFast · 868MHz');
|
||||
|
||||
const minimal = InstanceVersion(
|
||||
name: '',
|
||||
channel: null,
|
||||
frequency: null,
|
||||
instanceDomain: null,
|
||||
);
|
||||
expect(minimal.summary, 'Unknown');
|
||||
});
|
||||
|
||||
test('sortMessagesByRxTime keeps unknown timestamps in place', () {
|
||||
MeshMessage buildMessage({
|
||||
required int id,
|
||||
required String text,
|
||||
required String rxIso,
|
||||
DateTime? rxTime,
|
||||
}) {
|
||||
return MeshMessage(
|
||||
id: id,
|
||||
rxTime: rxTime,
|
||||
rxIso: rxIso,
|
||||
fromId: '!$id',
|
||||
nodeId: '!$id',
|
||||
toId: '^',
|
||||
channelName: '#general',
|
||||
channel: 1,
|
||||
portnum: 'TEXT',
|
||||
text: text,
|
||||
rssi: -50,
|
||||
snr: 1.0,
|
||||
hopLimit: 1,
|
||||
);
|
||||
}
|
||||
|
||||
final withTime = buildMessage(
|
||||
id: 2,
|
||||
rxTime: DateTime.utc(2024, 1, 1, 12, 1),
|
||||
rxIso: '2024-01-01T12:01:00Z',
|
||||
text: 'timed',
|
||||
);
|
||||
final withoutTime = buildMessage(
|
||||
id: 1,
|
||||
rxTime: null,
|
||||
rxIso: 'unknown',
|
||||
text: 'unknown',
|
||||
);
|
||||
final laterTime = buildMessage(
|
||||
id: 3,
|
||||
rxTime: DateTime.utc(2024, 1, 1, 12, 5),
|
||||
rxIso: '2024-01-01T12:05:00Z',
|
||||
text: 'later',
|
||||
);
|
||||
|
||||
final sorted = sortMessagesByRxTime([withoutTime, laterTime, withTime]);
|
||||
|
||||
expect(sorted.first.id, withoutTime.id,
|
||||
reason: 'messages without rxTime should retain position');
|
||||
expect(sorted[1].id, withTime.id,
|
||||
reason: 'messages with timestamps should be ordered chronologically');
|
||||
expect(sorted.last.id, laterTime.id);
|
||||
});
|
||||
|
||||
testWidgets('LoadingScreen displays progress label and icon', (tester) async {
|
||||
const screen = LoadingScreen(
|
||||
progress: BootstrapProgress(stage: 'Fetching'),
|
||||
);
|
||||
|
||||
await tester.pumpWidget(const MaterialApp(home: screen));
|
||||
|
||||
expect(find.byType(CircularProgressIndicator), findsOneWidget);
|
||||
expect(find.text('Fetching'), findsOneWidget);
|
||||
expect(find.bySemanticsLabel('PotatoMesh'), findsOneWidget);
|
||||
});
|
||||
|
||||
testWidgets('LoadingScreen surfaces errors', (tester) async {
|
||||
const screen = LoadingScreen(
|
||||
progress: BootstrapProgress(stage: 'Loading'),
|
||||
error: 'boom',
|
||||
);
|
||||
|
||||
await tester.pumpWidget(const MaterialApp(home: screen));
|
||||
|
||||
expect(find.textContaining('Failed to load: boom'), findsOneWidget);
|
||||
});
|
||||
}
|
||||
@@ -1,3 +1,16 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This is a basic Flutter widget test.
|
||||
//
|
||||
// To perform an interaction with a widget in your test, use the WidgetTester
|
||||
|
||||
@@ -76,6 +76,8 @@ CHANNEL=$(grep "^CHANNEL=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo
|
||||
FREQUENCY=$(grep "^FREQUENCY=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "915MHz")
|
||||
FEDERATION=$(grep "^FEDERATION=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "1")
|
||||
PRIVATE=$(grep "^PRIVATE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "0")
|
||||
HIDDEN_CHANNELS=$(grep "^HIDDEN_CHANNELS=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
|
||||
ALLOWED_CHANNELS=$(grep "^ALLOWED_CHANNELS=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
|
||||
MAP_CENTER=$(grep "^MAP_CENTER=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "38.761944,-27.090833")
|
||||
MAP_ZOOM=$(grep "^MAP_ZOOM=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
|
||||
MAX_DISTANCE=$(grep "^MAX_DISTANCE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "42")
|
||||
@@ -126,6 +128,11 @@ echo "-------------------"
|
||||
echo "Private mode hides public mesh messages from unauthenticated visitors."
|
||||
echo "Set to 1 to hide public feeds or 0 to keep them visible."
|
||||
read_with_default "Enable private mode (1=yes, 0=no)" "$PRIVATE" PRIVATE
|
||||
echo "Provide a comma-separated whitelist of channel names to ingest (optional)."
|
||||
echo "When set, only listed channels are ingested unless explicitly hidden below."
|
||||
read_with_default "Allowed channels" "$ALLOWED_CHANNELS" ALLOWED_CHANNELS
|
||||
echo "Provide a comma-separated list of channel names to hide from the web UI (optional)."
|
||||
read_with_default "Hidden channels" "$HIDDEN_CHANNELS" HIDDEN_CHANNELS
|
||||
|
||||
echo ""
|
||||
echo "🛠 Docker Settings"
|
||||
@@ -196,6 +203,16 @@ update_env "POTATOMESH_IMAGE_TAG" "$POTATOMESH_IMAGE_TAG"
|
||||
update_env "FEDERATION" "$FEDERATION"
|
||||
update_env "PRIVATE" "$PRIVATE"
|
||||
update_env "CONNECTION" "$CONNECTION"
|
||||
if [ -n "$ALLOWED_CHANNELS" ]; then
|
||||
update_env "ALLOWED_CHANNELS" "\"$ALLOWED_CHANNELS\""
|
||||
else
|
||||
sed -i.bak '/^ALLOWED_CHANNELS=.*/d' .env
|
||||
fi
|
||||
if [ -n "$HIDDEN_CHANNELS" ]; then
|
||||
update_env "HIDDEN_CHANNELS" "\"$HIDDEN_CHANNELS\""
|
||||
else
|
||||
sed -i.bak '/^HIDDEN_CHANNELS=.*/d' .env
|
||||
fi
|
||||
if [ -n "$INSTANCE_DOMAIN" ]; then
|
||||
update_env "INSTANCE_DOMAIN" "$INSTANCE_DOMAIN"
|
||||
else
|
||||
@@ -244,6 +261,8 @@ echo " API Token: ${API_TOKEN:0:8}..."
|
||||
echo " Docker Image Arch: $POTATOMESH_IMAGE_ARCH"
|
||||
echo " Docker Image Tag: $POTATOMESH_IMAGE_TAG"
|
||||
echo " Private Mode: ${PRIVATE}"
|
||||
echo " Allowed Channels: ${ALLOWED_CHANNELS:-'All'}"
|
||||
echo " Hidden Channels: ${HIDDEN_CHANNELS:-'None'}"
|
||||
echo " Instance Domain: ${INSTANCE_DOMAIN:-'Auto-detected'}"
|
||||
if [ "${FEDERATION:-1}" = "0" ]; then
|
||||
echo " Federation: Disabled"
|
||||
|
||||
+6
-2
@@ -50,7 +50,9 @@ USER potatomesh
|
||||
ENV CONNECTION=/dev/ttyACM0 \
|
||||
CHANNEL_INDEX=0 \
|
||||
DEBUG=0 \
|
||||
POTATOMESH_INSTANCE="" \
|
||||
ALLOWED_CHANNELS="" \
|
||||
HIDDEN_CHANNELS="" \
|
||||
INSTANCE_DOMAIN="" \
|
||||
API_TOKEN=""
|
||||
|
||||
CMD ["python", "-m", "data.mesh"]
|
||||
@@ -75,7 +77,9 @@ USER ContainerUser
|
||||
ENV CONNECTION=/dev/ttyACM0 \
|
||||
CHANNEL_INDEX=0 \
|
||||
DEBUG=0 \
|
||||
POTATOMESH_INSTANCE="" \
|
||||
ALLOWED_CHANNELS="" \
|
||||
HIDDEN_CHANNELS="" \
|
||||
INSTANCE_DOMAIN="" \
|
||||
API_TOKEN=""
|
||||
|
||||
CMD ["python", "-m", "data.mesh"]
|
||||
|
||||
+1
-1
@@ -18,7 +18,7 @@ The ``data.mesh`` module exposes helpers for reading Meshtastic node and
|
||||
message information before forwarding it to the accompanying web application.
|
||||
"""
|
||||
|
||||
VERSION = "0.5.6"
|
||||
VERSION = "0.5.10"
|
||||
"""Semantic version identifier shared with the dashboard and front-end."""
|
||||
|
||||
__version__ = VERSION
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
-- Copyright © 2025-26 l5yth & contributors
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
PRAGMA journal_mode=WAL;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS ingestors (
|
||||
node_id TEXT PRIMARY KEY,
|
||||
start_time INTEGER NOT NULL,
|
||||
last_seen_time INTEGER NOT NULL,
|
||||
version TEXT,
|
||||
lora_freq INTEGER,
|
||||
modem_preset TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_ingestors_last_seen ON ingestors(last_seen_time);
|
||||
@@ -26,6 +26,8 @@ CREATE TABLE IF NOT EXISTS instances (
|
||||
longitude REAL,
|
||||
last_update_time INTEGER,
|
||||
is_private BOOLEAN NOT NULL DEFAULT 0,
|
||||
nodes_count INTEGER,
|
||||
contact_link TEXT,
|
||||
signature TEXT
|
||||
);
|
||||
|
||||
|
||||
@@ -21,7 +21,17 @@ import threading as threading # re-exported for compatibility
|
||||
import sys
|
||||
import types
|
||||
|
||||
from . import channels, config, daemon, handlers, interfaces, queue, serialization
|
||||
from .. import VERSION as _PACKAGE_VERSION
|
||||
from . import (
|
||||
channels,
|
||||
config,
|
||||
daemon,
|
||||
handlers,
|
||||
ingestors,
|
||||
interfaces,
|
||||
queue,
|
||||
serialization,
|
||||
)
|
||||
|
||||
__all__: list[str] = []
|
||||
|
||||
@@ -40,7 +50,15 @@ def _export_constants() -> None:
|
||||
__all__.extend(["json", "urllib", "glob", "threading", "signal"])
|
||||
|
||||
|
||||
for _module in (channels, daemon, handlers, interfaces, queue, serialization):
|
||||
for _module in (
|
||||
channels,
|
||||
daemon,
|
||||
handlers,
|
||||
interfaces,
|
||||
queue,
|
||||
serialization,
|
||||
ingestors,
|
||||
):
|
||||
_reexport(_module)
|
||||
|
||||
_export_constants()
|
||||
@@ -52,11 +70,14 @@ _CONFIG_ATTRS = {
|
||||
"DEBUG",
|
||||
"INSTANCE",
|
||||
"API_TOKEN",
|
||||
"ALLOWED_CHANNELS",
|
||||
"HIDDEN_CHANNELS",
|
||||
"LORA_FREQ",
|
||||
"MODEM_PRESET",
|
||||
"_RECONNECT_INITIAL_DELAY_SECS",
|
||||
"_RECONNECT_MAX_DELAY_SECS",
|
||||
"_CLOSE_TIMEOUT_SECS",
|
||||
"_INGESTOR_HEARTBEAT_SECS",
|
||||
"_debug_log",
|
||||
}
|
||||
|
||||
@@ -70,9 +91,16 @@ _HANDLER_ATTRS = set(handlers.__all__)
|
||||
_DAEMON_ATTRS = set(daemon.__all__)
|
||||
_SERIALIZATION_ATTRS = set(serialization.__all__)
|
||||
_INTERFACE_EXPORTS = set(interfaces.__all__)
|
||||
_INGESTOR_ATTRS = set(ingestors.__all__)
|
||||
|
||||
# Re-export the package version for callers that previously referenced
|
||||
# data.mesh_ingestor.VERSION directly.
|
||||
VERSION = _PACKAGE_VERSION
|
||||
__all__.append("VERSION")
|
||||
|
||||
__all__.extend(sorted(_CONFIG_ATTRS))
|
||||
__all__.extend(sorted(_INTERFACE_ATTRS))
|
||||
__all__.append("VERSION")
|
||||
|
||||
|
||||
class _MeshIngestorModule(types.ModuleType):
|
||||
@@ -87,6 +115,10 @@ class _MeshIngestorModule(types.ModuleType):
|
||||
return getattr(interfaces, name)
|
||||
if name in _INTERFACE_EXPORTS:
|
||||
return getattr(interfaces, name)
|
||||
if name in _INGESTOR_ATTRS:
|
||||
return getattr(ingestors, name)
|
||||
if name == "VERSION":
|
||||
return VERSION
|
||||
raise AttributeError(name)
|
||||
|
||||
def __setattr__(self, name: str, value): # type: ignore[override]
|
||||
@@ -121,6 +153,10 @@ class _MeshIngestorModule(types.ModuleType):
|
||||
setattr(serialization, name, value)
|
||||
super().__setattr__(name, getattr(serialization, name, value))
|
||||
handled = True
|
||||
if name in _INGESTOR_ATTRS:
|
||||
setattr(ingestors, name, value)
|
||||
super().__setattr__(name, getattr(ingestors, name, value))
|
||||
handled = True
|
||||
if handled:
|
||||
return
|
||||
super().__setattr__(name, value)
|
||||
|
||||
@@ -222,6 +222,54 @@ def channel_name(channel_index: int | None) -> str | None:
|
||||
return _CHANNEL_LOOKUP.get(int(channel_index))
|
||||
|
||||
|
||||
def hidden_channel_names() -> tuple[str, ...]:
|
||||
"""Return the configured set of hidden channel names."""
|
||||
|
||||
return tuple(getattr(config, "HIDDEN_CHANNELS", ()))
|
||||
|
||||
|
||||
def allowed_channel_names() -> tuple[str, ...]:
|
||||
"""Return the configured set of explicitly allowed channel names."""
|
||||
|
||||
return tuple(getattr(config, "ALLOWED_CHANNELS", ()))
|
||||
|
||||
|
||||
def is_allowed_channel(channel_name_value: str | None) -> bool:
|
||||
"""Return ``True`` when ``channel_name_value`` is permitted by policy."""
|
||||
|
||||
allowed = getattr(config, "ALLOWED_CHANNELS", ())
|
||||
if not allowed:
|
||||
return True
|
||||
|
||||
if channel_name_value is None:
|
||||
return False
|
||||
|
||||
normalized = channel_name_value.strip()
|
||||
if not normalized:
|
||||
return False
|
||||
|
||||
normalized_casefold = normalized.casefold()
|
||||
for allowed_name in allowed:
|
||||
if normalized_casefold == allowed_name.casefold():
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def is_hidden_channel(channel_name_value: str | None) -> bool:
|
||||
"""Return ``True`` when ``channel_name_value`` is configured as hidden."""
|
||||
|
||||
if channel_name_value is None:
|
||||
return False
|
||||
normalized = channel_name_value.strip()
|
||||
if not normalized:
|
||||
return False
|
||||
normalized_casefold = normalized.casefold()
|
||||
for hidden in getattr(config, "HIDDEN_CHANNELS", ()):
|
||||
if normalized_casefold == hidden.casefold():
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _reset_channel_cache() -> None:
|
||||
"""Clear cached channel data. Intended for use in tests only."""
|
||||
|
||||
@@ -234,5 +282,9 @@ __all__ = [
|
||||
"capture_from_interface",
|
||||
"channel_mappings",
|
||||
"channel_name",
|
||||
"allowed_channel_names",
|
||||
"hidden_channel_names",
|
||||
"is_allowed_channel",
|
||||
"is_hidden_channel",
|
||||
"_reset_channel_cache",
|
||||
]
|
||||
|
||||
@@ -46,6 +46,9 @@ DEFAULT_ENERGY_ONLINE_DURATION_SECS = 300.0
|
||||
DEFAULT_ENERGY_SLEEP_SECS = float(6 * 60 * 60)
|
||||
"""Sleep duration used when energy saving mode is active."""
|
||||
|
||||
DEFAULT_INGESTOR_HEARTBEAT_SECS = float(60 * 60)
|
||||
"""Interval between ingestor heartbeat announcements."""
|
||||
|
||||
CONNECTION = os.environ.get("CONNECTION") or os.environ.get("MESH_SERIAL")
|
||||
"""Optional connection target for the mesh interface.
|
||||
|
||||
@@ -61,7 +64,72 @@ CHANNEL_INDEX = int(os.environ.get("CHANNEL_INDEX", str(DEFAULT_CHANNEL_INDEX)))
|
||||
"""Index of the LoRa channel to select when connecting."""
|
||||
|
||||
DEBUG = os.environ.get("DEBUG") == "1"
|
||||
INSTANCE = os.environ.get("POTATOMESH_INSTANCE", "").rstrip("/")
|
||||
|
||||
|
||||
def _parse_channel_names(raw_value: str | None) -> tuple[str, ...]:
|
||||
"""Normalise a comma-separated list of channel names.
|
||||
|
||||
Parameters:
|
||||
raw_value: Raw environment string containing channel names separated by
|
||||
commas. ``None`` and empty segments are ignored.
|
||||
|
||||
Returns:
|
||||
A tuple of unique, non-empty channel names preserving input order while
|
||||
deduplicating case-insensitively.
|
||||
"""
|
||||
|
||||
if not raw_value:
|
||||
return ()
|
||||
|
||||
normalized_entries: list[str] = []
|
||||
seen: set[str] = set()
|
||||
for part in raw_value.split(","):
|
||||
name = part.strip()
|
||||
if not name:
|
||||
continue
|
||||
key = name.casefold()
|
||||
if key in seen:
|
||||
continue
|
||||
seen.add(key)
|
||||
normalized_entries.append(name)
|
||||
|
||||
return tuple(normalized_entries)
|
||||
|
||||
|
||||
def _parse_hidden_channels(raw_value: str | None) -> tuple[str, ...]:
|
||||
"""Compatibility wrapper that parses hidden channel names."""
|
||||
|
||||
return _parse_channel_names(raw_value)
|
||||
|
||||
|
||||
HIDDEN_CHANNELS = _parse_hidden_channels(os.environ.get("HIDDEN_CHANNELS"))
|
||||
"""Channel names configured to be ignored by the ingestor."""
|
||||
|
||||
ALLOWED_CHANNELS = _parse_channel_names(os.environ.get("ALLOWED_CHANNELS"))
|
||||
"""Explicitly permitted channel names; when set, other channels are ignored."""
|
||||
|
||||
|
||||
def _resolve_instance_domain() -> str:
|
||||
"""Resolve the configured instance domain from the environment.
|
||||
|
||||
The ingestor prefers the :envvar:`INSTANCE_DOMAIN` variable for clarity and
|
||||
compatibility with the web application. For deployments that still
|
||||
configure the legacy :envvar:`POTATOMESH_INSTANCE` variable, the resolver
|
||||
falls back to that value when no primary domain is set.
|
||||
"""
|
||||
|
||||
instance_domain = os.environ.get("INSTANCE_DOMAIN", "")
|
||||
legacy_instance = os.environ.get("POTATOMESH_INSTANCE", "")
|
||||
|
||||
configured_instance = (instance_domain or legacy_instance).rstrip("/")
|
||||
|
||||
if configured_instance and "://" not in configured_instance:
|
||||
return f"https://{configured_instance}"
|
||||
|
||||
return configured_instance
|
||||
|
||||
|
||||
INSTANCE = _resolve_instance_domain()
|
||||
API_TOKEN = os.environ.get("API_TOKEN", "")
|
||||
ENERGY_SAVING = os.environ.get("ENERGY_SAVING") == "1"
|
||||
"""When ``True``, enables the ingestor's energy saving mode."""
|
||||
@@ -78,6 +146,7 @@ _CLOSE_TIMEOUT_SECS = DEFAULT_CLOSE_TIMEOUT_SECS
|
||||
_INACTIVITY_RECONNECT_SECS = DEFAULT_INACTIVITY_RECONNECT_SECS
|
||||
_ENERGY_ONLINE_DURATION_SECS = DEFAULT_ENERGY_ONLINE_DURATION_SECS
|
||||
_ENERGY_SLEEP_SECS = DEFAULT_ENERGY_SLEEP_SECS
|
||||
_INGESTOR_HEARTBEAT_SECS = DEFAULT_INGESTOR_HEARTBEAT_SECS
|
||||
|
||||
# Backwards compatibility shim for legacy imports.
|
||||
PORT = CONNECTION
|
||||
@@ -122,6 +191,8 @@ __all__ = [
|
||||
"SNAPSHOT_SECS",
|
||||
"CHANNEL_INDEX",
|
||||
"DEBUG",
|
||||
"HIDDEN_CHANNELS",
|
||||
"ALLOWED_CHANNELS",
|
||||
"INSTANCE",
|
||||
"API_TOKEN",
|
||||
"ENERGY_SAVING",
|
||||
@@ -133,6 +204,7 @@ __all__ = [
|
||||
"_INACTIVITY_RECONNECT_SECS",
|
||||
"_ENERGY_ONLINE_DURATION_SECS",
|
||||
"_ENERGY_SLEEP_SECS",
|
||||
"_INGESTOR_HEARTBEAT_SECS",
|
||||
"_debug_log",
|
||||
]
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@ import time
|
||||
|
||||
from pubsub import pub
|
||||
|
||||
from . import config, handlers, interfaces
|
||||
from . import config, handlers, ingestors, interfaces
|
||||
|
||||
_RECEIVE_TOPICS = (
|
||||
"meshtastic.receive",
|
||||
@@ -169,6 +169,41 @@ def _is_ble_interface(iface_obj) -> bool:
|
||||
return "ble_interface" in module_name
|
||||
|
||||
|
||||
def _process_ingestor_heartbeat(iface, *, ingestor_announcement_sent: bool) -> bool:
|
||||
"""Send ingestor liveness heartbeats when a host id is known.
|
||||
|
||||
Parameters:
|
||||
iface: Active mesh interface used to extract a host node id when absent.
|
||||
ingestor_announcement_sent: Whether an initial heartbeat has already
|
||||
been sent during the current session.
|
||||
|
||||
Returns:
|
||||
Updated ``ingestor_announcement_sent`` flag reflecting whether an
|
||||
initial heartbeat was transmitted.
|
||||
"""
|
||||
|
||||
host_id = handlers.host_node_id()
|
||||
if host_id is None and iface is not None:
|
||||
extracted = interfaces._extract_host_node_id(iface)
|
||||
if extracted:
|
||||
handlers.register_host_node_id(extracted)
|
||||
host_id = handlers.host_node_id()
|
||||
|
||||
if host_id:
|
||||
ingestors.set_ingestor_node_id(host_id)
|
||||
heartbeat_sent = ingestors.queue_ingestor_heartbeat(
|
||||
force=not ingestor_announcement_sent
|
||||
)
|
||||
if heartbeat_sent and not ingestor_announcement_sent:
|
||||
return True
|
||||
return ingestor_announcement_sent
|
||||
iface_cls = getattr(iface_obj, "__class__", None)
|
||||
if iface_cls is None:
|
||||
return False
|
||||
module_name = getattr(iface_cls, "__module__", "") or ""
|
||||
return "ble_interface" in module_name
|
||||
|
||||
|
||||
def _connected_state(candidate) -> bool | None:
|
||||
"""Return the connection state advertised by ``candidate``.
|
||||
|
||||
@@ -233,6 +268,7 @@ def main(existing_interface=None) -> None:
|
||||
inactivity_reconnect_secs = max(
|
||||
0.0, getattr(config, "_INACTIVITY_RECONNECT_SECS", 0.0)
|
||||
)
|
||||
ingestor_announcement_sent = False
|
||||
|
||||
energy_saving_enabled = config.ENERGY_SAVING
|
||||
energy_online_secs = max(0.0, config._ENERGY_ONLINE_DURATION_SECS)
|
||||
@@ -260,7 +296,7 @@ def main(existing_interface=None) -> None:
|
||||
signal.signal(signal.SIGINT, handle_sigint)
|
||||
signal.signal(signal.SIGTERM, handle_sigterm)
|
||||
|
||||
target = config.INSTANCE or "(no POTATOMESH_INSTANCE)"
|
||||
target = config.INSTANCE or "(no INSTANCE_DOMAIN configured)"
|
||||
configured_port = config.CONNECTION
|
||||
active_candidate = configured_port
|
||||
announced_target = False
|
||||
@@ -288,6 +324,7 @@ def main(existing_interface=None) -> None:
|
||||
handlers.register_host_node_id(
|
||||
interfaces._extract_host_node_id(iface)
|
||||
)
|
||||
ingestors.set_ingestor_node_id(handlers.host_node_id())
|
||||
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
|
||||
initial_snapshot_sent = False
|
||||
if not announced_target and resolved_target:
|
||||
@@ -501,6 +538,10 @@ def main(existing_interface=None) -> None:
|
||||
iface_connected_at = None
|
||||
continue
|
||||
|
||||
ingestor_announcement_sent = _process_ingestor_heartbeat(
|
||||
iface, ingestor_announcement_sent=ingestor_announcement_sent
|
||||
)
|
||||
|
||||
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
|
||||
stop.wait(config.SNAPSHOT_SECS)
|
||||
except KeyboardInterrupt: # pragma: no cover - interactive only
|
||||
@@ -520,6 +561,7 @@ __all__ = [
|
||||
"_node_items_snapshot",
|
||||
"_subscribe_receive_topics",
|
||||
"_is_ble_interface",
|
||||
"_process_ingestor_heartbeat",
|
||||
"_connected_state",
|
||||
"main",
|
||||
]
|
||||
|
||||
@@ -0,0 +1,86 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Decode Meshtastic protobuf payloads from stdin JSON."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from typing import Any, Dict, Tuple
|
||||
|
||||
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
if SCRIPT_DIR in sys.path:
|
||||
sys.path.remove(SCRIPT_DIR)
|
||||
|
||||
from google.protobuf.json_format import MessageToDict
|
||||
from meshtastic.protobuf import mesh_pb2, telemetry_pb2
|
||||
|
||||
|
||||
PORTNUM_MAP: Dict[int, Tuple[str, Any]] = {
|
||||
3: ("POSITION_APP", mesh_pb2.Position),
|
||||
4: ("NODEINFO_APP", mesh_pb2.NodeInfo),
|
||||
5: ("ROUTING_APP", mesh_pb2.Routing),
|
||||
67: ("TELEMETRY_APP", telemetry_pb2.Telemetry),
|
||||
70: ("TRACEROUTE_APP", mesh_pb2.RouteDiscovery),
|
||||
71: ("NEIGHBORINFO_APP", mesh_pb2.NeighborInfo),
|
||||
}
|
||||
|
||||
|
||||
def _decode_payload(portnum: int, payload_b64: str) -> dict[str, Any]:
|
||||
if portnum not in PORTNUM_MAP:
|
||||
return {"error": "unsupported-port", "portnum": portnum}
|
||||
try:
|
||||
payload_bytes = base64.b64decode(payload_b64, validate=True)
|
||||
except Exception as exc:
|
||||
return {"error": f"invalid-payload: {exc}"}
|
||||
|
||||
name, message_cls = PORTNUM_MAP[portnum]
|
||||
msg = message_cls()
|
||||
try:
|
||||
msg.ParseFromString(payload_bytes)
|
||||
except Exception as exc:
|
||||
return {"error": f"decode-failed: {exc}", "portnum": portnum, "type": name}
|
||||
|
||||
decoded = MessageToDict(msg, preserving_proto_field_name=True)
|
||||
return {"portnum": portnum, "type": name, "payload": decoded}
|
||||
|
||||
|
||||
def main() -> int:
|
||||
raw = sys.stdin.read()
|
||||
try:
|
||||
request = json.loads(raw)
|
||||
except json.JSONDecodeError as exc:
|
||||
sys.stdout.write(json.dumps({"error": f"invalid-json: {exc}"}))
|
||||
return 1
|
||||
|
||||
portnum = request.get("portnum")
|
||||
payload_b64 = request.get("payload_b64")
|
||||
|
||||
if not isinstance(portnum, int):
|
||||
sys.stdout.write(json.dumps({"error": "missing-portnum"}))
|
||||
return 1
|
||||
if not isinstance(payload_b64, str):
|
||||
sys.stdout.write(json.dumps({"error": "missing-payload"}))
|
||||
return 1
|
||||
|
||||
result = _decode_payload(portnum, payload_b64)
|
||||
sys.stdout.write(json.dumps(result))
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
@@ -100,6 +100,41 @@ from .serialization import (
|
||||
)
|
||||
|
||||
|
||||
def _portnum_candidates(name: str) -> set[int]:
|
||||
"""Return Meshtastic port number candidates for ``name``.
|
||||
|
||||
Parameters:
|
||||
name: Port name to look up in Meshtastic ``PortNum`` enums.
|
||||
|
||||
Returns:
|
||||
Set of integer port numbers resolved from Meshtastic modules.
|
||||
"""
|
||||
|
||||
candidates: set[int] = set()
|
||||
for module_name in (
|
||||
"meshtastic.portnums_pb2",
|
||||
"meshtastic.protobuf.portnums_pb2",
|
||||
):
|
||||
module = sys.modules.get(module_name)
|
||||
if module is None:
|
||||
with contextlib.suppress(ModuleNotFoundError):
|
||||
module = importlib.import_module(module_name)
|
||||
if module is None:
|
||||
continue
|
||||
portnum_enum = getattr(module, "PortNum", None)
|
||||
value_lookup = getattr(portnum_enum, "Value", None) if portnum_enum else None
|
||||
if callable(value_lookup):
|
||||
with contextlib.suppress(Exception):
|
||||
candidate = _coerce_int(value_lookup(name))
|
||||
if candidate is not None:
|
||||
candidates.add(candidate)
|
||||
constant_value = getattr(module, name, None)
|
||||
candidate = _coerce_int(constant_value)
|
||||
if candidate is not None:
|
||||
candidates.add(candidate)
|
||||
return candidates
|
||||
|
||||
|
||||
def register_host_node_id(node_id: str | None) -> None:
|
||||
"""Record the canonical identifier for the connected host device.
|
||||
|
||||
@@ -1280,28 +1315,7 @@ def store_packet_dict(packet: Mapping) -> None:
|
||||
traceroute_section = (
|
||||
decoded.get("traceroute") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
traceroute_port_ints: set[int] = set()
|
||||
for module_name in (
|
||||
"meshtastic.portnums_pb2",
|
||||
"meshtastic.protobuf.portnums_pb2",
|
||||
):
|
||||
module = sys.modules.get(module_name)
|
||||
if module is None:
|
||||
with contextlib.suppress(ModuleNotFoundError):
|
||||
module = importlib.import_module(module_name)
|
||||
if module is None:
|
||||
continue
|
||||
portnum_enum = getattr(module, "PortNum", None)
|
||||
value_lookup = getattr(portnum_enum, "Value", None) if portnum_enum else None
|
||||
if callable(value_lookup):
|
||||
with contextlib.suppress(Exception):
|
||||
candidate = _coerce_int(value_lookup("TRACEROUTE_APP"))
|
||||
if candidate is not None:
|
||||
traceroute_port_ints.add(candidate)
|
||||
constant_value = getattr(module, "TRACEROUTE_APP", None)
|
||||
candidate = _coerce_int(constant_value)
|
||||
if candidate is not None:
|
||||
traceroute_port_ints.add(candidate)
|
||||
traceroute_port_ints = _portnum_candidates("TRACEROUTE_APP")
|
||||
|
||||
if (
|
||||
portnum == "TRACEROUTE_APP"
|
||||
@@ -1359,36 +1373,43 @@ def store_packet_dict(packet: Mapping) -> None:
|
||||
if emoji_text:
|
||||
emoji = emoji_text
|
||||
|
||||
allowed_port_values = {"1", "TEXT_MESSAGE_APP", "REACTION_APP"}
|
||||
routing_section = decoded.get("routing") if isinstance(decoded, Mapping) else None
|
||||
routing_port_candidates = _portnum_candidates("ROUTING_APP")
|
||||
if text is None and (
|
||||
portnum == "ROUTING_APP"
|
||||
or (portnum_int is not None and portnum_int in routing_port_candidates)
|
||||
or isinstance(routing_section, Mapping)
|
||||
):
|
||||
routing_payload = _first(decoded, "payload", "data", default=None)
|
||||
if routing_payload is not None:
|
||||
if isinstance(routing_payload, bytes):
|
||||
text = base64.b64encode(routing_payload).decode("ascii")
|
||||
elif isinstance(routing_payload, str):
|
||||
text = routing_payload
|
||||
else:
|
||||
try:
|
||||
text = json.dumps(routing_payload, ensure_ascii=True)
|
||||
except TypeError:
|
||||
text = str(routing_payload)
|
||||
if isinstance(text, str):
|
||||
text = text.strip() or None
|
||||
|
||||
allowed_port_values = {"1", "TEXT_MESSAGE_APP", "REACTION_APP", "ROUTING_APP"}
|
||||
allowed_port_ints = {1}
|
||||
|
||||
reaction_port_candidates: set[int] = set()
|
||||
for module_name in (
|
||||
"meshtastic.portnums_pb2",
|
||||
"meshtastic.protobuf.portnums_pb2",
|
||||
):
|
||||
module = sys.modules.get(module_name)
|
||||
if module is None:
|
||||
with contextlib.suppress(ModuleNotFoundError):
|
||||
module = importlib.import_module(module_name)
|
||||
if module is None:
|
||||
continue
|
||||
portnum_enum = getattr(module, "PortNum", None)
|
||||
value_lookup = getattr(portnum_enum, "Value", None) if portnum_enum else None
|
||||
if callable(value_lookup):
|
||||
with contextlib.suppress(Exception):
|
||||
candidate = _coerce_int(value_lookup("REACTION_APP"))
|
||||
if candidate is not None:
|
||||
reaction_port_candidates.add(candidate)
|
||||
constant_value = getattr(module, "REACTION_APP", None)
|
||||
candidate = _coerce_int(constant_value)
|
||||
if candidate is not None:
|
||||
reaction_port_candidates.add(candidate)
|
||||
|
||||
reaction_port_candidates = _portnum_candidates("REACTION_APP")
|
||||
for candidate in reaction_port_candidates:
|
||||
allowed_port_ints.add(candidate)
|
||||
allowed_port_values.add(str(candidate))
|
||||
|
||||
for candidate in routing_port_candidates:
|
||||
allowed_port_ints.add(candidate)
|
||||
allowed_port_values.add(str(candidate))
|
||||
|
||||
if isinstance(routing_section, Mapping) and portnum_int is not None:
|
||||
allowed_port_ints.add(portnum_int)
|
||||
allowed_port_values.add(str(portnum_int))
|
||||
|
||||
is_reaction_packet = portnum == "REACTION_APP" or (
|
||||
reply_id is not None and emoji is not None
|
||||
)
|
||||
@@ -1414,6 +1435,8 @@ def store_packet_dict(packet: Mapping) -> None:
|
||||
except Exception:
|
||||
channel = 0
|
||||
|
||||
channel_name_value = channels.channel_name(channel)
|
||||
|
||||
pkt_id = _first(packet, "id", "packet_id", "packetId", default=None)
|
||||
if pkt_id is None:
|
||||
_record_ignored_packet(packet, reason="missing-packet-id")
|
||||
@@ -1459,6 +1482,29 @@ def store_packet_dict(packet: Mapping) -> None:
|
||||
_record_ignored_packet(packet, reason="skipped-direct-message")
|
||||
return
|
||||
|
||||
if not channels.is_allowed_channel(channel_name_value):
|
||||
_record_ignored_packet(packet, reason="disallowed-channel")
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Ignored packet on disallowed channel",
|
||||
context="handlers.store_packet_dict",
|
||||
channel=channel,
|
||||
channel_name=channel_name_value,
|
||||
allowed_channels=channels.allowed_channel_names(),
|
||||
)
|
||||
return
|
||||
|
||||
if channels.is_hidden_channel(channel_name_value):
|
||||
_record_ignored_packet(packet, reason="hidden-channel")
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Ignored packet on hidden channel",
|
||||
context="handlers.store_packet_dict",
|
||||
channel=channel,
|
||||
channel_name=channel_name_value,
|
||||
)
|
||||
return
|
||||
|
||||
message_payload = {
|
||||
"id": int(pkt_id),
|
||||
"rx_time": rx_time,
|
||||
@@ -1476,11 +1522,8 @@ def store_packet_dict(packet: Mapping) -> None:
|
||||
"emoji": emoji,
|
||||
}
|
||||
|
||||
channel_name_value = None
|
||||
if not encrypted_flag:
|
||||
channel_name_value = channels.channel_name(channel)
|
||||
if channel_name_value:
|
||||
message_payload["channel_name"] = channel_name_value
|
||||
if not encrypted_flag and channel_name_value:
|
||||
message_payload["channel_name"] = channel_name_value
|
||||
_queue_post_json(
|
||||
"/api/messages",
|
||||
_apply_radio_metadata(message_payload),
|
||||
|
||||
@@ -0,0 +1,139 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Helpers for tracking ingestor identity and liveness announcements."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Callable
|
||||
|
||||
from .. import VERSION as INGESTOR_VERSION
|
||||
from . import config, queue
|
||||
from .serialization import _canonical_node_id
|
||||
|
||||
HEARTBEAT_INTERVAL_SECS = 60 * 60
|
||||
"""Default interval between ingestor heartbeat announcements."""
|
||||
|
||||
|
||||
@dataclass
|
||||
class _IngestorState:
|
||||
"""Mutable ingestor identity and heartbeat tracking data."""
|
||||
|
||||
start_time: int = field(default_factory=lambda: int(time.time()))
|
||||
last_heartbeat: int | None = None
|
||||
node_id: str | None = None
|
||||
|
||||
|
||||
STATE = _IngestorState()
|
||||
"""Shared ingestor identity state."""
|
||||
# Alias retained for clarity without exporting into the top-level mesh module to
|
||||
# avoid colliding with the HTTP queue state.
|
||||
INGESTOR_STATE = STATE
|
||||
|
||||
|
||||
def ingestor_start_time() -> int:
|
||||
"""Return the unix timestamp representing when the ingestor booted."""
|
||||
|
||||
return STATE.start_time
|
||||
|
||||
|
||||
def set_ingestor_node_id(node_id: str | None) -> str | None:
|
||||
"""Record the canonical host node identifier for the ingestor.
|
||||
|
||||
Parameters:
|
||||
node_id: Raw node identifier reported by the connected device.
|
||||
|
||||
Returns:
|
||||
Canonical node identifier in ``!xxxxxxxx`` form or ``None`` when the
|
||||
provided value cannot be normalised.
|
||||
"""
|
||||
|
||||
canonical = _canonical_node_id(node_id)
|
||||
if canonical is None:
|
||||
return None
|
||||
|
||||
if STATE.node_id != canonical:
|
||||
STATE.node_id = canonical
|
||||
STATE.last_heartbeat = None
|
||||
|
||||
return canonical
|
||||
|
||||
|
||||
def queue_ingestor_heartbeat(
|
||||
*,
|
||||
force: bool = False,
|
||||
send: Callable[[str, dict], None] | None = None,
|
||||
node_id: str | None = None,
|
||||
) -> bool:
|
||||
"""Queue a heartbeat payload advertising ingestor liveness.
|
||||
|
||||
Parameters:
|
||||
force: When ``True``, bypasses the heartbeat interval guard so an
|
||||
announcement is queued immediately.
|
||||
send: Optional transport callable used for tests; defaults to the queue
|
||||
dispatcher.
|
||||
node_id: Optional node identifier to register before sending. When
|
||||
omitted the previously recorded identifier is reused.
|
||||
|
||||
Returns:
|
||||
``True`` when a heartbeat payload was queued, ``False`` otherwise.
|
||||
"""
|
||||
|
||||
canonical = _canonical_node_id(node_id) if node_id is not None else None
|
||||
if canonical:
|
||||
set_ingestor_node_id(canonical)
|
||||
canonical = STATE.node_id
|
||||
|
||||
if canonical is None:
|
||||
return False
|
||||
|
||||
now = int(time.time())
|
||||
interval = max(
|
||||
0, int(getattr(config, "_INGESTOR_HEARTBEAT_SECS", HEARTBEAT_INTERVAL_SECS))
|
||||
)
|
||||
last = STATE.last_heartbeat
|
||||
if not force and last is not None and now - last < interval:
|
||||
return False
|
||||
|
||||
payload = {
|
||||
"node_id": canonical,
|
||||
"start_time": STATE.start_time,
|
||||
"last_seen_time": now,
|
||||
"version": INGESTOR_VERSION,
|
||||
}
|
||||
if getattr(config, "LORA_FREQ", None) is not None:
|
||||
payload["lora_freq"] = config.LORA_FREQ
|
||||
if getattr(config, "MODEM_PRESET", None) is not None:
|
||||
payload["modem_preset"] = config.MODEM_PRESET
|
||||
queue._queue_post_json(
|
||||
"/api/ingestors",
|
||||
payload,
|
||||
priority=getattr(
|
||||
queue, "_INGESTOR_POST_PRIORITY", queue._DEFAULT_POST_PRIORITY
|
||||
),
|
||||
send=send,
|
||||
)
|
||||
STATE.last_heartbeat = now
|
||||
return True
|
||||
|
||||
|
||||
__all__ = [
|
||||
"HEARTBEAT_INTERVAL_SECS",
|
||||
"INGESTOR_STATE",
|
||||
"ingestor_start_time",
|
||||
"queue_ingestor_heartbeat",
|
||||
"set_ingestor_node_id",
|
||||
]
|
||||
@@ -628,7 +628,13 @@ _DEFAULT_SERIAL_PATTERNS = (
|
||||
"/dev/cu.usbserial*",
|
||||
)
|
||||
|
||||
_BLE_ADDRESS_RE = re.compile(r"^(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$")
|
||||
# Support both MAC addresses (Linux/Windows) and UUIDs (macOS)
|
||||
_BLE_ADDRESS_RE = re.compile(
|
||||
r"^(?:"
|
||||
r"(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}|" # MAC address format
|
||||
r"[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}" # UUID format
|
||||
r")$"
|
||||
)
|
||||
|
||||
|
||||
class _DummySerialInterface:
|
||||
@@ -642,13 +648,13 @@ class _DummySerialInterface:
|
||||
|
||||
|
||||
def _parse_ble_target(value: str) -> str | None:
|
||||
"""Return an uppercase BLE MAC address when ``value`` matches the format.
|
||||
"""Return a normalized BLE address (MAC or UUID) when ``value`` matches the format.
|
||||
|
||||
Parameters:
|
||||
value: User-provided target string.
|
||||
|
||||
Returns:
|
||||
The normalised MAC address or ``None`` when validation fails.
|
||||
The normalised MAC address or UUID, or ``None`` when validation fails.
|
||||
"""
|
||||
|
||||
if not value:
|
||||
@@ -772,10 +778,13 @@ def _create_serial_interface(port: str) -> tuple[object, str]:
|
||||
return _DummySerialInterface(), "mock"
|
||||
ble_target = _parse_ble_target(port_value)
|
||||
if ble_target:
|
||||
# Determine if it's a MAC address or UUID
|
||||
address_type = "MAC" if ":" in ble_target else "UUID"
|
||||
config._debug_log(
|
||||
"Using BLE interface",
|
||||
context="interfaces.ble",
|
||||
address=ble_target,
|
||||
address_type=address_type,
|
||||
)
|
||||
return _load_ble_interface()(address=ble_target), ble_target
|
||||
network_target = _parse_network_target(port_value)
|
||||
|
||||
@@ -74,6 +74,7 @@ def _payload_key_value_pairs(payload: Mapping[str, object]) -> str:
|
||||
|
||||
|
||||
_MESSAGE_POST_PRIORITY = 10
|
||||
_INGESTOR_POST_PRIORITY = 80
|
||||
_NEIGHBOR_POST_PRIORITY = 20
|
||||
_TRACE_POST_PRIORITY = 25
|
||||
_POSITION_POST_PRIORITY = 30
|
||||
@@ -259,6 +260,7 @@ __all__ = [
|
||||
"QueueState",
|
||||
"_DEFAULT_POST_PRIORITY",
|
||||
"_MESSAGE_POST_PRIORITY",
|
||||
"_INGESTOR_POST_PRIORITY",
|
||||
"_NEIGHBOR_POST_PRIORITY",
|
||||
"_NODE_POST_PRIORITY",
|
||||
"_POSITION_POST_PRIORITY",
|
||||
|
||||
+3
-1
@@ -29,7 +29,9 @@ CREATE TABLE IF NOT EXISTS messages (
|
||||
modem_preset TEXT,
|
||||
channel_name TEXT,
|
||||
reply_id INTEGER,
|
||||
emoji TEXT
|
||||
emoji TEXT,
|
||||
decrypted INTEGER NOT NULL DEFAULT 0,
|
||||
decryption_confidence REAL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_rx_time ON messages(rx_time);
|
||||
|
||||
+40
-1
@@ -49,9 +49,11 @@ x-ingestor-base: &ingestor-base
|
||||
environment:
|
||||
CONNECTION: ${CONNECTION:-/dev/ttyACM0}
|
||||
CHANNEL_INDEX: ${CHANNEL_INDEX:-0}
|
||||
POTATOMESH_INSTANCE: ${POTATOMESH_INSTANCE:-http://web:41447}
|
||||
ALLOWED_CHANNELS: ${ALLOWED_CHANNELS:-""}
|
||||
HIDDEN_CHANNELS: ${HIDDEN_CHANNELS:-""}
|
||||
API_TOKEN: ${API_TOKEN}
|
||||
INSTANCE_DOMAIN: ${INSTANCE_DOMAIN}
|
||||
POTATOMESH_INSTANCE: ${POTATOMESH_INSTANCE:-http://web:41447}
|
||||
DEBUG: ${DEBUG:-0}
|
||||
FEDERATION: ${FEDERATION:-1}
|
||||
PRIVATE: ${PRIVATE:-0}
|
||||
@@ -75,6 +77,21 @@ x-ingestor-base: &ingestor-base
|
||||
memory: 128M
|
||||
cpus: '0.1'
|
||||
|
||||
x-matrix-bridge-base: &matrix-bridge-base
|
||||
image: ghcr.io/l5yth/potato-mesh-matrix-bridge-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:${POTATOMESH_IMAGE_TAG:-latest}
|
||||
volumes:
|
||||
- potatomesh_matrix_bridge_state:/app
|
||||
- ./matrix/Config.toml:/app/Config.toml:ro
|
||||
restart: unless-stopped
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 128M
|
||||
cpus: '0.1'
|
||||
reservations:
|
||||
memory: 64M
|
||||
cpus: '0.05'
|
||||
|
||||
services:
|
||||
web:
|
||||
<<: *web-base
|
||||
@@ -108,6 +125,26 @@ services:
|
||||
profiles:
|
||||
- bridge
|
||||
|
||||
matrix-bridge:
|
||||
<<: *matrix-bridge-base
|
||||
network_mode: host
|
||||
depends_on:
|
||||
- web
|
||||
extra_hosts:
|
||||
- "web:127.0.0.1"
|
||||
|
||||
matrix-bridge-bridge:
|
||||
<<: *matrix-bridge-base
|
||||
container_name: potatomesh-matrix-bridge
|
||||
networks:
|
||||
- potatomesh-network
|
||||
depends_on:
|
||||
- web-bridge
|
||||
ports:
|
||||
- "41448:41448"
|
||||
profiles:
|
||||
- bridge
|
||||
|
||||
volumes:
|
||||
potatomesh_data:
|
||||
driver: local
|
||||
@@ -115,6 +152,8 @@ volumes:
|
||||
driver: local
|
||||
potatomesh_logs:
|
||||
driver: local
|
||||
potatomesh_matrix_bridge_state:
|
||||
driver: local
|
||||
|
||||
networks:
|
||||
potatomesh-network:
|
||||
|
||||
Generated
+61
@@ -0,0 +1,61 @@
|
||||
{
|
||||
"nodes": {
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1731533236,
|
||||
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1766070988,
|
||||
"narHash": "sha256-G/WVghka6c4bAzMhTwT2vjLccg/awmHkdKSd2JrycLc=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "c6245e83d836d0433170a16eb185cefe0572f8b8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"flake-utils": "flake-utils",
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
@@ -0,0 +1,384 @@
|
||||
{
|
||||
description = "PotatoMesh - A federated, Meshtastic-powered node dashboard";
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||
flake-utils.url = "github:numtide/flake-utils";
|
||||
};
|
||||
|
||||
outputs = { self, nixpkgs, flake-utils }:
|
||||
flake-utils.lib.eachDefaultSystem (system:
|
||||
let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
|
||||
# Python environment for the ingestor
|
||||
pythonEnv = pkgs.python3.withPackages (ps: with ps; [
|
||||
meshtastic
|
||||
protobuf
|
||||
requests
|
||||
]);
|
||||
|
||||
# Web app wrapper script
|
||||
webApp = pkgs.writeShellApplication {
|
||||
name = "potato-mesh-web";
|
||||
runtimeInputs = [ pkgs.ruby pkgs.bundler pkgs.sqlite pkgs.git pkgs.gnumake pkgs.gcc ];
|
||||
text = ''
|
||||
if [ -n "''${XDG_DATA_HOME:-}" ]; then
|
||||
BASEDIR="$XDG_DATA_HOME"
|
||||
else
|
||||
BASEDIR="$HOME/.local/share/potato-mesh"
|
||||
fi
|
||||
WORKDIR="$BASEDIR/web"
|
||||
mkdir -p "$WORKDIR"
|
||||
|
||||
# Copy app files if not present or outdated
|
||||
APP_SRC="${./web}"
|
||||
DATA_SRC="${./data}"
|
||||
if [ ! -f "$WORKDIR/.installed" ] || [ "$APP_SRC" != "$(cat "$WORKDIR/.src_path" 2>/dev/null)" ]; then
|
||||
# Copy web app
|
||||
cp -rT "$APP_SRC" "$WORKDIR/"
|
||||
chmod -R u+w "$WORKDIR"
|
||||
# Copy data directory (contains SQL schemas)
|
||||
mkdir -p "$BASEDIR/data"
|
||||
cp -rT "$DATA_SRC" "$BASEDIR/data/"
|
||||
chmod -R u+w "$BASEDIR/data"
|
||||
echo "$APP_SRC" > "$WORKDIR/.src_path"
|
||||
rm -f "$WORKDIR/.installed"
|
||||
fi
|
||||
|
||||
cd "$WORKDIR"
|
||||
|
||||
# Install gems if needed
|
||||
if [ ! -f ".installed" ]; then
|
||||
bundle config set --local path 'vendor/bundle'
|
||||
bundle install
|
||||
touch .installed
|
||||
fi
|
||||
|
||||
exec bundle exec ruby app.rb -p "''${PORT:-41447}" -o "''${HOST:-0.0.0.0}"
|
||||
'';
|
||||
};
|
||||
|
||||
# Ingestor wrapper script
|
||||
ingestor = pkgs.writeShellApplication {
|
||||
name = "potato-mesh-ingestor";
|
||||
runtimeInputs = [ pythonEnv ];
|
||||
text = ''
|
||||
# The ingestor needs to run from parent directory with data/ folder
|
||||
if [ -n "''${XDG_DATA_HOME:-}" ]; then
|
||||
BASEDIR="$XDG_DATA_HOME"
|
||||
else
|
||||
BASEDIR="$HOME/.local/share/potato-mesh"
|
||||
fi
|
||||
if [ ! -d "$BASEDIR/data" ]; then
|
||||
mkdir -p "$BASEDIR"
|
||||
cp -rT "${./data}" "$BASEDIR/data/"
|
||||
chmod -R u+w "$BASEDIR/data"
|
||||
fi
|
||||
cd "$BASEDIR"
|
||||
exec python -m data.mesh
|
||||
'';
|
||||
};
|
||||
|
||||
in {
|
||||
packages = {
|
||||
web = webApp;
|
||||
ingestor = ingestor;
|
||||
default = webApp;
|
||||
};
|
||||
|
||||
apps = {
|
||||
web = {
|
||||
type = "app";
|
||||
program = "${webApp}/bin/potato-mesh-web";
|
||||
};
|
||||
ingestor = {
|
||||
type = "app";
|
||||
program = "${ingestor}/bin/potato-mesh-ingestor";
|
||||
};
|
||||
default = self.apps.${system}.web;
|
||||
};
|
||||
|
||||
devShells.default = pkgs.mkShell {
|
||||
buildInputs = [
|
||||
pkgs.ruby
|
||||
pkgs.bundler
|
||||
pythonEnv
|
||||
pkgs.sqlite
|
||||
];
|
||||
|
||||
shellHook = ''
|
||||
echo "PotatoMesh development shell"
|
||||
echo " - Ruby: $(ruby --version)"
|
||||
echo " - Python: $(python --version)"
|
||||
echo ""
|
||||
echo "To run the web app: cd web && bundle install && ./app.sh"
|
||||
echo "To run the ingestor: cd data && python mesh.py"
|
||||
'';
|
||||
};
|
||||
|
||||
checks.potato-mesh-nixos = pkgs.testers.nixosTest {
|
||||
name = "potato-mesh-data-dir";
|
||||
nodes.machine = { lib, ... }: {
|
||||
imports = [ self.nixosModules.default ];
|
||||
services.potato-mesh = {
|
||||
enable = true;
|
||||
apiToken = "test-token";
|
||||
dataDir = "/var/lib/potato-mesh";
|
||||
ingestor.enable = true;
|
||||
};
|
||||
systemd.services.potato-mesh-ingestor.wantedBy = lib.mkForce [];
|
||||
};
|
||||
testScript = ''
|
||||
machine.start
|
||||
machine.succeed("grep -q 'XDG_DATA_HOME=/var/lib/potato-mesh' /etc/systemd/system/potato-mesh-web.service")
|
||||
machine.succeed("grep -q 'XDG_DATA_HOME=/var/lib/potato-mesh' /etc/systemd/system/potato-mesh-ingestor.service")
|
||||
machine.succeed("grep -q 'WorkingDirectory=/var/lib/potato-mesh' /etc/systemd/system/potato-mesh-web.service")
|
||||
machine.succeed("grep -q 'WorkingDirectory=/var/lib/potato-mesh' /etc/systemd/system/potato-mesh-ingestor.service")
|
||||
'';
|
||||
};
|
||||
}
|
||||
) // {
|
||||
# NixOS module
|
||||
nixosModules.default = { config, lib, pkgs, ... }:
|
||||
let
|
||||
cfg = config.services.potato-mesh;
|
||||
in {
|
||||
options.services.potato-mesh = {
|
||||
enable = lib.mkEnableOption "PotatoMesh web dashboard";
|
||||
|
||||
package = lib.mkOption {
|
||||
type = lib.types.package;
|
||||
default = self.packages.${pkgs.system}.web;
|
||||
description = "The potato-mesh web package to use";
|
||||
};
|
||||
|
||||
port = lib.mkOption {
|
||||
type = lib.types.port;
|
||||
default = 41447;
|
||||
description = "Port to listen on";
|
||||
};
|
||||
|
||||
host = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "0.0.0.0";
|
||||
description = "Host to bind to";
|
||||
};
|
||||
|
||||
apiToken = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
description = "Shared secret that authorizes ingestors and API clients making POST requests. Warning: visible in nix store. Prefer apiTokenFile for production.";
|
||||
};
|
||||
|
||||
apiTokenFile = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.path;
|
||||
default = null;
|
||||
description = "File containing API_TOKEN=<secret> (recommended for production)";
|
||||
};
|
||||
|
||||
instanceDomain = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
description = "Public hostname used for metadata, federation, and generated API links";
|
||||
};
|
||||
|
||||
siteName = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "PotatoMesh Demo";
|
||||
description = "Title and header displayed in the UI";
|
||||
};
|
||||
|
||||
channel = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "#LongFast";
|
||||
description = "Default channel name displayed in the UI";
|
||||
};
|
||||
|
||||
frequency = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "915MHz";
|
||||
description = "Default frequency description displayed in the UI";
|
||||
};
|
||||
|
||||
contactLink = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "#potatomesh:dod.ngo";
|
||||
description = "Chat link or Matrix alias rendered in the footer and overlays";
|
||||
};
|
||||
|
||||
mapCenter = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "38.761944,-27.090833";
|
||||
description = "Latitude and longitude that centre the map on load";
|
||||
};
|
||||
|
||||
mapZoom = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.int;
|
||||
default = null;
|
||||
description = "Fixed Leaflet zoom applied on first load; disables auto-fit when provided";
|
||||
};
|
||||
|
||||
maxDistance = lib.mkOption {
|
||||
type = lib.types.int;
|
||||
default = 42;
|
||||
description = "Maximum distance (km) before node relationships are hidden on the map";
|
||||
};
|
||||
|
||||
debug = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = "Enable verbose logging";
|
||||
};
|
||||
|
||||
allowedChannels = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
description = "Comma-separated channel names the ingestor accepts";
|
||||
};
|
||||
|
||||
hiddenChannels = lib.mkOption {
|
||||
type = lib.types.nullOr lib.types.str;
|
||||
default = null;
|
||||
description = "Comma-separated channel names the ingestor will ignore";
|
||||
};
|
||||
|
||||
federation = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = true;
|
||||
description = "Announce instance and crawl peers";
|
||||
};
|
||||
|
||||
private = lib.mkOption {
|
||||
type = lib.types.bool;
|
||||
default = false;
|
||||
description = "Hide chat UI, disable message APIs, and exclude hidden clients from public listings";
|
||||
};
|
||||
|
||||
dataDir = lib.mkOption {
|
||||
type = lib.types.path;
|
||||
default = "/var/lib/potato-mesh";
|
||||
description = "Directory to store database and configuration";
|
||||
};
|
||||
|
||||
user = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "potato-mesh";
|
||||
description = "User to run the service as";
|
||||
};
|
||||
|
||||
group = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "potato-mesh";
|
||||
description = "Group to run the service as";
|
||||
};
|
||||
|
||||
# Ingestor options
|
||||
ingestor = {
|
||||
enable = lib.mkEnableOption "PotatoMesh Python ingestor";
|
||||
|
||||
package = lib.mkOption {
|
||||
type = lib.types.package;
|
||||
default = self.packages.${pkgs.system}.ingestor;
|
||||
description = "The potato-mesh ingestor package to use";
|
||||
};
|
||||
|
||||
connection = lib.mkOption {
|
||||
type = lib.types.str;
|
||||
default = "/dev/ttyACM0";
|
||||
description = "Connection target: serial port, IP:port for TCP, or Bluetooth address for BLE";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
users.users.${cfg.user} = {
|
||||
isSystemUser = true;
|
||||
group = cfg.group;
|
||||
home = cfg.dataDir;
|
||||
createHome = true;
|
||||
};
|
||||
|
||||
users.groups.${cfg.group} = {};
|
||||
|
||||
systemd.services.potato-mesh-web = {
|
||||
description = "PotatoMesh Web Dashboard";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
|
||||
environment = {
|
||||
RACK_ENV = "production";
|
||||
APP_ENV = "production";
|
||||
PORT = toString cfg.port;
|
||||
HOST = cfg.host;
|
||||
SITE_NAME = cfg.siteName;
|
||||
CHANNEL = cfg.channel;
|
||||
FREQUENCY = cfg.frequency;
|
||||
CONTACT_LINK = cfg.contactLink;
|
||||
MAP_CENTER = cfg.mapCenter;
|
||||
MAX_DISTANCE = toString cfg.maxDistance;
|
||||
DEBUG = if cfg.debug then "1" else "0";
|
||||
FEDERATION = if cfg.federation then "1" else "0";
|
||||
PRIVATE = if cfg.private then "1" else "0";
|
||||
XDG_DATA_HOME = cfg.dataDir;
|
||||
XDG_CONFIG_HOME = "${cfg.dataDir}/config";
|
||||
} // lib.optionalAttrs (cfg.instanceDomain != null) {
|
||||
INSTANCE_DOMAIN = cfg.instanceDomain;
|
||||
} // lib.optionalAttrs (cfg.mapZoom != null) {
|
||||
MAP_ZOOM = toString cfg.mapZoom;
|
||||
} // lib.optionalAttrs (cfg.allowedChannels != null) {
|
||||
ALLOWED_CHANNELS = cfg.allowedChannels;
|
||||
} // lib.optionalAttrs (cfg.hiddenChannels != null) {
|
||||
HIDDEN_CHANNELS = cfg.hiddenChannels;
|
||||
} // lib.optionalAttrs (cfg.apiToken != null) {
|
||||
API_TOKEN = cfg.apiToken;
|
||||
};
|
||||
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = cfg.user;
|
||||
Group = cfg.group;
|
||||
WorkingDirectory = cfg.dataDir;
|
||||
ExecStart = "${cfg.package}/bin/potato-mesh-web";
|
||||
Restart = "always";
|
||||
RestartSec = 5;
|
||||
} // lib.optionalAttrs (cfg.apiTokenFile != null) {
|
||||
EnvironmentFile = cfg.apiTokenFile;
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.potato-mesh-ingestor = lib.mkIf cfg.ingestor.enable {
|
||||
description = "PotatoMesh Python Ingestor";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" "potato-mesh-web.service" ];
|
||||
requires = [ "potato-mesh-web.service" ];
|
||||
|
||||
environment = {
|
||||
INSTANCE_DOMAIN = "http://127.0.0.1:${toString cfg.port}";
|
||||
CONNECTION = cfg.ingestor.connection;
|
||||
DEBUG = if cfg.debug then "1" else "0";
|
||||
XDG_DATA_HOME = cfg.dataDir;
|
||||
} // lib.optionalAttrs (cfg.allowedChannels != null) {
|
||||
ALLOWED_CHANNELS = cfg.allowedChannels;
|
||||
} // lib.optionalAttrs (cfg.hiddenChannels != null) {
|
||||
HIDDEN_CHANNELS = cfg.hiddenChannels;
|
||||
} // lib.optionalAttrs (cfg.apiToken != null) {
|
||||
API_TOKEN = cfg.apiToken;
|
||||
};
|
||||
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
User = cfg.user;
|
||||
Group = cfg.group;
|
||||
WorkingDirectory = cfg.dataDir;
|
||||
ExecStart = "${cfg.ingestor.package}/bin/potato-mesh-ingestor";
|
||||
Restart = "always";
|
||||
RestartSec = 10;
|
||||
} // lib.optionalAttrs (cfg.apiTokenFile != null) {
|
||||
EnvironmentFile = cfg.apiTokenFile;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
@@ -0,0 +1,3 @@
|
||||
target/
|
||||
coverage.lcov
|
||||
bridge_state.json
|
||||
Generated
+2316
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,37 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
[package]
|
||||
name = "potatomesh-matrix-bridge"
|
||||
version = "0.5.10"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
tokio = { version = "1", features = ["macros", "rt-multi-thread", "time"] }
|
||||
reqwest = { version = "0.12", features = ["json", "rustls-tls"] }
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
toml = "0.9"
|
||||
anyhow = "1"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["fmt", "env-filter"] }
|
||||
urlencoding = "2"
|
||||
axum = { version = "0.7", features = ["json"] }
|
||||
clap = { version = "4", features = ["derive"] }
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
mockito = "1"
|
||||
serial_test = "3"
|
||||
tower = "0.5"
|
||||
@@ -0,0 +1,21 @@
|
||||
[potatomesh]
|
||||
# Base domain (with or without trailing slash)
|
||||
base_url = "https://potatomesh.net"
|
||||
# Poll interval in seconds
|
||||
poll_interval_secs = 60
|
||||
|
||||
[matrix]
|
||||
# Homeserver base URL (client API) without trailing slash
|
||||
homeserver = "https://matrix.dod.ngo"
|
||||
# Appservice access token (from your registration.yaml)
|
||||
as_token = "INVALID_TOKEN_NOT_WORKING"
|
||||
# Homeserver token used to authenticate Synapse callbacks
|
||||
hs_token = "INVALID_TOKEN_NOT_WORKING"
|
||||
# Server name (domain) part of Matrix user IDs
|
||||
server_name = "dod.ngo"
|
||||
# Room ID to send into (must be joined by the appservice / puppets)
|
||||
room_id = "!sXabOBXbVObAlZQEUs:c-base.org" # "#potato-bridge:c-base.org"
|
||||
|
||||
[state]
|
||||
# Where to persist last seen message id (optional but recommended)
|
||||
state_file = "bridge_state.json"
|
||||
@@ -0,0 +1,44 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM rust:1.92-bookworm AS builder
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY matrix/Cargo.toml matrix/Cargo.lock ./
|
||||
COPY matrix/src ./src
|
||||
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git \
|
||||
cargo build --release --locked
|
||||
|
||||
FROM debian:bookworm-slim AS runtime
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install -y --no-install-recommends ca-certificates gosu \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN useradd --create-home --uid 10001 --shell /usr/sbin/nologin potatomesh
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=builder /app/target/release/potatomesh-matrix-bridge /usr/local/bin/potatomesh-matrix-bridge
|
||||
COPY matrix/Config.toml /app/Config.example.toml
|
||||
COPY matrix/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
|
||||
|
||||
EXPOSE 41448
|
||||
|
||||
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
|
||||
@@ -0,0 +1,349 @@
|
||||
# potatomesh-matrix-bridge
|
||||
|
||||
A small Rust daemon that bridges **PotatoMesh** LoRa messages into a **Matrix** room.
|
||||
|
||||

|
||||
|
||||
For each PotatoMesh node, the bridge creates (or uses) a **Matrix puppet user**:
|
||||
|
||||
- Matrix localpart: `potato_` + the hex node id (without `!`), e.g. `!67fc83cb` → `@potato_67fc83cb:example.org`
|
||||
- Matrix display name: the node’s `long_name` from the PotatoMesh API
|
||||
|
||||
Messages from PotatoMesh are periodically fetched and forwarded to a single Matrix room as those puppet users.
|
||||
|
||||
---
|
||||
|
||||
## Features
|
||||
|
||||
- Polls `https://potatomesh.net/api/messages` (deriving `/api` from the configured base domain)
|
||||
- Looks up node metadata via `GET /api/nodes/{hex}` and caches it
|
||||
- One Matrix user per node:
|
||||
- username: `potato_{hex node id}`
|
||||
- display name: `long_name`
|
||||
- Forwards `TEXT_MESSAGE_APP` messages into a single Matrix room
|
||||
- Persists last-seen message ID to avoid duplicates across restarts
|
||||
|
||||
---
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
- **PotatoMesh side**
|
||||
- `GET /api/messages` returns an array of messages
|
||||
- `GET /api/nodes/{hex}` returns node metadata (including `long_name`)
|
||||
|
||||
- **Matrix side**
|
||||
- Uses the Matrix Client-Server API with an **appservice access token**
|
||||
- Impersonates puppet users via `user_id=@potato_{hex}:{server_name}&access_token={as_token}`
|
||||
- Sends `m.room.message` events into a configured room
|
||||
|
||||
This is **not** a full appservice framework; it just speaks the minimal HTTP needed.
|
||||
|
||||
---
|
||||
|
||||
## Requirements
|
||||
|
||||
- Rust (stable) and `cargo`
|
||||
- A Matrix homeserver you control (e.g. Synapse)
|
||||
- An **application service registration** on your homeserver that:
|
||||
- Whitelists the puppet user namespace (e.g. `@potato_[0-9a-f]{8}:example.org`)
|
||||
- Provides an `as_token` the bridge can use
|
||||
|
||||
- Network access from the bridge host to:
|
||||
- `https://potatomesh.net/` (bridge appends `/api`)
|
||||
- Your Matrix homeserver (`https://matrix.example.org`)
|
||||
|
||||
---
|
||||
|
||||
## Configuration
|
||||
|
||||
Configuration can come from a TOML file, CLI flags, environment variables, or secret files. The bridge merges inputs in this order (highest to lowest):
|
||||
|
||||
1. CLI flags
|
||||
2. Environment variables
|
||||
3. Secret files (`*_FILE` paths or container defaults)
|
||||
4. TOML config file
|
||||
5. Container defaults (paths + poll interval)
|
||||
|
||||
If no TOML file is provided, required values must be supplied via CLI/env/secret inputs.
|
||||
|
||||
Example TOML:
|
||||
|
||||
```toml
|
||||
[potatomesh]
|
||||
# Base domain (bridge will call {base_url}/api)
|
||||
base_url = "https://potatomesh.net/"
|
||||
# Poll interval in seconds
|
||||
poll_interval_secs = 10
|
||||
|
||||
[matrix]
|
||||
# Homeserver base URL (client API) without trailing slash
|
||||
homeserver = "https://matrix.example.org"
|
||||
# Appservice access token (from your registration.yaml)
|
||||
as_token = "YOUR_APPSERVICE_AS_TOKEN"
|
||||
# Appservice homeserver token (must match registration hs_token)
|
||||
hs_token = "SECRET_HS_TOKEN"
|
||||
# Server name (domain) part of Matrix user IDs
|
||||
server_name = "example.org"
|
||||
# Room ID to send into (must be joined by the appservice / puppets)
|
||||
room_id = "!yourroomid:example.org"
|
||||
|
||||
[state]
|
||||
# Where to persist last seen message id
|
||||
state_file = "bridge_state.json"
|
||||
````
|
||||
|
||||
The `hs_token` is used to validate inbound appservice transactions. Keep it identical in `Config.toml` and your Matrix appservice registration file.
|
||||
|
||||
### CLI Flags
|
||||
|
||||
Run `potatomesh-matrix-bridge --help` for the full list. Common flags:
|
||||
|
||||
* `--config PATH`
|
||||
* `--state-file PATH`
|
||||
* `--potatomesh-base-url URL`
|
||||
* `--potatomesh-poll-interval-secs SECS`
|
||||
* `--matrix-homeserver URL`
|
||||
* `--matrix-as-token TOKEN`
|
||||
* `--matrix-as-token-file PATH`
|
||||
* `--matrix-hs-token TOKEN`
|
||||
* `--matrix-hs-token-file PATH`
|
||||
* `--matrix-server-name NAME`
|
||||
* `--matrix-room-id ROOM`
|
||||
* `--container` / `--no-container`
|
||||
* `--secrets-dir PATH`
|
||||
|
||||
### Environment Variables
|
||||
|
||||
* `POTATOMESH_CONFIG`
|
||||
* `POTATOMESH_BASE_URL`
|
||||
* `POTATOMESH_POLL_INTERVAL_SECS`
|
||||
* `MATRIX_HOMESERVER`
|
||||
* `MATRIX_AS_TOKEN`
|
||||
* `MATRIX_AS_TOKEN_FILE`
|
||||
* `MATRIX_HS_TOKEN`
|
||||
* `MATRIX_HS_TOKEN_FILE`
|
||||
* `MATRIX_SERVER_NAME`
|
||||
* `MATRIX_ROOM_ID`
|
||||
* `STATE_FILE`
|
||||
* `POTATOMESH_CONTAINER`
|
||||
* `POTATOMESH_SECRETS_DIR`
|
||||
|
||||
### Secret Files
|
||||
|
||||
If you supply `*_FILE` values, the bridge reads the secret contents and trims whitespace. When running inside a container, the bridge also checks the default secrets directory (default: `/run/secrets`) for:
|
||||
|
||||
* `matrix_as_token`
|
||||
* `matrix_hs_token`
|
||||
|
||||
### Container Defaults
|
||||
|
||||
Container detection checks `POTATOMESH_CONTAINER`, `CONTAINER`, and `/proc/1/cgroup`. When detected (or forced with `--container`), defaults shift to:
|
||||
|
||||
* Config path: `/app/Config.toml`
|
||||
* State file: `/app/bridge_state.json`
|
||||
* Secrets dir: `/run/secrets`
|
||||
* Poll interval: 15 seconds (if not otherwise configured)
|
||||
|
||||
Set `POTATOMESH_CONTAINER=0` or `--no-container` to opt out of container defaults.
|
||||
|
||||
### PotatoMesh API
|
||||
|
||||
The bridge assumes:
|
||||
|
||||
* Messages: `GET {base_url}/api/messages` → JSON array, for example:
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"id": 2947676906,
|
||||
"rx_time": 1764241436,
|
||||
"rx_iso": "2025-11-27T11:03:56Z",
|
||||
"from_id": "!da6556d4",
|
||||
"to_id": "^all",
|
||||
"channel": 1,
|
||||
"portnum": "TEXT_MESSAGE_APP",
|
||||
"text": "Ping",
|
||||
"rssi": -111,
|
||||
"hop_limit": 1,
|
||||
"lora_freq": 868,
|
||||
"modem_preset": "MediumFast",
|
||||
"channel_name": "TEST",
|
||||
"snr": -9.0,
|
||||
"node_id": "!06871773"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
* Nodes: `GET {base_url}/api/nodes/{hex}` → JSON, for example:
|
||||
|
||||
```json
|
||||
{
|
||||
"node_id": "!67fc83cb",
|
||||
"short_name": "83CB",
|
||||
"long_name": "Meshtastic 83CB",
|
||||
"role": "CLIENT_HIDDEN",
|
||||
"last_heard": 1764250515,
|
||||
"first_heard": 1758993817,
|
||||
"last_seen_iso": "2025-11-27T13:35:15Z"
|
||||
}
|
||||
```
|
||||
|
||||
Node hex ID is derived from `node_id` by stripping the leading `!` and using the remainder inside the puppet localpart prefix (`potato_{hex}`).
|
||||
|
||||
---
|
||||
|
||||
## Matrix Appservice Setup (Synapse example)
|
||||
|
||||
You need an appservice registration file (e.g. `potatomesh-bridge.yaml`) configured in Synapse.
|
||||
|
||||
A minimal example sketch (you **must** adjust URLs, secrets, namespaces):
|
||||
|
||||
```yaml
|
||||
id: potatomesh-bridge
|
||||
url: "http://your-bridge-host:41448"
|
||||
as_token: "YOUR_APPSERVICE_AS_TOKEN"
|
||||
hs_token: "SECRET_HS_TOKEN"
|
||||
sender_localpart: "potatomesh-bridge"
|
||||
rate_limited: false
|
||||
namespaces:
|
||||
users:
|
||||
- exclusive: true
|
||||
regex: "@potato_[0-9a-f]{8}:example.org"
|
||||
```
|
||||
|
||||
This bridge listens for Synapse appservice callbacks on port `41448` so it can log inbound transaction payloads. It still only forwards messages one way (PotatoMesh → Matrix), so inbound Matrix events are acknowledged but not bridged. The `as_token` and `namespaces.users` entries remain required for outbound calls, and the `url` should point at the listener.
|
||||
|
||||
In Synapse’s `homeserver.yaml`, add the registration file under `app_service_config_files`, restart, and invite a puppet user to your target room (or use room ID directly).
|
||||
|
||||
The bridge validates inbound appservice callbacks by comparing the `access_token` query param to `hs_token` in `Config.toml`, so keep those values in sync.
|
||||
|
||||
---
|
||||
|
||||
## Build
|
||||
|
||||
```bash
|
||||
# clone
|
||||
git clone https://github.com/YOUR_USER/potatomesh-matrix-bridge.git
|
||||
cd potatomesh-matrix-bridge
|
||||
|
||||
# build
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
The resulting binary will be at:
|
||||
|
||||
```bash
|
||||
target/release/potatomesh-matrix-bridge
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Docker
|
||||
|
||||
Build the container from the repo root with the included `matrix/Dockerfile`:
|
||||
|
||||
```bash
|
||||
docker build -f matrix/Dockerfile -t potatomesh-matrix-bridge .
|
||||
```
|
||||
|
||||
Provide your config at `/app/Config.toml` (or use CLI/env/secret overrides) and persist the bridge state file by mounting volumes. Minimal example:
|
||||
|
||||
```bash
|
||||
docker run --rm \
|
||||
-p 41448:41448 \
|
||||
-v bridge_state:/app \
|
||||
-v "$(pwd)/matrix/Config.toml:/app/Config.toml:ro" \
|
||||
potatomesh-matrix-bridge
|
||||
```
|
||||
|
||||
If you prefer to isolate the state file from the config, mount it directly instead of the whole `/app` directory:
|
||||
|
||||
```bash
|
||||
docker run --rm \
|
||||
-p 41448:41448 \
|
||||
-v bridge_state:/app \
|
||||
-v "$(pwd)/matrix/Config.toml:/app/Config.toml:ro" \
|
||||
potatomesh-matrix-bridge
|
||||
```
|
||||
|
||||
The image ships `Config.example.toml` for reference. If `/app/Config.toml` is absent, set the required values via environment variables, CLI flags, or secrets instead.
|
||||
|
||||
---
|
||||
|
||||
## Run
|
||||
|
||||
Ensure `Config.toml` is present and valid, then:
|
||||
|
||||
```bash
|
||||
./target/release/potatomesh-matrix-bridge
|
||||
```
|
||||
|
||||
Environment variables you may care about:
|
||||
|
||||
* `RUST_LOG` – for logging, e.g.:
|
||||
|
||||
```bash
|
||||
RUST_LOG=info,reqwest=warn ./target/release/potatomesh-matrix-bridge
|
||||
```
|
||||
|
||||
The bridge will:
|
||||
|
||||
1. Load state from `bridge_state.json` (if present).
|
||||
2. Poll PotatoMesh every `poll_interval_secs`.
|
||||
3. For each new `TEXT_MESSAGE_APP`:
|
||||
|
||||
* Fetch node info.
|
||||
* Ensure puppet is registered (`@potato_{hex}:{server_name}`).
|
||||
* Set puppet display name to `long_name`.
|
||||
* Send a formatted text message into `room_id` as that puppet.
|
||||
* Update and persist `bridge_state.json`.
|
||||
|
||||
Delete `bridge_state.json` if you want it to replay all currently available messages.
|
||||
|
||||
---
|
||||
|
||||
## Development
|
||||
|
||||
Run tests:
|
||||
|
||||
```bash
|
||||
cargo test
|
||||
```
|
||||
|
||||
Format code:
|
||||
|
||||
```bash
|
||||
cargo fmt
|
||||
```
|
||||
|
||||
Lint (optional but recommended):
|
||||
|
||||
```bash
|
||||
cargo clippy -- -D warnings
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## GitHub Actions CI
|
||||
|
||||
This repository includes a GitHub Actions workflow (`.github/workflows/ci.yml`) that:
|
||||
|
||||
* runs on pushes and pull requests
|
||||
* caches Cargo dependencies
|
||||
* runs:
|
||||
|
||||
* `cargo fmt --check`
|
||||
* `cargo clippy`
|
||||
* `cargo test`
|
||||
|
||||
See the workflow file for details.
|
||||
|
||||
---
|
||||
|
||||
## Caveats & Future Work
|
||||
|
||||
* No E2EE: this bridge posts into unencrypted (or server-side managed) rooms. For encrypted rooms, you’d need real E2EE support and key management.
|
||||
* No inbound Matrix → PotatoMesh direction yet. This is a one-way bridge (PotatoMesh → Matrix).
|
||||
* No pagination or `since` support on the PotatoMesh API. The bridge simply deduplicates by message `id` and stores the highest seen.
|
||||
|
||||
If you change the PotatoMesh API, adjust the types in `src/potatomesh.rs` accordingly.
|
||||
@@ -0,0 +1,40 @@
|
||||
#!/bin/sh
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -e
|
||||
|
||||
# Default to container-aware configuration paths unless explicitly overridden.
|
||||
: "${POTATOMESH_CONTAINER:=1}"
|
||||
: "${POTATOMESH_SECRETS_DIR:=/run/secrets}"
|
||||
|
||||
export POTATOMESH_CONTAINER
|
||||
export POTATOMESH_SECRETS_DIR
|
||||
|
||||
# Default state file path from Config.toml unless overridden.
|
||||
STATE_FILE="${STATE_FILE:-/app/bridge_state.json}"
|
||||
STATE_DIR="$(dirname "$STATE_FILE")"
|
||||
|
||||
# Ensure state directory exists and is writable by the non-root user without
|
||||
# touching the read-only config bind mount.
|
||||
if [ ! -d "$STATE_DIR" ]; then
|
||||
mkdir -p "$STATE_DIR"
|
||||
fi
|
||||
|
||||
# Best-effort ownership fix; ignore if the underlying volume is read-only.
|
||||
chown potatomesh:potatomesh "$STATE_DIR" 2>/dev/null || true
|
||||
touch "$STATE_FILE" 2>/dev/null || true
|
||||
chown potatomesh:potatomesh "$STATE_FILE" 2>/dev/null || true
|
||||
|
||||
exec gosu potatomesh potatomesh-matrix-bridge "$@"
|
||||
@@ -0,0 +1,105 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use clap::{ArgAction, Parser};
|
||||
|
||||
#[cfg(not(test))]
|
||||
use crate::config::{ConfigInputs, ConfigOverrides};
|
||||
|
||||
/// CLI arguments for the Matrix bridge.
|
||||
#[derive(Debug, Parser)]
|
||||
#[command(
|
||||
name = "potatomesh-matrix-bridge",
|
||||
version,
|
||||
about = "PotatoMesh Matrix bridge"
|
||||
)]
|
||||
pub struct Cli {
|
||||
/// Path to the configuration TOML file.
|
||||
#[arg(long, value_name = "PATH")]
|
||||
pub config: Option<String>,
|
||||
/// Path to the bridge state file.
|
||||
#[arg(long, value_name = "PATH")]
|
||||
pub state_file: Option<String>,
|
||||
/// PotatoMesh base URL.
|
||||
#[arg(long, value_name = "URL")]
|
||||
pub potatomesh_base_url: Option<String>,
|
||||
/// Poll interval in seconds.
|
||||
#[arg(long, value_name = "SECS")]
|
||||
pub potatomesh_poll_interval_secs: Option<u64>,
|
||||
/// Matrix homeserver base URL.
|
||||
#[arg(long, value_name = "URL")]
|
||||
pub matrix_homeserver: Option<String>,
|
||||
/// Matrix appservice access token.
|
||||
#[arg(long, value_name = "TOKEN")]
|
||||
pub matrix_as_token: Option<String>,
|
||||
/// Path to a secret file containing the Matrix appservice access token.
|
||||
#[arg(long, value_name = "PATH")]
|
||||
pub matrix_as_token_file: Option<String>,
|
||||
/// Matrix homeserver token for inbound appservice requests.
|
||||
#[arg(long, value_name = "TOKEN")]
|
||||
pub matrix_hs_token: Option<String>,
|
||||
/// Path to a secret file containing the Matrix homeserver token.
|
||||
#[arg(long, value_name = "PATH")]
|
||||
pub matrix_hs_token_file: Option<String>,
|
||||
/// Matrix server name (domain).
|
||||
#[arg(long, value_name = "NAME")]
|
||||
pub matrix_server_name: Option<String>,
|
||||
/// Matrix room id to forward into.
|
||||
#[arg(long, value_name = "ROOM")]
|
||||
pub matrix_room_id: Option<String>,
|
||||
/// Force container defaults (overrides detection).
|
||||
#[arg(long, action = ArgAction::SetTrue)]
|
||||
pub container: bool,
|
||||
/// Disable container defaults (overrides detection).
|
||||
#[arg(long, action = ArgAction::SetTrue)]
|
||||
pub no_container: bool,
|
||||
/// Directory to search for default secret files.
|
||||
#[arg(long, value_name = "PATH")]
|
||||
pub secrets_dir: Option<String>,
|
||||
}
|
||||
|
||||
impl Cli {
|
||||
/// Convert CLI args into configuration inputs.
|
||||
#[cfg(not(test))]
|
||||
pub fn to_inputs(&self) -> ConfigInputs {
|
||||
ConfigInputs {
|
||||
config_path: self.config.clone(),
|
||||
secrets_dir: self.secrets_dir.clone(),
|
||||
container_override: resolve_container_override(self.container, self.no_container),
|
||||
container_hint: None,
|
||||
overrides: ConfigOverrides {
|
||||
potatomesh_base_url: self.potatomesh_base_url.clone(),
|
||||
potatomesh_poll_interval_secs: self.potatomesh_poll_interval_secs,
|
||||
matrix_homeserver: self.matrix_homeserver.clone(),
|
||||
matrix_as_token: self.matrix_as_token.clone(),
|
||||
matrix_as_token_file: self.matrix_as_token_file.clone(),
|
||||
matrix_hs_token: self.matrix_hs_token.clone(),
|
||||
matrix_hs_token_file: self.matrix_hs_token_file.clone(),
|
||||
matrix_server_name: self.matrix_server_name.clone(),
|
||||
matrix_room_id: self.matrix_room_id.clone(),
|
||||
state_file: self.state_file.clone(),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolve container override flags into an optional boolean.
|
||||
#[cfg(not(test))]
|
||||
fn resolve_container_override(container: bool, no_container: bool) -> Option<bool> {
|
||||
match (container, no_container) {
|
||||
(true, false) => Some(true),
|
||||
(false, true) => Some(false),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,978 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use serde::Deserialize;
|
||||
use std::{fs, path::Path};
|
||||
|
||||
const DEFAULT_CONFIG_PATH: &str = "Config.toml";
|
||||
const CONTAINER_CONFIG_PATH: &str = "/app/Config.toml";
|
||||
const DEFAULT_STATE_FILE: &str = "bridge_state.json";
|
||||
const CONTAINER_STATE_FILE: &str = "/app/bridge_state.json";
|
||||
const DEFAULT_SECRETS_DIR: &str = "/run/secrets";
|
||||
const CONTAINER_POLL_INTERVAL_SECS: u64 = 15;
|
||||
|
||||
/// PotatoMesh API settings.
|
||||
#[derive(Debug, Deserialize, Clone)]
|
||||
pub struct PotatomeshConfig {
|
||||
pub base_url: String,
|
||||
pub poll_interval_secs: u64,
|
||||
}
|
||||
|
||||
/// Matrix appservice settings for the bridge.
|
||||
#[derive(Debug, Deserialize, Clone)]
|
||||
pub struct MatrixConfig {
|
||||
pub homeserver: String,
|
||||
pub as_token: String,
|
||||
pub hs_token: String,
|
||||
pub server_name: String,
|
||||
pub room_id: String,
|
||||
}
|
||||
|
||||
/// State file configuration for the bridge.
|
||||
#[derive(Debug, Deserialize, Clone)]
|
||||
pub struct StateConfig {
|
||||
pub state_file: String,
|
||||
}
|
||||
|
||||
/// Full configuration loaded for the bridge runtime.
|
||||
#[derive(Debug, Deserialize, Clone)]
|
||||
pub struct Config {
|
||||
pub potatomesh: PotatomeshConfig,
|
||||
pub matrix: MatrixConfig,
|
||||
pub state: StateConfig,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Clone, Default)]
|
||||
struct PartialPotatomeshConfig {
|
||||
#[serde(default)]
|
||||
base_url: Option<String>,
|
||||
#[serde(default)]
|
||||
poll_interval_secs: Option<u64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Clone, Default)]
|
||||
struct PartialMatrixConfig {
|
||||
#[serde(default)]
|
||||
homeserver: Option<String>,
|
||||
#[serde(default)]
|
||||
as_token: Option<String>,
|
||||
#[serde(default)]
|
||||
hs_token: Option<String>,
|
||||
#[serde(default)]
|
||||
server_name: Option<String>,
|
||||
#[serde(default)]
|
||||
room_id: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Clone, Default)]
|
||||
struct PartialStateConfig {
|
||||
#[serde(default)]
|
||||
state_file: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Clone, Default)]
|
||||
struct PartialConfig {
|
||||
#[serde(default)]
|
||||
potatomesh: PartialPotatomeshConfig,
|
||||
#[serde(default)]
|
||||
matrix: PartialMatrixConfig,
|
||||
#[serde(default)]
|
||||
state: PartialStateConfig,
|
||||
}
|
||||
|
||||
/// Overwrite an optional value when the incoming value is present.
|
||||
fn merge_option<T>(target: &mut Option<T>, incoming: Option<T>) {
|
||||
if incoming.is_some() {
|
||||
*target = incoming;
|
||||
}
|
||||
}
|
||||
|
||||
/// CLI or environment overrides for configuration fields.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ConfigOverrides {
|
||||
pub potatomesh_base_url: Option<String>,
|
||||
pub potatomesh_poll_interval_secs: Option<u64>,
|
||||
pub matrix_homeserver: Option<String>,
|
||||
pub matrix_as_token: Option<String>,
|
||||
pub matrix_as_token_file: Option<String>,
|
||||
pub matrix_hs_token: Option<String>,
|
||||
pub matrix_hs_token_file: Option<String>,
|
||||
pub matrix_server_name: Option<String>,
|
||||
pub matrix_room_id: Option<String>,
|
||||
pub state_file: Option<String>,
|
||||
}
|
||||
|
||||
impl ConfigOverrides {
|
||||
fn apply_non_token_overrides(&self, cfg: &mut PartialConfig) {
|
||||
merge_option(
|
||||
&mut cfg.potatomesh.base_url,
|
||||
self.potatomesh_base_url.clone(),
|
||||
);
|
||||
merge_option(
|
||||
&mut cfg.potatomesh.poll_interval_secs,
|
||||
self.potatomesh_poll_interval_secs,
|
||||
);
|
||||
merge_option(&mut cfg.matrix.homeserver, self.matrix_homeserver.clone());
|
||||
merge_option(&mut cfg.matrix.server_name, self.matrix_server_name.clone());
|
||||
merge_option(&mut cfg.matrix.room_id, self.matrix_room_id.clone());
|
||||
merge_option(&mut cfg.state.state_file, self.state_file.clone());
|
||||
}
|
||||
|
||||
fn merge(self, higher: ConfigOverrides) -> ConfigOverrides {
|
||||
let matrix_as_token = if higher.matrix_as_token_file.is_some() {
|
||||
higher.matrix_as_token
|
||||
} else {
|
||||
higher.matrix_as_token.or(self.matrix_as_token)
|
||||
};
|
||||
let matrix_hs_token = if higher.matrix_hs_token_file.is_some() {
|
||||
higher.matrix_hs_token
|
||||
} else {
|
||||
higher.matrix_hs_token.or(self.matrix_hs_token)
|
||||
};
|
||||
ConfigOverrides {
|
||||
potatomesh_base_url: higher.potatomesh_base_url.or(self.potatomesh_base_url),
|
||||
potatomesh_poll_interval_secs: higher
|
||||
.potatomesh_poll_interval_secs
|
||||
.or(self.potatomesh_poll_interval_secs),
|
||||
matrix_homeserver: higher.matrix_homeserver.or(self.matrix_homeserver),
|
||||
matrix_as_token,
|
||||
matrix_as_token_file: higher.matrix_as_token_file.or(self.matrix_as_token_file),
|
||||
matrix_hs_token,
|
||||
matrix_hs_token_file: higher.matrix_hs_token_file.or(self.matrix_hs_token_file),
|
||||
matrix_server_name: higher.matrix_server_name.or(self.matrix_server_name),
|
||||
matrix_room_id: higher.matrix_room_id.or(self.matrix_room_id),
|
||||
state_file: higher.state_file.or(self.state_file),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Inputs gathered from CLI flags or environment variables.
#[derive(Debug, Clone, Default)]
pub struct ConfigInputs {
    // Explicit path to the TOML config file, if given.
    pub config_path: Option<String>,
    // Explicit directory holding token secret files, if given.
    pub secrets_dir: Option<String>,
    // Force container detection on/off, overriding all heuristics.
    pub container_override: Option<bool>,
    // Raw value of a container-indicating variable (e.g. CONTAINER).
    pub container_hint: Option<String>,
    // Per-field configuration overrides.
    pub overrides: ConfigOverrides,
}
|
||||
|
||||
impl ConfigInputs {
|
||||
/// Merge two input sets, preferring values from `higher`.
|
||||
pub fn merge(self, higher: ConfigInputs) -> ConfigInputs {
|
||||
ConfigInputs {
|
||||
config_path: higher.config_path.or(self.config_path),
|
||||
secrets_dir: higher.secrets_dir.or(self.secrets_dir),
|
||||
container_override: higher.container_override.or(self.container_override),
|
||||
container_hint: higher.container_hint.or(self.container_hint),
|
||||
overrides: self.overrides.merge(higher.overrides),
|
||||
}
|
||||
}
|
||||
|
||||
/// Load configuration inputs from the process environment.
|
||||
#[cfg(not(test))]
|
||||
pub fn from_env() -> anyhow::Result<Self> {
|
||||
let overrides = ConfigOverrides {
|
||||
potatomesh_base_url: env_var("POTATOMESH_BASE_URL"),
|
||||
potatomesh_poll_interval_secs: parse_u64_env("POTATOMESH_POLL_INTERVAL_SECS")?,
|
||||
matrix_homeserver: env_var("MATRIX_HOMESERVER"),
|
||||
matrix_as_token: env_var("MATRIX_AS_TOKEN"),
|
||||
matrix_as_token_file: env_var("MATRIX_AS_TOKEN_FILE"),
|
||||
matrix_hs_token: env_var("MATRIX_HS_TOKEN"),
|
||||
matrix_hs_token_file: env_var("MATRIX_HS_TOKEN_FILE"),
|
||||
matrix_server_name: env_var("MATRIX_SERVER_NAME"),
|
||||
matrix_room_id: env_var("MATRIX_ROOM_ID"),
|
||||
state_file: env_var("STATE_FILE"),
|
||||
};
|
||||
Ok(ConfigInputs {
|
||||
config_path: env_var("POTATOMESH_CONFIG"),
|
||||
secrets_dir: env_var("POTATOMESH_SECRETS_DIR"),
|
||||
container_override: parse_bool_env("POTATOMESH_CONTAINER")?,
|
||||
container_hint: env_var("CONTAINER"),
|
||||
overrides,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Config {
|
||||
/// Load a full Config from a TOML file.
|
||||
#[cfg(test)]
|
||||
pub fn load_from_file(path: &str) -> anyhow::Result<Self> {
|
||||
let contents = fs::read_to_string(path)?;
|
||||
let cfg = toml::from_str(&contents)?;
|
||||
Ok(cfg)
|
||||
}
|
||||
}
|
||||
|
||||
/// Load a Config by merging CLI/env overrides with an optional TOML file.
|
||||
#[cfg(not(test))]
|
||||
pub fn load(cli_inputs: ConfigInputs) -> anyhow::Result<Config> {
|
||||
let env_inputs = ConfigInputs::from_env()?;
|
||||
let cgroup_hint = read_cgroup();
|
||||
load_from_sources(cli_inputs, env_inputs, cgroup_hint.as_deref())
|
||||
}
|
||||
|
||||
/// Load configuration by merging CLI/env inputs and an optional config file.
///
/// The resolution order is significant and must not be reordered:
/// 1. env inputs merge under CLI inputs (CLI wins),
/// 2. container mode is detected to pick default paths,
/// 3. the base TOML file (if any) is loaded,
/// 4. non-token overrides are layered on top of the file,
/// 5. tokens are resolved with their own precedence
///    (inline value > token file > secrets dir > config value),
/// 6. container-only defaults are filled in,
/// 7. required fields are validated before the final unwraps.
fn load_from_sources(
    cli_inputs: ConfigInputs,
    env_inputs: ConfigInputs,
    cgroup_hint: Option<&str>,
) -> anyhow::Result<Config> {
    let merged_inputs = env_inputs.merge(cli_inputs);
    let container = detect_container(
        merged_inputs.container_override,
        merged_inputs.container_hint.as_deref(),
        cgroup_hint,
    );
    let defaults = default_paths(container);

    let base_cfg = resolve_base_config(&merged_inputs, &defaults)?;
    let mut cfg = base_cfg.unwrap_or_default();
    merged_inputs.overrides.apply_non_token_overrides(&mut cfg);

    let secrets_dir = resolve_secrets_dir(&merged_inputs, container, &defaults);
    let as_token = resolve_token(
        cfg.matrix.as_token.clone(),
        merged_inputs.overrides.matrix_as_token.clone(),
        merged_inputs.overrides.matrix_as_token_file.as_deref(),
        secrets_dir.as_deref(),
        "matrix_as_token",
    )?;
    let hs_token = resolve_token(
        cfg.matrix.hs_token.clone(),
        merged_inputs.overrides.matrix_hs_token.clone(),
        merged_inputs.overrides.matrix_hs_token_file.as_deref(),
        secrets_dir.as_deref(),
        "matrix_hs_token",
    )?;

    // The default poll interval is applied only in container mode; on the
    // host an unset interval stays missing and fails validation below.
    if cfg.potatomesh.poll_interval_secs.is_none() && container {
        cfg.potatomesh.poll_interval_secs = Some(defaults.poll_interval_secs);
    }

    // The state file path always gets a default, container or not.
    if cfg.state.state_file.is_none() {
        cfg.state.state_file = Some(defaults.state_file);
    }

    let missing = collect_missing_fields(&cfg, &as_token, &hs_token);
    if !missing.is_empty() {
        anyhow::bail!(
            "Missing required configuration values: {}",
            missing.join(", ")
        );
    }

    // Unwraps below are safe: collect_missing_fields just verified that every
    // one of these fields is Some.
    Ok(Config {
        potatomesh: PotatomeshConfig {
            base_url: cfg.potatomesh.base_url.unwrap(),
            poll_interval_secs: cfg.potatomesh.poll_interval_secs.unwrap(),
        },
        matrix: MatrixConfig {
            homeserver: cfg.matrix.homeserver.unwrap(),
            as_token: as_token.unwrap(),
            hs_token: hs_token.unwrap(),
            server_name: cfg.matrix.server_name.unwrap(),
            room_id: cfg.matrix.room_id.unwrap(),
        },
        state: StateConfig {
            state_file: cfg.state.state_file.unwrap(),
        },
    })
}
|
||||
|
||||
/// Collect the missing required field identifiers for error reporting.
|
||||
fn collect_missing_fields(
|
||||
cfg: &PartialConfig,
|
||||
as_token: &Option<String>,
|
||||
hs_token: &Option<String>,
|
||||
) -> Vec<&'static str> {
|
||||
let mut missing = Vec::new();
|
||||
if cfg.potatomesh.base_url.is_none() {
|
||||
missing.push("potatomesh.base_url");
|
||||
}
|
||||
if cfg.potatomesh.poll_interval_secs.is_none() {
|
||||
missing.push("potatomesh.poll_interval_secs");
|
||||
}
|
||||
if cfg.matrix.homeserver.is_none() {
|
||||
missing.push("matrix.homeserver");
|
||||
}
|
||||
if as_token.is_none() {
|
||||
missing.push("matrix.as_token");
|
||||
}
|
||||
if hs_token.is_none() {
|
||||
missing.push("matrix.hs_token");
|
||||
}
|
||||
if cfg.matrix.server_name.is_none() {
|
||||
missing.push("matrix.server_name");
|
||||
}
|
||||
if cfg.matrix.room_id.is_none() {
|
||||
missing.push("matrix.room_id");
|
||||
}
|
||||
if cfg.state.state_file.is_none() {
|
||||
missing.push("state.state_file");
|
||||
}
|
||||
missing
|
||||
}
|
||||
|
||||
/// Resolve the base TOML config file, honoring explicit config paths.
|
||||
fn resolve_base_config(
|
||||
inputs: &ConfigInputs,
|
||||
defaults: &DefaultPaths,
|
||||
) -> anyhow::Result<Option<PartialConfig>> {
|
||||
if let Some(path) = &inputs.config_path {
|
||||
return Ok(Some(load_partial_from_file(path)?));
|
||||
}
|
||||
let container_path = Path::new(&defaults.config_path);
|
||||
if container_path.exists() {
|
||||
return Ok(Some(load_partial_from_file(&defaults.config_path)?));
|
||||
}
|
||||
let host_path = Path::new(DEFAULT_CONFIG_PATH);
|
||||
if host_path.exists() {
|
||||
return Ok(Some(load_partial_from_file(DEFAULT_CONFIG_PATH)?));
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
/// Decide which secrets directory to use based on inputs and defaults.
|
||||
fn resolve_secrets_dir(
|
||||
inputs: &ConfigInputs,
|
||||
container: bool,
|
||||
defaults: &DefaultPaths,
|
||||
) -> Option<String> {
|
||||
if let Some(explicit) = inputs.secrets_dir.clone() {
|
||||
return Some(explicit);
|
||||
}
|
||||
if container {
|
||||
return Some(defaults.secrets_dir.clone());
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Resolve a token value from explicit values, secret files, or config file values.
|
||||
fn resolve_token(
|
||||
base_value: Option<String>,
|
||||
explicit_value: Option<String>,
|
||||
explicit_file: Option<&str>,
|
||||
secrets_dir: Option<&str>,
|
||||
default_secret_name: &str,
|
||||
) -> anyhow::Result<Option<String>> {
|
||||
if let Some(value) = explicit_value {
|
||||
return Ok(Some(value));
|
||||
}
|
||||
if let Some(path) = explicit_file {
|
||||
return Ok(Some(read_secret_file(path)?));
|
||||
}
|
||||
if let Some(dir) = secrets_dir {
|
||||
let default_path = Path::new(dir).join(default_secret_name);
|
||||
if default_path.exists() {
|
||||
return Ok(Some(read_secret_file(
|
||||
default_path
|
||||
.to_str()
|
||||
.ok_or_else(|| anyhow::anyhow!("Invalid secret file path"))?,
|
||||
)?));
|
||||
}
|
||||
}
|
||||
Ok(base_value)
|
||||
}
|
||||
|
||||
/// Read and trim a secret file from disk.
|
||||
fn read_secret_file(path: &str) -> anyhow::Result<String> {
|
||||
let contents = fs::read_to_string(path)?;
|
||||
let trimmed = contents.trim();
|
||||
if trimmed.is_empty() {
|
||||
anyhow::bail!("Secret file {path} is empty");
|
||||
}
|
||||
Ok(trimmed.to_string())
|
||||
}
|
||||
|
||||
/// Load a partial config from a TOML file.
|
||||
fn load_partial_from_file(path: &str) -> anyhow::Result<PartialConfig> {
|
||||
let contents = fs::read_to_string(path)?;
|
||||
let cfg = toml::from_str(&contents)?;
|
||||
Ok(cfg)
|
||||
}
|
||||
|
||||
/// Compute default paths and intervals based on container mode.
|
||||
fn default_paths(container: bool) -> DefaultPaths {
|
||||
if container {
|
||||
DefaultPaths {
|
||||
config_path: CONTAINER_CONFIG_PATH.to_string(),
|
||||
state_file: CONTAINER_STATE_FILE.to_string(),
|
||||
secrets_dir: DEFAULT_SECRETS_DIR.to_string(),
|
||||
poll_interval_secs: CONTAINER_POLL_INTERVAL_SECS,
|
||||
}
|
||||
} else {
|
||||
DefaultPaths {
|
||||
config_path: DEFAULT_CONFIG_PATH.to_string(),
|
||||
state_file: DEFAULT_STATE_FILE.to_string(),
|
||||
secrets_dir: DEFAULT_SECRETS_DIR.to_string(),
|
||||
poll_interval_secs: CONTAINER_POLL_INTERVAL_SECS,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Default file locations and poll interval for the current deployment mode
/// (container vs. host), produced by `default_paths`.
#[derive(Debug, Clone)]
struct DefaultPaths {
    // Default config file path probed by `resolve_base_config`.
    config_path: String,
    // Fallback state-file path when the config provides none.
    state_file: String,
    // Directory searched for token secret files.
    secrets_dir: String,
    // Poll interval applied when the config provides none (container mode).
    poll_interval_secs: u64,
}
|
||||
|
||||
/// Detect whether the bridge is running inside a container.
///
/// Precedence: an explicit override always wins; then any non-blank
/// container-env hint counts as "yes"; finally the cgroup contents are
/// scanned for well-known container-runtime markers. With no signal at all,
/// assume host mode.
fn detect_container(
    override_value: Option<bool>,
    env_hint: Option<&str>,
    cgroup_hint: Option<&str>,
) -> bool {
    if let Some(forced) = override_value {
        return forced;
    }
    if env_hint.map_or(false, |hint| !hint.trim().is_empty()) {
        return true;
    }
    cgroup_hint.map_or(false, |cgroup| {
        let haystack = cgroup.to_ascii_lowercase();
        ["docker", "kubepods", "containerd", "podman"]
            .iter()
            .any(|marker| haystack.contains(marker))
    })
}
|
||||
|
||||
/// Read the primary cgroup file for container detection.
|
||||
#[cfg(not(test))]
|
||||
fn read_cgroup() -> Option<String> {
|
||||
fs::read_to_string("/proc/1/cgroup").ok()
|
||||
}
|
||||
|
||||
/// Read and trim an environment variable value.
///
/// Unset, non-UTF-8, empty, and whitespace-only values all yield `None`, so
/// `FOO=""` behaves the same as not setting FOO at all.
#[cfg(not(test))]
fn env_var(key: &str) -> Option<String> {
    match std::env::var(key) {
        Ok(value) if !value.trim().is_empty() => Some(value),
        _ => None,
    }
}
|
||||
|
||||
/// Parse a u64 environment variable value.
|
||||
#[cfg(not(test))]
|
||||
fn parse_u64_env(key: &str) -> anyhow::Result<Option<u64>> {
|
||||
match env_var(key) {
|
||||
None => Ok(None),
|
||||
Some(value) => value
|
||||
.parse::<u64>()
|
||||
.map(Some)
|
||||
.map_err(|e| anyhow::anyhow!("Invalid {key} value: {e}")),
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a boolean environment variable value.
|
||||
#[cfg(not(test))]
|
||||
fn parse_bool_env(key: &str) -> anyhow::Result<Option<bool>> {
|
||||
match env_var(key) {
|
||||
None => Ok(None),
|
||||
Some(value) => parse_bool_value(key, &value).map(Some),
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a boolean string with standard truthy/falsy values.
|
||||
#[cfg(not(test))]
|
||||
fn parse_bool_value(key: &str, value: &str) -> anyhow::Result<bool> {
|
||||
let normalized = value.trim().to_ascii_lowercase();
|
||||
match normalized.as_str() {
|
||||
"1" | "true" | "yes" | "on" => Ok(true),
|
||||
"0" | "false" | "no" | "off" => Ok(false),
|
||||
_ => anyhow::bail!("Invalid {key} value: {value}"),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use serial_test::serial;
    use std::io::Write;
    use std::path::{Path, PathBuf};

    // RAII guard that changes the current working directory for one test and
    // restores it afterwards. Tests that use it must be #[serial]: the cwd is
    // process-global state.
    struct CwdGuard {
        original: PathBuf,
    }

    impl CwdGuard {
        /// Switch to the provided path and restore the original cwd on drop.
        fn enter(path: &Path) -> Self {
            let original = std::env::current_dir().unwrap_or_else(|_| PathBuf::from("/"));
            std::env::set_current_dir(path).unwrap();
            Self { original }
        }
    }

    impl Drop for CwdGuard {
        fn drop(&mut self) {
            // Fall back to "/" if the original directory no longer exists
            // (e.g. it was a tempdir removed by another guard).
            if std::env::set_current_dir(&self.original).is_err() {
                let _ = std::env::set_current_dir("/");
            }
        }
    }

    // A complete override set covering every required field, usable as a
    // baseline with struct-update syntax.
    fn minimal_overrides() -> ConfigOverrides {
        ConfigOverrides {
            potatomesh_base_url: Some("https://potatomesh.net/".to_string()),
            potatomesh_poll_interval_secs: Some(10),
            matrix_homeserver: Some("https://matrix.example.org".to_string()),
            matrix_as_token: Some("AS_TOKEN".to_string()),
            matrix_hs_token: Some("HS_TOKEN".to_string()),
            matrix_server_name: Some("example.org".to_string()),
            matrix_room_id: Some("!roomid:example.org".to_string()),
            state_file: Some("bridge_state.json".to_string()),
            matrix_as_token_file: None,
            matrix_hs_token_file: None,
        }
    }

    // Full Config deserializes directly from a complete TOML document.
    #[test]
    fn parse_minimal_config_from_toml_str() {
        let toml_str = r#"
[potatomesh]
base_url = "https://potatomesh.net/"
poll_interval_secs = 10

[matrix]
homeserver = "https://matrix.example.org"
as_token = "AS_TOKEN"
hs_token = "HS_TOKEN"
server_name = "example.org"
room_id = "!roomid:example.org"

[state]
state_file = "bridge_state.json"
"#;

        let cfg: Config = toml::from_str(toml_str).expect("toml should parse");
        assert_eq!(cfg.potatomesh.base_url, "https://potatomesh.net/");
        assert_eq!(cfg.potatomesh.poll_interval_secs, 10);

        assert_eq!(cfg.matrix.homeserver, "https://matrix.example.org");
        assert_eq!(cfg.matrix.as_token, "AS_TOKEN");
        assert_eq!(cfg.matrix.hs_token, "HS_TOKEN");
        assert_eq!(cfg.matrix.server_name, "example.org");
        assert_eq!(cfg.matrix.room_id, "!roomid:example.org");

        assert_eq!(cfg.state.state_file, "bridge_state.json");
    }

    // A missing file surfaces as an error, not a default config.
    #[test]
    fn load_from_file_not_found() {
        let result = Config::load_from_file("file_that_does_not_exist.toml");
        assert!(result.is_err());
    }

    // A complete file on disk loads successfully.
    #[test]
    fn load_from_file_valid_file() {
        let toml_str = r#"
[potatomesh]
base_url = "https://potatomesh.net/"
poll_interval_secs = 10

[matrix]
homeserver = "https://matrix.example.org"
as_token = "AS_TOKEN"
hs_token = "HS_TOKEN"
server_name = "example.org"
room_id = "!roomid:example.org"

[state]
state_file = "bridge_state.json"
"#;
        let mut file = tempfile::NamedTempFile::new().unwrap();
        write!(file, "{}", toml_str).unwrap();
        let result = Config::load_from_file(file.path().to_str().unwrap());
        assert!(result.is_ok());
    }

    // An explicit override beats both hint sources, even a false override.
    #[test]
    fn detect_container_prefers_override() {
        assert!(detect_container(Some(true), None, None));
        assert!(!detect_container(
            Some(false),
            Some("docker"),
            Some("docker")
        ));
    }

    // Either a non-empty env hint or a matching cgroup marker is enough;
    // an empty cgroup is not.
    #[test]
    fn detect_container_from_hint_or_cgroup() {
        assert!(detect_container(None, Some("docker"), None));
        assert!(detect_container(None, None, Some("kubepods")));
        assert!(!detect_container(None, None, Some("")));
    }

    // CLI-level overrides win over env-level overrides, which win over the file.
    #[test]
    fn load_uses_cli_overrides_over_env() {
        let toml_str = r#"
[potatomesh]
base_url = "https://potatomesh.net/"
poll_interval_secs = 5

[matrix]
homeserver = "https://matrix.example.org"
as_token = "AS_TOKEN"
hs_token = "HS_TOKEN"
server_name = "example.org"
room_id = "!roomid:example.org"

[state]
state_file = "bridge_state.json"
"#;
        let mut file = tempfile::NamedTempFile::new().unwrap();
        write!(file, "{}", toml_str).unwrap();

        let env_inputs = ConfigInputs {
            config_path: Some(file.path().to_str().unwrap().to_string()),
            overrides: ConfigOverrides {
                potatomesh_base_url: Some("https://env.example/".to_string()),
                ..minimal_overrides()
            },
            ..ConfigInputs::default()
        };
        let cli_inputs = ConfigInputs {
            overrides: ConfigOverrides {
                potatomesh_base_url: Some("https://cli.example/".to_string()),
                ..ConfigOverrides::default()
            },
            ..ConfigInputs::default()
        };

        let cfg = load_from_sources(cli_inputs, env_inputs, None).unwrap();
        assert_eq!(cfg.potatomesh.base_url, "https://cli.example/");
    }

    // In container mode a token missing from every other source is read from
    // the default secret file name inside the secrets dir.
    #[test]
    #[serial]
    fn load_uses_container_secret_defaults() {
        let tmp_dir = tempfile::tempdir().unwrap();
        let _guard = CwdGuard::enter(tmp_dir.path());
        let secrets_dir = tmp_dir.path();
        fs::write(secrets_dir.join("matrix_as_token"), "FROM_SECRET").unwrap();

        let cli_inputs = ConfigInputs {
            secrets_dir: Some(secrets_dir.to_string_lossy().to_string()),
            container_override: Some(true),
            overrides: ConfigOverrides {
                potatomesh_base_url: Some("https://potatomesh.net/".to_string()),
                potatomesh_poll_interval_secs: Some(10),
                matrix_homeserver: Some("https://matrix.example.org".to_string()),
                matrix_hs_token: Some("HS_TOKEN".to_string()),
                matrix_server_name: Some("example.org".to_string()),
                matrix_room_id: Some("!roomid:example.org".to_string()),
                state_file: Some("bridge_state.json".to_string()),
                ..ConfigOverrides::default()
            },
            ..ConfigInputs::default()
        };

        let cfg = load_from_sources(cli_inputs, ConfigInputs::default(), None).unwrap();
        assert_eq!(cfg.matrix.as_token, "FROM_SECRET");
    }

    // An inline token beats an explicit file, the secrets dir, and the base value.
    #[test]
    fn resolve_token_prefers_explicit_value() {
        let tmp_dir = tempfile::tempdir().unwrap();
        let token_file = tmp_dir.path().join("token");
        fs::write(&token_file, "FROM_FILE").unwrap();

        let resolved = resolve_token(
            Some("FROM_BASE".to_string()),
            Some("FROM_EXPLICIT".to_string()),
            Some(token_file.to_str().unwrap()),
            Some(tmp_dir.path().to_str().unwrap()),
            "matrix_as_token",
        )
        .unwrap();

        assert_eq!(resolved, Some("FROM_EXPLICIT".to_string()));
    }

    // With no inline value, an explicitly named file is read.
    #[test]
    fn resolve_token_reads_explicit_file() {
        let tmp_dir = tempfile::tempdir().unwrap();
        let token_file = tmp_dir.path().join("token");
        fs::write(&token_file, "FROM_FILE").unwrap();

        let resolved = resolve_token(
            None,
            None,
            Some(token_file.to_str().unwrap()),
            None,
            "matrix_as_token",
        )
        .unwrap();

        assert_eq!(resolved, Some("FROM_FILE".to_string()));
    }

    // With no inline value or explicit file, the default-named secret file
    // inside the secrets dir is used.
    #[test]
    fn resolve_token_reads_default_secret_file() {
        let tmp_dir = tempfile::tempdir().unwrap();
        fs::write(tmp_dir.path().join("matrix_hs_token"), "FROM_SECRET").unwrap();

        let resolved = resolve_token(
            None,
            None,
            None,
            Some(tmp_dir.path().to_str().unwrap()),
            "matrix_hs_token",
        )
        .unwrap();

        assert_eq!(resolved, Some("FROM_SECRET".to_string()));
    }

    // A whitespace-only secret file is rejected rather than yielding "".
    #[test]
    fn resolve_token_errors_on_empty_secret_file() {
        let tmp_dir = tempfile::tempdir().unwrap();
        let token_file = tmp_dir.path().join("token");
        fs::write(&token_file, "   ").unwrap();

        let result = resolve_token(
            None,
            None,
            Some(token_file.to_str().unwrap()),
            None,
            "matrix_as_token",
        );

        assert!(result.is_err());
    }

    // An explicitly provided secrets dir beats the container default.
    #[test]
    fn resolve_secrets_dir_prefers_explicit() {
        let defaults = DefaultPaths {
            config_path: "Config.toml".to_string(),
            state_file: DEFAULT_STATE_FILE.to_string(),
            secrets_dir: "default".to_string(),
            poll_interval_secs: CONTAINER_POLL_INTERVAL_SECS,
        };
        let inputs = ConfigInputs {
            secrets_dir: Some("explicit".to_string()),
            ..ConfigInputs::default()
        };

        let resolved = resolve_secrets_dir(&inputs, true, &defaults);
        assert_eq!(resolved, Some("explicit".to_string()));
    }

    // Without an explicit dir, container mode uses the default and host mode none.
    #[test]
    fn resolve_secrets_dir_container_default() {
        let defaults = DefaultPaths {
            config_path: "Config.toml".to_string(),
            state_file: DEFAULT_STATE_FILE.to_string(),
            secrets_dir: "default".to_string(),
            poll_interval_secs: CONTAINER_POLL_INTERVAL_SECS,
        };
        let inputs = ConfigInputs::default();

        let resolved = resolve_secrets_dir(&inputs, true, &defaults);
        assert_eq!(resolved, Some("default".to_string()));
        assert_eq!(resolve_secrets_dir(&inputs, false, &defaults), None);
    }

    // An explicit config path is loaded even though default paths also exist.
    #[test]
    #[serial]
    fn resolve_base_config_prefers_explicit_path() {
        let tmp_dir = tempfile::tempdir().unwrap();
        let _guard = CwdGuard::enter(tmp_dir.path());
        let config_path = tmp_dir.path().join("explicit.toml");
        fs::write(
            &config_path,
            r#"[potatomesh]
base_url = "https://potatomesh.net/"
poll_interval_secs = 10
[matrix]
homeserver = "https://matrix.example.org"
as_token = "AS_TOKEN"
hs_token = "HS_TOKEN"
server_name = "example.org"
room_id = "!roomid:example.org"
[state]
state_file = "bridge_state.json"
"#,
        )
        .unwrap();

        let defaults = default_paths(false);
        let inputs = ConfigInputs {
            config_path: Some(config_path.to_string_lossy().to_string()),
            ..ConfigInputs::default()
        };

        let resolved = resolve_base_config(&inputs, &defaults).unwrap();
        assert!(resolved.is_some());
    }

    // The container default path is probed before the host default path.
    #[test]
    #[serial]
    fn resolve_base_config_uses_container_path_when_present() {
        let tmp_dir = tempfile::tempdir().unwrap();
        let _guard = CwdGuard::enter(tmp_dir.path());
        let config_path = tmp_dir.path().join("container.toml");
        fs::write(
            &config_path,
            r#"[potatomesh]
base_url = "https://potatomesh.net/"
poll_interval_secs = 10
[matrix]
homeserver = "https://matrix.example.org"
as_token = "AS_TOKEN"
hs_token = "HS_TOKEN"
server_name = "example.org"
room_id = "!roomid:example.org"
[state]
state_file = "bridge_state.json"
"#,
        )
        .unwrap();

        let defaults = DefaultPaths {
            config_path: config_path.to_string_lossy().to_string(),
            state_file: DEFAULT_STATE_FILE.to_string(),
            secrets_dir: DEFAULT_SECRETS_DIR.to_string(),
            poll_interval_secs: CONTAINER_POLL_INTERVAL_SECS,
        };

        let resolved = resolve_base_config(&ConfigInputs::default(), &defaults).unwrap();
        assert!(resolved.is_some());
    }

    // The host default path (relative to cwd) is the last fallback.
    #[test]
    #[serial]
    fn resolve_base_config_uses_host_path_when_present() {
        let tmp_dir = tempfile::tempdir().unwrap();
        let _guard = CwdGuard::enter(tmp_dir.path());
        fs::write(
            "Config.toml",
            r#"[potatomesh]
base_url = "https://potatomesh.net/"
poll_interval_secs = 10
[matrix]
homeserver = "https://matrix.example.org"
as_token = "AS_TOKEN"
hs_token = "HS_TOKEN"
server_name = "example.org"
room_id = "!roomid:example.org"
[state]
state_file = "bridge_state.json"
"#,
        )
        .unwrap();

        let defaults = default_paths(false);
        let resolved = resolve_base_config(&ConfigInputs::default(), &defaults).unwrap();
        assert!(resolved.is_some());
    }

    // No config anywhere is not an error: loading proceeds from overrides only.
    #[test]
    #[serial]
    fn resolve_base_config_returns_none_when_missing() {
        let tmp_dir = tempfile::tempdir().unwrap();
        let _guard = CwdGuard::enter(tmp_dir.path());
        let defaults = default_paths(false);
        let resolved = resolve_base_config(&ConfigInputs::default(), &defaults).unwrap();
        assert!(resolved.is_none());
    }

    // A CLI-level token *file* must beat an env-level inline token value
    // (the file-beats-inline rule in ConfigOverrides::merge).
    #[test]
    #[serial]
    fn load_prefers_cli_token_file_over_env_value() {
        let tmp_dir = tempfile::tempdir().unwrap();
        let _guard = CwdGuard::enter(tmp_dir.path());

        let token_file = tmp_dir.path().join("as_token");
        fs::write(&token_file, "CLI_SECRET").unwrap();

        let env_inputs = ConfigInputs {
            overrides: ConfigOverrides {
                potatomesh_base_url: Some("https://potatomesh.net/".to_string()),
                potatomesh_poll_interval_secs: Some(10),
                matrix_homeserver: Some("https://matrix.example.org".to_string()),
                matrix_as_token: Some("ENV_TOKEN".to_string()),
                matrix_hs_token: Some("HS_TOKEN".to_string()),
                matrix_server_name: Some("example.org".to_string()),
                matrix_room_id: Some("!roomid:example.org".to_string()),
                ..ConfigOverrides::default()
            },
            ..ConfigInputs::default()
        };
        let cli_inputs = ConfigInputs {
            overrides: ConfigOverrides {
                matrix_as_token_file: Some(token_file.to_string_lossy().to_string()),
                ..ConfigOverrides::default()
            },
            ..ConfigInputs::default()
        };

        let cfg = load_from_sources(cli_inputs, env_inputs, None).unwrap();
        assert_eq!(cfg.matrix.as_token, "CLI_SECRET");
    }

    // An unset poll interval gets the container default in container mode.
    #[test]
    #[serial]
    fn load_uses_container_default_poll_interval() {
        let tmp_dir = tempfile::tempdir().unwrap();
        let _guard = CwdGuard::enter(tmp_dir.path());

        let cli_inputs = ConfigInputs {
            container_override: Some(true),
            overrides: ConfigOverrides {
                potatomesh_base_url: Some("https://potatomesh.net/".to_string()),
                matrix_homeserver: Some("https://matrix.example.org".to_string()),
                matrix_as_token: Some("AS_TOKEN".to_string()),
                matrix_hs_token: Some("HS_TOKEN".to_string()),
                matrix_server_name: Some("example.org".to_string()),
                matrix_room_id: Some("!roomid:example.org".to_string()),
                ..ConfigOverrides::default()
            },
            ..ConfigInputs::default()
        };

        let cfg = load_from_sources(cli_inputs, ConfigInputs::default(), None).unwrap();
        assert_eq!(
            cfg.potatomesh.poll_interval_secs,
            CONTAINER_POLL_INTERVAL_SECS
        );
    }

    // The state-file default applies even outside containers.
    #[test]
    #[serial]
    fn load_uses_default_state_path_when_missing() {
        let tmp_dir = tempfile::tempdir().unwrap();
        let _guard = CwdGuard::enter(tmp_dir.path());

        let cli_inputs = ConfigInputs {
            overrides: ConfigOverrides {
                potatomesh_base_url: Some("https://potatomesh.net/".to_string()),
                potatomesh_poll_interval_secs: Some(10),
                matrix_homeserver: Some("https://matrix.example.org".to_string()),
                matrix_as_token: Some("AS_TOKEN".to_string()),
                matrix_hs_token: Some("HS_TOKEN".to_string()),
                matrix_server_name: Some("example.org".to_string()),
                matrix_room_id: Some("!roomid:example.org".to_string()),
                ..ConfigOverrides::default()
            },
            ..ConfigInputs::default()
        };

        let cfg = load_from_sources(cli_inputs, ConfigInputs::default(), None).unwrap();
        assert_eq!(cfg.state.state_file, DEFAULT_STATE_FILE);
    }
}
|
||||
@@ -0,0 +1,831 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod cli;
|
||||
mod config;
|
||||
mod matrix;
|
||||
mod matrix_server;
|
||||
mod potatomesh;
|
||||
|
||||
use std::{fs, net::SocketAddr, path::Path};
|
||||
|
||||
use anyhow::Result;
|
||||
#[cfg(not(test))]
|
||||
use clap::Parser;
|
||||
use tokio::time::Duration;
|
||||
use tracing::{error, info};
|
||||
|
||||
#[cfg(not(test))]
|
||||
use crate::cli::Cli;
|
||||
#[cfg(not(test))]
|
||||
use crate::config::Config;
|
||||
use crate::matrix::MatrixAppserviceClient;
|
||||
use crate::matrix_server::run_synapse_listener;
|
||||
use crate::potatomesh::{FetchParams, PotatoClient, PotatoMessage, PotatoNode};
|
||||
#[cfg(not(test))]
|
||||
use tokio::time::sleep;
|
||||
|
||||
/// Persistent cursor tracking which PotatoMesh messages the bridge has
/// already forwarded; serialized to the JSON state file between runs.
#[derive(Debug, serde::Serialize, serde::Deserialize, Default)]
pub struct BridgeState {
    /// Highest message id processed by the bridge.
    last_message_id: Option<u64>,
    /// Highest rx_time observed; used to build incremental fetch queries.
    #[serde(default)]
    last_rx_time: Option<u64>,
    /// Message ids seen at the current last_rx_time for de-duplication.
    #[serde(default)]
    last_rx_time_ids: Vec<u64>,
    /// Legacy checkpoint timestamp used before last_rx_time was added.
    /// Read for migration in `load` but never written back (skip_serializing).
    #[serde(default, skip_serializing)]
    last_checked_at: Option<u64>,
}
|
||||
|
||||
impl BridgeState {
|
||||
fn load(path: &str) -> Result<Self> {
|
||||
if !Path::new(path).exists() {
|
||||
return Ok(Self::default());
|
||||
}
|
||||
let data = fs::read_to_string(path)?;
|
||||
// Treat empty/whitespace-only files as a fresh state.
|
||||
if data.trim().is_empty() {
|
||||
return Ok(Self::default());
|
||||
}
|
||||
let mut s: Self = serde_json::from_str(&data)?;
|
||||
if s.last_rx_time.is_none() {
|
||||
s.last_rx_time = s.last_checked_at;
|
||||
}
|
||||
s.last_checked_at = None;
|
||||
Ok(s)
|
||||
}
|
||||
|
||||
fn save(&self, path: &str) -> Result<()> {
|
||||
let data = serde_json::to_string_pretty(self)?;
|
||||
fs::write(path, data)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn should_forward(&self, msg: &PotatoMessage) -> bool {
|
||||
match self.last_rx_time {
|
||||
None => match self.last_message_id {
|
||||
None => true,
|
||||
Some(last_id) => msg.id > last_id,
|
||||
},
|
||||
Some(last_ts) => {
|
||||
if msg.rx_time > last_ts {
|
||||
true
|
||||
} else if msg.rx_time < last_ts {
|
||||
false
|
||||
} else {
|
||||
!self.last_rx_time_ids.contains(&msg.id)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn update_with(&mut self, msg: &PotatoMessage) {
|
||||
self.last_message_id = Some(msg.id);
|
||||
if self.last_rx_time.is_none() || Some(msg.rx_time) > self.last_rx_time {
|
||||
self.last_rx_time = Some(msg.rx_time);
|
||||
self.last_rx_time_ids = vec![msg.id];
|
||||
} else if Some(msg.rx_time) == self.last_rx_time && !self.last_rx_time_ids.contains(&msg.id)
|
||||
{
|
||||
self.last_rx_time_ids.push(msg.id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn build_fetch_params(state: &BridgeState) -> FetchParams {
|
||||
if state.last_message_id.is_none() {
|
||||
FetchParams {
|
||||
limit: None,
|
||||
since: None,
|
||||
}
|
||||
} else if let Some(ts) = state.last_rx_time {
|
||||
FetchParams {
|
||||
limit: None,
|
||||
since: Some(ts),
|
||||
}
|
||||
} else {
|
||||
FetchParams {
|
||||
limit: Some(10),
|
||||
since: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Persist the bridge state and log any write errors.
|
||||
fn persist_state(state: &BridgeState, state_path: &str) {
|
||||
if let Err(e) = state.save(state_path) {
|
||||
error!("Error saving state: {:?}", e);
|
||||
}
|
||||
}
|
||||
|
||||
/// Emit an info log for the latest bridge state snapshot.
/// Kept as a helper so every checkpoint advance is logged uniformly.
fn log_state_update(state: &BridgeState) {
    info!("Updated state: {:?}", state);
}
|
||||
|
||||
/// Emit a sanitized config log without sensitive tokens.
///
/// Only endpoint/identifier fields are logged; the appservice and
/// homeserver tokens in `cfg.matrix` are intentionally omitted.
#[cfg(not(test))]
fn log_config(cfg: &Config) {
    info!(
        potatomesh_base_url = cfg.potatomesh.base_url.as_str(),
        matrix_homeserver = cfg.matrix.homeserver.as_str(),
        matrix_server_name = cfg.matrix.server_name.as_str(),
        matrix_room_id = cfg.matrix.room_id.as_str(),
        state_file = cfg.state.state_file.as_str(),
        "Loaded config"
    );
}
|
||||
|
||||
/// Run a single fetch-and-forward cycle against PotatoMesh.
///
/// Fetches messages using the current checkpoint, forwards new
/// TEXT_MESSAGE_APP messages to Matrix, and persists the state file after
/// every message that advances the checkpoint. Fetch and per-message
/// errors are logged and do not abort the cycle.
async fn poll_once(
    potato: &PotatoClient,
    matrix: &MatrixAppserviceClient,
    state: &mut BridgeState,
    state_path: &str,
) {
    let params = build_fetch_params(state);

    match potato.fetch_messages(params).await {
        Ok(mut msgs) => {
            // sort by rx_time so we process by actual receipt time
            msgs.sort_by_key(|m| m.rx_time);

            for msg in &msgs {
                if !state.should_forward(msg) {
                    continue;
                }

                // Filter to the ports you care about: non-text payloads
                // advance the checkpoint (so they are not re-fetched) but
                // are never bridged.
                if let Some(port) = &msg.portnum {
                    if port != "TEXT_MESSAGE_APP" {
                        state.update_with(msg);
                        log_state_update(state);
                        persist_state(state, state_path);
                        continue;
                    }
                }

                if let Err(e) = handle_message(potato, matrix, state, msg).await {
                    // handle_message only advances state after a successful
                    // send, so a failed message is retried on the next poll.
                    error!("Error handling message {}: {:?}", msg.id, e);
                    continue;
                }

                // persist after each processed message
                persist_state(state, state_path);
            }
        }
        Err(e) => {
            error!("Error fetching PotatoMesh messages: {:?}", e);
        }
    }
}
|
||||
|
||||
fn spawn_synapse_listener(addr: SocketAddr, token: String) -> tokio::task::JoinHandle<()> {
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = run_synapse_listener(addr, token).await {
|
||||
error!("Synapse listener failed: {:?}", e);
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Bridge entry point: load config, health-check both sides, start the
/// Synapse listener, then poll PotatoMesh forever.
#[cfg(not(test))]
#[tokio::main]
async fn main() -> Result<()> {
    // Logging: RUST_LOG=info,bridge=debug,reqwest=warn ...
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::from_default_env()
                .add_directive("potatomesh_matrix_bridge=info".parse().unwrap_or_default())
                .add_directive("reqwest=warn".parse().unwrap_or_default()),
        )
        .init();

    // Configuration comes from CLI arguments via config::load.
    let cli = Cli::parse();
    let cfg = config::load(cli.to_inputs())?;
    log_config(&cfg);

    // Fail fast if either side of the bridge is unreachable at startup.
    let http = reqwest::Client::builder().build()?;
    let potato = PotatoClient::new(http.clone(), cfg.potatomesh.clone());
    potato.health_check().await?;
    let matrix = MatrixAppserviceClient::new(http.clone(), cfg.matrix.clone());
    matrix.health_check().await?;

    // Background task: appservice HTTP listener for Synapse on port 41448.
    // The handle is kept only to tie its lifetime to main.
    let synapse_addr = SocketAddr::from(([0, 0, 0, 0], 41448));
    let synapse_token = cfg.matrix.hs_token.clone();
    let _synapse_handle = spawn_synapse_listener(synapse_addr, synapse_token);

    let state_path = &cfg.state.state_file;
    let mut state = BridgeState::load(state_path)?;
    info!("Loaded state: {:?}", state);

    let poll_interval = Duration::from_secs(cfg.potatomesh.poll_interval_secs);

    // Main loop: poll PotatoMesh, then sleep for the configured interval.
    loop {
        poll_once(&potato, &matrix, &mut state, state_path).await;

        sleep(poll_interval).await;
    }
}
|
||||
|
||||
/// Bridge a single PotatoMesh text message into the Matrix room.
///
/// Looks up the sending node, ensures the corresponding puppet user is
/// registered, joined, and has a display name, sends the formatted message
/// as that puppet, and finally advances the bridge checkpoint. State is
/// only updated after the Matrix send succeeds, so a failure leaves the
/// message eligible for retry on the next poll.
async fn handle_message(
    potato: &PotatoClient,
    matrix: &MatrixAppserviceClient,
    state: &mut BridgeState,
    msg: &PotatoMessage,
) -> Result<()> {
    let node = potato.get_node(&msg.node_id).await?;
    let localpart = MatrixAppserviceClient::localpart_from_node_id(&msg.node_id);
    let user_id = matrix.user_id(&localpart);

    // Ensure puppet exists & has display name
    matrix.ensure_user_registered(&localpart).await?;
    matrix.ensure_user_joined_room(&user_id).await?;
    let display_name = display_name_for_node(&node);
    matrix.set_display_name(&user_id, &display_name).await?;

    // Format the bridged message with a "[freq][preset][channel]" prefix.
    let preset_short = modem_preset_short(&msg.modem_preset);
    let prefix = format!(
        "[{freq}][{preset_short}][{channel}]",
        freq = msg.lora_freq,
        preset_short = preset_short,
        channel = msg.channel_name,
    );
    let (body, formatted_body) = format_message_bodies(&prefix, &msg.text);

    matrix
        .send_formatted_message_as(&user_id, &body, &formatted_body)
        .await?;

    info!("Bridged message: {:?}", msg);
    state.update_with(msg);
    log_state_update(state);
    Ok(())
}
|
||||
|
||||
/// Build a compact modem preset label like "LF" for "LongFast".
///
/// Collects the ASCII uppercase letters of the preset name; if there are
/// none, falls back to the first two characters of the preset.
fn modem_preset_short(preset: &str) -> String {
    let mut abbrev = String::new();
    for ch in preset.chars() {
        if ch.is_ascii_uppercase() {
            abbrev.push(ch);
        }
    }
    if abbrev.is_empty() {
        preset.chars().take(2).collect()
    } else {
        abbrev
    }
}
|
||||
|
||||
/// Build plain text + HTML message bodies with inline-code metadata.
|
||||
fn format_message_bodies(prefix: &str, text: &str) -> (String, String) {
|
||||
let body = format!("`{}` {}", prefix, text);
|
||||
let formatted_body = format!("<code>{}</code> {}", escape_html(prefix), escape_html(text));
|
||||
(body, formatted_body)
|
||||
}
|
||||
|
||||
/// Build the Matrix display name from a node's long/short names.
|
||||
fn display_name_for_node(node: &PotatoNode) -> String {
|
||||
match node
|
||||
.short_name
|
||||
.as_deref()
|
||||
.map(str::trim)
|
||||
.filter(|s| !s.is_empty())
|
||||
{
|
||||
Some(short) if short != node.long_name => format!("{} ({})", node.long_name, short),
|
||||
_ => node.long_name.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Minimal HTML escaping for Matrix formatted_body payloads.
///
/// Escapes the five characters with special meaning in HTML (& < > " ')
/// into their entity forms; every other character is copied unchanged.
/// NOTE: the previous version mapped each special character to itself
/// (the entity text was lost), making the function a no-op and leaving
/// formatted bodies open to markup injection.
fn escape_html(input: &str) -> String {
    let mut escaped = String::with_capacity(input.len());
    for ch in input.chars() {
        match ch {
            '&' => escaped.push_str("&amp;"),
            '<' => escaped.push_str("&lt;"),
            '>' => escaped.push_str("&gt;"),
            '"' => escaped.push_str("&quot;"),
            '\'' => escaped.push_str("&#39;"),
            _ => escaped.push(ch),
        }
    }
    escaped
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::config::{MatrixConfig, PotatomeshConfig};
|
||||
use crate::matrix::MatrixAppserviceClient;
|
||||
use crate::potatomesh::PotatoClient;
|
||||
|
||||
fn sample_msg(id: u64) -> PotatoMessage {
|
||||
PotatoMessage {
|
||||
id,
|
||||
rx_time: 0,
|
||||
rx_iso: "2025-11-27T00:00:00Z".to_string(),
|
||||
from_id: "!abcd1234".to_string(),
|
||||
to_id: "^all".to_string(),
|
||||
channel: 1,
|
||||
portnum: Some("TEXT_MESSAGE_APP".to_string()),
|
||||
text: "Ping".to_string(),
|
||||
rssi: Some(-100),
|
||||
hop_limit: Some(1),
|
||||
lora_freq: 868,
|
||||
modem_preset: "MediumFast".to_string(),
|
||||
channel_name: "TEST".to_string(),
|
||||
snr: Some(0.0),
|
||||
reply_id: None,
|
||||
node_id: "!abcd1234".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn sample_node(short_name: Option<&str>, long_name: &str) -> PotatoNode {
|
||||
PotatoNode {
|
||||
node_id: "!abcd1234".to_string(),
|
||||
short_name: short_name.map(str::to_string),
|
||||
long_name: long_name.to_string(),
|
||||
role: None,
|
||||
hw_model: None,
|
||||
last_heard: None,
|
||||
first_heard: None,
|
||||
latitude: None,
|
||||
longitude: None,
|
||||
altitude: None,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn modem_preset_short_handles_camelcase() {
|
||||
assert_eq!(modem_preset_short("LongFast"), "LF");
|
||||
assert_eq!(modem_preset_short("MediumFast"), "MF");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn format_message_bodies_escape_html() {
|
||||
let (body, formatted) = format_message_bodies("[868][LF]", "Hello <&>");
|
||||
assert_eq!(body, "`[868][LF]` Hello <&>");
|
||||
assert_eq!(formatted, "<code>[868][LF]</code> Hello <&>");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn escape_html_escapes_quotes() {
|
||||
assert_eq!(escape_html("a\"b'c"), "a"b'c");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn display_name_for_node_includes_short_when_present() {
|
||||
let node = sample_node(Some("TN"), "Test Node");
|
||||
assert_eq!(display_name_for_node(&node), "Test Node (TN)");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn display_name_for_node_ignores_empty_or_duplicate_short() {
|
||||
let empty_short = sample_node(Some(""), "Test Node");
|
||||
assert_eq!(display_name_for_node(&empty_short), "Test Node");
|
||||
|
||||
let duplicate_short = sample_node(Some("Test Node"), "Test Node");
|
||||
assert_eq!(display_name_for_node(&duplicate_short), "Test Node");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bridge_state_initially_forwards_all() {
|
||||
let state = BridgeState::default();
|
||||
let msg = sample_msg(42);
|
||||
|
||||
assert!(state.should_forward(&msg));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bridge_state_tracks_latest_rx_time_and_skips_older() {
|
||||
let mut state = BridgeState::default();
|
||||
let m1 = sample_msg(10);
|
||||
let m2 = sample_msg(20);
|
||||
let m3 = sample_msg(15);
|
||||
let m1 = PotatoMessage { rx_time: 10, ..m1 };
|
||||
let m2 = PotatoMessage { rx_time: 20, ..m2 };
|
||||
let m3 = PotatoMessage { rx_time: 15, ..m3 };
|
||||
|
||||
// First message, should forward
|
||||
assert!(state.should_forward(&m1));
|
||||
state.update_with(&m1);
|
||||
assert_eq!(state.last_message_id, Some(10));
|
||||
assert_eq!(state.last_rx_time, Some(10));
|
||||
|
||||
// Second message, higher id, should forward
|
||||
assert!(state.should_forward(&m2));
|
||||
state.update_with(&m2);
|
||||
assert_eq!(state.last_message_id, Some(20));
|
||||
assert_eq!(state.last_rx_time, Some(20));
|
||||
|
||||
// Third message, lower than last, should NOT forward
|
||||
assert!(!state.should_forward(&m3));
|
||||
// state remains unchanged
|
||||
assert_eq!(state.last_message_id, Some(20));
|
||||
assert_eq!(state.last_rx_time, Some(20));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bridge_state_uses_legacy_id_filter_when_rx_time_missing() {
|
||||
let state = BridgeState {
|
||||
last_message_id: Some(10),
|
||||
last_rx_time: None,
|
||||
last_rx_time_ids: vec![],
|
||||
last_checked_at: None,
|
||||
};
|
||||
let older = sample_msg(9);
|
||||
let newer = sample_msg(11);
|
||||
|
||||
assert!(!state.should_forward(&older));
|
||||
assert!(state.should_forward(&newer));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bridge_state_dedupes_same_timestamp() {
|
||||
let mut state = BridgeState::default();
|
||||
let m1 = PotatoMessage {
|
||||
rx_time: 100,
|
||||
..sample_msg(10)
|
||||
};
|
||||
let m2 = PotatoMessage {
|
||||
rx_time: 100,
|
||||
..sample_msg(9)
|
||||
};
|
||||
let dup = PotatoMessage {
|
||||
rx_time: 100,
|
||||
..sample_msg(10)
|
||||
};
|
||||
|
||||
assert!(state.should_forward(&m1));
|
||||
state.update_with(&m1);
|
||||
assert!(state.should_forward(&m2));
|
||||
state.update_with(&m2);
|
||||
assert!(!state.should_forward(&dup));
|
||||
assert_eq!(state.last_rx_time, Some(100));
|
||||
assert_eq!(state.last_rx_time_ids, vec![10, 9]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bridge_state_load_save_roundtrip() {
|
||||
let tmp_dir = tempfile::tempdir().unwrap();
|
||||
let file_path = tmp_dir.path().join("state.json");
|
||||
let path_str = file_path.to_str().unwrap();
|
||||
|
||||
let state = BridgeState {
|
||||
last_message_id: Some(12345),
|
||||
last_rx_time: Some(99),
|
||||
last_rx_time_ids: vec![123],
|
||||
last_checked_at: Some(77),
|
||||
};
|
||||
state.save(path_str).unwrap();
|
||||
|
||||
let loaded_state = BridgeState::load(path_str).unwrap();
|
||||
assert_eq!(loaded_state.last_message_id, Some(12345));
|
||||
assert_eq!(loaded_state.last_rx_time, Some(99));
|
||||
assert_eq!(loaded_state.last_rx_time_ids, vec![123]);
|
||||
assert_eq!(loaded_state.last_checked_at, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bridge_state_load_nonexistent() {
|
||||
let tmp_dir = tempfile::tempdir().unwrap();
|
||||
let file_path = tmp_dir.path().join("nonexistent.json");
|
||||
let path_str = file_path.to_str().unwrap();
|
||||
|
||||
let state = BridgeState::load(path_str).unwrap();
|
||||
assert_eq!(state.last_message_id, None);
|
||||
assert_eq!(state.last_rx_time, None);
|
||||
assert!(state.last_rx_time_ids.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bridge_state_load_empty_file() {
|
||||
let tmp_dir = tempfile::tempdir().unwrap();
|
||||
let file_path = tmp_dir.path().join("empty.json");
|
||||
let path_str = file_path.to_str().unwrap();
|
||||
|
||||
fs::write(path_str, "").unwrap();
|
||||
|
||||
let state = BridgeState::load(path_str).unwrap();
|
||||
assert_eq!(state.last_message_id, None);
|
||||
assert_eq!(state.last_rx_time, None);
|
||||
assert!(state.last_rx_time_ids.is_empty());
|
||||
assert_eq!(state.last_checked_at, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bridge_state_migrates_legacy_checkpoint() {
|
||||
let tmp_dir = tempfile::tempdir().unwrap();
|
||||
let file_path = tmp_dir.path().join("legacy_state.json");
|
||||
let path_str = file_path.to_str().unwrap();
|
||||
|
||||
fs::write(
|
||||
path_str,
|
||||
r#"{"last_message_id":42,"last_checked_at":1710000000}"#,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let state = BridgeState::load(path_str).unwrap();
|
||||
assert_eq!(state.last_message_id, Some(42));
|
||||
assert_eq!(state.last_rx_time, Some(1_710_000_000));
|
||||
assert!(state.last_rx_time_ids.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fetch_params_respects_missing_last_message_id() {
|
||||
let state = BridgeState {
|
||||
last_message_id: None,
|
||||
last_rx_time: Some(123),
|
||||
last_rx_time_ids: vec![],
|
||||
last_checked_at: None,
|
||||
};
|
||||
|
||||
let params = build_fetch_params(&state);
|
||||
assert_eq!(params.limit, None);
|
||||
assert_eq!(params.since, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fetch_params_uses_since_when_safe() {
|
||||
let state = BridgeState {
|
||||
last_message_id: Some(1),
|
||||
last_rx_time: Some(123),
|
||||
last_rx_time_ids: vec![],
|
||||
last_checked_at: None,
|
||||
};
|
||||
|
||||
let params = build_fetch_params(&state);
|
||||
assert_eq!(params.limit, None);
|
||||
assert_eq!(params.since, Some(123));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fetch_params_defaults_to_small_window() {
|
||||
let state = BridgeState {
|
||||
last_message_id: Some(1),
|
||||
last_rx_time: None,
|
||||
last_rx_time_ids: vec![],
|
||||
last_checked_at: None,
|
||||
};
|
||||
|
||||
let params = build_fetch_params(&state);
|
||||
assert_eq!(params.limit, Some(10));
|
||||
assert_eq!(params.since, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn log_state_update_emits_info() {
|
||||
let state = BridgeState::default();
|
||||
log_state_update(&state);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn persist_state_writes_file() {
|
||||
let tmp_dir = tempfile::tempdir().unwrap();
|
||||
let file_path = tmp_dir.path().join("state.json");
|
||||
let path_str = file_path.to_str().unwrap();
|
||||
|
||||
let state = BridgeState {
|
||||
last_message_id: Some(42),
|
||||
last_rx_time: Some(123),
|
||||
last_rx_time_ids: vec![42],
|
||||
last_checked_at: None,
|
||||
};
|
||||
|
||||
persist_state(&state, path_str);
|
||||
|
||||
let loaded = BridgeState::load(path_str).unwrap();
|
||||
assert_eq!(loaded.last_message_id, Some(42));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn persist_state_logs_on_error() {
|
||||
let tmp_dir = tempfile::tempdir().unwrap();
|
||||
let dir_path = tmp_dir.path().to_str().unwrap();
|
||||
let state = BridgeState::default();
|
||||
|
||||
// Writing to a directory path should trigger the error branch.
|
||||
persist_state(&state, dir_path);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn spawn_synapse_listener_starts_task() {
|
||||
let addr = SocketAddr::from(([127, 0, 0, 1], 0));
|
||||
let handle = spawn_synapse_listener(addr, "HS_TOKEN".to_string());
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
handle.abort();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn spawn_synapse_listener_logs_error_on_bind_failure() {
|
||||
let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
let addr = listener.local_addr().unwrap();
|
||||
let handle = spawn_synapse_listener(addr, "HS_TOKEN".to_string());
|
||||
let _ = handle.await;
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn poll_once_leaves_state_unchanged_without_messages() {
|
||||
let tmp_dir = tempfile::tempdir().unwrap();
|
||||
let state_path = tmp_dir.path().join("state.json");
|
||||
let state_str = state_path.to_str().unwrap();
|
||||
|
||||
let mut server = mockito::Server::new_async().await;
|
||||
let mock_msgs = server
|
||||
.mock("GET", "/api/messages")
|
||||
.match_query(mockito::Matcher::Any)
|
||||
.with_status(200)
|
||||
.with_header("content-type", "application/json")
|
||||
.with_body("[]")
|
||||
.create();
|
||||
|
||||
let http_client = reqwest::Client::new();
|
||||
let potatomesh_cfg = PotatomeshConfig {
|
||||
base_url: server.url(),
|
||||
poll_interval_secs: 1,
|
||||
};
|
||||
let matrix_cfg = MatrixConfig {
|
||||
homeserver: server.url(),
|
||||
as_token: "AS_TOKEN".to_string(),
|
||||
hs_token: "HS_TOKEN".to_string(),
|
||||
server_name: "example.org".to_string(),
|
||||
room_id: "!roomid:example.org".to_string(),
|
||||
};
|
||||
|
||||
let potato = PotatoClient::new(http_client.clone(), potatomesh_cfg);
|
||||
let matrix = MatrixAppserviceClient::new(http_client, matrix_cfg);
|
||||
|
||||
let mut state = BridgeState {
|
||||
last_message_id: Some(1),
|
||||
last_rx_time: Some(100),
|
||||
last_rx_time_ids: vec![1],
|
||||
last_checked_at: None,
|
||||
};
|
||||
|
||||
poll_once(&potato, &matrix, &mut state, state_str).await;
|
||||
|
||||
mock_msgs.assert();
|
||||
|
||||
// No new data means state remains unchanged and is not persisted.
|
||||
assert_eq!(state.last_rx_time, Some(100));
|
||||
assert_eq!(state.last_rx_time_ids, vec![1]);
|
||||
assert!(!state_path.exists());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn poll_once_persists_state_for_non_text_messages() {
|
||||
let tmp_dir = tempfile::tempdir().unwrap();
|
||||
let state_path = tmp_dir.path().join("state.json");
|
||||
let state_str = state_path.to_str().unwrap();
|
||||
|
||||
let mut server = mockito::Server::new_async().await;
|
||||
let mock_msgs = server
|
||||
.mock("GET", "/api/messages")
|
||||
.match_query(mockito::Matcher::Any)
|
||||
.with_status(200)
|
||||
.with_header("content-type", "application/json")
|
||||
.with_body(
|
||||
r#"[{"id":1,"rx_time":100,"rx_iso":"2025-11-27T00:00:00Z","from_id":"!abcd1234","to_id":"^all","channel":1,"portnum":"POSITION_APP","text":"","rssi":-100,"hop_limit":1,"lora_freq":868,"modem_preset":"MediumFast","channel_name":"TEST","snr":0.0,"node_id":"!abcd1234"}]"#,
|
||||
)
|
||||
.create();
|
||||
|
||||
let http_client = reqwest::Client::new();
|
||||
let potatomesh_cfg = PotatomeshConfig {
|
||||
base_url: server.url(),
|
||||
poll_interval_secs: 1,
|
||||
};
|
||||
let matrix_cfg = MatrixConfig {
|
||||
homeserver: server.url(),
|
||||
as_token: "AS_TOKEN".to_string(),
|
||||
hs_token: "HS_TOKEN".to_string(),
|
||||
server_name: "example.org".to_string(),
|
||||
room_id: "!roomid:example.org".to_string(),
|
||||
};
|
||||
|
||||
let potato = PotatoClient::new(http_client.clone(), potatomesh_cfg);
|
||||
let matrix = MatrixAppserviceClient::new(http_client, matrix_cfg);
|
||||
let mut state = BridgeState::default();
|
||||
|
||||
poll_once(&potato, &matrix, &mut state, state_str).await;
|
||||
|
||||
mock_msgs.assert();
|
||||
assert!(state_path.exists());
|
||||
let loaded = BridgeState::load(state_str).unwrap();
|
||||
assert_eq!(loaded.last_message_id, Some(1));
|
||||
assert_eq!(loaded.last_rx_time, Some(100));
|
||||
assert_eq!(loaded.last_rx_time_ids, vec![1]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_handle_message() {
|
||||
let mut server = mockito::Server::new_async().await;
|
||||
|
||||
let potatomesh_cfg = PotatomeshConfig {
|
||||
base_url: server.url(),
|
||||
poll_interval_secs: 1,
|
||||
};
|
||||
let matrix_cfg = MatrixConfig {
|
||||
homeserver: server.url(),
|
||||
as_token: "AS_TOKEN".to_string(),
|
||||
hs_token: "HS_TOKEN".to_string(),
|
||||
server_name: "example.org".to_string(),
|
||||
room_id: "!roomid:example.org".to_string(),
|
||||
};
|
||||
|
||||
let node_id = "abcd1234";
|
||||
let user_id = format!("@potato_{}:{}", node_id, matrix_cfg.server_name);
|
||||
let encoded_user = urlencoding::encode(&user_id);
|
||||
let room_id = matrix_cfg.room_id.clone();
|
||||
let encoded_room = urlencoding::encode(&room_id);
|
||||
|
||||
let mock_get_node = server
|
||||
.mock("GET", "/api/nodes/abcd1234")
|
||||
.with_status(200)
|
||||
.with_header("content-type", "application/json")
|
||||
.with_body(r#"{"node_id": "!abcd1234", "long_name": "Test Node", "short_name": "TN"}"#)
|
||||
.create();
|
||||
|
||||
let mock_register = server
|
||||
.mock("POST", "/_matrix/client/v3/register")
|
||||
.match_query("kind=user")
|
||||
.match_header("authorization", "Bearer AS_TOKEN")
|
||||
.with_status(200)
|
||||
.create();
|
||||
|
||||
let mock_join = server
|
||||
.mock(
|
||||
"POST",
|
||||
format!("/_matrix/client/v3/rooms/{}/join", encoded_room).as_str(),
|
||||
)
|
||||
.match_query(format!("user_id={}", encoded_user).as_str())
|
||||
.match_header("authorization", "Bearer AS_TOKEN")
|
||||
.with_status(200)
|
||||
.create();
|
||||
|
||||
let mock_display_name = server
|
||||
.mock(
|
||||
"PUT",
|
||||
format!("/_matrix/client/v3/profile/{}/displayname", encoded_user).as_str(),
|
||||
)
|
||||
.match_query(format!("user_id={}", encoded_user).as_str())
|
||||
.match_header("authorization", "Bearer AS_TOKEN")
|
||||
.match_body(mockito::Matcher::PartialJson(serde_json::json!({
|
||||
"displayname": "Test Node (TN)"
|
||||
})))
|
||||
.with_status(200)
|
||||
.create();
|
||||
|
||||
let http_client = reqwest::Client::new();
|
||||
let matrix_client = MatrixAppserviceClient::new(http_client.clone(), matrix_cfg);
|
||||
let txn_id = matrix_client
|
||||
.txn_counter
|
||||
.load(std::sync::atomic::Ordering::SeqCst);
|
||||
|
||||
let mock_send = server
|
||||
.mock(
|
||||
"PUT",
|
||||
format!(
|
||||
"/_matrix/client/v3/rooms/{}/send/m.room.message/{}",
|
||||
encoded_room, txn_id
|
||||
)
|
||||
.as_str(),
|
||||
)
|
||||
.match_query(format!("user_id={}", encoded_user).as_str())
|
||||
.match_header("authorization", "Bearer AS_TOKEN")
|
||||
.match_body(mockito::Matcher::PartialJson(serde_json::json!({
|
||||
"msgtype": "m.text",
|
||||
"body": "`[868][MF][TEST]` Ping",
|
||||
"format": "org.matrix.custom.html",
|
||||
"formatted_body": "<code>[868][MF][TEST]</code> Ping",
|
||||
})))
|
||||
.with_status(200)
|
||||
.create();
|
||||
|
||||
let potato_client = PotatoClient::new(http_client.clone(), potatomesh_cfg);
|
||||
let mut state = BridgeState::default();
|
||||
let msg = sample_msg(100);
|
||||
|
||||
let result = handle_message(&potato_client, &matrix_client, &mut state, &msg).await;
|
||||
|
||||
assert!(result.is_ok());
|
||||
mock_get_node.assert();
|
||||
mock_register.assert();
|
||||
mock_join.assert();
|
||||
mock_display_name.assert();
|
||||
mock_send.assert();
|
||||
|
||||
assert_eq!(state.last_message_id, Some(100));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,496 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use serde::Serialize;
|
||||
use std::sync::{
|
||||
atomic::{AtomicU64, Ordering},
|
||||
Arc,
|
||||
};
|
||||
|
||||
use crate::config::MatrixConfig;
|
||||
|
||||
/// Thin HTTP client for the Matrix client-server API, authenticated as an
/// application service and able to act on behalf of puppet users.
#[derive(Clone)]
pub struct MatrixAppserviceClient {
    // Shared reqwest client (cheap to clone).
    http: reqwest::Client,
    // Appservice/homeserver configuration (tokens, room id, server name).
    pub cfg: MatrixConfig,
    // Monotonic transaction-id source for /send; shared across clones.
    pub txn_counter: Arc<AtomicU64>,
}
|
||||
|
||||
impl MatrixAppserviceClient {
|
||||
pub fn new(http: reqwest::Client, cfg: MatrixConfig) -> Self {
|
||||
let start = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_millis() as u64;
|
||||
|
||||
Self {
|
||||
http,
|
||||
cfg,
|
||||
txn_counter: Arc::new(AtomicU64::new(start)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Basic liveness check against the homeserver.
|
||||
pub async fn health_check(&self) -> anyhow::Result<()> {
|
||||
let url = format!("{}/_matrix/client/versions", self.cfg.homeserver);
|
||||
let resp = self.http.get(&url).send().await?;
|
||||
if resp.status().is_success() {
|
||||
tracing::info!("Matrix homeserver healthy at {}", self.cfg.homeserver);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow::anyhow!(
|
||||
"Matrix homeserver versions check failed with status {}",
|
||||
resp.status()
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert a node_id like "!deadbeef" into Matrix localpart "potato_deadbeef".
|
||||
pub fn localpart_from_node_id(node_id: &str) -> String {
|
||||
format!("potato_{}", node_id.trim_start_matches('!'))
|
||||
}
|
||||
|
||||
/// Build a full Matrix user_id from localpart.
|
||||
pub fn user_id(&self, localpart: &str) -> String {
|
||||
format!("@{}:{}", localpart, self.cfg.server_name)
|
||||
}
|
||||
|
||||
/// Ensure the puppet user exists (register via appservice registration).
|
||||
pub async fn ensure_user_registered(&self, localpart: &str) -> anyhow::Result<()> {
|
||||
#[derive(Serialize)]
|
||||
struct RegisterReq<'a> {
|
||||
#[serde(rename = "type")]
|
||||
typ: &'a str,
|
||||
username: &'a str,
|
||||
}
|
||||
|
||||
let url = format!(
|
||||
"{}/_matrix/client/v3/register?kind=user",
|
||||
self.cfg.homeserver
|
||||
);
|
||||
|
||||
let body = RegisterReq {
|
||||
typ: "m.login.application_service",
|
||||
username: localpart,
|
||||
};
|
||||
|
||||
let resp = self
|
||||
.http
|
||||
.post(&url)
|
||||
.bearer_auth(&self.cfg.as_token)
|
||||
.json(&body)
|
||||
.send()
|
||||
.await?;
|
||||
if resp.status().is_success() {
|
||||
Ok(())
|
||||
} else {
|
||||
// If user already exists, Synapse / HS usually returns 400 M_USER_IN_USE.
|
||||
// We'll just ignore non-success and hope it's that case.
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
    /// Set display name for puppet user.
    ///
    /// Uses the appservice token with `?user_id=` impersonation. Failures
    /// are logged as warnings and swallowed: a missing display name is
    /// cosmetic and must not block bridging the message itself.
    pub async fn set_display_name(&self, user_id: &str, display_name: &str) -> anyhow::Result<()> {
        #[derive(Serialize)]
        struct DisplayNameReq<'a> {
            displayname: &'a str,
        }

        let encoded_user = urlencoding::encode(user_id);
        // The user id appears twice: once in the profile path, once as the
        // puppet to act as (`user_id=` query parameter).
        let url = format!(
            "{}/_matrix/client/v3/profile/{}/displayname?user_id={}",
            self.cfg.homeserver, encoded_user, encoded_user
        );

        let body = DisplayNameReq {
            displayname: display_name,
        };

        let resp = self
            .http
            .put(&url)
            .bearer_auth(&self.cfg.as_token)
            .json(&body)
            .send()
            .await?;
        if resp.status().is_success() {
            Ok(())
        } else {
            // Non-fatal.
            tracing::warn!(
                "Failed to set display name for {}: {}",
                user_id,
                resp.status()
            );
            Ok(())
        }
    }
|
||||
|
||||
    /// Ensure the puppet user is joined to the configured room.
    ///
    /// Unlike registration/display-name, a join failure is fatal: without
    /// membership the subsequent message send cannot succeed, so the error
    /// (with status and response body) is propagated to the caller.
    pub async fn ensure_user_joined_room(&self, user_id: &str) -> anyhow::Result<()> {
        // Empty request body; join takes no parameters here.
        #[derive(Serialize)]
        struct JoinReq {}

        let encoded_room = urlencoding::encode(&self.cfg.room_id);
        let encoded_user = urlencoding::encode(user_id);
        let url = format!(
            "{}/_matrix/client/v3/rooms/{}/join?user_id={}",
            self.cfg.homeserver, encoded_room, encoded_user
        );

        let resp = self
            .http
            .post(&url)
            .bearer_auth(&self.cfg.as_token)
            .json(&JoinReq {})
            .send()
            .await?;
        if resp.status().is_success() {
            Ok(())
        } else {
            let status = resp.status();
            // Include the response body to make join failures diagnosable.
            let body_snip = resp.text().await.unwrap_or_default();
            Err(anyhow::anyhow!(
                "Matrix join failed for {} in {} with status {} ({})",
                user_id,
                self.cfg.room_id,
                status,
                body_snip
            ))
        }
    }
|
||||
|
||||
    /// Send a text message with HTML formatting into the configured room as puppet user_id.
    ///
    /// Sends an `m.text` event with `org.matrix.custom.html` formatting via
    /// PUT /rooms/{room}/send/m.room.message/{txn_id}, impersonating the
    /// puppet with `?user_id=`. The transaction id comes from the shared
    /// atomic counter so each send is unique. Non-success responses are
    /// logged (with body) and returned as errors.
    pub async fn send_formatted_message_as(
        &self,
        user_id: &str,
        body_text: &str,
        formatted_body: &str,
    ) -> anyhow::Result<()> {
        #[derive(Serialize)]
        struct MsgContent<'a> {
            msgtype: &'a str,
            body: &'a str,
            format: &'a str,
            formatted_body: &'a str,
        }

        // Unique, monotonically increasing transaction id per send.
        let txn_id = self.txn_counter.fetch_add(1, Ordering::SeqCst);
        let encoded_room = urlencoding::encode(&self.cfg.room_id);
        let encoded_user = urlencoding::encode(user_id);

        let url = format!(
            "{}/_matrix/client/v3/rooms/{}/send/m.room.message/{}?user_id={}",
            self.cfg.homeserver, encoded_room, txn_id, encoded_user
        );

        let content = MsgContent {
            msgtype: "m.text",
            body: body_text,
            format: "org.matrix.custom.html",
            formatted_body,
        };

        let resp = self
            .http
            .put(&url)
            .bearer_auth(&self.cfg.as_token)
            .json(&content)
            .send()
            .await?;

        if !resp.status().is_success() {
            let status = resp.status();
            let body_snip = resp.text().await.unwrap_or_default();

            tracing::warn!(
                "Failed to send formatted message as {}: status {}, body: {}",
                user_id,
                status,
                body_snip
            );

            return Err(anyhow::anyhow!(
                "Matrix send failed for {} with status {}",
                user_id,
                status
            ));
        }

        Ok(())
    }
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Baseline config shared by all tests; individual tests override
    /// `homeserver` (and sometimes `room_id`) to point at a mockito server.
    fn dummy_cfg() -> MatrixConfig {
        MatrixConfig {
            homeserver: "https://matrix.example.org".to_string(),
            as_token: "AS_TOKEN".to_string(),
            hs_token: "HS_TOKEN".to_string(),
            server_name: "example.org".to_string(),
            room_id: "!roomid:example.org".to_string(),
        }
    }

    // The "!" prefix of a mesh node id is stripped before building the
    // localpart; ids without the prefix pass through unchanged.
    #[test]
    fn localpart_strips_bang_correctly() {
        assert_eq!(
            MatrixAppserviceClient::localpart_from_node_id("!deadbeef"),
            "potato_deadbeef"
        );
        assert_eq!(
            MatrixAppserviceClient::localpart_from_node_id("cafebabe"),
            "potato_cafebabe"
        );
    }

    // Full Matrix user ids are "@<localpart>:<server_name>".
    #[test]
    fn user_id_builds_from_localpart_and_server_name() {
        let http = reqwest::Client::builder().build().unwrap();
        let client = MatrixAppserviceClient::new(http, dummy_cfg());

        let uid = client.user_id("potato_deadbeef");
        assert_eq!(uid, "@potato_deadbeef:example.org");
    }

    // health_check succeeds when GET /_matrix/client/versions returns 2xx.
    #[tokio::test]
    async fn health_check_success() {
        let mut server = mockito::Server::new_async().await;
        let mock = server
            .mock("GET", "/_matrix/client/versions")
            .with_status(200)
            .create();

        let mut cfg = dummy_cfg();
        cfg.homeserver = server.url();
        let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
        let result = client.health_check().await;

        mock.assert();
        assert!(result.is_ok());
    }

    // ... and reports an error on a 5xx response.
    #[tokio::test]
    async fn health_check_failure() {
        let mut server = mockito::Server::new_async().await;
        let mock = server
            .mock("GET", "/_matrix/client/versions")
            .with_status(500)
            .create();

        let mut cfg = dummy_cfg();
        cfg.homeserver = server.url();
        let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
        let result = client.health_check().await;

        mock.assert();
        assert!(result.is_err());
    }

    // Construction stores the config verbatim and seeds a non-zero
    // transaction counter.
    #[test]
    fn test_new_matrix_client() {
        let http_client = reqwest::Client::new();
        let config = dummy_cfg();
        let client = MatrixAppserviceClient::new(http_client, config);
        assert_eq!(client.cfg.homeserver, "https://matrix.example.org");
        assert_eq!(client.cfg.as_token, "AS_TOKEN");
        assert!(client.txn_counter.load(Ordering::SeqCst) > 0);
    }

    // Registration hits POST /register?kind=user with the AS token.
    #[tokio::test]
    async fn test_ensure_user_registered_success() {
        let mut server = mockito::Server::new_async().await;
        let mock = server
            .mock("POST", "/_matrix/client/v3/register")
            .match_query("kind=user")
            .match_header("authorization", "Bearer AS_TOKEN")
            .with_status(200)
            .create();

        let mut cfg = dummy_cfg();
        cfg.homeserver = server.url();
        let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
        let result = client.ensure_user_registered("testuser").await;

        mock.assert();
        assert!(result.is_ok());
    }

    // A 400 (M_USER_IN_USE) means the puppet already exists and is treated
    // as success.
    #[tokio::test]
    async fn test_ensure_user_registered_user_in_use() {
        let mut server = mockito::Server::new_async().await;
        let mock = server
            .mock("POST", "/_matrix/client/v3/register")
            .match_query("kind=user")
            .match_header("authorization", "Bearer AS_TOKEN")
            .with_status(400) // M_USER_IN_USE
            .create();

        let mut cfg = dummy_cfg();
        cfg.homeserver = server.url();
        let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
        let result = client.ensure_user_registered("testuser").await;

        mock.assert();
        assert!(result.is_ok());
    }

    // Display-name updates PUT the profile endpoint with user_id as query.
    #[tokio::test]
    async fn test_set_display_name_success() {
        let mut server = mockito::Server::new_async().await;
        let user_id = "@test:example.org";
        let encoded_user = urlencoding::encode(user_id);
        let query = format!("user_id={}", encoded_user);
        let path = format!("/_matrix/client/v3/profile/{}/displayname", encoded_user);

        let mock = server
            .mock("PUT", path.as_str())
            .match_query(query.as_str())
            .match_header("authorization", "Bearer AS_TOKEN")
            .with_status(200)
            .create();

        let mut cfg = dummy_cfg();
        cfg.homeserver = server.url();
        let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
        let result = client.set_display_name(user_id, "Test Name").await;

        mock.assert();
        assert!(result.is_ok());
    }

    // Display-name failures are non-fatal: a 500 still yields Ok.
    #[tokio::test]
    async fn test_set_display_name_fail_is_ok() {
        let mut server = mockito::Server::new_async().await;
        let user_id = "@test:example.org";
        let encoded_user = urlencoding::encode(user_id);
        let query = format!("user_id={}", encoded_user);
        let path = format!("/_matrix/client/v3/profile/{}/displayname", encoded_user);

        let mock = server
            .mock("PUT", path.as_str())
            .match_query(query.as_str())
            .match_header("authorization", "Bearer AS_TOKEN")
            .with_status(500)
            .create();

        let mut cfg = dummy_cfg();
        cfg.homeserver = server.url();
        let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
        let result = client.set_display_name(user_id, "Test Name").await;

        mock.assert();
        assert!(result.is_ok());
    }

    // Joining POSTs the room's /join endpoint as the puppet user.
    #[tokio::test]
    async fn test_ensure_user_joined_room_success() {
        let mut server = mockito::Server::new_async().await;
        let user_id = "@test:example.org";
        let room_id = "!roomid:example.org";
        let encoded_user = urlencoding::encode(user_id);
        let encoded_room = urlencoding::encode(room_id);
        let query = format!("user_id={}", encoded_user);
        let path = format!("/_matrix/client/v3/rooms/{}/join", encoded_room);

        let mock = server
            .mock("POST", path.as_str())
            .match_query(query.as_str())
            .match_header("authorization", "Bearer AS_TOKEN")
            .with_status(200)
            .create();

        let mut cfg = dummy_cfg();
        cfg.homeserver = server.url();
        cfg.room_id = room_id.to_string();
        let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
        let result = client.ensure_user_joined_room(user_id).await;

        mock.assert();
        assert!(result.is_ok());
    }

    // Unlike display names, a refused join (403) IS surfaced as an error.
    #[tokio::test]
    async fn test_ensure_user_joined_room_fail() {
        let mut server = mockito::Server::new_async().await;
        let user_id = "@test:example.org";
        let room_id = "!roomid:example.org";
        let encoded_user = urlencoding::encode(user_id);
        let encoded_room = urlencoding::encode(room_id);
        let query = format!("user_id={}", encoded_user);
        let path = format!("/_matrix/client/v3/rooms/{}/join", encoded_room);

        let mock = server
            .mock("POST", path.as_str())
            .match_query(query.as_str())
            .match_header("authorization", "Bearer AS_TOKEN")
            .with_status(403)
            .create();

        let mut cfg = dummy_cfg();
        cfg.homeserver = server.url();
        cfg.room_id = room_id.to_string();
        let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
        let result = client.ensure_user_joined_room(user_id).await;

        mock.assert();
        assert!(result.is_err());
    }

    // Sending PUTs an m.text event whose txn id is the counter's current
    // value; the mock checks method, path, query, auth header, and body.
    #[tokio::test]
    async fn test_send_formatted_message_as_success() {
        let mut server = mockito::Server::new_async().await;
        let user_id = "@test:example.org";
        let room_id = "!roomid:example.org";
        let encoded_user = urlencoding::encode(user_id);
        let encoded_room = urlencoding::encode(room_id);

        let client = {
            let mut cfg = dummy_cfg();
            cfg.homeserver = server.url();
            cfg.room_id = room_id.to_string();
            MatrixAppserviceClient::new(reqwest::Client::new(), cfg)
        };
        // Read the counter BEFORE sending: fetch_add returns this value.
        let txn_id = client.txn_counter.load(Ordering::SeqCst);
        let query = format!("user_id={}", encoded_user);
        let path = format!(
            "/_matrix/client/v3/rooms/{}/send/m.room.message/{}",
            encoded_room, txn_id
        );

        let mock = server
            .mock("PUT", path.as_str())
            .match_query(query.as_str())
            .match_header("authorization", "Bearer AS_TOKEN")
            .match_body(mockito::Matcher::PartialJson(serde_json::json!({
                "msgtype": "m.text",
                "body": "`[meta]` hello",
                "format": "org.matrix.custom.html",
                "formatted_body": "<code>[meta]</code> hello",
            })))
            .with_status(200)
            .create();

        let result = client
            .send_formatted_message_as(user_id, "`[meta]` hello", "<code>[meta]</code> hello")
            .await;

        mock.assert();
        assert!(result.is_ok());
    }
}
|
||||
@@ -0,0 +1,289 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use axum::{
|
||||
extract::{Path, Query, State},
|
||||
http::{header::AUTHORIZATION, HeaderMap, StatusCode},
|
||||
response::IntoResponse,
|
||||
routing::put,
|
||||
Json, Router,
|
||||
};
|
||||
use serde_json::Value;
|
||||
use std::net::SocketAddr;
|
||||
use tracing::info;
|
||||
|
||||
/// Shared router state: the homeserver token that Synapse must present
/// on every inbound appservice request.
#[derive(Clone)]
struct SynapseState {
    // `hs_token` from the appservice registration; compared (in constant
    // time) against the token carried by each transaction request.
    hs_token: String,
}
|
||||
|
||||
/// Query-string form of the token (`?access_token=...`), supported as the
/// legacy appservice authentication scheme alongside the auth headers.
#[derive(serde::Deserialize)]
struct AuthQuery {
    // Absent when the caller authenticates via a header instead.
    access_token: Option<String>,
}
|
||||
|
||||
/// Pull access tokens from supported auth headers.
|
||||
fn extract_access_token(headers: &HeaderMap) -> Option<String> {
|
||||
if let Some(value) = headers.get(AUTHORIZATION) {
|
||||
if let Ok(raw) = value.to_str() {
|
||||
if let Some(token) = raw.strip_prefix("Bearer ") {
|
||||
return Some(token.trim().to_string());
|
||||
}
|
||||
if let Some(token) = raw.strip_prefix("bearer ") {
|
||||
return Some(token.trim().to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(value) = headers.get("x-access-token") {
|
||||
if let Ok(raw) = value.to_str() {
|
||||
return Some(raw.trim().to_string());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Compare tokens in constant time to avoid timing leakage.
///
/// Scans up to the longer of the two byte strings, OR-ing every byte
/// difference (and the length difference) into one accumulator, so the
/// running time depends only on the inputs' lengths — never on where the
/// first mismatch occurs.
fn constant_time_eq(a: &str, b: &str) -> bool {
    let lhs = a.as_bytes();
    let rhs = b.as_bytes();
    let longest = lhs.len().max(rhs.len());

    // Seed with the length difference so unequal lengths always fail.
    let mut acc = (lhs.len() ^ rhs.len()) as u8;
    for i in 0..longest {
        let l = lhs.get(i).copied().unwrap_or(0);
        let r = rhs.get(i).copied().unwrap_or(0);
        acc |= l ^ r;
    }

    acc == 0
}
|
||||
|
||||
/// Captures inbound Synapse transaction payloads for logging.
#[derive(Debug)]
struct SynapseResponse {
    // Transaction id taken from the request path.
    txn_id: String,
    // Raw JSON transaction body, logged verbatim.
    payload: Value,
}
|
||||
|
||||
/// Build the router that handles Synapse appservice transactions.
|
||||
fn build_router(state: SynapseState) -> Router {
|
||||
Router::new()
|
||||
.route(
|
||||
"/_matrix/appservice/v1/transactions/:txn_id",
|
||||
put(handle_transaction),
|
||||
)
|
||||
.with_state(state)
|
||||
}
|
||||
|
||||
/// Handle inbound transaction callbacks from Synapse.
|
||||
async fn handle_transaction(
|
||||
Path(txn_id): Path<String>,
|
||||
State(state): State<SynapseState>,
|
||||
Query(auth): Query<AuthQuery>,
|
||||
headers: HeaderMap,
|
||||
Json(payload): Json<Value>,
|
||||
) -> impl IntoResponse {
|
||||
let header_token = extract_access_token(&headers);
|
||||
let token_matches = if let Some(token) = header_token.as_deref() {
|
||||
constant_time_eq(token, &state.hs_token)
|
||||
} else {
|
||||
auth.access_token
|
||||
.as_deref()
|
||||
.is_some_and(|token| constant_time_eq(token, &state.hs_token))
|
||||
};
|
||||
if !token_matches {
|
||||
return (StatusCode::UNAUTHORIZED, Json(serde_json::json!({})));
|
||||
}
|
||||
let response = SynapseResponse { txn_id, payload };
|
||||
info!(
|
||||
"Status response: SynapseResponse {{ txn_id: {}, payload: {:?} }}",
|
||||
response.txn_id, response.payload
|
||||
);
|
||||
(StatusCode::OK, Json(serde_json::json!({})))
|
||||
}
|
||||
|
||||
/// Listen for Synapse callbacks on the configured address.
|
||||
pub async fn run_synapse_listener(addr: SocketAddr, hs_token: String) -> anyhow::Result<()> {
|
||||
let app = build_router(SynapseState { hs_token });
|
||||
let listener = tokio::net::TcpListener::bind(addr).await?;
|
||||
info!("Synapse listener bound on {}", addr);
|
||||
axum::serve(listener, app).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use axum::body::Body;
    use axum::http::Request;
    use tokio::time::{sleep, Duration};
    use tower::ServiceExt;

    // A correctly-authenticated PUT is acknowledged with 200 and "{}".
    #[tokio::test]
    async fn transactions_endpoint_accepts_payloads() {
        let app = build_router(SynapseState {
            hs_token: "HS_TOKEN".to_string(),
        });
        let payload = serde_json::json!({
            "events": [],
            "txn_id": "123"
        });

        let response = app
            .oneshot(
                Request::builder()
                    .method("PUT")
                    .uri("/_matrix/appservice/v1/transactions/123")
                    .header("authorization", "Bearer HS_TOKEN")
                    .header("content-type", "application/json")
                    .body(Body::from(payload.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();

        assert_eq!(response.status(), StatusCode::OK);
        let body = axum::body::to_bytes(response.into_body(), usize::MAX)
            .await
            .unwrap();
        assert_eq!(body.as_ref(), b"{}");
    }

    // No token anywhere → 401 with an empty JSON body.
    #[tokio::test]
    async fn transactions_endpoint_rejects_missing_token() {
        let app = build_router(SynapseState {
            hs_token: "HS_TOKEN".to_string(),
        });
        let payload = serde_json::json!({
            "events": [],
            "txn_id": "123"
        });

        let response = app
            .oneshot(
                Request::builder()
                    .method("PUT")
                    .uri("/_matrix/appservice/v1/transactions/123")
                    .header("content-type", "application/json")
                    .body(Body::from(payload.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();

        assert_eq!(response.status(), StatusCode::UNAUTHORIZED);
        let body = axum::body::to_bytes(response.into_body(), usize::MAX)
            .await
            .unwrap();
        assert_eq!(body.as_ref(), b"{}");
    }

    // A present-but-wrong bearer token is also rejected with 401.
    #[tokio::test]
    async fn transactions_endpoint_rejects_wrong_token() {
        let app = build_router(SynapseState {
            hs_token: "HS_TOKEN".to_string(),
        });
        let payload = serde_json::json!({
            "events": [],
            "txn_id": "123"
        });

        let response = app
            .oneshot(
                Request::builder()
                    .method("PUT")
                    .uri("/_matrix/appservice/v1/transactions/123")
                    .header("authorization", "Bearer NOPE")
                    .header("content-type", "application/json")
                    .body(Body::from(payload.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();

        assert_eq!(response.status(), StatusCode::UNAUTHORIZED);
        let body = axum::body::to_bytes(response.into_body(), usize::MAX)
            .await
            .unwrap();
        assert_eq!(body.as_ref(), b"{}");
    }

    // The legacy `?access_token=` query parameter still authenticates.
    #[tokio::test]
    async fn transactions_endpoint_accepts_legacy_query_token() {
        let app = build_router(SynapseState {
            hs_token: "HS_TOKEN".to_string(),
        });
        let payload = serde_json::json!({
            "events": [],
            "txn_id": "125"
        });

        let response = app
            .oneshot(
                Request::builder()
                    .method("PUT")
                    .uri("/_matrix/appservice/v1/transactions/125?access_token=HS_TOKEN")
                    .header("content-type", "application/json")
                    .body(Body::from(payload.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();

        assert_eq!(response.status(), StatusCode::OK);
    }

    // The alternative `x-access-token` header also authenticates.
    #[tokio::test]
    async fn transactions_endpoint_accepts_x_access_token_header() {
        let app = build_router(SynapseState {
            hs_token: "HS_TOKEN".to_string(),
        });
        let payload = serde_json::json!({
            "events": [],
            "txn_id": "126"
        });

        let response = app
            .oneshot(
                Request::builder()
                    .method("PUT")
                    .uri("/_matrix/appservice/v1/transactions/126")
                    .header("x-access-token", "HS_TOKEN")
                    .header("content-type", "application/json")
                    .body(Body::from(payload.to_string()))
                    .unwrap(),
            )
            .await
            .unwrap();

        assert_eq!(response.status(), StatusCode::OK);
    }

    // Smoke test: the listener binds on an ephemeral port and can be aborted.
    #[tokio::test]
    async fn run_synapse_listener_starts_and_can_abort() {
        let addr = SocketAddr::from(([127, 0, 0, 1], 0));
        let handle =
            tokio::spawn(async move { run_synapse_listener(addr, "HS_TOKEN".to_string()).await });
        sleep(Duration::from_millis(10)).await;
        handle.abort();
    }

    // Binding a port that is already taken surfaces as an error.
    #[tokio::test]
    async fn run_synapse_listener_returns_error_on_bind_failure() {
        let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        let result = run_synapse_listener(addr, "HS_TOKEN".to_string()).await;
        assert!(result.is_err());
    }
}
|
||||
@@ -0,0 +1,561 @@
|
||||
// Copyright © 2025-26 l5yth & contributors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use serde::Deserialize;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::config::PotatomeshConfig;
|
||||
|
||||
/// One chat message as returned by the PotatoMesh `/api/messages` endpoint.
///
/// Fields marked `#[serde(default)]` may be absent from API records and
/// deserialize to `None` (exercised by the tests below).
#[allow(dead_code)]
#[derive(Debug, Deserialize, Clone)]
pub struct PotatoMessage {
    /// Unique message id.
    pub id: u64,
    /// Receive time as a Unix timestamp (seconds, per the test fixtures).
    pub rx_time: u64,
    /// Receive time in ISO-8601 form, e.g. "2025-11-27T11:03:56Z".
    pub rx_iso: String,
    /// Sender node id, e.g. "!da6556d4".
    pub from_id: String,
    /// Destination id; "^all" for broadcasts in the fixtures.
    pub to_id: String,
    /// Mesh channel index.
    pub channel: u8,
    /// Meshtastic port name, e.g. "TEXT_MESSAGE_APP"; may be absent.
    #[serde(default)]
    pub portnum: Option<String>,
    /// Message text body.
    pub text: String,
    /// Received signal strength; may be absent.
    #[serde(default)]
    pub rssi: Option<i16>,
    /// Remaining hop budget; may be absent.
    #[serde(default)]
    pub hop_limit: Option<u8>,
    // LoRa frequency (868 in fixtures — presumably MHz; confirm with API docs).
    pub lora_freq: u32,
    /// Modem preset name, e.g. "MediumFast".
    pub modem_preset: String,
    /// Human-readable channel name.
    pub channel_name: String,
    /// Signal-to-noise ratio; may be absent.
    #[serde(default)]
    pub snr: Option<f32>,
    /// Id of the message this one replies to, if any.
    #[serde(default)]
    pub reply_id: Option<u64>,
    /// Id of the node that reported this message.
    pub node_id: String,
}
|
||||
|
||||
/// Optional filters for [`PotatoClient::fetch_messages`]; `Default` yields
/// an unfiltered fetch.
#[derive(Debug, Default, Clone)]
pub struct FetchParams {
    /// Maximum number of messages to return (`limit` query parameter).
    pub limit: Option<u32>,
    // Lower bound passed as the `since` query parameter — presumably a Unix
    // timestamp or message id; confirm against the PotatoMesh API.
    pub since: Option<u64>,
}
|
||||
|
||||
/// Node metadata as returned by the PotatoMesh `/api/nodes/{id}` endpoint.
///
/// Most fields are optional with `#[serde(default)]` since the API omits
/// them for nodes lacking that data (see `deserialize_sample_node`).
#[allow(dead_code)]
#[derive(Debug, Deserialize, Clone)]
pub struct PotatoNode {
    /// Node id including the "!" prefix, e.g. "!67fc83cb".
    pub node_id: String,
    /// Short display name, e.g. "83CB"; may be absent.
    #[serde(default)]
    pub short_name: Option<String>,
    /// Long display name, e.g. "Meshtastic 83CB".
    pub long_name: String,
    /// Node role, e.g. "CLIENT_HIDDEN"; may be absent.
    #[serde(default)]
    pub role: Option<String>,
    /// Hardware model identifier; may be absent.
    #[serde(default)]
    pub hw_model: Option<String>,
    /// Unix timestamp of the most recent sighting; may be absent.
    #[serde(default)]
    pub last_heard: Option<u64>,
    /// Unix timestamp of the first sighting; may be absent.
    #[serde(default)]
    pub first_heard: Option<u64>,
    /// Reported latitude; may be absent.
    #[serde(default)]
    pub latitude: Option<f64>,
    /// Reported longitude; may be absent.
    #[serde(default)]
    pub longitude: Option<f64>,
    // Reported altitude — units not shown in SOURCE; presumably meters.
    #[serde(default)]
    pub altitude: Option<f64>,
}
|
||||
|
||||
/// HTTP client for the PotatoMesh REST API.
///
/// Cloning is cheap: the reqwest client and the cache are internally shared.
#[derive(Clone)]
pub struct PotatoClient {
    // Underlying HTTP client (connection pooling handled by reqwest).
    http: reqwest::Client,
    // Base URL and polling settings.
    cfg: PotatomeshConfig,
    // simple in-memory cache for node metadata
    // (keyed by bare hex id without the "!" prefix; shared across clones)
    nodes_cache: Arc<RwLock<HashMap<String, PotatoNode>>>,
}
|
||||
|
||||
impl PotatoClient {
|
||||
pub fn new(http: reqwest::Client, cfg: PotatomeshConfig) -> Self {
|
||||
Self {
|
||||
http,
|
||||
cfg,
|
||||
nodes_cache: Arc::new(RwLock::new(HashMap::new())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Build the API root; accept either a bare domain or one already ending in `/api`.
|
||||
fn api_base(&self) -> String {
|
||||
let trimmed = self.cfg.base_url.trim_end_matches('/');
|
||||
if trimmed.ends_with("/api") {
|
||||
trimmed.to_string()
|
||||
} else {
|
||||
format!("{}/api", trimmed)
|
||||
}
|
||||
}
|
||||
|
||||
fn messages_url(&self) -> String {
|
||||
format!("{}/messages", self.api_base())
|
||||
}
|
||||
|
||||
fn node_url(&self, hex_id: &str) -> String {
|
||||
// e.g. https://potatomesh.net/api/nodes/67fc83cb
|
||||
format!("{}/nodes/{}", self.api_base(), hex_id)
|
||||
}
|
||||
|
||||
/// Basic liveness check against the PotatoMesh API.
|
||||
pub async fn health_check(&self) -> anyhow::Result<()> {
|
||||
let base = self
|
||||
.cfg
|
||||
.base_url
|
||||
.trim_end_matches('/')
|
||||
.trim_end_matches("/api");
|
||||
let url = format!("{}/version", base);
|
||||
let resp = self.http.get(&url).send().await?;
|
||||
if resp.status().is_success() {
|
||||
tracing::info!("PotatoMesh API healthy at {}", self.cfg.base_url);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow::anyhow!(
|
||||
"PotatoMesh health check failed with status {}",
|
||||
resp.status()
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn fetch_messages(&self, params: FetchParams) -> anyhow::Result<Vec<PotatoMessage>> {
|
||||
let mut req = self.http.get(self.messages_url());
|
||||
if let Some(limit) = params.limit {
|
||||
req = req.query(&[("limit", limit)]);
|
||||
}
|
||||
if let Some(since) = params.since {
|
||||
req = req.query(&[("since", since)]);
|
||||
}
|
||||
|
||||
let resp = req.send().await?.error_for_status()?;
|
||||
|
||||
let msgs: Vec<PotatoMessage> = resp.json().await?;
|
||||
Ok(msgs)
|
||||
}
|
||||
|
||||
pub async fn get_node(&self, node_id_with_bang: &str) -> anyhow::Result<PotatoNode> {
|
||||
// node_id is like "!67fc83cb" → we need "67fc83cb"
|
||||
let hex = node_id_with_bang.trim_start_matches('!').to_string();
|
||||
|
||||
{
|
||||
let cache = self.nodes_cache.read().await;
|
||||
if let Some(n) = cache.get(&hex) {
|
||||
return Ok(n.clone());
|
||||
}
|
||||
}
|
||||
|
||||
let url = self.node_url(&hex);
|
||||
let resp = self.http.get(url).send().await?.error_for_status()?;
|
||||
let node: PotatoNode = resp.json().await?;
|
||||
|
||||
{
|
||||
let mut cache = self.nodes_cache.write().await;
|
||||
cache.insert(hex, node.clone());
|
||||
}
|
||||
|
||||
Ok(node)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
    // A fully-populated message record from the live API parses with every
    // optional field present.
    #[test]
    fn deserialize_sample_message_array() {
        let json = r#"
        [
            {
                "id": 2947676906,
                "rx_time": 1764241436,
                "rx_iso": "2025-11-27T11:03:56Z",
                "from_id": "!da6556d4",
                "to_id": "^all",
                "channel": 1,
                "portnum": "TEXT_MESSAGE_APP",
                "text": "Ping",
                "rssi": -111,
                "hop_limit": 1,
                "lora_freq": 868,
                "modem_preset": "MediumFast",
                "channel_name": "TEST",
                "snr": -9.0,
                "node_id": "!06871773"
            }
        ]
        "#;

        let msgs: Vec<PotatoMessage> = serde_json::from_str(json).expect("valid message json");
        assert_eq!(msgs.len(), 1);
        let m = &msgs[0];
        assert_eq!(m.id, 2947676906);
        assert_eq!(m.from_id, "!da6556d4");
        assert_eq!(m.node_id, "!06871773");
        assert_eq!(m.portnum.as_deref(), Some("TEXT_MESSAGE_APP"));
        assert_eq!(m.lora_freq, 868);
        assert!((m.snr.unwrap() - (-9.0)).abs() < f32::EPSILON);
    }

    // Records missing the #[serde(default)] fields still deserialize, with
    // those fields set to None.
    #[test]
    fn deserialize_message_with_missing_optional_fields() {
        let json = r#"
        [
            {
                "id": 1,
                "rx_time": 0,
                "rx_iso": "2025-11-27T11:03:56Z",
                "from_id": "!abcd1234",
                "to_id": "^all",
                "channel": 1,
                "text": "Ping",
                "lora_freq": 868,
                "modem_preset": "MediumFast",
                "channel_name": "TEST",
                "node_id": "!abcd1234"
            }
        ]
        "#;

        let msgs: Vec<PotatoMessage> = serde_json::from_str(json).expect("valid message json");
        assert_eq!(msgs.len(), 1);
        let m = &msgs[0];
        assert!(m.portnum.is_none());
        assert!(m.rssi.is_none());
        assert!(m.hop_limit.is_none());
        assert!(m.snr.is_none());
    }

    // Node metadata parses; unknown extra keys (last_seen_iso) are ignored
    // and absent location fields come back as None.
    #[test]
    fn deserialize_sample_node() {
        let json = r#"
        {
            "node_id": "!67fc83cb",
            "short_name": "83CB",
            "long_name": "Meshtastic 83CB",
            "role": "CLIENT_HIDDEN",
            "last_heard": 1764250515,
            "first_heard": 1758993817,
            "last_seen_iso": "2025-11-27T13:35:15Z"
        }
        "#;

        let node: PotatoNode = serde_json::from_str(json).expect("valid node json");
        assert_eq!(node.node_id, "!67fc83cb");
        assert_eq!(node.short_name.as_deref(), Some("83CB"));
        assert_eq!(node.long_name, "Meshtastic 83CB");
        assert_eq!(node.role.as_deref(), Some("CLIENT_HIDDEN"));
        assert_eq!(node.last_heard, Some(1764250515));
        assert_eq!(node.first_heard, Some(1758993817));
        assert!(node.latitude.is_none());
    }

    // Documents the id-normalization idiom used by get_node: a leading "!"
    // is stripped, bare hex ids pass through unchanged.
    #[test]
    fn node_hex_id_is_stripped_correctly() {
        let with_bang = "!deadbeef";
        let hex = with_bang.trim_start_matches('!');
        assert_eq!(hex, "deadbeef");

        let already_hex = "cafebabe";
        let hex2 = already_hex.trim_start_matches('!');
        assert_eq!(hex2, "cafebabe");
    }
|
||||
|
||||
    // Construction stores the config verbatim.
    #[test]
    fn test_new_potato_client() {
        let http_client = reqwest::Client::new();
        let config = PotatomeshConfig {
            base_url: "http://localhost:8080".to_string(),
            poll_interval_secs: 60,
        };
        let client = PotatoClient::new(http_client, config);
        assert_eq!(client.cfg.base_url, "http://localhost:8080");
        assert_eq!(client.cfg.poll_interval_secs, 60);
    }

    // A bare base URL gains the "/api" segment.
    #[test]
    fn test_messages_url() {
        let http_client = reqwest::Client::new();
        let config = PotatomeshConfig {
            base_url: "http://localhost:8080".to_string(),
            poll_interval_secs: 60,
        };
        let client = PotatoClient::new(http_client, config);
        assert_eq!(client.messages_url(), "http://localhost:8080/api/messages");
    }

    // A trailing slash on the base URL does not produce a double slash.
    #[test]
    fn test_messages_url_with_trailing_slash() {
        let http_client = reqwest::Client::new();
        let config = PotatomeshConfig {
            base_url: "http://localhost:8080/".to_string(),
            poll_interval_secs: 60,
        };
        let client = PotatoClient::new(http_client, config);
        assert_eq!(client.messages_url(), "http://localhost:8080/api/messages")
;
    }

    // A base URL that already ends in "/api" is not doubled up.
    #[test]
    fn test_messages_url_with_existing_api_suffix() {
        let http_client = reqwest::Client::new();
        let config = PotatomeshConfig {
            base_url: "http://localhost:8080/api/".to_string(),
            poll_interval_secs: 60,
        };
        let client = PotatoClient::new(http_client, config);
        assert_eq!(client.messages_url(), "http://localhost:8080/api/messages");
    }

    // node_url does NOT strip the "!" — callers pass the bare hex id.
    #[test]
    fn test_node_url() {
        let http_client = reqwest::Client::new();
        let config = PotatomeshConfig {
            base_url: "http://localhost:8080".to_string(),
            poll_interval_secs: 60,
        };
        let client = PotatoClient::new(http_client, config);
        assert_eq!(
            client.node_url("!1234"),
            "http://localhost:8080/api/nodes/!1234"
        );
    }
|
||||
|
||||
    // fetch_messages GETs /api/messages and deserializes the JSON array.
    #[tokio::test]
    async fn test_fetch_messages_success() {
        let mut server = mockito::Server::new_async().await;
        let mock = server
            .mock("GET", "/api/messages")
            .match_query(mockito::Matcher::Any) // allow optional query params
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(
                r#"
                [
                    {
                        "id": 2947676906, "rx_time": 1764241436, "rx_iso": "2025-11-27T11:03:56Z",
                        "from_id": "!da6556d4", "to_id": "^all", "channel": 1,
                        "portnum": "TEXT_MESSAGE_APP", "text": "Ping", "rssi": -111,
                        "hop_limit": 1, "lora_freq": 868, "modem_preset": "MediumFast",
                        "channel_name": "TEST", "snr": -9.0, "node_id": "!06871773"
                    }
                ]
                "#,
            )
            .create();

        let http_client = reqwest::Client::new();
        let config = PotatomeshConfig {
            base_url: server.url(),
            poll_interval_secs: 60,
        };
        let client = PotatoClient::new(http_client, config);
        let result = client.fetch_messages(FetchParams::default()).await;

        mock.assert();
        assert!(result.is_ok());
        let messages = result.unwrap();
        assert_eq!(messages.len(), 1);
        assert_eq!(messages[0].id, 2947676906);
    }

    // health_check probes GET /version at the API root.
    #[tokio::test]
    async fn test_health_check_success() {
        let mut server = mockito::Server::new_async().await;
        let mock = server.mock("GET", "/version").with_status(200).create();

        let http_client = reqwest::Client::new();
        let config = PotatomeshConfig {
            base_url: server.url(),
            poll_interval_secs: 60,
        };
        let client = PotatoClient::new(http_client, config);
        let result = client.health_check().await;

        mock.assert();
        assert!(result.is_ok());
    }

    // A configured "/api" suffix is stripped before probing /version.
    #[tokio::test]
    async fn test_health_check_strips_api_suffix() {
        let mut server = mockito::Server::new_async().await;
        let mock = server.mock("GET", "/version").with_status(200).create();

        let http_client = reqwest::Client::new();
        let mut base = server.url();
        base.push_str("/api");
        let config = PotatomeshConfig {
            base_url: base,
            poll_interval_secs: 60,
        };
        let client = PotatoClient::new(http_client, config);
        let result = client.health_check().await;

        mock.assert();
        assert!(result.is_ok());
    }

    // A 5xx from /version is reported as an error.
    #[tokio::test]
    async fn test_health_check_failure() {
        let mut server = mockito::Server::new_async().await;
        let mock = server.mock("GET", "/version").with_status(500).create();

        let http_client = reqwest::Client::new();
        let config = PotatomeshConfig {
            base_url: server.url(),
            poll_interval_secs: 60,
        };
        let client = PotatoClient::new(http_client, config);
        let result = client.health_check().await;

        mock.assert();
        assert!(result.is_err());
    }

    // error_for_status surfaces HTTP failures from /api/messages.
    #[tokio::test]
    async fn test_fetch_messages_error() {
        let mut server = mockito::Server::new_async().await;
        let mock = server
            .mock("GET", "/api/messages")
            .match_query(mockito::Matcher::Any)
            .with_status(500)
            .create();

        let http_client = reqwest::Client::new();
        let config = PotatomeshConfig {
            base_url: server.url(),
            poll_interval_secs: 60,
        };
        let client = PotatoClient::new(http_client, config);
        let result = client.fetch_messages(FetchParams::default()).await;

        mock.assert();
        assert!(result.is_err());
    }

    // limit and since are forwarded as query parameters, in that order.
    #[tokio::test]
    async fn test_fetch_messages_with_limit_and_since() {
        let mut server = mockito::Server::new_async().await;
        let mock = server
            .mock("GET", "/api/messages")
            .match_query("limit=10&since=123")
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body("[]")
            .create();

        let http_client = reqwest::Client::new();
        let config = PotatomeshConfig {
            base_url: server.url(),
            poll_interval_secs: 60,
        };
        let client = PotatoClient::new(http_client, config);
        let params = FetchParams {
            limit: Some(10),
            since: Some(123),
        };
        let result = client.fetch_messages(params).await;

        mock.assert();
        assert!(result.is_ok());
        assert!(result.unwrap().is_empty());
    }
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_node_cache_hit() {
|
||||
let http_client = reqwest::Client::new();
|
||||
let config = PotatomeshConfig {
|
||||
base_url: "http://localhost:8080".to_string(),
|
||||
poll_interval_secs: 60,
|
||||
};
|
||||
let client = PotatoClient::new(http_client, config);
|
||||
let node = PotatoNode {
|
||||
node_id: "!1234".to_string(),
|
||||
short_name: Some("test".to_string()),
|
||||
long_name: "test node".to_string(),
|
||||
role: None,
|
||||
hw_model: None,
|
||||
last_heard: None,
|
||||
first_heard: None,
|
||||
latitude: None,
|
||||
longitude: None,
|
||||
altitude: None,
|
||||
};
|
||||
client
|
||||
.nodes_cache
|
||||
.write()
|
||||
.await
|
||||
.insert("1234".to_string(), node.clone());
|
||||
let result = client.get_node("!1234").await;
|
||||
assert!(result.is_ok());
|
||||
let got = result.unwrap();
|
||||
assert_eq!(got.node_id, "!1234");
|
||||
assert_eq!(got.short_name.unwrap(), "test");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_node_cache_miss() {
|
||||
let mut server = mockito::Server::new_async().await;
|
||||
let mock = server
|
||||
.mock("GET", "/api/nodes/1234")
|
||||
.match_query(mockito::Matcher::Any)
|
||||
.with_status(200)
|
||||
.with_header("content-type", "application/json")
|
||||
.with_body(
|
||||
r#"
|
||||
{
|
||||
"node_id": "!1234", "short_name": "test", "long_name": "test node",
|
||||
"role": "test", "hw_model": "test", "last_heard": 1, "first_heard": 1,
|
||||
"latitude": 1.0, "longitude": 1.0, "altitude": 1.0
|
||||
}
|
||||
"#,
|
||||
)
|
||||
.create();
|
||||
|
||||
let http_client = reqwest::Client::new();
|
||||
let config = PotatomeshConfig {
|
||||
base_url: server.url(),
|
||||
poll_interval_secs: 60,
|
||||
};
|
||||
let client = PotatoClient::new(http_client, config);
|
||||
|
||||
// first call, should miss cache and hit the server
|
||||
let result = client.get_node("!1234").await;
|
||||
mock.assert();
|
||||
assert!(result.is_ok());
|
||||
|
||||
// second call, should hit cache
|
||||
let result2 = client.get_node("!1234").await;
|
||||
assert!(result2.is_ok());
|
||||
// mockito would panic here if we made a second request
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_node_error() {
|
||||
let mut server = mockito::Server::new_async().await;
|
||||
let mock = server
|
||||
.mock("GET", "/api/nodes/1234")
|
||||
.with_status(500)
|
||||
.create();
|
||||
|
||||
let http_client = reqwest::Client::new();
|
||||
let config = PotatomeshConfig {
|
||||
base_url: server.url(),
|
||||
poll_interval_secs: 60,
|
||||
};
|
||||
let client = PotatoClient::new(http_client, config);
|
||||
let result = client.get_node("!1234").await;
|
||||
mock.assert();
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 62 KiB |
@@ -0,0 +1,71 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
require "base64"
|
||||
require "meshtastic"
|
||||
require "openssl"
|
||||
|
||||
channel_name = "BerlinMesh"
|
||||
|
||||
# === Inputs from your packet ===
|
||||
cipher_b64 = "Q1R7tgI5yXzMXu/3"
|
||||
psk_b64 = "Nmh7EooP2Tsc+7pvPwXLcEDDuYhk+fBo2GLnbA1Y1sg="
|
||||
packet_id = 3_915_687_257
|
||||
from_id = "!9e95cf60"
|
||||
channel = 35
|
||||
|
||||
# === Decode key and ciphertext ===
|
||||
key = Base64.decode64(psk_b64) # 32 bytes -> AES-256
|
||||
ciphertext = Base64.decode64(cipher_b64)
|
||||
|
||||
# === Derive numeric node id from Meshtastic-style string ===
|
||||
hex_str = from_id.sub(/^!/, "") # "9e95cf60"
|
||||
from_node = hex_str.to_i(16) # 0x9e95cf60
|
||||
|
||||
# === Build nonce exactly like Meshtastic CryptoEngine ===
|
||||
# Little-endian 64-bit packet ID + little-endian 32-bit node ID + 4 zero bytes
|
||||
nonce = [packet_id].pack("Q<") # uint64, little-endian
|
||||
nonce += [from_node].pack("L<") # uint32, little-endian
|
||||
nonce += "\x00" * 4 # extraNonce == 0 for PSK channel msgs
|
||||
|
||||
raise "Nonce must be 16 bytes" unless nonce.bytesize == 16
|
||||
raise "Key must be 32 bytes" unless key.bytesize == 32
|
||||
|
||||
# === AES-256-CTR decrypt ===
|
||||
cipher = OpenSSL::Cipher.new("aes-256-ctr")
|
||||
cipher.decrypt
|
||||
cipher.key = key
|
||||
cipher.iv = nonce
|
||||
|
||||
plaintext = cipher.update(ciphertext) + cipher.final
|
||||
|
||||
# At this point `plaintext` is the raw Meshtastic protobuf payload
|
||||
plaintext = plaintext.bytes.pack("C*")
|
||||
data = Meshtastic::Data.decode(plaintext)
|
||||
msg = data.payload.dup.force_encoding("UTF-8")
|
||||
puts msg
|
||||
|
||||
# Gets channel number from name and psk
|
||||
def channel_hash(name, psk_b64)
|
||||
name_bytes = name.b # UTF-8 bytes
|
||||
psk_bytes = Base64.decode64(psk_b64)
|
||||
|
||||
hn = name_bytes.bytes.reduce(0) { |acc, b| acc ^ b } # XOR over name
|
||||
hp = psk_bytes.bytes.reduce(0) { |acc, b| acc ^ b } # XOR over PSK
|
||||
|
||||
(hn ^ hp) & 0xFF
|
||||
end
|
||||
|
||||
channel_h = channel_hash(channel_name, psk_b64)
|
||||
puts channel_h
|
||||
puts channel == channel_h
|
||||
@@ -0,0 +1,491 @@
|
||||
hash,name
|
||||
0,Mesh1
|
||||
1,DEMO
|
||||
1,Downlink1
|
||||
1,NightNet
|
||||
1,Sideband1
|
||||
2,CommsNet
|
||||
2,Mesh3
|
||||
2,PulseNet
|
||||
3,LightNet
|
||||
3,Mesh2
|
||||
3,WestStar
|
||||
3,WolfMesh
|
||||
4,Mesh5
|
||||
4,OPERATIONS
|
||||
4,Rescue1
|
||||
4,SignalFire
|
||||
5,Base2
|
||||
5,DeltaNet
|
||||
5,Mesh4
|
||||
5,MeshMunich
|
||||
6,Base1
|
||||
7,MeshTest
|
||||
7,Rescue2
|
||||
7,ZuluMesh
|
||||
8,CourierNet
|
||||
8,Fire2
|
||||
8,Grid2
|
||||
8,LongFast
|
||||
8,RescueTeam
|
||||
9,AlphaNet
|
||||
9,MeshGrid
|
||||
10,TestBerlin
|
||||
10,WaWi
|
||||
11,Fire1
|
||||
11,Grid1
|
||||
12,FoxNet
|
||||
12,MeshRuhr
|
||||
12,RadioNet
|
||||
13,Signal1
|
||||
13,Zone1
|
||||
14,BetaBerlin
|
||||
14,Signal2
|
||||
14,TangoNet
|
||||
14,Zone2
|
||||
15,BerlinMesh
|
||||
15,LongSlow
|
||||
15,MeshBerlin
|
||||
15,Zone3
|
||||
16,CQ
|
||||
16,EchoMesh
|
||||
16,Freq2
|
||||
16,KiloMesh
|
||||
16,Node2
|
||||
16,PhoenixNet
|
||||
16,Repeater2
|
||||
17,FoxtrotNet
|
||||
17,Node3
|
||||
18,LoRa
|
||||
19,Freq1
|
||||
19,HarmonyNet
|
||||
19,Node1
|
||||
19,RavenNet
|
||||
19,Repeater1
|
||||
20,NomadNet
|
||||
20,SENSOR
|
||||
20,TEST
|
||||
20,test
|
||||
21,BravoNet
|
||||
21,EastStar
|
||||
21,MeshCollective
|
||||
21,SunNet
|
||||
22,Node4
|
||||
22,Uplink1
|
||||
23,EagleNet
|
||||
23,MeshHessen
|
||||
23,Node5
|
||||
24,MediumSlow
|
||||
24,Router1
|
||||
25,Checkpoint1
|
||||
25,HAMNet
|
||||
26,Checkpoint2
|
||||
26,GhostNet
|
||||
27,HQ
|
||||
27,Router2
|
||||
31,DemoBerlin
|
||||
31,FieldNet
|
||||
31,MediumFast
|
||||
32,Clinic
|
||||
32,Convoy
|
||||
32,Daylight
|
||||
32,Town
|
||||
33,Callisto
|
||||
33,CQ1
|
||||
33,Daybreak
|
||||
33,Demo
|
||||
33,East
|
||||
33,LoRaMesh
|
||||
33,Mist
|
||||
34,CQ2
|
||||
34,Freq
|
||||
34,Gold
|
||||
34,Link
|
||||
34,Repeater
|
||||
35,Aquila
|
||||
35,Doctor
|
||||
35,Echo
|
||||
35,Kilo
|
||||
35,Public
|
||||
35,Wyvern
|
||||
36,District
|
||||
36,Hessen
|
||||
36,Io
|
||||
36,LoRaTest
|
||||
36,Operations
|
||||
36,Shadow
|
||||
36,Unit
|
||||
37,Campfire
|
||||
37,City
|
||||
37,Outsider
|
||||
37,Sync
|
||||
38,Beacon
|
||||
38,Collective
|
||||
38,Harbor
|
||||
38,Lion
|
||||
38,Meteor
|
||||
39,Firebird
|
||||
39,Fireteam
|
||||
39,Quasar
|
||||
39,Snow
|
||||
39,Universe
|
||||
39,Uplink
|
||||
40,Checkpoint
|
||||
40,Galaxy
|
||||
40,Jaguar
|
||||
40,Sunset
|
||||
40,Zeta
|
||||
41,Hinterland
|
||||
41,HQ2
|
||||
41,Main
|
||||
41,Meshtastic
|
||||
41,Router
|
||||
41,Valley
|
||||
41,Wander
|
||||
41,Wolfpack
|
||||
42,HQ1
|
||||
42,Lizard
|
||||
42,Packet
|
||||
42,Sahara
|
||||
42,Tunnel
|
||||
43,Anaconda
|
||||
43,Basalt
|
||||
43,Blackout
|
||||
43,Crow
|
||||
43,Dusk
|
||||
43,Falcon
|
||||
43,Lima
|
||||
43,Müggelberg
|
||||
44,Arctic
|
||||
44,Backup
|
||||
44,Bronze
|
||||
44,Corvus
|
||||
44,Cosmos
|
||||
44,LoRaBerlin
|
||||
44,Neukölln
|
||||
44,Safari
|
||||
45,Breeze
|
||||
45,Burrow
|
||||
45,Gale
|
||||
45,Saturn
|
||||
46,Border
|
||||
46,Nest
|
||||
47,Borealis
|
||||
47,Mars
|
||||
47,Path
|
||||
47,Ranger
|
||||
48,Beat
|
||||
48,Berg
|
||||
48,Beta
|
||||
48,Downlink
|
||||
48,Hive
|
||||
48,Rhythm
|
||||
48,Saxony
|
||||
48,Sideband
|
||||
48,Wolf
|
||||
49,Asteroid
|
||||
49,Carbon
|
||||
49,Mesh
|
||||
50,Blizzard
|
||||
50,Runner
|
||||
51,Callsign
|
||||
51,Carpet
|
||||
51,Desert
|
||||
51,Dragon
|
||||
51,Friedrichshain
|
||||
51,Help
|
||||
51,Nebula
|
||||
51,Safe
|
||||
52,Amazon
|
||||
52,Fireline
|
||||
52,Haze
|
||||
52,LoRaHessen
|
||||
52,Platinum
|
||||
52,Sensor
|
||||
52,Test
|
||||
52,Zulu
|
||||
53,Nord
|
||||
53,Rescue
|
||||
53,Secure
|
||||
53,Silver
|
||||
54,Bear
|
||||
54,Hospital
|
||||
54,Munich
|
||||
54,Python
|
||||
54,Rain
|
||||
54,Wind
|
||||
54,Wolves
|
||||
55,Base
|
||||
55,Bolt
|
||||
55,Hawk
|
||||
55,Mirage
|
||||
55,Nightwatch
|
||||
55,Obsidian
|
||||
55,Rock
|
||||
55,Victor
|
||||
55,West
|
||||
56,Aurora
|
||||
56,Dune
|
||||
56,Iron
|
||||
56,Lava
|
||||
56,Nomads
|
||||
57,Copper
|
||||
57,Core
|
||||
57,Spectrum
|
||||
57,Summit
|
||||
58,Colony
|
||||
58,Fire
|
||||
58,Ganymede
|
||||
58,Grid
|
||||
58,Kraken
|
||||
58,Road
|
||||
58,Solstice
|
||||
58,Tundra
|
||||
59,911
|
||||
59,Forest
|
||||
59,Pack
|
||||
60,Berlin
|
||||
60,Chat
|
||||
60,Sierra
|
||||
60,Signal
|
||||
60,Wald
|
||||
60,Zone
|
||||
61,Alpine
|
||||
61,Bridge
|
||||
61,Camp
|
||||
61,Dortmund
|
||||
61,Frontier
|
||||
61,Jungle
|
||||
61,Peak
|
||||
62,Burner
|
||||
62,Dawn
|
||||
62,Europa
|
||||
62,Midnight
|
||||
62,Nightshift
|
||||
62,Prenzlauer
|
||||
62,Safety
|
||||
62,Sector
|
||||
62,Wanderer
|
||||
63,Distress
|
||||
63,Kiez
|
||||
63,Ruhr
|
||||
63,Team
|
||||
64,Epsilon
|
||||
64,Field
|
||||
64,Granite
|
||||
64,Orbit
|
||||
64,Trail
|
||||
64,Whisper
|
||||
65,Central
|
||||
65,Cologne
|
||||
65,Layer
|
||||
65,Relay
|
||||
65,Runners
|
||||
65,Stone
|
||||
65,Tempo
|
||||
66,Polar
|
||||
66,Woods
|
||||
67,Highway
|
||||
67,Kreuzberg
|
||||
67,Leopard
|
||||
67,Metro
|
||||
67,Omega
|
||||
67,Phantom
|
||||
68,Hamburg
|
||||
68,Hydra
|
||||
68,Medic
|
||||
68,Titan
|
||||
69,Command
|
||||
69,Control
|
||||
69,Gamma
|
||||
69,Ghost
|
||||
69,Mercury
|
||||
69,Oasis
|
||||
70,Diamond
|
||||
70,Ham
|
||||
70,HAM
|
||||
70,Leipzig
|
||||
70,Paramedic
|
||||
70,Savanna
|
||||
71,Frankfurt
|
||||
71,Gecko
|
||||
71,Jupiter
|
||||
71,Sensors
|
||||
71,SENSORS
|
||||
71,Sunrise
|
||||
72,Chameleon
|
||||
72,Eagle
|
||||
72,Hilltop
|
||||
72,Teufelsberg
|
||||
73,Firefly
|
||||
73,Steel
|
||||
74,Bravo
|
||||
74,Caravan
|
||||
74,Ost
|
||||
74,Süd
|
||||
75,Emergency
|
||||
75,EMERGENCY
|
||||
75,Nomad
|
||||
75,Watch
|
||||
76,Alert
|
||||
76,Bavaria
|
||||
76,Fog
|
||||
76,Harmony
|
||||
76,Raven
|
||||
77,Admin
|
||||
77,ADMIN
|
||||
77,Den
|
||||
77,Ice
|
||||
77,LoRaNet
|
||||
77,North
|
||||
77,SOS
|
||||
77,Sos
|
||||
77,Wanderers
|
||||
78,Foxtrot
|
||||
78,Med
|
||||
78,Ops
|
||||
79,Flock
|
||||
79,Phoenix
|
||||
79,PRIVATE
|
||||
79,Private
|
||||
79,Signals
|
||||
79,Tiger
|
||||
80,Commune
|
||||
80,Freedom
|
||||
80,Pluto
|
||||
80,Snake
|
||||
80,Squad
|
||||
80,Stuttgart
|
||||
81,Grassland
|
||||
81,Tango
|
||||
81,Union
|
||||
82,Comet
|
||||
82,Flash
|
||||
82,Lightning
|
||||
83,Cloud
|
||||
83,Equinox
|
||||
83,Firewatch
|
||||
83,Fox
|
||||
83,Radio
|
||||
83,Shelter
|
||||
84,Cheetah
|
||||
84,General
|
||||
84,Outpost
|
||||
84,Volcano
|
||||
85,Glacier
|
||||
85,Storm
|
||||
86,Alpha
|
||||
86,Owl
|
||||
86,Panther
|
||||
86,Prairie
|
||||
86,Thunder
|
||||
87,Courier
|
||||
87,Nexus
|
||||
87,South
|
||||
88,Ash
|
||||
88,River
|
||||
88,Syndicate
|
||||
89,Amateur
|
||||
89,Astro
|
||||
89,Avalanche
|
||||
89,Bonfire
|
||||
89,Draco
|
||||
89,Griffin
|
||||
89,Nightfall
|
||||
89,Shade
|
||||
89,Venus
|
||||
90,Charlie
|
||||
90,Delta
|
||||
90,Stratum
|
||||
90,Viper
|
||||
91,Bison
|
||||
91,Tal
|
||||
92,Network
|
||||
92,Scout
|
||||
93,Comms
|
||||
93,Fluss
|
||||
93,Group
|
||||
93,Hub
|
||||
93,Pulse
|
||||
93,Smoke
|
||||
94,Frost
|
||||
94,Rover
|
||||
94,Village
|
||||
95,Cobra
|
||||
95,Liberty
|
||||
95,Ridge
|
||||
97,DarkNet
|
||||
97,NightshiftNet
|
||||
97,Radio2
|
||||
97,Shelter2
|
||||
98,CampNet
|
||||
98,Radio1
|
||||
98,Shelter1
|
||||
98,TangoMesh
|
||||
99,BaseAlpha
|
||||
99,BerlinNet
|
||||
99,SouthStar
|
||||
100,CourierMesh
|
||||
100,Storm1
|
||||
101,Courier2
|
||||
101,GridNet
|
||||
101,OpsCenter
|
||||
102,Courier1
|
||||
103,Storm2
|
||||
104,HawkNet
|
||||
105,BearNet
|
||||
105,StarNet
|
||||
107,emergency
|
||||
107,ZuluNet
|
||||
108,Comms1
|
||||
108,DragonNet
|
||||
108,Hub1
|
||||
109,admin
|
||||
109,NightMesh
|
||||
110,MeshNet
|
||||
111,BaseCharlie
|
||||
111,Comms2
|
||||
111,GridSouth
|
||||
111,Hub2
|
||||
111,MeshNetwork
|
||||
111,WolfNet
|
||||
112,Layer1
|
||||
112,Relay1
|
||||
112,ShortFast
|
||||
113,OpsRoom
|
||||
114,Layer3
|
||||
114,MeshCologne
|
||||
115,Layer2
|
||||
115,Relay2
|
||||
115,SOSBerlin
|
||||
116,Command1
|
||||
116,Control1
|
||||
116,CrowNet
|
||||
116,MeshFrankfurt
|
||||
117,EmergencyBerlin
|
||||
117,GridNorth
|
||||
117,MeshLeipzig
|
||||
117,PacketNet
|
||||
119,Command2
|
||||
119,Control2
|
||||
119,MeshHamburg
|
||||
120,NomadMesh
|
||||
121,NorthStar
|
||||
121,Watch2
|
||||
122,CommandRoom
|
||||
122,ControlRoom
|
||||
122,SyncNet
|
||||
122,Watch1
|
||||
123,PacketRadio
|
||||
123,ShadowNet
|
||||
124,EchoNet
|
||||
124,KiloNet
|
||||
124,Med2
|
||||
124,Ops2
|
||||
125,FoxtrotMesh
|
||||
125,RepeaterHub
|
||||
126,MoonNet
|
||||
127,BaseBravo
|
||||
127,Med1
|
||||
127,Ops1
|
||||
127,WolfDen
|
||||
|
@@ -0,0 +1,736 @@
|
||||
{
|
||||
"59": [
|
||||
"911",
|
||||
"Forest",
|
||||
"Pack"
|
||||
],
|
||||
"77": [
|
||||
"Admin",
|
||||
"ADMIN",
|
||||
"Den",
|
||||
"Ice",
|
||||
"LoRaNet",
|
||||
"North",
|
||||
"SOS",
|
||||
"Sos",
|
||||
"Wanderers"
|
||||
],
|
||||
"109": [
|
||||
"admin",
|
||||
"NightMesh"
|
||||
],
|
||||
"76": [
|
||||
"Alert",
|
||||
"Bavaria",
|
||||
"Fog",
|
||||
"Harmony",
|
||||
"Raven"
|
||||
],
|
||||
"86": [
|
||||
"Alpha",
|
||||
"Owl",
|
||||
"Panther",
|
||||
"Prairie",
|
||||
"Thunder"
|
||||
],
|
||||
"9": [
|
||||
"AlphaNet",
|
||||
"MeshGrid"
|
||||
],
|
||||
"61": [
|
||||
"Alpine",
|
||||
"Bridge",
|
||||
"Camp",
|
||||
"Dortmund",
|
||||
"Frontier",
|
||||
"Jungle",
|
||||
"Peak"
|
||||
],
|
||||
"89": [
|
||||
"Amateur",
|
||||
"Astro",
|
||||
"Avalanche",
|
||||
"Bonfire",
|
||||
"Draco",
|
||||
"Griffin",
|
||||
"Nightfall",
|
||||
"Shade",
|
||||
"Venus"
|
||||
],
|
||||
"52": [
|
||||
"Amazon",
|
||||
"Fireline",
|
||||
"Haze",
|
||||
"LoRaHessen",
|
||||
"Platinum",
|
||||
"Sensor",
|
||||
"Test",
|
||||
"Zulu"
|
||||
],
|
||||
"43": [
|
||||
"Anaconda",
|
||||
"Basalt",
|
||||
"Blackout",
|
||||
"Crow",
|
||||
"Dusk",
|
||||
"Falcon",
|
||||
"Lima",
|
||||
"Müggelberg"
|
||||
],
|
||||
"35": [
|
||||
"Aquila",
|
||||
"Doctor",
|
||||
"Echo",
|
||||
"Kilo",
|
||||
"Public",
|
||||
"Wyvern"
|
||||
],
|
||||
"44": [
|
||||
"Arctic",
|
||||
"Backup",
|
||||
"Bronze",
|
||||
"Corvus",
|
||||
"Cosmos",
|
||||
"LoRaBerlin",
|
||||
"Neukölln",
|
||||
"Safari"
|
||||
],
|
||||
"88": [
|
||||
"Ash",
|
||||
"River",
|
||||
"Syndicate"
|
||||
],
|
||||
"49": [
|
||||
"Asteroid",
|
||||
"Carbon",
|
||||
"Mesh"
|
||||
],
|
||||
"56": [
|
||||
"Aurora",
|
||||
"Dune",
|
||||
"Iron",
|
||||
"Lava",
|
||||
"Nomads"
|
||||
],
|
||||
"55": [
|
||||
"Base",
|
||||
"Bolt",
|
||||
"Hawk",
|
||||
"Mirage",
|
||||
"Nightwatch",
|
||||
"Obsidian",
|
||||
"Rock",
|
||||
"Victor",
|
||||
"West"
|
||||
],
|
||||
"6": [
|
||||
"Base1"
|
||||
],
|
||||
"5": [
|
||||
"Base2",
|
||||
"DeltaNet",
|
||||
"Mesh4",
|
||||
"MeshMunich"
|
||||
],
|
||||
"99": [
|
||||
"BaseAlpha",
|
||||
"BerlinNet",
|
||||
"SouthStar"
|
||||
],
|
||||
"127": [
|
||||
"BaseBravo",
|
||||
"Med1",
|
||||
"Ops1",
|
||||
"WolfDen"
|
||||
],
|
||||
"111": [
|
||||
"BaseCharlie",
|
||||
"Comms2",
|
||||
"GridSouth",
|
||||
"Hub2",
|
||||
"MeshNetwork",
|
||||
"WolfNet"
|
||||
],
|
||||
"38": [
|
||||
"Beacon",
|
||||
"Collective",
|
||||
"Harbor",
|
||||
"Lion",
|
||||
"Meteor"
|
||||
],
|
||||
"54": [
|
||||
"Bear",
|
||||
"Hospital",
|
||||
"Munich",
|
||||
"Python",
|
||||
"Rain",
|
||||
"Wind",
|
||||
"Wolves"
|
||||
],
|
||||
"105": [
|
||||
"BearNet",
|
||||
"StarNet"
|
||||
],
|
||||
"48": [
|
||||
"Beat",
|
||||
"Berg",
|
||||
"Beta",
|
||||
"Downlink",
|
||||
"Hive",
|
||||
"Rhythm",
|
||||
"Saxony",
|
||||
"Sideband",
|
||||
"Wolf"
|
||||
],
|
||||
"60": [
|
||||
"Berlin",
|
||||
"Chat",
|
||||
"Sierra",
|
||||
"Signal",
|
||||
"Wald",
|
||||
"Zone"
|
||||
],
|
||||
"15": [
|
||||
"BerlinMesh",
|
||||
"LongSlow",
|
||||
"MeshBerlin",
|
||||
"Zone3"
|
||||
],
|
||||
"14": [
|
||||
"BetaBerlin",
|
||||
"Signal2",
|
||||
"TangoNet",
|
||||
"Zone2"
|
||||
],
|
||||
"91": [
|
||||
"Bison",
|
||||
"Tal"
|
||||
],
|
||||
"50": [
|
||||
"Blizzard",
|
||||
"Runner"
|
||||
],
|
||||
"46": [
|
||||
"Border",
|
||||
"Nest"
|
||||
],
|
||||
"47": [
|
||||
"Borealis",
|
||||
"Mars",
|
||||
"Path",
|
||||
"Ranger"
|
||||
],
|
||||
"74": [
|
||||
"Bravo",
|
||||
"Caravan",
|
||||
"Ost",
|
||||
"Süd"
|
||||
],
|
||||
"21": [
|
||||
"BravoNet",
|
||||
"EastStar",
|
||||
"MeshCollective",
|
||||
"SunNet"
|
||||
],
|
||||
"45": [
|
||||
"Breeze",
|
||||
"Burrow",
|
||||
"Gale",
|
||||
"Saturn"
|
||||
],
|
||||
"62": [
|
||||
"Burner",
|
||||
"Dawn",
|
||||
"Europa",
|
||||
"Midnight",
|
||||
"Nightshift",
|
||||
"Prenzlauer",
|
||||
"Safety",
|
||||
"Sector",
|
||||
"Wanderer"
|
||||
],
|
||||
"33": [
|
||||
"Callisto",
|
||||
"CQ1",
|
||||
"Daybreak",
|
||||
"Demo",
|
||||
"East",
|
||||
"LoRaMesh",
|
||||
"Mist"
|
||||
],
|
||||
"51": [
|
||||
"Callsign",
|
||||
"Carpet",
|
||||
"Desert",
|
||||
"Dragon",
|
||||
"Friedrichshain",
|
||||
"Help",
|
||||
"Nebula",
|
||||
"Safe"
|
||||
],
|
||||
"37": [
|
||||
"Campfire",
|
||||
"City",
|
||||
"Outsider",
|
||||
"Sync"
|
||||
],
|
||||
"98": [
|
||||
"CampNet",
|
||||
"Radio1",
|
||||
"Shelter1",
|
||||
"TangoMesh"
|
||||
],
|
||||
"65": [
|
||||
"Central",
|
||||
"Cologne",
|
||||
"Layer",
|
||||
"Relay",
|
||||
"Runners",
|
||||
"Stone",
|
||||
"Tempo"
|
||||
],
|
||||
"72": [
|
||||
"Chameleon",
|
||||
"Eagle",
|
||||
"Hilltop",
|
||||
"Teufelsberg"
|
||||
],
|
||||
"90": [
|
||||
"Charlie",
|
||||
"Delta",
|
||||
"Stratum",
|
||||
"Viper"
|
||||
],
|
||||
"40": [
|
||||
"Checkpoint",
|
||||
"Galaxy",
|
||||
"Jaguar",
|
||||
"Sunset",
|
||||
"Zeta"
|
||||
],
|
||||
"25": [
|
||||
"Checkpoint1",
|
||||
"HAMNet"
|
||||
],
|
||||
"26": [
|
||||
"Checkpoint2",
|
||||
"GhostNet"
|
||||
],
|
||||
"84": [
|
||||
"Cheetah",
|
||||
"General",
|
||||
"Outpost",
|
||||
"Volcano"
|
||||
],
|
||||
"32": [
|
||||
"Clinic",
|
||||
"Convoy",
|
||||
"Daylight",
|
||||
"Town"
|
||||
],
|
||||
"83": [
|
||||
"Cloud",
|
||||
"Equinox",
|
||||
"Firewatch",
|
||||
"Fox",
|
||||
"Radio",
|
||||
"Shelter"
|
||||
],
|
||||
"95": [
|
||||
"Cobra",
|
||||
"Liberty",
|
||||
"Ridge"
|
||||
],
|
||||
"58": [
|
||||
"Colony",
|
||||
"Fire",
|
||||
"Ganymede",
|
||||
"Grid",
|
||||
"Kraken",
|
||||
"Road",
|
||||
"Solstice",
|
||||
"Tundra"
|
||||
],
|
||||
"82": [
|
||||
"Comet",
|
||||
"Flash",
|
||||
"Lightning"
|
||||
],
|
||||
"69": [
|
||||
"Command",
|
||||
"Control",
|
||||
"Gamma",
|
||||
"Ghost",
|
||||
"Mercury",
|
||||
"Oasis"
|
||||
],
|
||||
"116": [
|
||||
"Command1",
|
||||
"Control1",
|
||||
"CrowNet",
|
||||
"MeshFrankfurt"
|
||||
],
|
||||
"119": [
|
||||
"Command2",
|
||||
"Control2",
|
||||
"MeshHamburg"
|
||||
],
|
||||
"122": [
|
||||
"CommandRoom",
|
||||
"ControlRoom",
|
||||
"SyncNet",
|
||||
"Watch1"
|
||||
],
|
||||
"93": [
|
||||
"Comms",
|
||||
"Fluss",
|
||||
"Group",
|
||||
"Hub",
|
||||
"Pulse",
|
||||
"Smoke"
|
||||
],
|
||||
"108": [
|
||||
"Comms1",
|
||||
"DragonNet",
|
||||
"Hub1"
|
||||
],
|
||||
"2": [
|
||||
"CommsNet",
|
||||
"Mesh3",
|
||||
"PulseNet"
|
||||
],
|
||||
"80": [
|
||||
"Commune",
|
||||
"Freedom",
|
||||
"Pluto",
|
||||
"Snake",
|
||||
"Squad",
|
||||
"Stuttgart"
|
||||
],
|
||||
"57": [
|
||||
"Copper",
|
||||
"Core",
|
||||
"Spectrum",
|
||||
"Summit"
|
||||
],
|
||||
"87": [
|
||||
"Courier",
|
||||
"Nexus",
|
||||
"South"
|
||||
],
|
||||
"102": [
|
||||
"Courier1"
|
||||
],
|
||||
"101": [
|
||||
"Courier2",
|
||||
"GridNet",
|
||||
"OpsCenter"
|
||||
],
|
||||
"100": [
|
||||
"CourierMesh",
|
||||
"Storm1"
|
||||
],
|
||||
"8": [
|
||||
"CourierNet",
|
||||
"Fire2",
|
||||
"Grid2",
|
||||
"LongFast",
|
||||
"RescueTeam"
|
||||
],
|
||||
"16": [
|
||||
"CQ",
|
||||
"EchoMesh",
|
||||
"Freq2",
|
||||
"KiloMesh",
|
||||
"Node2",
|
||||
"PhoenixNet",
|
||||
"Repeater2"
|
||||
],
|
||||
"34": [
|
||||
"CQ2",
|
||||
"Freq",
|
||||
"Gold",
|
||||
"Link",
|
||||
"Repeater"
|
||||
],
|
||||
"97": [
|
||||
"DarkNet",
|
||||
"NightshiftNet",
|
||||
"Radio2",
|
||||
"Shelter2"
|
||||
],
|
||||
"1": [
|
||||
"DEMO",
|
||||
"Downlink1",
|
||||
"NightNet",
|
||||
"Sideband1"
|
||||
],
|
||||
"31": [
|
||||
"DemoBerlin",
|
||||
"FieldNet",
|
||||
"MediumFast"
|
||||
],
|
||||
"70": [
|
||||
"Diamond",
|
||||
"Ham",
|
||||
"HAM",
|
||||
"Leipzig",
|
||||
"Paramedic",
|
||||
"Savanna"
|
||||
],
|
||||
"63": [
|
||||
"Distress",
|
||||
"Kiez",
|
||||
"Ruhr",
|
||||
"Team"
|
||||
],
|
||||
"36": [
|
||||
"District",
|
||||
"Hessen",
|
||||
"Io",
|
||||
"LoRaTest",
|
||||
"Operations",
|
||||
"Shadow",
|
||||
"Unit"
|
||||
],
|
||||
"23": [
|
||||
"EagleNet",
|
||||
"MeshHessen",
|
||||
"Node5"
|
||||
],
|
||||
"124": [
|
||||
"EchoNet",
|
||||
"KiloNet",
|
||||
"Med2",
|
||||
"Ops2"
|
||||
],
|
||||
"75": [
|
||||
"Emergency",
|
||||
"EMERGENCY",
|
||||
"Nomad",
|
||||
"Watch"
|
||||
],
|
||||
"107": [
|
||||
"emergency",
|
||||
"ZuluNet"
|
||||
],
|
||||
"117": [
|
||||
"EmergencyBerlin",
|
||||
"GridNorth",
|
||||
"MeshLeipzig",
|
||||
"PacketNet"
|
||||
],
|
||||
"64": [
|
||||
"Epsilon",
|
||||
"Field",
|
||||
"Granite",
|
||||
"Orbit",
|
||||
"Trail",
|
||||
"Whisper"
|
||||
],
|
||||
"11": [
|
||||
"Fire1",
|
||||
"Grid1"
|
||||
],
|
||||
"39": [
|
||||
"Firebird",
|
||||
"Fireteam",
|
||||
"Quasar",
|
||||
"Snow",
|
||||
"Universe",
|
||||
"Uplink"
|
||||
],
|
||||
"73": [
|
||||
"Firefly",
|
||||
"Steel"
|
||||
],
|
||||
"79": [
|
||||
"Flock",
|
||||
"Phoenix",
|
||||
"PRIVATE",
|
||||
"Private",
|
||||
"Signals",
|
||||
"Tiger"
|
||||
],
|
||||
"12": [
|
||||
"FoxNet",
|
||||
"MeshRuhr",
|
||||
"RadioNet"
|
||||
],
|
||||
"78": [
|
||||
"Foxtrot",
|
||||
"Med",
|
||||
"Ops"
|
||||
],
|
||||
"125": [
|
||||
"FoxtrotMesh",
|
||||
"RepeaterHub"
|
||||
],
|
||||
"17": [
|
||||
"FoxtrotNet",
|
||||
"Node3"
|
||||
],
|
||||
"71": [
|
||||
"Frankfurt",
|
||||
"Gecko",
|
||||
"Jupiter",
|
||||
"Sensors",
|
||||
"SENSORS",
|
||||
"Sunrise"
|
||||
],
|
||||
"19": [
|
||||
"Freq1",
|
||||
"HarmonyNet",
|
||||
"Node1",
|
||||
"RavenNet",
|
||||
"Repeater1"
|
||||
],
|
||||
"94": [
|
||||
"Frost",
|
||||
"Rover",
|
||||
"Village"
|
||||
],
|
||||
"85": [
|
||||
"Glacier",
|
||||
"Storm"
|
||||
],
|
||||
"81": [
|
||||
"Grassland",
|
||||
"Tango",
|
||||
"Union"
|
||||
],
|
||||
"68": [
|
||||
"Hamburg",
|
||||
"Hydra",
|
||||
"Medic",
|
||||
"Titan"
|
||||
],
|
||||
"104": [
|
||||
"HawkNet"
|
||||
],
|
||||
"67": [
|
||||
"Highway",
|
||||
"Kreuzberg",
|
||||
"Leopard",
|
||||
"Metro",
|
||||
"Omega",
|
||||
"Phantom"
|
||||
],
|
||||
"41": [
|
||||
"Hinterland",
|
||||
"HQ2",
|
||||
"Main",
|
||||
"Meshtastic",
|
||||
"Router",
|
||||
"Valley",
|
||||
"Wander",
|
||||
"Wolfpack"
|
||||
],
|
||||
"27": [
|
||||
"HQ",
|
||||
"Router2"
|
||||
],
|
||||
"42": [
|
||||
"HQ1",
|
||||
"Lizard",
|
||||
"Packet",
|
||||
"Sahara",
|
||||
"Tunnel"
|
||||
],
|
||||
"112": [
|
||||
"Layer1",
|
||||
"Relay1",
|
||||
"ShortFast"
|
||||
],
|
||||
"115": [
|
||||
"Layer2",
|
||||
"Relay2",
|
||||
"SOSBerlin"
|
||||
],
|
||||
"114": [
|
||||
"Layer3",
|
||||
"MeshCologne"
|
||||
],
|
||||
"3": [
|
||||
"LightNet",
|
||||
"Mesh2",
|
||||
"WestStar",
|
||||
"WolfMesh"
|
||||
],
|
||||
"18": [
|
||||
"LoRa"
|
||||
],
|
||||
"24": [
|
||||
"MediumSlow",
|
||||
"Router1"
|
||||
],
|
||||
"0": [
|
||||
"Mesh1"
|
||||
],
|
||||
"4": [
|
||||
"Mesh5",
|
||||
"OPERATIONS",
|
||||
"Rescue1",
|
||||
"SignalFire"
|
||||
],
|
||||
"110": [
|
||||
"MeshNet"
|
||||
],
|
||||
"7": [
|
||||
"MeshTest",
|
||||
"Rescue2",
|
||||
"ZuluMesh"
|
||||
],
|
||||
"126": [
|
||||
"MoonNet"
|
||||
],
|
||||
"92": [
|
||||
"Network",
|
||||
"Scout"
|
||||
],
|
||||
"22": [
|
||||
"Node4",
|
||||
"Uplink1"
|
||||
],
|
||||
"120": [
|
||||
"NomadMesh"
|
||||
],
|
||||
"20": [
|
||||
"NomadNet",
|
||||
"SENSOR",
|
||||
"TEST",
|
||||
"test"
|
||||
],
|
||||
"53": [
|
||||
"Nord",
|
||||
"Rescue",
|
||||
"Secure",
|
||||
"Silver"
|
||||
],
|
||||
"121": [
|
||||
"NorthStar",
|
||||
"Watch2"
|
||||
],
|
||||
"113": [
|
||||
"OpsRoom"
|
||||
],
|
||||
"123": [
|
||||
"PacketRadio",
|
||||
"ShadowNet"
|
||||
],
|
||||
"66": [
|
||||
"Polar",
|
||||
"Woods"
|
||||
],
|
||||
"13": [
|
||||
"Signal1",
|
||||
"Zone1"
|
||||
],
|
||||
"103": [
|
||||
"Storm2"
|
||||
],
|
||||
"10": [
|
||||
"TestBerlin",
|
||||
"WaWi"
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,134 @@
|
||||
#!/usr/bin/env ruby
|
||||
# frozen_string_literal: true
|
||||
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
require "base64"
|
||||
require "json"
|
||||
require "csv"
|
||||
|
||||
# --- CONFIG --------------------------------------------------------
|
||||
|
||||
# The PSK you want. Here: public mesh, "AQ==" (0x01).
|
||||
PSK_B64 = ENV.fetch("PSK_B64", "AQ==")
|
||||
|
||||
# 1000 potential channel candidate names for rainbow indices.
|
||||
CANDIDATE_NAMES = %w[
|
||||
911 Admin ADMIN admin Alert Alpha AlphaNet Alpine Amateur Amazon Anaconda Aquila Arctic Ash Asteroid Astro Aurora Avalanche Backup Basalt Base Base1 Base2 BaseAlpha BaseBravo BaseCharlie Bavaria Beacon Bear BearNet Beat Berg Berlin BerlinMesh BerlinNet Beta BetaBerlin Bison Blackout Blizzard Bolt Bonfire Border Borealis Bravo BravoNet Breeze Bridge Bronze Burner Burrow Callisto Callsign Camp Campfire CampNet Caravan Carbon Carpet Central Chameleon Charlie Chat Checkpoint Checkpoint1 Checkpoint2 Cheetah City Clinic Cloud Cobra Collective Cologne Colony Comet Command Command1 Command2 CommandRoom Comms Comms1 Comms2 CommsNet Commune Control Control1 Control2 ControlRoom Convoy Copper Core Corvus Cosmos Courier Courier1 Courier2 CourierMesh CourierNet CQ CQ1 CQ2 Crow CrowNet DarkNet Dawn Daybreak Daylight Delta DeltaNet Demo DEMO DemoBerlin Den Desert Diamond Distress District Doctor Dortmund Downlink Downlink1 Draco Dragon DragonNet Dune Dusk Eagle EagleNet East EastStar Echo EchoMesh EchoNet Emergency emergency EMERGENCY EmergencyBerlin Epsilon Equinox Europa Falcon Field FieldNet Fire Fire1 Fire2 Firebird Firefly Fireline Fireteam Firewatch Flash Flock Fluss Fog Forest Fox FoxNet Foxtrot FoxtrotMesh FoxtrotNet Frankfurt Freedom Freq Freq1 Freq2 Friedrichshain Frontier Frost Galaxy Gale Gamma Ganymede Gecko General Ghost GhostNet Glacier Gold Granite Grassland Grid Grid1 Grid2 GridNet GridNorth GridSouth Griffin Group Ham HAM Hamburg HAMNet Harbor Harmony HarmonyNet Hawk HawkNet Haze Help Hessen Highway Hilltop Hinterland Hive Hospital HQ HQ1 HQ2 Hub Hub1 Hub2 Hydra Ice Io Iron Jaguar Jungle Jupiter Kiez Kilo KiloMesh KiloNet Kraken Kreuzberg Lava Layer Layer1 Layer2 Layer3 Leipzig Leopard Liberty LightNet Lightning Lima Link Lion Lizard LongFast LongSlow LoRa LoRaBerlin LoRaHessen LoRaMesh LoRaNet LoRaTest Main Mars Med Med1 Med2 Medic MediumFast MediumSlow Mercury Mesh Mesh1 Mesh2 Mesh3 Mesh4 Mesh5 MeshBerlin MeshCollective MeshCologne MeshFrankfurt MeshGrid 
MeshHamburg MeshHessen MeshLeipzig MeshMunich MeshNet MeshNetwork MeshRuhr Meshtastic MeshTest Meteor Metro Midnight Mirage Mist MoonNet Munich Müggelberg Nebula Nest Network Neukölln Nexus Nightfall NightMesh NightNet Nightshift NightshiftNet Nightwatch Node1 Node2 Node3 Node4 Node5 Nomad NomadMesh NomadNet Nomads Nord North NorthStar Oasis Obsidian Omega Operations OPERATIONS Ops Ops1 Ops2 OpsCenter OpsRoom Orbit Ost Outpost Outsider Owl Pack Packet PacketNet PacketRadio Panther Paramedic Path Peak Phantom Phoenix PhoenixNet Platinum Pluto Polar Prairie Prenzlauer PRIVATE Private Public Pulse PulseNet Python Quasar Radio Radio1 Radio2 RadioNet Rain Ranger Raven RavenNet Relay Relay1 Relay2 Repeater Repeater1 Repeater2 RepeaterHub Rescue Rescue1 Rescue2 RescueTeam Rhythm Ridge River Road Rock Router Router1 Router2 Rover Ruhr Runner Runners Safari Safe Safety Sahara Saturn Savanna Saxony Scout Sector Secure Sensor SENSOR Sensors SENSORS Shade Shadow ShadowNet Shelter Shelter1 Shelter2 ShortFast Sideband Sideband1 Sierra Signal Signal1 Signal2 SignalFire Signals Silver Smoke Snake Snow Solstice SOS Sos SOSBerlin South SouthStar Spectrum Squad StarNet Steel Stone Storm Storm1 Storm2 Stratum Stuttgart Summit SunNet Sunrise Sunset Sync SyncNet Syndicate Süd Tal Tango TangoMesh TangoNet Team Tempo Test TEST test TestBerlin Teufelsberg Thunder Tiger Titan Town Trail Tundra Tunnel Union Unit Universe Uplink Uplink1 Valley Venus Victor Village Viper Volcano Wald Wander Wanderer Wanderers Watch Watch1 Watch2 WaWi West WestStar Whisper Wind Wolf WolfDen WolfMesh WolfNet Wolfpack Wolves Woods Wyvern Zeta Zone Zone1 Zone2 Zone3 Zulu ZuluMesh ZuluNet
|
||||
]
|
||||
|
||||
# Output filenames
|
||||
CSV_OUT = ENV.fetch("CSV_OUT", "rainbow.csv")
|
||||
JSON_OUT = ENV.fetch("JSON_OUT", "rainbow.json")
|
||||
|
||||
# --- HASH FUNCTION -------------------------------------------------
|
||||
|
||||
def xor_bytes(str_or_bytes)
|
||||
bytes = str_or_bytes.is_a?(String) ? str_or_bytes.bytes : str_or_bytes
|
||||
bytes.reduce(0) { |acc, b| (acc ^ b) & 0xFF }
|
||||
end
|
||||
|
||||
def expanded_key(psk_b64)
|
||||
raw = Base64.decode64(psk_b64 || "")
|
||||
|
||||
case raw.bytesize
|
||||
when 0
|
||||
# no encryption: length 0, xor = 0
|
||||
"".b
|
||||
when 1
|
||||
alias_index = raw.bytes.first
|
||||
alias_keys = {
|
||||
1 => [
|
||||
0xD4, 0xF1, 0xBB, 0x3A, 0x20, 0x29, 0x07, 0x59,
|
||||
0xF0, 0xBC, 0xFF, 0xAB, 0xCF, 0x4E, 0x69, 0x01,
|
||||
].pack("C*"),
|
||||
2 => [
|
||||
0x38, 0x4B, 0xBC, 0xC0, 0x1D, 0xC0, 0x22, 0xD1,
|
||||
0x81, 0xBF, 0x36, 0xB8, 0x61, 0x21, 0xE1, 0xFB,
|
||||
0x96, 0xB7, 0x2E, 0x55, 0xBF, 0x74, 0x22, 0x7E,
|
||||
0x9D, 0x6A, 0xFB, 0x48, 0xD6, 0x4C, 0xB1, 0xA1,
|
||||
].pack("C*"),
|
||||
}
|
||||
alias_keys.fetch(alias_index) { raise "Unknown PSK alias #{alias_index}" }
|
||||
when 2..15
|
||||
# pad to 16 (AES128)
|
||||
(raw.bytes + [0] * (16 - raw.bytesize)).pack("C*")
|
||||
when 16
|
||||
raw
|
||||
when 17..31
|
||||
# pad to 32 (AES256)
|
||||
(raw.bytes + [0] * (32 - raw.bytesize)).pack("C*")
|
||||
when 32
|
||||
raw
|
||||
else
|
||||
raise "PSK too long (#{raw.bytesize} bytes)"
|
||||
end
|
||||
end
|
||||
|
||||
def channel_hash(name, psk_b64)
|
||||
effective_name = name.b
|
||||
key = expanded_key(psk_b64)
|
||||
|
||||
h_name = xor_bytes(effective_name)
|
||||
h_key = xor_bytes(key)
|
||||
|
||||
(h_name ^ h_key) & 0xFF
|
||||
end
|
||||
|
||||
# --- BUILD RAINBOW TABLE -------------------------------------------
|
||||
|
||||
psk_b64 = PSK_B64
|
||||
puts "Using PSK_B64=#{psk_b64.inspect}"
|
||||
|
||||
hash_to_names = Hash.new { |h, k| h[k] = [] }
|
||||
|
||||
CANDIDATE_NAMES.each do |name|
|
||||
h = channel_hash(name, psk_b64)
|
||||
hash_to_names[h] << name
|
||||
end
|
||||
|
||||
# --- WRITE CSV (hash,name) -----------------------------------------
|
||||
|
||||
CSV.open(CSV_OUT, "w") do |csv|
|
||||
csv << %w[hash name]
|
||||
hash_to_names.keys.sort.each do |h|
|
||||
hash_to_names[h].each do |name|
|
||||
csv << [h, name]
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
puts "Wrote CSV rainbow table to #{CSV_OUT}"
|
||||
|
||||
# --- WRITE JSON ({hash: [names...]}) -------------------------------
|
||||
|
||||
json_hash = hash_to_names.transform_keys(&:to_s)
|
||||
File.write(JSON_OUT, JSON.pretty_generate(json_hash))
|
||||
|
||||
puts "Wrote JSON rainbow table to #{JSON_OUT}"
|
||||
|
||||
# --- OPTIONAL: interactive query -----------------------------------
|
||||
|
||||
if ARGV.first == "query"
|
||||
target = Integer(ARGV[1] || raise("Usage: #{File.basename($0)} query <hash>"))
|
||||
names = hash_to_names[target]
|
||||
if names.empty?
|
||||
puts "No names for hash #{target}"
|
||||
else
|
||||
puts "Names for hash #{target}:"
|
||||
names.each { |n| puts " - #{n}" }
|
||||
end
|
||||
else
|
||||
puts "Run again with: #{File.basename($0)} query <hash> # to inspect a specific hash"
|
||||
end
|
||||
@@ -0,0 +1,437 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""Unit tests for :mod:`data.mesh_ingestor.daemon`."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
import threading
|
||||
import types
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
|
||||
REPO_ROOT = Path(__file__).resolve().parents[1]
|
||||
if str(REPO_ROOT) not in sys.path:
|
||||
sys.path.insert(0, str(REPO_ROOT))
|
||||
|
||||
from data.mesh_ingestor import daemon
|
||||
|
||||
|
||||
class FakeEvent:
|
||||
"""Test double for :class:`threading.Event` that can auto-set itself."""
|
||||
|
||||
instances: list["FakeEvent"] = []
|
||||
|
||||
def __init__(self, *, auto_set_on_wait: bool = False):
|
||||
self._is_set = False
|
||||
self._auto_set_on_wait = auto_set_on_wait
|
||||
self.wait_calls: list[Any] = []
|
||||
FakeEvent.instances.append(self)
|
||||
|
||||
def set(self) -> None:
|
||||
"""Mark the event as set."""
|
||||
|
||||
self._is_set = True
|
||||
|
||||
def is_set(self) -> bool:
|
||||
"""Return whether the event is currently set."""
|
||||
|
||||
return self._is_set
|
||||
|
||||
def wait(self, timeout: float | None = None) -> bool:
|
||||
"""Record waits and optionally auto-set the flag."""
|
||||
|
||||
self.wait_calls.append(timeout)
|
||||
if self._auto_set_on_wait:
|
||||
self._is_set = True
|
||||
return self._is_set
|
||||
|
||||
|
||||
class AutoSetEvent(FakeEvent):
|
||||
"""Event variant that automatically sets on each wait call."""
|
||||
|
||||
def __init__(self): # noqa: D401 - short initializer docstring handled by class
|
||||
super().__init__(auto_set_on_wait=True)
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def reset_fake_events():
|
||||
"""Ensure :class:`FakeEvent` registry is cleared between tests."""
|
||||
|
||||
FakeEvent.instances.clear()
|
||||
yield
|
||||
FakeEvent.instances.clear()
|
||||
|
||||
|
||||
def test_event_wait_default_detection(monkeypatch):
|
||||
"""``_event_wait_allows_default_timeout`` matches defaulted signatures."""
|
||||
|
||||
assert daemon._event_wait_allows_default_timeout() is True
|
||||
|
||||
class _NoDefaultEvent:
|
||||
def wait(self, timeout): # type: ignore[override]
|
||||
return bool(timeout)
|
||||
|
||||
monkeypatch.setattr(
|
||||
daemon, "threading", types.SimpleNamespace(Event=_NoDefaultEvent)
|
||||
)
|
||||
assert daemon._event_wait_allows_default_timeout() is False
|
||||
|
||||
|
||||
def test_subscribe_receive_topics(monkeypatch):
|
||||
"""Subscribing to receive topics returns the exact topic list."""
|
||||
|
||||
subscribed: list[str] = []
|
||||
|
||||
def _record_subscription(_handler, topic):
|
||||
subscribed.append(topic)
|
||||
|
||||
monkeypatch.setattr(
|
||||
daemon, "pub", types.SimpleNamespace(subscribe=_record_subscription)
|
||||
)
|
||||
assert daemon._subscribe_receive_topics() == list(daemon._RECEIVE_TOPICS)
|
||||
assert subscribed == list(daemon._RECEIVE_TOPICS)
|
||||
|
||||
|
||||
def test_node_items_snapshot_handles_mutation(monkeypatch):
|
||||
"""Snapshots tolerate temporary runtime errors while iterating."""
|
||||
|
||||
class MutatingMapping(dict):
|
||||
def __bool__(self):
|
||||
return True
|
||||
|
||||
def items(self): # type: ignore[override]
|
||||
raise RuntimeError("dictionary changed size during iteration")
|
||||
|
||||
monkeypatch.setattr(daemon.time, "sleep", lambda _: None)
|
||||
assert daemon._node_items_snapshot({"a": 1}) == [("a", 1)]
|
||||
assert daemon._node_items_snapshot(MutatingMapping(), retries=1) is None
|
||||
|
||||
class IteratingMapping:
|
||||
def __init__(self):
|
||||
self.calls = 0
|
||||
self._data = {"x": 10, "y": 20}
|
||||
|
||||
def __iter__(self):
|
||||
self.calls += 1
|
||||
if self.calls == 1:
|
||||
raise RuntimeError("dictionary changed size during iteration")
|
||||
return iter(self._data)
|
||||
|
||||
def __getitem__(self, key):
|
||||
return self._data[key]
|
||||
|
||||
mapping = IteratingMapping()
|
||||
assert daemon._node_items_snapshot(mapping, retries=2) == [("x", 10), ("y", 20)]
|
||||
|
||||
|
||||
def test_close_interface_respects_timeout(monkeypatch):
|
||||
"""Long-running close calls emit a timeout debug log."""
|
||||
|
||||
log_calls = []
|
||||
monkeypatch.setattr(daemon.config, "_CLOSE_TIMEOUT_SECS", 0.01)
|
||||
monkeypatch.setattr(
|
||||
daemon.config, "_debug_log", lambda *args, **kwargs: log_calls.append(kwargs)
|
||||
)
|
||||
blocker = threading.Event()
|
||||
|
||||
class SlowInterface:
|
||||
def close(self):
|
||||
blocker.wait(timeout=0.1)
|
||||
|
||||
daemon._close_interface(SlowInterface())
|
||||
assert any("timeout_seconds" in entry for entry in log_calls)
|
||||
|
||||
|
||||
def test_close_interface_immediate_path(monkeypatch):
|
||||
"""A zero timeout calls ``close`` inline without threading."""
|
||||
|
||||
flags = {"called": False}
|
||||
monkeypatch.setattr(daemon.config, "_CLOSE_TIMEOUT_SECS", 0)
|
||||
|
||||
class ImmediateInterface:
|
||||
def close(self):
|
||||
flags["called"] = True
|
||||
|
||||
daemon._close_interface(ImmediateInterface())
|
||||
assert flags["called"] is True
|
||||
|
||||
|
||||
def test_ble_interface_detection():
|
||||
"""Detect BLE module names reliably."""
|
||||
|
||||
class BLE:
|
||||
__module__ = "meshtastic.ble_interface"
|
||||
|
||||
class NonBLE:
|
||||
__module__ = "meshtastic.serial"
|
||||
|
||||
assert daemon._is_ble_interface(BLE()) is True
|
||||
assert daemon._is_ble_interface(NonBLE()) is False
|
||||
assert daemon._is_ble_interface(None) is False
|
||||
|
||||
|
||||
def test_process_ingestor_heartbeat_with_extracted_host(monkeypatch):
|
||||
"""Host id extraction triggers heartbeat announcement flag updates."""
|
||||
|
||||
host_ids: list[str | None] = [None]
|
||||
ingestor_ids: list[str | None] = []
|
||||
queued: list[bool] = []
|
||||
|
||||
monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: host_ids[0])
|
||||
monkeypatch.setattr(
|
||||
daemon.interfaces, "_extract_host_node_id", lambda iface: "!abcd"
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon.handlers,
|
||||
"register_host_node_id",
|
||||
lambda node: host_ids.__setitem__(0, node),
|
||||
)
|
||||
monkeypatch.setattr(daemon.ingestors, "set_ingestor_node_id", ingestor_ids.append)
|
||||
monkeypatch.setattr(
|
||||
daemon.ingestors,
|
||||
"queue_ingestor_heartbeat",
|
||||
lambda force: queued.append(force) or True,
|
||||
)
|
||||
|
||||
assert (
|
||||
daemon._process_ingestor_heartbeat(object(), ingestor_announcement_sent=False)
|
||||
is True
|
||||
)
|
||||
assert host_ids[0] == "!abcd"
|
||||
assert ingestor_ids[-1] == "!abcd"
|
||||
assert queued[-1] is True
|
||||
|
||||
monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: "!abcd")
|
||||
monkeypatch.setattr(
|
||||
daemon.ingestors,
|
||||
"queue_ingestor_heartbeat",
|
||||
lambda force: queued.append(force) or False,
|
||||
)
|
||||
assert (
|
||||
daemon._process_ingestor_heartbeat(object(), ingestor_announcement_sent=True)
|
||||
is True
|
||||
)
|
||||
assert queued[-1] is False
|
||||
|
||||
|
||||
def test_connected_state_branches(monkeypatch):
|
||||
"""Connection state resolves across multiple attribute forms."""
|
||||
|
||||
event = threading.Event()
|
||||
event.set()
|
||||
assert daemon._connected_state(event) is True
|
||||
|
||||
class CallableCandidate:
|
||||
def __call__(self):
|
||||
return False
|
||||
|
||||
assert daemon._connected_state(CallableCandidate()) is False
|
||||
|
||||
class BooleanCandidate:
|
||||
def __bool__(self):
|
||||
raise RuntimeError("cannot bool")
|
||||
|
||||
assert daemon._connected_state(BooleanCandidate()) is None
|
||||
|
||||
class HasIsSet:
|
||||
def is_set(self):
|
||||
raise RuntimeError("broken")
|
||||
|
||||
assert daemon._connected_state(HasIsSet()) is None
|
||||
|
||||
|
||||
def _configure_common_defaults(
|
||||
monkeypatch, *, energy_saving: bool = False, inactivity: float = 0.0
|
||||
):
|
||||
"""Set fast configuration defaults shared by daemon integration tests."""
|
||||
|
||||
monkeypatch.setattr(daemon.config, "SNAPSHOT_SECS", 0)
|
||||
monkeypatch.setattr(daemon.config, "_RECONNECT_INITIAL_DELAY_SECS", 0)
|
||||
monkeypatch.setattr(daemon.config, "_RECONNECT_MAX_DELAY_SECS", 0)
|
||||
monkeypatch.setattr(daemon.config, "_CLOSE_TIMEOUT_SECS", 0)
|
||||
monkeypatch.setattr(daemon.config, "ENERGY_SAVING", energy_saving)
|
||||
monkeypatch.setattr(
|
||||
daemon.config, "_ENERGY_ONLINE_DURATION_SECS", 0 if energy_saving else 0.0
|
||||
)
|
||||
monkeypatch.setattr(daemon.config, "_ENERGY_SLEEP_SECS", 0.0)
|
||||
monkeypatch.setattr(daemon.config, "_INGESTOR_HEARTBEAT_SECS", 0)
|
||||
monkeypatch.setattr(daemon.config, "_INACTIVITY_RECONNECT_SECS", inactivity)
|
||||
monkeypatch.setattr(daemon.config, "CONNECTION", "serial0")
|
||||
|
||||
|
||||
class DummyInterface:
|
||||
"""Lightweight mesh interface stand-in used for daemon integration tests."""
|
||||
|
||||
def __init__(self, *, nodes=None, is_connected=True, client_present=True):
|
||||
self.nodes = nodes if nodes is not None else {"!node": {"id": 1}}
|
||||
self.isConnected = is_connected
|
||||
self.client = object() if client_present else None
|
||||
|
||||
def close(self):
|
||||
return None
|
||||
|
||||
|
||||
def test_main_happy_path(monkeypatch):
|
||||
"""The main loop processes snapshots and heartbeats once before stopping."""
|
||||
|
||||
_configure_common_defaults(monkeypatch)
|
||||
monkeypatch.setattr(
|
||||
daemon,
|
||||
"threading",
|
||||
types.SimpleNamespace(
|
||||
Event=AutoSetEvent,
|
||||
current_thread=threading.current_thread,
|
||||
main_thread=threading.main_thread,
|
||||
),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon, "pub", types.SimpleNamespace(subscribe=lambda *_args, **_kwargs: None)
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon.interfaces,
|
||||
"_create_serial_interface",
|
||||
lambda candidate: (DummyInterface(), candidate),
|
||||
)
|
||||
monkeypatch.setattr(daemon.interfaces, "_ensure_radio_metadata", lambda iface: None)
|
||||
monkeypatch.setattr(
|
||||
daemon.interfaces, "_ensure_channel_metadata", lambda iface: None
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon.interfaces, "_extract_host_node_id", lambda iface: "!host"
|
||||
)
|
||||
|
||||
host_id = {"value": None}
|
||||
monkeypatch.setattr(
|
||||
daemon.handlers,
|
||||
"register_host_node_id",
|
||||
lambda node: host_id.__setitem__("value", node),
|
||||
)
|
||||
monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: host_id["value"])
|
||||
monkeypatch.setattr(daemon.handlers, "upsert_node", lambda *_args, **_kwargs: None)
|
||||
monkeypatch.setattr(daemon.handlers, "last_packet_monotonic", lambda: None)
|
||||
|
||||
heartbeats: list[bool] = []
|
||||
monkeypatch.setattr(
|
||||
daemon.ingestors, "set_ingestor_node_id", lambda *_args, **_kwargs: None
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon.ingestors,
|
||||
"queue_ingestor_heartbeat",
|
||||
lambda force: heartbeats.append(force) or True,
|
||||
)
|
||||
|
||||
daemon.main()
|
||||
assert heartbeats
|
||||
assert host_id["value"] == "!host"
|
||||
assert FakeEvent.instances and FakeEvent.instances[0].is_set() is True
|
||||
|
||||
|
||||
def test_main_energy_saving_disconnect(monkeypatch):
|
||||
"""Energy saving mode disconnects and sleeps when deadlines expire."""
|
||||
|
||||
_configure_common_defaults(monkeypatch, energy_saving=True)
|
||||
monkeypatch.setattr(
|
||||
daemon,
|
||||
"threading",
|
||||
types.SimpleNamespace(
|
||||
Event=AutoSetEvent,
|
||||
current_thread=threading.current_thread,
|
||||
main_thread=threading.main_thread,
|
||||
),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon, "pub", types.SimpleNamespace(subscribe=lambda *_args, **_kwargs: None)
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon.interfaces,
|
||||
"_create_serial_interface",
|
||||
lambda candidate: (DummyInterface(), candidate),
|
||||
)
|
||||
monkeypatch.setattr(daemon.interfaces, "_ensure_radio_metadata", lambda iface: None)
|
||||
monkeypatch.setattr(
|
||||
daemon.interfaces, "_ensure_channel_metadata", lambda iface: None
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon.interfaces, "_extract_host_node_id", lambda iface: "!host"
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon.handlers, "register_host_node_id", lambda *_args, **_kwargs: None
|
||||
)
|
||||
monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: "!host")
|
||||
monkeypatch.setattr(daemon.handlers, "upsert_node", lambda *_args, **_kwargs: None)
|
||||
monkeypatch.setattr(daemon.handlers, "last_packet_monotonic", lambda: None)
|
||||
monkeypatch.setattr(
|
||||
daemon.ingestors, "set_ingestor_node_id", lambda *_args, **_kwargs: None
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon.ingestors, "queue_ingestor_heartbeat", lambda *_args, **_kwargs: True
|
||||
)
|
||||
|
||||
daemon.main()
|
||||
assert FakeEvent.instances and FakeEvent.instances[0].is_set() is True
|
||||
|
||||
|
||||
def test_main_inactivity_reconnect(monkeypatch):
|
||||
"""Inactivity triggers reconnect attempts and respects stop events."""
|
||||
|
||||
_configure_common_defaults(monkeypatch, inactivity=0.5)
|
||||
monkeypatch.setattr(
|
||||
daemon,
|
||||
"threading",
|
||||
types.SimpleNamespace(
|
||||
Event=AutoSetEvent,
|
||||
current_thread=threading.current_thread,
|
||||
main_thread=threading.main_thread,
|
||||
),
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon, "pub", types.SimpleNamespace(subscribe=lambda *_args, **_kwargs: None)
|
||||
)
|
||||
|
||||
interface_cycle = iter(
|
||||
[DummyInterface(is_connected=False), DummyInterface(is_connected=True)]
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon.interfaces,
|
||||
"_create_serial_interface",
|
||||
lambda candidate: (next(interface_cycle), candidate),
|
||||
)
|
||||
monkeypatch.setattr(daemon.interfaces, "_ensure_radio_metadata", lambda iface: None)
|
||||
monkeypatch.setattr(
|
||||
daemon.interfaces, "_ensure_channel_metadata", lambda iface: None
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon.interfaces, "_extract_host_node_id", lambda iface: "!host"
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon.handlers, "register_host_node_id", lambda *_args, **_kwargs: None
|
||||
)
|
||||
monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: "!host")
|
||||
monkeypatch.setattr(daemon.handlers, "upsert_node", lambda *_args, **_kwargs: None)
|
||||
|
||||
monotonic_calls = iter([0.0, 1.0, 2.0, 3.0, 4.0])
|
||||
monkeypatch.setattr(daemon.time, "monotonic", lambda: next(monotonic_calls))
|
||||
monkeypatch.setattr(daemon.handlers, "last_packet_monotonic", lambda: 0.0)
|
||||
monkeypatch.setattr(
|
||||
daemon.ingestors, "set_ingestor_node_id", lambda *_args, **_kwargs: None
|
||||
)
|
||||
monkeypatch.setattr(
|
||||
daemon.ingestors, "queue_ingestor_heartbeat", lambda *_args, **_kwargs: True
|
||||
)
|
||||
|
||||
daemon.main()
|
||||
assert any(event.is_set() for event in FakeEvent.instances)
|
||||
@@ -0,0 +1,183 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import io
|
||||
import json
|
||||
import sys
|
||||
|
||||
from meshtastic.protobuf import mesh_pb2
|
||||
from meshtastic.protobuf import telemetry_pb2
|
||||
|
||||
from data.mesh_ingestor import decode_payload
|
||||
|
||||
|
||||
def run_main_with_input(payload: dict) -> tuple[int, dict]:
|
||||
stdin = io.StringIO(json.dumps(payload))
|
||||
stdout = io.StringIO()
|
||||
original_stdin = sys.stdin
|
||||
original_stdout = sys.stdout
|
||||
try:
|
||||
sys.stdin = stdin
|
||||
sys.stdout = stdout
|
||||
status = decode_payload.main()
|
||||
finally:
|
||||
sys.stdin = original_stdin
|
||||
sys.stdout = original_stdout
|
||||
|
||||
output = json.loads(stdout.getvalue() or "{}")
|
||||
return status, output
|
||||
|
||||
|
||||
def test_decode_payload_position_success():
|
||||
position = mesh_pb2.Position()
|
||||
position.latitude_i = 525598720
|
||||
position.longitude_i = 136577024
|
||||
position.altitude = 11
|
||||
position.precision_bits = 13
|
||||
payload_b64 = base64.b64encode(position.SerializeToString()).decode("ascii")
|
||||
|
||||
result = decode_payload._decode_payload(3, payload_b64)
|
||||
|
||||
assert result["type"] == "POSITION_APP"
|
||||
assert result["payload"]["latitude_i"] == 525598720
|
||||
assert result["payload"]["longitude_i"] == 136577024
|
||||
assert result["payload"]["altitude"] == 11
|
||||
|
||||
|
||||
def test_decode_payload_rejects_invalid_payload():
|
||||
result = decode_payload._decode_payload(3, "not-base64")
|
||||
|
||||
assert result["error"].startswith("invalid-payload")
|
||||
assert "invalid-payload" in result["error"]
|
||||
|
||||
|
||||
def test_decode_payload_rejects_unsupported_port():
|
||||
result = decode_payload._decode_payload(
|
||||
999, base64.b64encode(b"ok").decode("ascii")
|
||||
)
|
||||
|
||||
assert result["error"] == "unsupported-port"
|
||||
assert result["portnum"] == 999
|
||||
|
||||
|
||||
def test_main_handles_invalid_json():
|
||||
stdin = io.StringIO("nope")
|
||||
stdout = io.StringIO()
|
||||
original_stdin = sys.stdin
|
||||
original_stdout = sys.stdout
|
||||
try:
|
||||
sys.stdin = stdin
|
||||
sys.stdout = stdout
|
||||
status = decode_payload.main()
|
||||
finally:
|
||||
sys.stdin = original_stdin
|
||||
sys.stdout = original_stdout
|
||||
|
||||
result = json.loads(stdout.getvalue())
|
||||
assert status == 1
|
||||
assert result["error"].startswith("invalid-json")
|
||||
|
||||
|
||||
def test_main_requires_portnum():
|
||||
status, result = run_main_with_input(
|
||||
{"payload_b64": base64.b64encode(b"ok").decode("ascii")}
|
||||
)
|
||||
|
||||
assert status == 1
|
||||
assert result["error"] == "missing-portnum"
|
||||
|
||||
|
||||
def test_main_requires_integer_portnum():
|
||||
status, result = run_main_with_input(
|
||||
{"portnum": "3", "payload_b64": base64.b64encode(b"ok").decode("ascii")}
|
||||
)
|
||||
|
||||
assert status == 1
|
||||
assert result["error"] == "missing-portnum"
|
||||
|
||||
|
||||
def test_main_requires_payload():
|
||||
status, result = run_main_with_input({"portnum": 3})
|
||||
|
||||
assert status == 1
|
||||
assert result["error"] == "missing-payload"
|
||||
|
||||
|
||||
def test_main_requires_string_payload():
|
||||
status, result = run_main_with_input({"portnum": 3, "payload_b64": 123})
|
||||
|
||||
assert status == 1
|
||||
assert result["error"] == "missing-payload"
|
||||
|
||||
|
||||
def test_main_success_position_payload():
|
||||
position = mesh_pb2.Position()
|
||||
position.latitude_i = 525598720
|
||||
position.longitude_i = 136577024
|
||||
payload_b64 = base64.b64encode(position.SerializeToString()).decode("ascii")
|
||||
|
||||
status, result = run_main_with_input({"portnum": 3, "payload_b64": payload_b64})
|
||||
|
||||
assert status == 0
|
||||
assert result["type"] == "POSITION_APP"
|
||||
assert result["payload"]["latitude_i"] == 525598720
|
||||
|
||||
|
||||
def test_decode_payload_handles_parse_failure():
|
||||
class BrokenMessage:
|
||||
def ParseFromString(self, _payload):
|
||||
raise ValueError("boom")
|
||||
|
||||
decode_payload.PORTNUM_MAP[99] = ("BROKEN", BrokenMessage)
|
||||
payload_b64 = base64.b64encode(b"\x00").decode("ascii")
|
||||
|
||||
result = decode_payload._decode_payload(99, payload_b64)
|
||||
|
||||
assert result["error"].startswith("decode-failed")
|
||||
assert result["type"] == "BROKEN"
|
||||
decode_payload.PORTNUM_MAP.pop(99, None)
|
||||
|
||||
|
||||
def test_main_entrypoint_executes():
|
||||
import runpy
|
||||
|
||||
payload = {"portnum": 3, "payload_b64": base64.b64encode(b"").decode("ascii")}
|
||||
stdin = io.StringIO(json.dumps(payload))
|
||||
stdout = io.StringIO()
|
||||
original_stdin = sys.stdin
|
||||
original_stdout = sys.stdout
|
||||
try:
|
||||
sys.stdin = stdin
|
||||
sys.stdout = stdout
|
||||
try:
|
||||
runpy.run_module("data.mesh_ingestor.decode_payload", run_name="__main__")
|
||||
except SystemExit as exc:
|
||||
assert exc.code == 0
|
||||
finally:
|
||||
sys.stdin = original_stdin
|
||||
sys.stdout = original_stdout
|
||||
|
||||
|
||||
def test_decode_payload_telemetry_success():
|
||||
telemetry = telemetry_pb2.Telemetry()
|
||||
telemetry.time = 123
|
||||
payload_b64 = base64.b64encode(telemetry.SerializeToString()).decode("ascii")
|
||||
|
||||
result = decode_payload._decode_payload(67, payload_b64)
|
||||
|
||||
assert result["type"] == "TELEMETRY_APP"
|
||||
assert result["payload"]["time"] == 123
|
||||
@@ -20,6 +20,7 @@ import re
|
||||
import sys
|
||||
import threading
|
||||
import types
|
||||
import time
|
||||
|
||||
"""End-to-end tests covering the mesh ingestion package."""
|
||||
|
||||
@@ -214,6 +215,9 @@ def mesh_module(monkeypatch):
|
||||
if attr in module.__dict__:
|
||||
delattr(module, attr)
|
||||
module.channels._reset_channel_cache()
|
||||
module.ingestors.STATE.start_time = int(time.time())
|
||||
module.ingestors.STATE.last_heartbeat = None
|
||||
module.ingestors.STATE.node_id = None
|
||||
|
||||
yield module
|
||||
|
||||
@@ -223,6 +227,117 @@ def mesh_module(monkeypatch):
|
||||
sys.modules.pop(module_name, None)
|
||||
|
||||
|
||||
def test_instance_domain_prefers_primary_env(mesh_module, monkeypatch):
|
||||
"""Ensure the ingestor prefers ``INSTANCE_DOMAIN`` over the legacy variable."""
|
||||
|
||||
monkeypatch.setenv("INSTANCE_DOMAIN", "https://new.example")
|
||||
monkeypatch.setenv("POTATOMESH_INSTANCE", "https://legacy.example")
|
||||
|
||||
try:
|
||||
refreshed_instance = mesh_module.config._resolve_instance_domain()
|
||||
mesh_module.config.INSTANCE = refreshed_instance
|
||||
mesh_module.INSTANCE = refreshed_instance
|
||||
|
||||
assert refreshed_instance == "https://new.example"
|
||||
assert mesh_module.INSTANCE == "https://new.example"
|
||||
finally:
|
||||
monkeypatch.delenv("INSTANCE_DOMAIN", raising=False)
|
||||
monkeypatch.delenv("POTATOMESH_INSTANCE", raising=False)
|
||||
mesh_module.config.INSTANCE = mesh_module.config._resolve_instance_domain()
|
||||
mesh_module.INSTANCE = mesh_module.config.INSTANCE
|
||||
|
||||
|
||||
def test_instance_domain_falls_back_to_legacy(mesh_module, monkeypatch):
|
||||
"""Verify ``POTATOMESH_INSTANCE`` is used when ``INSTANCE_DOMAIN`` is unset."""
|
||||
|
||||
monkeypatch.delenv("INSTANCE_DOMAIN", raising=False)
|
||||
monkeypatch.setenv("POTATOMESH_INSTANCE", "https://legacy-only.example")
|
||||
|
||||
try:
|
||||
refreshed_instance = mesh_module.config._resolve_instance_domain()
|
||||
mesh_module.config.INSTANCE = refreshed_instance
|
||||
mesh_module.INSTANCE = refreshed_instance
|
||||
|
||||
assert refreshed_instance == "https://legacy-only.example"
|
||||
assert mesh_module.INSTANCE == "https://legacy-only.example"
|
||||
finally:
|
||||
monkeypatch.delenv("POTATOMESH_INSTANCE", raising=False)
|
||||
mesh_module.config.INSTANCE = mesh_module.config._resolve_instance_domain()
|
||||
mesh_module.INSTANCE = mesh_module.config.INSTANCE
|
||||
|
||||
|
||||
def test_instance_domain_infers_scheme_for_hostnames(mesh_module, monkeypatch):
|
||||
"""Ensure bare hostnames are promoted to HTTPS URLs for ingestion."""
|
||||
|
||||
monkeypatch.setenv("INSTANCE_DOMAIN", "mesh.example.org")
|
||||
monkeypatch.delenv("POTATOMESH_INSTANCE", raising=False)
|
||||
|
||||
try:
|
||||
refreshed_instance = mesh_module.config._resolve_instance_domain()
|
||||
mesh_module.config.INSTANCE = refreshed_instance
|
||||
mesh_module.INSTANCE = refreshed_instance
|
||||
|
||||
assert refreshed_instance == "https://mesh.example.org"
|
||||
assert mesh_module.INSTANCE == "https://mesh.example.org"
|
||||
finally:
|
||||
monkeypatch.delenv("INSTANCE_DOMAIN", raising=False)
|
||||
mesh_module.config.INSTANCE = mesh_module.config._resolve_instance_domain()
|
||||
mesh_module.INSTANCE = mesh_module.config.INSTANCE
|
||||
|
||||
|
||||
def test_parse_channel_names_applies_allowlist(mesh_module):
|
||||
"""Ensure allowlists reuse the shared channel parser."""
|
||||
|
||||
mesh = mesh_module
|
||||
previous_allowed = mesh.ALLOWED_CHANNELS
|
||||
|
||||
try:
|
||||
parsed = mesh.config._parse_channel_names(" Primary ,Chat ,primary , Ops ")
|
||||
mesh.ALLOWED_CHANNELS = parsed
|
||||
|
||||
assert parsed == ("Primary", "Chat", "Ops")
|
||||
assert mesh.channels.allowed_channel_names() == ("Primary", "Chat", "Ops")
|
||||
assert mesh.channels.is_allowed_channel("chat")
|
||||
assert mesh.channels.is_allowed_channel(" ops ")
|
||||
assert not mesh.channels.is_allowed_channel("unknown")
|
||||
assert not mesh.channels.is_allowed_channel(None)
|
||||
assert mesh.config._parse_channel_names("") == ()
|
||||
finally:
|
||||
mesh.ALLOWED_CHANNELS = previous_allowed
|
||||
|
||||
|
||||
def test_allowed_channel_defaults_allow_all(mesh_module):
|
||||
"""Ensure unset allowlists do not block any channels."""
|
||||
|
||||
mesh = mesh_module
|
||||
previous_allowed = mesh.ALLOWED_CHANNELS
|
||||
|
||||
try:
|
||||
mesh.ALLOWED_CHANNELS = ()
|
||||
assert mesh.channels.is_allowed_channel("Any")
|
||||
finally:
|
||||
mesh.ALLOWED_CHANNELS = previous_allowed
|
||||
|
||||
|
||||
def test_parse_hidden_channels_deduplicates_names(mesh_module):
|
||||
"""Ensure hidden channel parsing strips blanks and deduplicates."""
|
||||
|
||||
mesh = mesh_module
|
||||
previous_hidden = mesh.HIDDEN_CHANNELS
|
||||
|
||||
try:
|
||||
parsed = mesh.config._parse_hidden_channels(" Chat , ,Secret ,chat")
|
||||
mesh.HIDDEN_CHANNELS = parsed
|
||||
|
||||
assert parsed == ("Chat", "Secret")
|
||||
assert mesh.channels.hidden_channel_names() == ("Chat", "Secret")
|
||||
assert mesh.channels.is_hidden_channel(" chat ")
|
||||
assert not mesh.channels.is_hidden_channel("unknown")
|
||||
assert mesh.config._parse_hidden_channels("") == ()
|
||||
finally:
|
||||
mesh.HIDDEN_CHANNELS = previous_hidden
|
||||
|
||||
|
||||
def test_subscribe_receive_topics_covers_all_handlers(mesh_module, monkeypatch):
|
||||
mesh = mesh_module
|
||||
daemon_mod = sys.modules["data.mesh_ingestor.daemon"]
|
||||
@@ -1814,6 +1929,110 @@ def test_store_packet_dict_allows_primary_channel_broadcast(mesh_module, monkeyp
|
||||
assert priority == mesh._MESSAGE_POST_PRIORITY
|
||||
|
||||
|
||||
def test_store_packet_dict_accepts_routing_app_messages(mesh_module, monkeypatch):
|
||||
"""Ensure routing app payloads are treated as message posts."""
|
||||
|
||||
mesh = mesh_module
|
||||
captured = []
|
||||
monkeypatch.setattr(
|
||||
mesh,
|
||||
"_queue_post_json",
|
||||
lambda path, payload, *, priority: captured.append((path, payload, priority)),
|
||||
)
|
||||
|
||||
packet = {
|
||||
"id": 333,
|
||||
"rxTime": 999,
|
||||
"fromId": "!node",
|
||||
"toId": "^all",
|
||||
"channel": 0,
|
||||
"decoded": {"payload": "GAA=", "portnum": "ROUTING_APP"},
|
||||
}
|
||||
|
||||
mesh.store_packet_dict(packet)
|
||||
|
||||
assert captured, "Expected routing packet to be stored"
|
||||
path, payload, priority = captured[0]
|
||||
assert path == "/api/messages"
|
||||
assert payload["portnum"] == "ROUTING_APP"
|
||||
assert payload["text"] == "GAA="
|
||||
assert payload["channel"] == 0
|
||||
assert payload["encrypted"] is None
|
||||
assert priority == mesh._MESSAGE_POST_PRIORITY
|
||||
|
||||
|
||||
def test_store_packet_dict_serializes_routing_payloads(mesh_module, monkeypatch):
    """Routing payloads lacking a text field are serialized into the body."""
    mesh = mesh_module
    posts = []

    def record_post(path, payload, *, priority):
        posts.append((path, payload, priority))

    monkeypatch.setattr(mesh, "_queue_post_json", record_post)

    packet = {
        "id": 334,
        "rxTime": 1000,
        "fromId": "!node",
        "toId": "^all",
        "channel": 0,
        "decoded": {"payload": b"\x01\x02", "portnum": "ROUTING_APP"},
    }

    # Raw bytes payloads are base64-encoded into the text field.
    mesh.store_packet_dict(packet)
    assert posts, "Expected routing packet to be stored"
    assert posts[0][1]["text"] == "AQI="
    posts.clear()

    # Mapping payloads are serialized as JSON text.
    packet["decoded"]["payload"] = {"kind": "ack"}
    mesh.store_packet_dict(packet)
    assert posts, "Expected routing packet to be stored"
    assert posts[0][1]["text"] == '{"kind": "ack"}'
    posts.clear()

    # Numeric portnums with routing metadata are treated the same way.
    packet["decoded"]["portnum"] = 7
    packet["decoded"]["payload"] = b"\x00"
    packet["decoded"]["routing"] = {"errorReason": "NONE"}
    mesh.store_packet_dict(packet)
    assert posts, "Expected numeric routing packet to be stored"
    assert posts[0][1]["text"] == "AA=="
|
||||
|
||||
|
||||
def test_portnum_candidates_reads_enum_values(mesh_module, monkeypatch):
    """Portnum candidates should combine enum lookups with module constants."""
    mesh = mesh_module

    class StubPortNum:
        @staticmethod
        def Value(name):
            if name != "ROUTING_APP":
                raise KeyError(name)
            return 7

    # Both PortNum.Value (7) and the module constant (8) must surface.
    stub_module = types.SimpleNamespace(PortNum=StubPortNum, ROUTING_APP=8)
    monkeypatch.setitem(sys.modules, "meshtastic.portnums_pb2", stub_module)

    candidates = mesh.handlers._portnum_candidates("ROUTING_APP")

    assert 7 in candidates
    assert 8 in candidates
|
||||
|
||||
|
||||
def test_store_packet_dict_appends_channel_name(mesh_module, monkeypatch, capsys):
|
||||
mesh = mesh_module
|
||||
mesh.channels._reset_channel_cache()
|
||||
@@ -1874,6 +2093,146 @@ def test_store_packet_dict_appends_channel_name(mesh_module, monkeypatch, capsys
|
||||
assert "channel_display='Chat'" in log_output
|
||||
|
||||
|
||||
def test_store_packet_dict_skips_hidden_channel(mesh_module, monkeypatch, capsys):
    """Packets on a hidden channel are dropped and recorded as ignored."""
    mesh = mesh_module
    mesh.channels._reset_channel_cache()
    mesh.config.MODEM_PRESET = None

    # Minimal interface stub: a primary channel plus a secondary "Chat"
    # channel at index 5, matching the packet's channel field below.
    class DummyInterface:
        def __init__(self) -> None:
            self.localNode = SimpleNamespace(
                channels=[
                    SimpleNamespace(
                        role=1,
                        settings=SimpleNamespace(name="Primary"),
                    ),
                    SimpleNamespace(
                        role=2,
                        index=5,
                        settings=SimpleNamespace(name="Chat"),
                    ),
                ]
            )

        def waitForConfig(self):
            return None

    mesh.channels.capture_from_interface(DummyInterface())
    # Drain output emitted during channel capture so the later assertion
    # only sees the hidden-channel log line.
    capsys.readouterr()

    captured: list[tuple[str, dict, int]] = []
    ignored: list[str] = []
    monkeypatch.setattr(
        mesh,
        "_queue_post_json",
        lambda path, payload, *, priority: captured.append((path, payload, priority)),
    )
    monkeypatch.setattr(
        mesh.handlers,
        "_record_ignored_packet",
        lambda packet, *, reason: ignored.append(reason),
    )

    # Save module-level configuration so it can be restored in finally.
    previous_debug = mesh.config.DEBUG
    previous_hidden = mesh.HIDDEN_CHANNELS
    previous_allowed = mesh.ALLOWED_CHANNELS
    mesh.config.DEBUG = True
    mesh.DEBUG = True
    # "Chat" is both allowed and hidden: hidden must win.
    mesh.ALLOWED_CHANNELS = ("Chat",)
    mesh.HIDDEN_CHANNELS = ("Chat",)

    try:
        packet = {
            "id": "999",
            "rxTime": 24_680,
            "from": "!sender",
            "to": "^all",
            "channel": 5,
            "decoded": {"text": "hidden msg", "portnum": 1},
        }

        mesh.store_packet_dict(packet)

        # Nothing queued, the drop reason was recorded, and the debug log
        # mentioned the hidden channel.
        assert captured == []
        assert ignored == ["hidden-channel"]
        assert "Ignored packet on hidden channel" in capsys.readouterr().out
    finally:
        mesh.HIDDEN_CHANNELS = previous_hidden
        mesh.ALLOWED_CHANNELS = previous_allowed
        mesh.config.DEBUG = previous_debug
        mesh.DEBUG = previous_debug
|
||||
|
||||
|
||||
def test_store_packet_dict_skips_disallowed_channel(mesh_module, monkeypatch, capsys):
    """Packets on a channel outside the allow-list are dropped and recorded."""
    mesh = mesh_module
    mesh.channels._reset_channel_cache()
    mesh.config.MODEM_PRESET = None

    # Minimal interface stub: a primary channel plus a secondary "Chat"
    # channel at index 5, matching the packet's channel field below.
    class DummyInterface:
        def __init__(self) -> None:
            self.localNode = SimpleNamespace(
                channels=[
                    SimpleNamespace(
                        role=1,
                        settings=SimpleNamespace(name="Primary"),
                    ),
                    SimpleNamespace(
                        role=2,
                        index=5,
                        settings=SimpleNamespace(name="Chat"),
                    ),
                ]
            )

        def waitForConfig(self):
            return None

    mesh.channels.capture_from_interface(DummyInterface())
    # Drain output emitted during channel capture so the later assertion
    # only sees the disallowed-channel log line.
    capsys.readouterr()

    captured: list[tuple[str, dict, int]] = []
    ignored: list[str] = []
    monkeypatch.setattr(
        mesh,
        "_queue_post_json",
        lambda path, payload, *, priority: captured.append((path, payload, priority)),
    )
    monkeypatch.setattr(
        mesh.handlers,
        "_record_ignored_packet",
        lambda packet, *, reason: ignored.append(reason),
    )

    # Save module-level configuration so it can be restored in finally.
    previous_debug = mesh.config.DEBUG
    previous_allowed = mesh.ALLOWED_CHANNELS
    previous_hidden = mesh.HIDDEN_CHANNELS
    mesh.config.DEBUG = True
    mesh.DEBUG = True
    # Only "Primary" is allowed; the packet arrives on "Chat" (index 5).
    mesh.ALLOWED_CHANNELS = ("Primary",)
    mesh.HIDDEN_CHANNELS = ()

    try:
        packet = {
            "id": "1001",
            "rxTime": 25_680,
            "from": "!sender",
            "to": "^all",
            "channel": 5,
            "decoded": {"text": "disallowed msg", "portnum": 1},
        }

        mesh.store_packet_dict(packet)

        # Nothing queued, the drop reason was recorded, and the debug log
        # mentioned the disallowed channel.
        assert captured == []
        assert ignored == ["disallowed-channel"]
        assert "Ignored packet on disallowed channel" in capsys.readouterr().out
    finally:
        mesh.ALLOWED_CHANNELS = previous_allowed
        mesh.HIDDEN_CHANNELS = previous_hidden
        mesh.config.DEBUG = previous_debug
        mesh.DEBUG = previous_debug
|
||||
|
||||
|
||||
def test_store_packet_dict_includes_encrypted_payload(mesh_module, monkeypatch):
|
||||
mesh = mesh_module
|
||||
captured = []
|
||||
@@ -2385,6 +2744,62 @@ def test_parse_ble_target_rejects_invalid_values(mesh_module):
|
||||
assert mesh._parse_ble_target("zz:zz:zz:zz:zz:zz") is None
|
||||
|
||||
|
||||
def test_parse_ble_target_accepts_mac_addresses(mesh_module):
    """_parse_ble_target accepts MAC addresses (Linux/Windows) and
    normalizes them to uppercase."""
    mesh = mesh_module

    accepted = {
        "ED:4D:9E:95:CF:60": "ED:4D:9E:95:CF:60",
        "ed:4d:9e:95:cf:60": "ED:4D:9E:95:CF:60",
        "AA:BB:CC:DD:EE:FF": "AA:BB:CC:DD:EE:FF",
        "00:11:22:33:44:55": "00:11:22:33:44:55",
        " ED:4D:9E:95:CF:60 ": "ED:4D:9E:95:CF:60",  # surrounding whitespace
    }
    for raw, normalized in accepted.items():
        assert mesh._parse_ble_target(raw) == normalized

    rejected = [
        "ED:4D:9E:95:CF",        # too short
        "ED:4D:9E:95:CF:60:AB",  # too long
        "GG:HH:II:JJ:KK:LL",     # invalid hex digits
    ]
    for raw in rejected:
        assert mesh._parse_ble_target(raw) is None
|
||||
|
||||
|
||||
def test_parse_ble_target_accepts_uuids(mesh_module):
    """_parse_ble_target accepts BLE UUIDs (macOS) and normalizes them
    to uppercase."""
    mesh = mesh_module

    accepted = {
        "C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E": "C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E",
        "c0aea92f-045e-9b82-c9a6-a1fd822b3a9e": "C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E",
        "12345678-1234-5678-9ABC-DEF012345678": "12345678-1234-5678-9ABC-DEF012345678",
        # Surrounding whitespace is tolerated.
        " C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E ": "C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E",
    }
    for raw, normalized in accepted.items():
        assert mesh._parse_ble_target(raw) == normalized

    rejected = [
        "C0AEA92F-045E-9B82-C9A6",                     # too short
        "C0AEA92F-045E-9B82-C9A6-A1FD822B3A9E-EXTRA",  # too long
        "GGGGGGGG-GGGG-GGGG-GGGG-GGGGGGGGGGGG",        # invalid hex
        "C0AEA92F:045E:9B82:C9A6:A1FD822B3A9E",        # wrong separator
    ]
    for raw in rejected:
        assert mesh._parse_ble_target(raw) is None
|
||||
|
||||
|
||||
def test_parse_network_target_additional_cases(mesh_module):
|
||||
mesh = mesh_module
|
||||
|
||||
@@ -2517,6 +2932,133 @@ def test_queue_post_json_skips_when_active(mesh_module, monkeypatch):
|
||||
mesh._clear_post_queue()
|
||||
|
||||
|
||||
def test_process_ingestor_heartbeat_updates_flag(mesh_module, monkeypatch):
    """A connected interface registers the host node and forces a heartbeat."""
    mesh = mesh_module
    mesh.ingestors.STATE.last_heartbeat = None
    mesh.ingestors.STATE.node_id = None
    mesh.handlers.register_host_node_id(None)

    forced_calls = []

    def stub_heartbeat(*, force):
        forced_calls.append(force)
        return True

    monkeypatch.setattr(mesh.ingestors, "queue_ingestor_heartbeat", stub_heartbeat)

    class FakeInterface:
        def __init__(self):
            self.myNodeNum = 0xCAFEBABE

    result = mesh._process_ingestor_heartbeat(
        FakeInterface(), ingestor_announcement_sent=False
    )

    assert result is True
    # Exactly one heartbeat, forced because the announcement was not sent yet.
    assert forced_calls == [True]
    # The numeric node id is normalized to the lowercase "!hex" form.
    assert mesh.handlers.host_node_id() == "!cafebabe"
|
||||
|
||||
|
||||
def test_process_ingestor_heartbeat_skips_without_host(mesh_module, monkeypatch):
    """With no interface, no heartbeat fires and ingestor state stays untouched."""
    mesh = mesh_module
    mesh.handlers.register_host_node_id(None)
    mesh.ingestors.STATE.node_id = None
    mesh.ingestors.STATE.last_heartbeat = None

    def stub_heartbeat(**_kwargs):
        return False

    monkeypatch.setattr(mesh.ingestors, "queue_ingestor_heartbeat", stub_heartbeat)

    result = mesh._process_ingestor_heartbeat(None, ingestor_announcement_sent=False)

    assert result is False
    assert mesh.ingestors.STATE.node_id is None
    assert mesh.ingestors.STATE.last_heartbeat is None
|
||||
|
||||
|
||||
def test_ingestor_heartbeat_respects_interval_override(mesh_module, monkeypatch):
    """Heartbeats inside the configured interval are throttled."""
    mesh = mesh_module
    state = mesh.ingestors.STATE
    state.start_time = 100
    state.last_heartbeat = 1_000
    state.node_id = "!abcd0001"
    mesh._INGESTOR_HEARTBEAT_SECS = 10_000

    # Only 1 000 seconds have elapsed — well below the 10 000-second interval.
    monkeypatch.setattr(mesh.ingestors.time, "time", lambda: 2_000)

    assert mesh.ingestors.queue_ingestor_heartbeat() is False
    assert state.last_heartbeat == 1_000
|
||||
|
||||
|
||||
def test_setting_ingestor_attr_propagates(mesh_module):
    """Assigning the heartbeat interval on mesh mirrors it onto config."""
    mesh_module._INGESTOR_HEARTBEAT_SECS = 123
    assert mesh_module.config._INGESTOR_HEARTBEAT_SECS == 123
|
||||
|
||||
|
||||
def test_queue_ingestor_heartbeat_requires_node_id(mesh_module, monkeypatch):
    """Without a registered node id no heartbeat is ever enqueued."""
    mesh = mesh_module
    posts = []

    def record_post(path, payload, *, priority, send=None):
        posts.append((path, payload, priority))

    monkeypatch.setattr(mesh.queue, "_queue_post_json", record_post)

    mesh.ingestors.STATE.node_id = None
    mesh.ingestors.STATE.last_heartbeat = None

    # Even a forced heartbeat is refused when the node id is unknown.
    assert mesh.ingestors.queue_ingestor_heartbeat(force=True) is False
    assert posts == []
|
||||
|
||||
|
||||
def test_queue_ingestor_heartbeat_enqueues_and_throttles(mesh_module, monkeypatch):
    """A forced heartbeat is enqueued once; an immediate retry is throttled."""
    mesh = mesh_module
    posts = []

    def record_post(path, payload, *, priority, send=None):
        posts.append((path, payload, priority))

    monkeypatch.setattr(mesh.queue, "_queue_post_json", record_post)

    state = mesh.ingestors.STATE
    state.start_time = 1_700_000_000
    state.last_heartbeat = None
    state.node_id = None
    mesh.config.LORA_FREQ = 915
    mesh.config.MODEM_PRESET = "LongFast"

    mesh.ingestors.set_ingestor_node_id("!CAFEBABE")

    assert mesh.ingestors.queue_ingestor_heartbeat(force=True) is True
    # The second call falls inside the heartbeat interval and is dropped.
    assert mesh.ingestors.queue_ingestor_heartbeat() is False

    assert len(posts) == 1
    path, payload, priority = posts[0]
    assert path == "/api/ingestors"
    # The node id is normalized to lowercase.
    assert payload["node_id"] == "!cafebabe"
    assert payload["start_time"] == 1_700_000_000
    assert payload["last_seen_time"] >= payload["start_time"]
    assert payload["version"] == mesh.VERSION
    assert payload["lora_freq"] == 915
    assert payload["modem_preset"] == "LongFast"
    assert priority == mesh.queue._INGESTOR_POST_PRIORITY
|
||||
|
||||
|
||||
def test_mesh_version_export_matches_package(mesh_module):
    """The mesh module re-exports the package-level version string."""
    import data

    assert mesh_module.VERSION == data.VERSION
|
||||
|
||||
|
||||
def test_node_to_dict_handles_proto_fallback(mesh_module, monkeypatch):
|
||||
mesh = mesh_module
|
||||
|
||||
|
||||
@@ -23,6 +23,9 @@ ENV BUNDLE_FORCE_RUBY_PLATFORM=true
|
||||
# Install build dependencies and SQLite3
|
||||
RUN apk add --no-cache \
|
||||
build-base \
|
||||
python3 \
|
||||
py3-pip \
|
||||
py3-virtualenv \
|
||||
sqlite-dev \
|
||||
linux-headers \
|
||||
pkgconfig
|
||||
@@ -38,11 +41,16 @@ RUN bundle config set --local force_ruby_platform true && \
|
||||
bundle config set --local without 'development test' && \
|
||||
bundle install --jobs=4 --retry=3
|
||||
|
||||
# Install Meshtastic decoder dependencies in a dedicated venv
|
||||
RUN python3 -m venv /opt/meshtastic-venv && \
|
||||
/opt/meshtastic-venv/bin/pip install --no-cache-dir meshtastic protobuf
|
||||
|
||||
# Production stage
|
||||
FROM ruby:3.3-alpine AS production
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apk add --no-cache \
|
||||
python3 \
|
||||
sqlite \
|
||||
tzdata \
|
||||
curl
|
||||
@@ -56,6 +64,7 @@ WORKDIR /app
|
||||
|
||||
# Copy installed gems from builder stage
|
||||
COPY --from=builder /usr/local/bundle /usr/local/bundle
|
||||
COPY --from=builder /opt/meshtastic-venv /opt/meshtastic-venv
|
||||
|
||||
# Copy application code (excluding the Dockerfile which is not required at runtime)
|
||||
COPY --chown=potatomesh:potatomesh web/app.rb ./
|
||||
@@ -70,6 +79,7 @@ COPY --chown=potatomesh:potatomesh web/scripts ./scripts
|
||||
|
||||
# Copy SQL schema files from data directory
|
||||
COPY --chown=potatomesh:potatomesh data/*.sql /data/
|
||||
COPY --chown=potatomesh:potatomesh data/mesh_ingestor/decode_payload.py /app/data/mesh_ingestor/decode_payload.py
|
||||
|
||||
# Create data and configuration directories with correct ownership
|
||||
RUN mkdir -p /app/.local/share/potato-mesh \
|
||||
@@ -85,6 +95,7 @@ EXPOSE 41447
|
||||
# Default environment variables (can be overridden by host)
|
||||
ENV RACK_ENV=production \
|
||||
APP_ENV=production \
|
||||
MESHTASTIC_PYTHON=/opt/meshtastic-venv/bin/python \
|
||||
XDG_DATA_HOME=/app/.local/share \
|
||||
XDG_CONFIG_HOME=/app/.config \
|
||||
SITE_NAME="PotatoMesh Demo" \
|
||||
|
||||
@@ -49,6 +49,12 @@ require_relative "application/worker_pool"
|
||||
require_relative "application/federation"
|
||||
require_relative "application/prometheus"
|
||||
require_relative "application/queries"
|
||||
require_relative "application/meshtastic/channel_names"
|
||||
require_relative "application/meshtastic/channel_hash"
|
||||
require_relative "application/meshtastic/protobuf"
|
||||
require_relative "application/meshtastic/rainbow_table"
|
||||
require_relative "application/meshtastic/cipher"
|
||||
require_relative "application/meshtastic/payload_decoder"
|
||||
require_relative "application/data_processing"
|
||||
require_relative "application/filesystem"
|
||||
require_relative "application/instances"
|
||||
|
||||
@@ -110,11 +110,20 @@ module PotatoMesh
|
||||
["!#{canonical_hex}", parsed, short_id]
|
||||
end
|
||||
|
||||
# Returns true when the node reference designates the mesh broadcast
# address, either as the numeric sentinel 0xFFFFFFFF or as the textual
# "ffffffff" form (with or without a leading "!", any case).
def broadcast_node_ref?(node_ref, fallback_num = nil)
  return true if fallback_num == 0xFFFFFFFF

  candidate = string_or_nil(node_ref)
  return false if candidate.nil?

  candidate.delete_prefix("!").strip.downcase == "ffffffff"
end
|
||||
|
||||
def ensure_unknown_node(db, node_ref, fallback_num = nil, heard_time: nil)
|
||||
parts = canonical_node_parts(node_ref, fallback_num)
|
||||
return unless parts
|
||||
|
||||
node_id, node_num, short_id = parts
|
||||
return if broadcast_node_ref?(node_id, node_num)
|
||||
|
||||
existing = db.get_first_value(
|
||||
"SELECT 1 FROM nodes WHERE node_id = ? LIMIT 1",
|
||||
@@ -151,14 +160,25 @@ module PotatoMesh
|
||||
inserted
|
||||
end
|
||||
|
||||
def touch_node_last_seen(db, node_ref, fallback_num = nil, rx_time: nil, source: nil)
|
||||
def touch_node_last_seen(
|
||||
db,
|
||||
node_ref,
|
||||
fallback_num = nil,
|
||||
rx_time: nil,
|
||||
source: nil,
|
||||
lora_freq: nil,
|
||||
modem_preset: nil
|
||||
)
|
||||
timestamp = coerce_integer(rx_time)
|
||||
return unless timestamp
|
||||
|
||||
node_id = nil
|
||||
|
||||
parts = canonical_node_parts(node_ref, fallback_num)
|
||||
node_id, = parts if parts
|
||||
if parts
|
||||
node_id, node_num = parts
|
||||
return if broadcast_node_ref?(node_id, node_num)
|
||||
end
|
||||
|
||||
unless node_id
|
||||
trimmed = string_or_nil(node_ref)
|
||||
@@ -170,17 +190,22 @@ module PotatoMesh
|
||||
end
|
||||
end
|
||||
|
||||
return if broadcast_node_ref?(node_id, fallback_num)
|
||||
return unless node_id
|
||||
|
||||
lora_freq = coerce_integer(lora_freq)
|
||||
modem_preset = string_or_nil(modem_preset)
|
||||
updated = false
|
||||
with_busy_retry do
|
||||
db.execute <<~SQL, [timestamp, timestamp, timestamp, node_id]
|
||||
db.execute <<~SQL, [timestamp, timestamp, timestamp, lora_freq, modem_preset, node_id]
|
||||
UPDATE nodes
|
||||
SET last_heard = CASE
|
||||
WHEN COALESCE(last_heard, 0) >= ? THEN last_heard
|
||||
ELSE ?
|
||||
END,
|
||||
first_heard = COALESCE(first_heard, ?)
|
||||
first_heard = COALESCE(first_heard, ?),
|
||||
lora_freq = COALESCE(?, lora_freq),
|
||||
modem_preset = COALESCE(?, modem_preset)
|
||||
WHERE node_id = ?
|
||||
SQL
|
||||
updated ||= db.changes.positive?
|
||||
@@ -193,12 +218,74 @@ module PotatoMesh
|
||||
node_id: node_id,
|
||||
timestamp: timestamp,
|
||||
source: source || :unknown,
|
||||
lora_freq: lora_freq,
|
||||
modem_preset: modem_preset,
|
||||
)
|
||||
end
|
||||
|
||||
updated
|
||||
end
|
||||
|
||||
# Insert or update an ingestor heartbeat payload.
#
# Timestamps are clamped to [0, now] and last_seen_time is forced to be
# at least start_time before persisting. The upsert keeps the larger of
# the stored and incoming timestamps and only overwrites version /
# lora_freq / modem_preset when the incoming values are non-NULL.
#
# @param db [SQLite3::Database] open database handle.
# @param payload [Hash] ingestor payload from the collector.
# @return [Boolean] true when persistence succeeded.
def upsert_ingestor(db, payload)
  return false unless payload.is_a?(Hash)

  # Canonicalize the node reference; reject payloads without a usable id.
  parts = canonical_node_parts(payload["node_id"] || payload["id"])
  return false unless parts

  node_id, = parts
  now = Time.now.to_i

  # Accept both snake_case and camelCase field names from collectors.
  start_time = coerce_integer(payload["start_time"] || payload["startTime"]) || now
  last_seen_time =
    coerce_integer(payload["last_seen_time"] || payload["lastSeenTime"]) || start_time

  # Clamp timestamps into the sane range [0, now], then ensure
  # last_seen_time never precedes start_time.
  start_time = 0 if start_time.negative?
  last_seen_time = 0 if last_seen_time.negative?
  start_time = now if start_time > now
  last_seen_time = now if last_seen_time > now
  last_seen_time = start_time if last_seen_time < start_time

  # A version string is mandatory; radio parameters are optional.
  version = string_or_nil(payload["version"] || payload["ingestorVersion"])
  return false unless version
  lora_freq = coerce_integer(payload["lora_freq"])
  modem_preset = string_or_nil(payload["modem_preset"])

  with_busy_retry do
    db.execute <<~SQL, [node_id, start_time, last_seen_time, version, lora_freq, modem_preset]
      INSERT INTO ingestors(node_id, start_time, last_seen_time, version, lora_freq, modem_preset)
      VALUES(?,?,?,?,?,?)
      ON CONFLICT(node_id) DO UPDATE SET
        start_time = CASE
          WHEN excluded.start_time > ingestors.start_time THEN excluded.start_time
          ELSE ingestors.start_time
        END,
        last_seen_time = CASE
          WHEN excluded.last_seen_time > ingestors.last_seen_time THEN excluded.last_seen_time
          ELSE ingestors.last_seen_time
        END,
        version = COALESCE(excluded.version, ingestors.version),
        lora_freq = COALESCE(excluded.lora_freq, ingestors.lora_freq),
        modem_preset = COALESCE(excluded.modem_preset, ingestors.modem_preset)
    SQL
  end

  true
rescue SQLite3::SQLException => e
  # Persistence failures are logged and reported, never raised to callers.
  warn_log(
    "Failed to upsert ingestor record",
    context: "data_processing.ingestors",
    node_id: node_id,
    error_class: e.class.name,
    error_message: e.message,
  )
  false
end
|
||||
|
||||
def upsert_node(db, node_id, n)
|
||||
user = n["user"] || {}
|
||||
met = n["deviceMetrics"] || {}
|
||||
@@ -417,20 +504,37 @@ module PotatoMesh
|
||||
rx_iso ||= Time.at(rx_time).utc.iso8601
|
||||
|
||||
raw_node_id = payload["node_id"] || payload["from_id"] || payload["from"]
|
||||
node_id = string_or_nil(raw_node_id)
|
||||
node_id = "!#{node_id.delete_prefix("!").downcase}" if node_id&.start_with?("!")
|
||||
raw_node_num = coerce_integer(payload["node_num"]) || coerce_integer(payload["num"])
|
||||
node_id ||= format("!%08x", raw_node_num & 0xFFFFFFFF) if node_id.nil? && raw_node_num
|
||||
|
||||
payload_for_num = payload.is_a?(Hash) ? payload.dup : {}
|
||||
payload_for_num["num"] ||= raw_node_num if raw_node_num
|
||||
node_num = resolve_node_num(node_id, payload_for_num)
|
||||
node_num ||= raw_node_num
|
||||
canonical = normalize_node_id(db, node_id || node_num)
|
||||
node_id = canonical if canonical
|
||||
canonical_parts = canonical_node_parts(raw_node_id, raw_node_num)
|
||||
if canonical_parts
|
||||
node_id, node_num, = canonical_parts
|
||||
else
|
||||
node_id = string_or_nil(raw_node_id)
|
||||
node_id = "!#{node_id.delete_prefix("!").downcase}" if node_id&.start_with?("!")
|
||||
node_id ||= format("!%08x", raw_node_num & 0xFFFFFFFF) if node_id.nil? && raw_node_num
|
||||
|
||||
payload_for_num = payload.is_a?(Hash) ? payload.dup : {}
|
||||
payload_for_num["num"] ||= raw_node_num if raw_node_num
|
||||
node_num = resolve_node_num(node_id, payload_for_num)
|
||||
node_num ||= raw_node_num
|
||||
canonical = normalize_node_id(db, node_id || node_num)
|
||||
node_id = canonical if canonical
|
||||
end
|
||||
|
||||
lora_freq = coerce_integer(payload["lora_freq"] || payload["loraFrequency"])
|
||||
modem_preset = string_or_nil(payload["modem_preset"] || payload["modemPreset"])
|
||||
|
||||
ensure_unknown_node(db, node_id || node_num, node_num, heard_time: rx_time)
|
||||
touch_node_last_seen(db, node_id || node_num, node_num, rx_time: rx_time, source: :position)
|
||||
touch_node_last_seen(
|
||||
db,
|
||||
node_id || node_num,
|
||||
node_num,
|
||||
rx_time: rx_time,
|
||||
source: :position,
|
||||
lora_freq: lora_freq,
|
||||
modem_preset: modem_preset,
|
||||
)
|
||||
|
||||
to_id = string_or_nil(payload["to_id"] || payload["to"])
|
||||
|
||||
@@ -674,7 +778,15 @@ module PotatoMesh
|
||||
end
|
||||
end
|
||||
|
||||
def update_node_from_telemetry(db, node_id, node_num, rx_time, metrics = {})
|
||||
def update_node_from_telemetry(
|
||||
db,
|
||||
node_id,
|
||||
node_num,
|
||||
rx_time,
|
||||
metrics = {},
|
||||
lora_freq: nil,
|
||||
modem_preset: nil
|
||||
)
|
||||
num = coerce_integer(node_num)
|
||||
id = string_or_nil(node_id)
|
||||
if id&.start_with?("!")
|
||||
@@ -684,7 +796,15 @@ module PotatoMesh
|
||||
return unless id
|
||||
|
||||
ensure_unknown_node(db, id, num, heard_time: rx_time)
|
||||
touch_node_last_seen(db, id, num, rx_time: rx_time, source: :telemetry)
|
||||
touch_node_last_seen(
|
||||
db,
|
||||
id,
|
||||
num,
|
||||
rx_time: rx_time,
|
||||
source: :telemetry,
|
||||
lora_freq: lora_freq,
|
||||
modem_preset: modem_preset,
|
||||
)
|
||||
|
||||
battery = coerce_float(metrics[:battery_level] || metrics["battery_level"])
|
||||
voltage = coerce_float(metrics[:voltage] || metrics["voltage"])
|
||||
@@ -828,17 +948,23 @@ module PotatoMesh
|
||||
rx_iso ||= Time.at(rx_time).utc.iso8601
|
||||
|
||||
raw_node_id = payload["node_id"] || payload["from_id"] || payload["from"]
|
||||
node_id = string_or_nil(raw_node_id)
|
||||
node_id = "!#{node_id.delete_prefix("!").downcase}" if node_id&.start_with?("!")
|
||||
raw_node_num = coerce_integer(payload["node_num"]) || coerce_integer(payload["num"])
|
||||
|
||||
payload_for_num = payload.dup
|
||||
payload_for_num["num"] ||= raw_node_num if raw_node_num
|
||||
node_num = resolve_node_num(node_id, payload_for_num)
|
||||
node_num ||= raw_node_num
|
||||
canonical_parts = canonical_node_parts(raw_node_id, raw_node_num)
|
||||
if canonical_parts
|
||||
node_id, node_num, = canonical_parts
|
||||
else
|
||||
node_id = string_or_nil(raw_node_id)
|
||||
node_id = "!#{node_id.delete_prefix("!").downcase}" if node_id&.start_with?("!")
|
||||
|
||||
canonical = normalize_node_id(db, node_id || node_num)
|
||||
node_id = canonical if canonical
|
||||
payload_for_num = payload.dup
|
||||
payload_for_num["num"] ||= raw_node_num if raw_node_num
|
||||
node_num = resolve_node_num(node_id, payload_for_num)
|
||||
node_num ||= raw_node_num
|
||||
|
||||
canonical = normalize_node_id(db, node_id || node_num)
|
||||
node_id = canonical if canonical
|
||||
end
|
||||
|
||||
from_id = string_or_nil(payload["from_id"]) || node_id
|
||||
to_id = string_or_nil(payload["to_id"] || payload["to"])
|
||||
@@ -853,6 +979,8 @@ module PotatoMesh
|
||||
rssi = coerce_integer(payload["rssi"])
|
||||
bitfield = coerce_integer(payload["bitfield"])
|
||||
payload_b64 = string_or_nil(payload["payload_b64"] || payload["payload"])
|
||||
lora_freq = coerce_integer(payload["lora_freq"] || payload["loraFrequency"])
|
||||
modem_preset = string_or_nil(payload["modem_preset"] || payload["modemPreset"])
|
||||
|
||||
telemetry_section = normalize_json_object(payload["telemetry"])
|
||||
device_metrics = normalize_json_object(payload["device_metrics"] || payload["deviceMetrics"])
|
||||
@@ -1235,13 +1363,21 @@ module PotatoMesh
|
||||
SQL
|
||||
end
|
||||
|
||||
update_node_from_telemetry(db, node_id, node_num, rx_time, {
|
||||
battery_level: battery_level,
|
||||
voltage: voltage,
|
||||
channel_utilization: channel_utilization,
|
||||
air_util_tx: air_util_tx,
|
||||
uptime_seconds: uptime_seconds,
|
||||
})
|
||||
update_node_from_telemetry(
|
||||
db,
|
||||
node_id,
|
||||
node_num,
|
||||
rx_time,
|
||||
{
|
||||
battery_level: battery_level,
|
||||
voltage: voltage,
|
||||
channel_utilization: channel_utilization,
|
||||
air_util_tx: air_util_tx,
|
||||
uptime_seconds: uptime_seconds,
|
||||
},
|
||||
lora_freq: lora_freq,
|
||||
modem_preset: modem_preset,
|
||||
)
|
||||
end
|
||||
|
||||
# Persist a traceroute observation and its hop path.
|
||||
@@ -1262,7 +1398,7 @@ module PotatoMesh
|
||||
rx_time = now if rx_time.nil? || rx_time > now
|
||||
rx_iso = string_or_nil(payload["rx_iso"]) || Time.at(rx_time).utc.iso8601
|
||||
|
||||
metrics = normalize_json_object(payload["metrics"])
|
||||
metrics = normalize_json_object(payload["metrics"]) || {}
|
||||
src = coerce_integer(payload["src"] || payload["source"] || payload["from"])
|
||||
dest = coerce_integer(payload["dest"] || payload["destination"] || payload["to"])
|
||||
rssi = coerce_integer(payload["rssi"]) || coerce_integer(metrics["rssi"])
|
||||
@@ -1312,6 +1448,59 @@ module PotatoMesh
|
||||
end
|
||||
end
|
||||
|
||||
# Attempt to decrypt an encrypted Meshtastic message payload.
#
# @param message [Hash] message payload supplied by the ingestor.
# @param packet_id [Integer] message packet identifier.
# @param from_id [String, nil] canonical node identifier when available.
# @param from_num [Integer, nil] numeric node identifier when available.
# @param channel_index [Integer, nil] channel hash index.
# @return [Hash, nil] decrypted payload metadata when parsing succeeds.
def decrypt_meshtastic_message(message, packet_id, from_id, from_num, channel_index)
  return nil unless message.is_a?(Hash)

  cipher_b64 = string_or_nil(message["encrypted"])
  return nil unless cipher_b64
  # Skip decryption in test environments unless a PSK is explicitly
  # configured, so specs do not depend on cipher behavior.
  if (ENV["RACK_ENV"] == "test" || ENV["APP_ENV"] == "test" || defined?(RSpec)) &&
      ENV["MESHTASTIC_PSK_B64"].nil?
    return nil
  end

  # The cipher nonce requires the numeric sender id; fall back to
  # deriving it from the canonical node reference.
  node_num = coerce_integer(from_num)
  if node_num.nil?
    parts = canonical_node_parts(from_id)
    node_num = parts[1] if parts
  end
  return nil unless node_num

  psk_b64 = PotatoMesh::Config.meshtastic_psk_b64
  data = PotatoMesh::App::Meshtastic::Cipher.decrypt_data(
    cipher_b64: cipher_b64,
    packet_id: packet_id,
    from_id: from_id,
    from_num: node_num,
    psk_b64: psk_b64,
  )
  return nil unless data

  # Best-effort channel-name recovery from the channel hash index.
  channel_name = nil
  if channel_index.is_a?(Integer)
    candidates = PotatoMesh::App::Meshtastic::RainbowTable.channel_names_for(
      channel_index,
      psk_b64: psk_b64,
    )
    channel_name = candidates.first if candidates.any?
  end

  {
    text: data[:text],
    portnum: data[:portnum],
    payload: data[:payload],
    channel_name: channel_name,
    decryption_confidence: data[:decryption_confidence],
  }
end
|
||||
|
||||
def insert_message(db, message)
|
||||
return unless message.is_a?(Hash)
|
||||
|
||||
@@ -1342,6 +1531,14 @@ module PotatoMesh
|
||||
from_id = canonical_from_id
|
||||
end
|
||||
end
|
||||
if from_id && !from_id.start_with?("^")
|
||||
canonical_parts = canonical_node_parts(from_id, message["from_num"])
|
||||
if canonical_parts && !from_id.start_with?("!")
|
||||
from_id = canonical_parts[0]
|
||||
message["from_num"] ||= canonical_parts[1]
|
||||
end
|
||||
end
|
||||
sender_present = !from_id.nil? || !coerce_integer(message["from_num"]).nil? || !trimmed_from_id.nil?
|
||||
|
||||
raw_to_id = message["to_id"]
|
||||
raw_to_id = message["to"] if raw_to_id.nil? || raw_to_id.to_s.strip.empty?
|
||||
@@ -1355,17 +1552,60 @@ module PotatoMesh
|
||||
to_id = canonical_to_id
|
||||
end
|
||||
end
|
||||
if to_id && !to_id.start_with?("^")
|
||||
canonical_parts = canonical_node_parts(to_id, message["to_num"])
|
||||
if canonical_parts && !to_id.start_with?("!")
|
||||
to_id = canonical_parts[0]
|
||||
message["to_num"] ||= canonical_parts[1]
|
||||
end
|
||||
end
|
||||
|
||||
encrypted = string_or_nil(message["encrypted"])
|
||||
text = message["text"]
|
||||
portnum = message["portnum"]
|
||||
clear_encrypted = false
|
||||
channel_index = coerce_integer(message["channel"] || message["channel_index"] || message["channelIndex"])
|
||||
|
||||
ensure_unknown_node(db, from_id || raw_from_id, message["from_num"], heard_time: rx_time)
|
||||
touch_node_last_seen(
|
||||
db,
|
||||
from_id || raw_from_id || message["from_num"],
|
||||
message["from_num"],
|
||||
rx_time: rx_time,
|
||||
source: :message,
|
||||
)
|
||||
decrypted_payload = nil
|
||||
decrypted_text = nil
|
||||
decrypted_portnum = nil
|
||||
decrypted_flag = false
|
||||
decryption_confidence = nil
|
||||
|
||||
if encrypted && (text.nil? || text.to_s.strip.empty?)
|
||||
decrypted_data = decrypt_meshtastic_message(
|
||||
message,
|
||||
msg_id,
|
||||
from_id,
|
||||
message["from_num"],
|
||||
channel_index,
|
||||
)
|
||||
|
||||
if decrypted_data
|
||||
decrypted_payload = decrypted_data
|
||||
decrypted_portnum = decrypted_data[:portnum]
|
||||
|
||||
if decrypted_data[:text]
|
||||
text = decrypted_data[:text]
|
||||
decrypted_text = text
|
||||
clear_encrypted = true
|
||||
encrypted = nil
|
||||
message["text"] = text
|
||||
message["channel_name"] ||= decrypted_data[:channel_name]
|
||||
decrypted_flag = true
|
||||
decryption_confidence = decrypted_data[:decryption_confidence] || 0.0
|
||||
if portnum.nil? && decrypted_portnum
|
||||
portnum = decrypted_portnum
|
||||
message["portnum"] = portnum
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if encrypted && (text.nil? || text.to_s.strip.empty?)
|
||||
portnum = nil
|
||||
message.delete("portnum")
|
||||
end
|
||||
|
||||
lora_freq = coerce_integer(message["lora_freq"] || message["loraFrequency"])
|
||||
modem_preset = string_or_nil(message["modem_preset"] || message["modemPreset"])
|
||||
@@ -1380,8 +1620,8 @@ module PotatoMesh
|
||||
from_id,
|
||||
to_id,
|
||||
message["channel"],
|
||||
message["portnum"],
|
||||
message["text"],
|
||||
portnum,
|
||||
text,
|
||||
encrypted,
|
||||
message["snr"],
|
||||
message["rssi"],
|
||||
@@ -1391,19 +1631,28 @@ module PotatoMesh
|
||||
channel_name,
|
||||
reply_id,
|
||||
emoji,
|
||||
decrypted_flag ? 1 : 0,
|
||||
decryption_confidence,
|
||||
]
|
||||
|
||||
with_busy_retry do
|
||||
existing = db.get_first_row(
|
||||
"SELECT from_id, to_id, encrypted, lora_freq, modem_preset, channel_name, reply_id, emoji FROM messages WHERE id = ?",
|
||||
"SELECT from_id, to_id, text, encrypted, lora_freq, modem_preset, channel_name, reply_id, emoji, portnum, decrypted, decryption_confidence FROM messages WHERE id = ?",
|
||||
[msg_id],
|
||||
)
|
||||
if existing
|
||||
updates = {}
|
||||
existing_text = existing.is_a?(Hash) ? existing["text"] : existing[2]
|
||||
existing_text_str = existing_text&.to_s
|
||||
existing_has_text = existing_text_str && !existing_text_str.strip.empty?
|
||||
existing_from = existing.is_a?(Hash) ? existing["from_id"] : existing[0]
|
||||
existing_from_str = existing_from&.to_s
|
||||
return if !sender_present && (existing_from_str.nil? || existing_from_str.strip.empty?)
|
||||
existing_encrypted = existing.is_a?(Hash) ? existing["encrypted"] : existing[3]
|
||||
existing_encrypted_str = existing_encrypted&.to_s
|
||||
decrypted_precedence = text && (clear_encrypted || (existing_encrypted_str && !existing_encrypted_str.strip.empty?))
|
||||
|
||||
if from_id
|
||||
existing_from = existing.is_a?(Hash) ? existing["from_id"] : existing[0]
|
||||
existing_from_str = existing_from&.to_s
|
||||
should_update = existing_from_str.nil? || existing_from_str.strip.empty?
|
||||
should_update ||= existing_from != from_id
|
||||
updates["from_id"] = from_id if should_update
|
||||
@@ -1417,21 +1666,53 @@ module PotatoMesh
|
||||
updates["to_id"] = to_id if should_update
|
||||
end
|
||||
|
||||
if encrypted
|
||||
existing_encrypted = existing.is_a?(Hash) ? existing["encrypted"] : existing[2]
|
||||
existing_encrypted_str = existing_encrypted&.to_s
|
||||
if clear_encrypted || (decrypted_precedence && existing_encrypted_str && !existing_encrypted_str.strip.empty?)
|
||||
updates["encrypted"] = nil if existing_encrypted
|
||||
elsif encrypted && !existing_has_text
|
||||
should_update = existing_encrypted_str.nil? || existing_encrypted_str.strip.empty?
|
||||
should_update ||= existing_encrypted != encrypted
|
||||
updates["encrypted"] = encrypted if should_update
|
||||
end
|
||||
|
||||
if text
|
||||
should_update = existing_text_str.nil? || existing_text_str.strip.empty?
|
||||
should_update ||= existing_text != text
|
||||
updates["text"] = text if should_update
|
||||
end
|
||||
|
||||
if decrypted_precedence
|
||||
updates["channel"] = message["channel"] if message.key?("channel")
|
||||
updates["snr"] = message["snr"] if message.key?("snr")
|
||||
updates["rssi"] = message["rssi"] if message.key?("rssi")
|
||||
updates["hop_limit"] = message["hop_limit"] if message.key?("hop_limit")
|
||||
updates["lora_freq"] = lora_freq unless lora_freq.nil?
|
||||
updates["modem_preset"] = modem_preset if modem_preset
|
||||
updates["channel_name"] = channel_name if channel_name
|
||||
updates["rx_time"] = rx_time if rx_time
|
||||
updates["rx_iso"] = rx_iso if rx_iso
|
||||
end
|
||||
|
||||
if clear_encrypted
|
||||
updates["decrypted"] = 1
|
||||
updates["decryption_confidence"] = decryption_confidence
|
||||
end
|
||||
|
||||
if portnum
|
||||
existing_portnum = existing.is_a?(Hash) ? existing["portnum"] : existing[9]
|
||||
existing_portnum_str = existing_portnum&.to_s
|
||||
should_update = existing_portnum_str.nil? || existing_portnum_str.strip.empty?
|
||||
should_update ||= existing_portnum != portnum
|
||||
should_update ||= decrypted_precedence
|
||||
updates["portnum"] = portnum if should_update
|
||||
end
|
||||
|
||||
unless lora_freq.nil?
|
||||
existing_lora = existing.is_a?(Hash) ? existing["lora_freq"] : existing[3]
|
||||
existing_lora = existing.is_a?(Hash) ? existing["lora_freq"] : existing[4]
|
||||
updates["lora_freq"] = lora_freq if existing_lora != lora_freq
|
||||
end
|
||||
|
||||
if modem_preset
|
||||
existing_preset = existing.is_a?(Hash) ? existing["modem_preset"] : existing[4]
|
||||
existing_preset = existing.is_a?(Hash) ? existing["modem_preset"] : existing[5]
|
||||
existing_preset_str = existing_preset&.to_s
|
||||
should_update = existing_preset_str.nil? || existing_preset_str.strip.empty?
|
||||
should_update ||= existing_preset != modem_preset
|
||||
@@ -1439,7 +1720,7 @@ module PotatoMesh
|
||||
end
|
||||
|
||||
if channel_name
|
||||
existing_channel = existing.is_a?(Hash) ? existing["channel_name"] : existing[5]
|
||||
existing_channel = existing.is_a?(Hash) ? existing["channel_name"] : existing[6]
|
||||
existing_channel_str = existing_channel&.to_s
|
||||
should_update = existing_channel_str.nil? || existing_channel_str.strip.empty?
|
||||
should_update ||= existing_channel != channel_name
|
||||
@@ -1447,12 +1728,12 @@ module PotatoMesh
|
||||
end
|
||||
|
||||
unless reply_id.nil?
|
||||
existing_reply = existing.is_a?(Hash) ? existing["reply_id"] : existing[6]
|
||||
existing_reply = existing.is_a?(Hash) ? existing["reply_id"] : existing[7]
|
||||
updates["reply_id"] = reply_id if existing_reply != reply_id
|
||||
end
|
||||
|
||||
if emoji
|
||||
existing_emoji = existing.is_a?(Hash) ? existing["emoji"] : existing[7]
|
||||
existing_emoji = existing.is_a?(Hash) ? existing["emoji"] : existing[8]
|
||||
existing_emoji_str = existing_emoji&.to_s
|
||||
should_update = existing_emoji_str.nil? || existing_emoji_str.strip.empty?
|
||||
should_update ||= existing_emoji != emoji
|
||||
@@ -1468,17 +1749,48 @@ module PotatoMesh
|
||||
|
||||
begin
|
||||
db.execute <<~SQL, row
|
||||
INSERT INTO messages(id,rx_time,rx_iso,from_id,to_id,channel,portnum,text,encrypted,snr,rssi,hop_limit,lora_freq,modem_preset,channel_name,reply_id,emoji)
|
||||
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
|
||||
INSERT INTO messages(id,rx_time,rx_iso,from_id,to_id,channel,portnum,text,encrypted,snr,rssi,hop_limit,lora_freq,modem_preset,channel_name,reply_id,emoji,decrypted,decryption_confidence)
|
||||
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
|
||||
SQL
|
||||
rescue SQLite3::ConstraintException
|
||||
existing_row = db.get_first_row(
|
||||
"SELECT text, encrypted FROM messages WHERE id = ?",
|
||||
[msg_id],
|
||||
)
|
||||
existing_text = existing_row.is_a?(Hash) ? existing_row["text"] : existing_row&.[](0)
|
||||
existing_text_str = existing_text&.to_s
|
||||
allow_encrypted_update = existing_text_str.nil? || existing_text_str.strip.empty?
|
||||
existing_encrypted = existing_row.is_a?(Hash) ? existing_row["encrypted"] : existing_row&.[](1)
|
||||
existing_encrypted_str = existing_encrypted&.to_s
|
||||
decrypted_precedence = text && (clear_encrypted || (existing_encrypted_str && !existing_encrypted_str.strip.empty?))
|
||||
|
||||
fallback_updates = {}
|
||||
fallback_updates["from_id"] = from_id if from_id
|
||||
fallback_updates["to_id"] = to_id if to_id
|
||||
fallback_updates["encrypted"] = encrypted if encrypted
|
||||
fallback_updates["lora_freq"] = lora_freq unless lora_freq.nil?
|
||||
fallback_updates["modem_preset"] = modem_preset if modem_preset
|
||||
fallback_updates["channel_name"] = channel_name if channel_name
|
||||
fallback_updates["text"] = text if text
|
||||
fallback_updates["encrypted"] = encrypted if encrypted && allow_encrypted_update
|
||||
fallback_updates["encrypted"] = nil if clear_encrypted
|
||||
fallback_updates["portnum"] = portnum if portnum
|
||||
if clear_encrypted
|
||||
fallback_updates["decrypted"] = 1
|
||||
fallback_updates["decryption_confidence"] = decryption_confidence
|
||||
end
|
||||
if decrypted_precedence
|
||||
fallback_updates["channel"] = message["channel"] if message.key?("channel")
|
||||
fallback_updates["snr"] = message["snr"] if message.key?("snr")
|
||||
fallback_updates["rssi"] = message["rssi"] if message.key?("rssi")
|
||||
fallback_updates["hop_limit"] = message["hop_limit"] if message.key?("hop_limit")
|
||||
fallback_updates["portnum"] = portnum if portnum
|
||||
fallback_updates["lora_freq"] = lora_freq unless lora_freq.nil?
|
||||
fallback_updates["modem_preset"] = modem_preset if modem_preset
|
||||
fallback_updates["channel_name"] = channel_name if channel_name
|
||||
fallback_updates["rx_time"] = rx_time if rx_time
|
||||
fallback_updates["rx_iso"] = rx_iso if rx_iso
|
||||
else
|
||||
fallback_updates["lora_freq"] = lora_freq unless lora_freq.nil?
|
||||
fallback_updates["modem_preset"] = modem_preset if modem_preset
|
||||
fallback_updates["channel_name"] = channel_name if channel_name
|
||||
end
|
||||
fallback_updates["reply_id"] = reply_id unless reply_id.nil?
|
||||
fallback_updates["emoji"] = emoji if emoji
|
||||
unless fallback_updates.empty?
|
||||
@@ -1488,6 +1800,213 @@ module PotatoMesh
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if clear_encrypted && decrypted_text
|
||||
debug_log(
|
||||
"Stored decrypted text message",
|
||||
context: "data_processing.insert_message",
|
||||
message_id: msg_id,
|
||||
channel: message["channel"],
|
||||
channel_name: message["channel_name"],
|
||||
portnum: portnum,
|
||||
)
|
||||
end
|
||||
|
||||
stored_decrypted = nil
|
||||
if decrypted_payload
|
||||
stored_decrypted = store_decrypted_payload(
|
||||
db,
|
||||
message,
|
||||
msg_id,
|
||||
decrypted_payload,
|
||||
rx_time: rx_time,
|
||||
rx_iso: rx_iso,
|
||||
from_id: from_id,
|
||||
to_id: to_id,
|
||||
channel: message["channel"],
|
||||
portnum: portnum || decrypted_portnum,
|
||||
hop_limit: message["hop_limit"],
|
||||
snr: message["snr"],
|
||||
rssi: message["rssi"],
|
||||
)
|
||||
end
|
||||
|
||||
if stored_decrypted && encrypted
|
||||
with_busy_retry do
|
||||
db.execute("UPDATE messages SET encrypted = NULL WHERE id = ?", [msg_id])
|
||||
end
|
||||
debug_log(
|
||||
"Cleared encrypted payload after decoding",
|
||||
context: "data_processing.insert_message",
|
||||
message_id: msg_id,
|
||||
portnum: portnum || decrypted_portnum,
|
||||
)
|
||||
end
|
||||
|
||||
should_touch_message = !stored_decrypted || decrypted_text
|
||||
if should_touch_message
|
||||
ensure_unknown_node(db, from_id || raw_from_id, message["from_num"], heard_time: rx_time)
|
||||
touch_node_last_seen(
|
||||
db,
|
||||
from_id || raw_from_id || message["from_num"],
|
||||
message["from_num"],
|
||||
rx_time: rx_time,
|
||||
source: :message,
|
||||
lora_freq: lora_freq,
|
||||
modem_preset: modem_preset,
|
||||
)
|
||||
|
||||
ensure_unknown_node(db, to_id || raw_to_id, message["to_num"], heard_time: rx_time) if to_id || raw_to_id
|
||||
if to_id || raw_to_id || message.key?("to_num")
|
||||
touch_node_last_seen(
|
||||
db,
|
||||
to_id || raw_to_id || message["to_num"],
|
||||
message["to_num"],
|
||||
rx_time: rx_time,
|
||||
source: :message,
|
||||
lora_freq: lora_freq,
|
||||
modem_preset: modem_preset,
|
||||
)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Decode and store decrypted payloads in domain-specific tables.
|
||||
#
|
||||
# @param db [SQLite3::Database] open database handle.
|
||||
# @param message [Hash] original message payload.
|
||||
# @param packet_id [Integer] packet identifier for the message.
|
||||
# @param decrypted [Hash] decrypted payload metadata.
|
||||
# @param rx_time [Integer] receive time.
|
||||
# @param rx_iso [String] ISO 8601 receive timestamp.
|
||||
# @param from_id [String, nil] canonical sender identifier.
|
||||
# @param to_id [String, nil] destination identifier.
|
||||
# @param channel [Integer, nil] channel index.
|
||||
# @param portnum [Object, nil] port number identifier.
|
||||
# @param hop_limit [Integer, nil] hop limit value.
|
||||
# @param snr [Numeric, nil] signal-to-noise ratio.
|
||||
# @param rssi [Integer, nil] RSSI value.
|
||||
# @return [void]
|
||||
def store_decrypted_payload(
|
||||
db,
|
||||
message,
|
||||
packet_id,
|
||||
decrypted,
|
||||
rx_time:,
|
||||
rx_iso:,
|
||||
from_id:,
|
||||
to_id:,
|
||||
channel:,
|
||||
portnum:,
|
||||
hop_limit:,
|
||||
snr:,
|
||||
rssi:
|
||||
)
|
||||
payload_bytes = decrypted[:payload]
|
||||
return false unless payload_bytes
|
||||
|
||||
portnum_value = coerce_integer(portnum || decrypted[:portnum])
|
||||
return false unless portnum_value
|
||||
|
||||
payload_b64 = Base64.strict_encode64(payload_bytes)
|
||||
supported_ports = [3, 67, 70, 71]
|
||||
return false unless supported_ports.include?(portnum_value)
|
||||
|
||||
decoded = PotatoMesh::App::Meshtastic::PayloadDecoder.decode(
|
||||
portnum: portnum_value,
|
||||
payload_b64: payload_b64,
|
||||
)
|
||||
return false unless decoded.is_a?(Hash)
|
||||
return false unless decoded["payload"].is_a?(Hash)
|
||||
|
||||
common_payload = {
|
||||
"id" => packet_id,
|
||||
"packet_id" => packet_id,
|
||||
"rx_time" => rx_time,
|
||||
"rx_iso" => rx_iso,
|
||||
"from_id" => from_id,
|
||||
"to_id" => to_id,
|
||||
"channel" => channel,
|
||||
"portnum" => portnum_value.to_s,
|
||||
"hop_limit" => hop_limit,
|
||||
"snr" => snr,
|
||||
"rssi" => rssi,
|
||||
"lora_freq" => coerce_integer(message["lora_freq"] || message["loraFrequency"]),
|
||||
"modem_preset" => string_or_nil(message["modem_preset"] || message["modemPreset"]),
|
||||
"payload_b64" => payload_b64,
|
||||
}
|
||||
|
||||
case decoded["type"]
|
||||
when "POSITION_APP"
|
||||
payload = common_payload.merge("position" => decoded["payload"])
|
||||
insert_position(db, payload)
|
||||
debug_log(
|
||||
"Stored decrypted position payload",
|
||||
context: "data_processing.store_decrypted_payload",
|
||||
message_id: packet_id,
|
||||
portnum: portnum_value,
|
||||
)
|
||||
true
|
||||
when "TELEMETRY_APP"
|
||||
payload = common_payload.merge("telemetry" => decoded["payload"])
|
||||
insert_telemetry(db, payload)
|
||||
debug_log(
|
||||
"Stored decrypted telemetry payload",
|
||||
context: "data_processing.store_decrypted_payload",
|
||||
message_id: packet_id,
|
||||
portnum: portnum_value,
|
||||
)
|
||||
true
|
||||
when "NEIGHBORINFO_APP"
|
||||
neighbor_payload = decoded["payload"]
|
||||
neighbors = neighbor_payload["neighbors"]
|
||||
neighbors = [] unless neighbors.is_a?(Array)
|
||||
normalized_neighbors = neighbors.map do |neighbor|
|
||||
next unless neighbor.is_a?(Hash)
|
||||
{
|
||||
"neighbor_id" => neighbor["node_id"] || neighbor["nodeId"] || neighbor["id"],
|
||||
"snr" => neighbor["snr"],
|
||||
"rx_time" => neighbor["last_rx_time"],
|
||||
}.compact
|
||||
end.compact
|
||||
return false if normalized_neighbors.empty?
|
||||
|
||||
payload = common_payload.merge(
|
||||
"node_id" => neighbor_payload["node_id"] || from_id,
|
||||
"neighbors" => normalized_neighbors,
|
||||
"node_broadcast_interval_secs" => neighbor_payload["node_broadcast_interval_secs"],
|
||||
"last_sent_by_id" => neighbor_payload["last_sent_by_id"],
|
||||
)
|
||||
insert_neighbors(db, payload)
|
||||
debug_log(
|
||||
"Stored decrypted neighbor payload",
|
||||
context: "data_processing.store_decrypted_payload",
|
||||
message_id: packet_id,
|
||||
portnum: portnum_value,
|
||||
)
|
||||
true
|
||||
when "TRACEROUTE_APP"
|
||||
route = decoded["payload"]["route"]
|
||||
route_back = decoded["payload"]["route_back"]
|
||||
hops = route.is_a?(Array) ? route : route_back.is_a?(Array) ? route_back : []
|
||||
dest = hops.last if hops.is_a?(Array) && !hops.empty?
|
||||
src_num = coerce_integer(message["from_num"]) || resolve_node_num(from_id, message)
|
||||
payload = common_payload.merge(
|
||||
"src" => src_num,
|
||||
"dest" => dest,
|
||||
"hops" => hops,
|
||||
)
|
||||
insert_trace(db, payload)
|
||||
debug_log(
|
||||
"Stored decrypted traceroute payload",
|
||||
context: "data_processing.store_decrypted_payload",
|
||||
message_id: packet_id,
|
||||
portnum: portnum_value,
|
||||
)
|
||||
true
|
||||
else
|
||||
false
|
||||
end
|
||||
end
|
||||
|
||||
def normalize_node_id(db, node_ref)
|
||||
|
||||
@@ -81,10 +81,10 @@ module PotatoMesh
|
||||
return false unless File.exist?(PotatoMesh::Config.db_path)
|
||||
|
||||
db = open_database(readonly: true)
|
||||
required = %w[nodes messages positions telemetry neighbors instances traces trace_hops]
|
||||
required = %w[nodes messages positions telemetry neighbors instances traces trace_hops ingestors]
|
||||
tables =
|
||||
db.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages','positions','telemetry','neighbors','instances','traces','trace_hops')",
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages','positions','telemetry','neighbors','instances','traces','trace_hops','ingestors')",
|
||||
).flatten
|
||||
(required - tables).empty?
|
||||
rescue SQLite3::Exception
|
||||
@@ -99,7 +99,7 @@ module PotatoMesh
|
||||
def init_db
|
||||
FileUtils.mkdir_p(File.dirname(PotatoMesh::Config.db_path))
|
||||
db = open_database
|
||||
%w[nodes messages positions telemetry neighbors instances traces].each do |schema|
|
||||
%w[nodes messages positions telemetry neighbors instances traces ingestors].each do |schema|
|
||||
sql_file = File.expand_path("../../../../data/#{schema}.sql", __dir__)
|
||||
db.execute_batch(File.read(sql_file))
|
||||
end
|
||||
@@ -150,6 +150,16 @@ module PotatoMesh
|
||||
message_columns << "emoji"
|
||||
end
|
||||
|
||||
unless message_columns.include?("decrypted")
|
||||
db.execute("ALTER TABLE messages ADD COLUMN decrypted INTEGER NOT NULL DEFAULT 0")
|
||||
message_columns << "decrypted"
|
||||
end
|
||||
|
||||
unless message_columns.include?("decryption_confidence")
|
||||
db.execute("ALTER TABLE messages ADD COLUMN decryption_confidence REAL")
|
||||
message_columns << "decryption_confidence"
|
||||
end
|
||||
|
||||
reply_index_exists =
|
||||
db.get_first_value(
|
||||
"SELECT COUNT(*) FROM sqlite_master WHERE type='index' AND name='idx_messages_reply_id'",
|
||||
@@ -164,6 +174,16 @@ module PotatoMesh
|
||||
db.execute_batch(File.read(sql_file))
|
||||
end
|
||||
|
||||
instance_columns = db.execute("PRAGMA table_info(instances)").map { |row| row[1] }
|
||||
unless instance_columns.include?("contact_link")
|
||||
db.execute("ALTER TABLE instances ADD COLUMN contact_link TEXT")
|
||||
instance_columns << "contact_link"
|
||||
end
|
||||
|
||||
unless instance_columns.include?("nodes_count")
|
||||
db.execute("ALTER TABLE instances ADD COLUMN nodes_count INTEGER")
|
||||
end
|
||||
|
||||
telemetry_tables =
|
||||
db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='telemetry'").flatten
|
||||
if telemetry_tables.empty?
|
||||
@@ -187,6 +207,24 @@ module PotatoMesh
|
||||
traces_schema = File.expand_path("../../../../data/traces.sql", __dir__)
|
||||
db.execute_batch(File.read(traces_schema))
|
||||
end
|
||||
|
||||
ingestor_tables =
|
||||
db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='ingestors'").flatten
|
||||
if ingestor_tables.empty?
|
||||
ingestors_schema = File.expand_path("../../../../data/ingestors.sql", __dir__)
|
||||
db.execute_batch(File.read(ingestors_schema))
|
||||
else
|
||||
ingestor_columns = db.execute("PRAGMA table_info(ingestors)").map { |row| row[1] }
|
||||
unless ingestor_columns.include?("version")
|
||||
db.execute("ALTER TABLE ingestors ADD COLUMN version TEXT")
|
||||
end
|
||||
unless ingestor_columns.include?("lora_freq")
|
||||
db.execute("ALTER TABLE ingestors ADD COLUMN lora_freq INTEGER")
|
||||
end
|
||||
unless ingestor_columns.include?("modem_preset")
|
||||
db.execute("ALTER TABLE ingestors ADD COLUMN modem_preset TEXT")
|
||||
end
|
||||
end
|
||||
rescue SQLite3::SQLException, Errno::ENOENT => e
|
||||
warn_log(
|
||||
"Failed to apply schema upgrade",
|
||||
|
||||
@@ -61,6 +61,7 @@ module PotatoMesh
|
||||
def self_instance_attributes
|
||||
domain = self_instance_domain
|
||||
last_update = latest_node_update_timestamp || Time.now.to_i
|
||||
nodes_count = active_node_count_since(Time.now.to_i - PotatoMesh::Config.remote_instance_max_node_age)
|
||||
{
|
||||
id: app_constant(:SELF_INSTANCE_ID),
|
||||
domain: domain,
|
||||
@@ -73,9 +74,37 @@ module PotatoMesh
|
||||
longitude: PotatoMesh::Config.map_center_lon,
|
||||
last_update_time: last_update,
|
||||
is_private: private_mode?,
|
||||
contact_link: sanitized_contact_link,
|
||||
nodes_count: nodes_count,
|
||||
}
|
||||
end
|
||||
|
||||
# Count the number of nodes active since the supplied timestamp.
|
||||
#
|
||||
# @param cutoff [Integer] unix timestamp in seconds.
|
||||
# @param db [SQLite3::Database, nil] optional open handle to reuse.
|
||||
# @return [Integer, nil] node count or nil when unavailable.
|
||||
def active_node_count_since(cutoff, db: nil)
|
||||
return nil unless cutoff
|
||||
|
||||
handle = db || open_database(readonly: true)
|
||||
count =
|
||||
with_busy_retry do
|
||||
handle.get_first_value("SELECT COUNT(*) FROM nodes WHERE last_heard >= ?", cutoff.to_i)
|
||||
end
|
||||
Integer(count)
|
||||
rescue SQLite3::Exception, ArgumentError => e
|
||||
warn_log(
|
||||
"Failed to count active nodes",
|
||||
context: "instances.nodes_count",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
nil
|
||||
ensure
|
||||
handle&.close unless db
|
||||
end
|
||||
|
||||
def sign_instance_attributes(attributes)
|
||||
payload = canonical_instance_payload(attributes)
|
||||
Base64.strict_encode64(
|
||||
@@ -96,6 +125,7 @@ module PotatoMesh
|
||||
"longitude" => attributes[:longitude],
|
||||
"lastUpdateTime" => attributes[:last_update_time],
|
||||
"isPrivate" => attributes[:is_private],
|
||||
"contactLink" => attributes[:contact_link],
|
||||
"signature" => signature,
|
||||
}
|
||||
payload.reject { |_, value| value.nil? }
|
||||
@@ -147,6 +177,7 @@ module PotatoMesh
|
||||
pool = PotatoMesh::App::WorkerPool.new(
|
||||
size: PotatoMesh::Config.federation_worker_pool_size,
|
||||
max_queue: PotatoMesh::Config.federation_worker_queue_capacity,
|
||||
task_timeout: PotatoMesh::Config.federation_task_timeout_seconds,
|
||||
name: "potato-mesh-fed",
|
||||
)
|
||||
|
||||
@@ -412,6 +443,8 @@ module PotatoMesh
|
||||
end
|
||||
end
|
||||
thread.name = "potato-mesh-federation" if thread.respond_to?(:name=)
|
||||
# Allow shutdown even if the announcement loop is still sleeping.
|
||||
thread.daemon = true if thread.respond_to?(:daemon=)
|
||||
set(:federation_thread, thread)
|
||||
thread
|
||||
end
|
||||
@@ -444,12 +477,15 @@ module PotatoMesh
|
||||
end
|
||||
thread.name = "potato-mesh-federation-initial" if thread.respond_to?(:name=)
|
||||
thread.report_on_exception = false if thread.respond_to?(:report_on_exception=)
|
||||
# Avoid blocking process shutdown during delayed startup announcements.
|
||||
thread.daemon = true if thread.respond_to?(:daemon=)
|
||||
set(:initial_federation_thread, thread)
|
||||
thread
|
||||
end
|
||||
|
||||
def canonical_instance_payload(attributes)
|
||||
data = {}
|
||||
data["contactLink"] = attributes[:contact_link] if attributes[:contact_link]
|
||||
data["id"] = attributes[:id] if attributes[:id]
|
||||
data["domain"] = attributes[:domain] if attributes[:domain]
|
||||
data["pubkey"] = attributes[:pubkey] if attributes[:pubkey]
|
||||
@@ -611,6 +647,7 @@ module PotatoMesh
|
||||
longitude: coerce_float(payload["longitude"]),
|
||||
last_update_time: coerce_integer(payload["lastUpdateTime"]),
|
||||
is_private: private_flag,
|
||||
contact_link: string_or_nil(payload["contactLink"]),
|
||||
}
|
||||
|
||||
[attributes, signature, nil]
|
||||
@@ -719,6 +756,7 @@ module PotatoMesh
|
||||
end
|
||||
|
||||
processed_entries = 0
|
||||
recent_cutoff = Time.now.to_i - PotatoMesh::Config.remote_instance_max_node_age
|
||||
payload.each do |entry|
|
||||
if per_response_limit && per_response_limit.positive? && processed_entries >= per_response_limit
|
||||
debug_log(
|
||||
@@ -773,13 +811,27 @@ module PotatoMesh
|
||||
|
||||
attributes[:is_private] = false if attributes[:is_private].nil?
|
||||
|
||||
nodes_since_path = "/api/nodes?since=#{recent_cutoff}&limit=1000"
|
||||
nodes_since_window, nodes_since_metadata = fetch_instance_json(attributes[:domain], nodes_since_path)
|
||||
if nodes_since_window.is_a?(Array)
|
||||
attributes[:nodes_count] = nodes_since_window.length
|
||||
elsif nodes_since_metadata
|
||||
warn_log(
|
||||
"Failed to load remote node window",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
reason: Array(nodes_since_metadata).map(&:to_s).join("; "),
|
||||
)
|
||||
end
|
||||
|
||||
remote_nodes, node_metadata = fetch_instance_json(attributes[:domain], "/api/nodes")
|
||||
remote_nodes ||= nodes_since_window if nodes_since_window.is_a?(Array)
|
||||
unless remote_nodes
|
||||
warn_log(
|
||||
"Failed to load remote node data",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
reason: Array(node_metadata).map(&:to_s).join("; "),
|
||||
reason: Array(node_metadata || nodes_since_metadata).map(&:to_s).join("; "),
|
||||
)
|
||||
next
|
||||
end
|
||||
@@ -1055,8 +1107,8 @@ module PotatoMesh
|
||||
sql = <<~SQL
|
||||
INSERT INTO instances (
|
||||
id, domain, pubkey, name, version, channel, frequency,
|
||||
latitude, longitude, last_update_time, is_private, signature
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
latitude, longitude, last_update_time, is_private, nodes_count, contact_link, signature
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(id) DO UPDATE SET
|
||||
domain=excluded.domain,
|
||||
pubkey=excluded.pubkey,
|
||||
@@ -1068,9 +1120,12 @@ module PotatoMesh
|
||||
longitude=excluded.longitude,
|
||||
last_update_time=excluded.last_update_time,
|
||||
is_private=excluded.is_private,
|
||||
nodes_count=excluded.nodes_count,
|
||||
contact_link=excluded.contact_link,
|
||||
signature=excluded.signature
|
||||
SQL
|
||||
|
||||
nodes_count = coerce_integer(attributes[:nodes_count])
|
||||
params = [
|
||||
attributes[:id],
|
||||
normalized_domain,
|
||||
@@ -1083,6 +1138,8 @@ module PotatoMesh
|
||||
attributes[:longitude],
|
||||
attributes[:last_update_time],
|
||||
attributes[:is_private] ? 1 : 0,
|
||||
nodes_count,
|
||||
attributes[:contact_link],
|
||||
signature,
|
||||
]
|
||||
|
||||
|
||||
@@ -20,6 +20,8 @@ module PotatoMesh
|
||||
# its intended consumers to ensure consistent behaviour across the Sinatra
|
||||
# application.
|
||||
module Helpers
|
||||
ANNOUNCEMENT_URL_PATTERN = %r{\bhttps?://[^\s<]+}i.freeze
|
||||
|
||||
# Fetch an application level constant exposed by {PotatoMesh::Application}.
|
||||
#
|
||||
# @param name [Symbol] constant identifier to retrieve.
|
||||
@@ -92,6 +94,47 @@ module PotatoMesh
|
||||
PotatoMesh::Sanitizer.sanitized_site_name
|
||||
end
|
||||
|
||||
# Retrieve the configured announcement banner copy.
|
||||
#
|
||||
# @return [String, nil] sanitised announcement or nil when unset.
|
||||
def sanitized_announcement
|
||||
PotatoMesh::Sanitizer.sanitized_announcement
|
||||
end
|
||||
|
||||
# Render the announcement copy with safe outbound links.
|
||||
#
|
||||
# @return [String, nil] escaped HTML snippet or nil when unset.
|
||||
def announcement_html
|
||||
announcement = sanitized_announcement
|
||||
return nil unless announcement
|
||||
|
||||
fragments = []
|
||||
last_index = 0
|
||||
|
||||
announcement.to_enum(:scan, ANNOUNCEMENT_URL_PATTERN).each do
|
||||
match = Regexp.last_match
|
||||
next unless match
|
||||
|
||||
start_index = match.begin(0)
|
||||
end_index = match.end(0)
|
||||
|
||||
if start_index > last_index
|
||||
fragments << Rack::Utils.escape_html(announcement[last_index...start_index])
|
||||
end
|
||||
|
||||
url = match[0]
|
||||
escaped_url = Rack::Utils.escape_html(url)
|
||||
fragments << %(<a href="#{escaped_url}" target="_blank" rel="noopener noreferrer">#{escaped_url}</a>)
|
||||
last_index = end_index
|
||||
end
|
||||
|
||||
if last_index < announcement.length
|
||||
fragments << Rack::Utils.escape_html(announcement[last_index..])
|
||||
end
|
||||
|
||||
fragments.join
|
||||
end
|
||||
|
||||
# Retrieve the configured channel.
|
||||
#
|
||||
# @return [String] sanitised channel identifier.
|
||||
|
||||
@@ -143,6 +143,8 @@ module PotatoMesh
|
||||
"longitude" => coerce_float(row["longitude"]),
|
||||
"lastUpdateTime" => last_update_time,
|
||||
"isPrivate" => private_flag,
|
||||
"nodesCount" => coerce_integer(row["nodes_count"]),
|
||||
"contactLink" => string_or_nil(row["contact_link"]),
|
||||
"signature" => signature,
|
||||
}
|
||||
|
||||
@@ -173,7 +175,7 @@ module PotatoMesh
|
||||
min_last_update_time = now - PotatoMesh::Config.week_seconds
|
||||
sql = <<~SQL
|
||||
SELECT id, domain, pubkey, name, version, channel, frequency,
|
||||
latitude, longitude, last_update_time, is_private, signature
|
||||
latitude, longitude, last_update_time, is_private, nodes_count, contact_link, signature
|
||||
FROM instances
|
||||
WHERE domain IS NOT NULL AND TRIM(domain) != ''
|
||||
AND pubkey IS NOT NULL AND TRIM(pubkey) != ''
|
||||
|
||||
@@ -0,0 +1,102 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "base64"
|
||||
|
||||
module PotatoMesh
  module App
    module Meshtastic
      # Compute Meshtastic channel hashes from a name and pre-shared key.
      #
      # The hash is the firmware's scheme: XOR of all channel-name bytes
      # combined with XOR of all expanded-key bytes, reduced to one byte.
      module ChannelHash
        module_function

        # Key material substituted for the single-byte PSK aliases.
        DEFAULT_PSK_ALIAS_KEYS = {
          1 => [
            0xD4, 0xF1, 0xBB, 0x3A, 0x20, 0x29, 0x07, 0x59,
            0xF0, 0xBC, 0xFF, 0xAB, 0xCF, 0x4E, 0x69, 0x01,
          ].pack("C*"),
          2 => [
            0x38, 0x4B, 0xBC, 0xC0, 0x1D, 0xC0, 0x22, 0xD1,
            0x81, 0xBF, 0x36, 0xB8, 0x61, 0x21, 0xE1, 0xFB,
            0x96, 0xB7, 0x2E, 0x55, 0xBF, 0x74, 0x22, 0x7E,
            0x9D, 0x6A, 0xFB, 0x48, 0xD6, 0x4C, 0xB1, 0xA1,
          ].pack("C*"),
        }.freeze

        # Calculate the Meshtastic channel hash for the given name and PSK.
        #
        # @param name [String] channel name candidate.
        # @param psk_b64 [String, nil] base64-encoded PSK or PSK alias.
        # @return [Integer, nil] channel hash byte or nil when inputs are invalid.
        def channel_hash(name, psk_b64)
          return nil if name.nil?

          key_bytes = expanded_key(psk_b64)
          return nil if key_bytes.nil?

          # Combine the two running XORs and keep the low byte.
          (xor_bytes(name.b) ^ xor_bytes(key_bytes)) & 0xFF
        end

        # Expand the provided PSK into a valid AES key length.
        #
        # Short keys are zero-padded up to the next AES key size (16 or 32
        # bytes); single-byte values are treated as aliases for built-in keys.
        #
        # @param psk_b64 [String, nil] base64 PSK value.
        # @return [String, nil] expanded key bytes or nil when invalid.
        def expanded_key(psk_b64)
          raw = Base64.decode64(psk_b64.to_s)
          size = raw.bytesize

          if size.zero?
            "".b
          elsif size == 1
            default_key_for_alias(raw.bytes.first)
          elsif size == 16 || size == 32
            raw
          elsif size < 16
            raw.b + ("\x00" * (16 - size)).b
          elsif size < 32
            raw.b + ("\x00" * (32 - size)).b
          end
        end

        # Map PSK alias bytes to their default key material.
        #
        # @param alias_index [Integer, nil] alias identifier for the PSK.
        # @return [String, nil] key bytes or nil when unknown.
        def default_key_for_alias(alias_index)
          return nil unless alias_index

          key = DEFAULT_PSK_ALIAS_KEYS[alias_index]
          key.nil? ? nil : key.dup
        end

        # XOR all bytes in the given string or byte array.
        #
        # @param value [String, Array<Integer>] input byte sequence.
        # @return [Integer] XOR of all bytes.
        def xor_bytes(value)
          sequence = value.is_a?(String) ? value.bytes : value
          sequence.inject(0) { |acc, current| (acc ^ current) & 0xFF }
        end
      end
    end
  end
end
|
||||
@@ -0,0 +1,28 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
  module App
    module Meshtastic
      # Canonical list of candidate channel names used to build rainbow tables.
      #
      # The candidates mix common English channel words, NATO-alphabet call
      # signs, Meshtastic preset names (LongFast, MediumSlow, ...), German
      # city/district names, and numbered variants, so a one-byte channel
      # hash can be mapped back to plausible human-readable names.
      module ChannelNames
        # NOTE(review): order is not significant — consumers only iterate the
        # list to build hash => name mappings. %w[] splits on any whitespace,
        # so the line wrap below does not affect the entries.
        CHANNEL_NAME_CANDIDATES = %w[
          911 Admin ADMIN admin Alert Alpha AlphaNet Alpine Amateur Amazon Anaconda Aquila Arctic Ash Asteroid Astro Aurora Avalanche Backup Basalt Base Base1 Base2 BaseAlpha BaseBravo BaseCharlie Bavaria Beacon Bear BearNet Beat Berg Berlin BerlinMesh BerlinNet Beta BetaBerlin Bison Blackout Blizzard Bolt Bonfire Border Borealis Bravo BravoNet Breeze Bridge Bronze Burner Burrow Callisto Callsign Camp Campfire CampNet Caravan Carbon Carpet Central Chameleon Charlie Chat Checkpoint Checkpoint1 Checkpoint2 Cheetah City Clinic Cloud Cobra Collective Cologne Colony Comet Command Command1 Command2 CommandRoom Comms Comms1 Comms2 CommsNet Commune Control Control1 Control2 ControlRoom Convoy Copper Core Corvus Cosmos Courier Courier1 Courier2 CourierMesh CourierNet CQ CQ1 CQ2 Crow CrowNet DarkNet Dawn Daybreak Daylight Delta DeltaNet Demo DEMO DemoBerlin Den Desert Diamond Distress District Doctor Dortmund Downlink Downlink1 Draco Dragon DragonNet Dune Dusk Eagle EagleNet East EastStar Echo EchoMesh EchoNet Emergency emergency EMERGENCY EmergencyBerlin Epsilon Equinox Europa Falcon Field FieldNet Fire Fire1 Fire2 Firebird Firefly Fireline Fireteam Firewatch Flash Flock Fluss Fog Forest Fox FoxNet Foxtrot FoxtrotMesh FoxtrotNet Frankfurt Freedom Freq Freq1 Freq2 Friedrichshain Frontier Frost Galaxy Gale Gamma Ganymede Gecko General Ghost GhostNet Glacier Gold Granite Grassland Grid Grid1 Grid2 GridNet GridNorth GridSouth Griffin Group Ham HAM Hamburg HAMNet Harbor Harmony HarmonyNet Hawk HawkNet Haze Help Hessen Highway Hilltop Hinterland Hive Hospital HQ HQ1 HQ2 Hub Hub1 Hub2 Hydra Ice Io Iron Jaguar Jungle Jupiter Kiez Kilo KiloMesh KiloNet Kraken Kreuzberg Lava Layer Layer1 Layer2 Layer3 Leipzig Leopard Liberty LightNet Lightning Lima Link Lion Lizard LongFast LongSlow LoRa LoRaBerlin LoRaHessen LoRaMesh LoRaNet LoRaTest Main Mars Med Med1 Med2 Medic MediumFast MediumSlow Mercury Mesh Mesh1 Mesh2 Mesh3 Mesh4 Mesh5 MeshBerlin MeshCollective MeshCologne MeshFrankfurt MeshGrid
          MeshHamburg MeshHessen MeshLeipzig MeshMunich MeshNet MeshNetwork MeshRuhr Meshtastic MeshTest Meteor Metro Midnight Mirage Mist MoonNet Munich Müggelberg Nebula Nest Network Neukölln Nexus Nightfall NightMesh NightNet Nightshift NightshiftNet Nightwatch Node1 Node2 Node3 Node4 Node5 Nomad NomadMesh NomadNet Nomads Nord North NorthStar Oasis Obsidian Omega Operations OPERATIONS Ops Ops1 Ops2 OpsCenter OpsRoom Orbit Ost Outpost Outsider Owl Pack Packet PacketNet PacketRadio Panther Paramedic Path Peak Phantom Phoenix PhoenixNet Platinum Pluto Polar Prairie Prenzlauer PRIVATE Private Public PUBLIC Pulse PulseNet Python Quasar Radio Radio1 Radio2 RadioNet Rain Ranger Raven RavenNet Relay Relay1 Relay2 Repeater Repeater1 Repeater2 RepeaterHub Rescue Rescue1 Rescue2 RescueTeam Rhythm Ridge River Road Rock Router Router1 Router2 Rover Ruhr Runner Runners Safari Safe Safety Sahara Saturn Savanna Saxony Scout Sector Secure Sensor SENSOR Sensors SENSORS Shade Shadow ShadowNet Shelter Shelter1 Shelter2 ShortFast Sideband Sideband1 Sierra Signal Signal1 Signal2 SignalFire Signals Silver Smoke Snake Snow Solstice SOS Sos SOSBerlin South SouthStar Spectrum Squad StarNet Steel Stone Storm Storm1 Storm2 Stratum Stuttgart Summit SunNet Sunrise Sunset Sync SyncNet Syndicate Süd Tal Tango TangoMesh TangoNet Team Tempo Test TEST test TestBerlin Teufelsberg Thunder Tiger Titan Town Trail Tundra Tunnel Union Unit Universe Uplink Uplink1 Valley Venus Victor Village Viper Volcano Wald Wander Wanderer Wanderers Watch Watch1 Watch2 WaWi West WestStar Whisper Wind Wolf WolfDen WolfMesh WolfNet Wolfpack Wolves Woods Wyvern Zeta Zone Zone1 Zone2 Zone3 Zulu ZuluMesh ZuluNet
        ].freeze
      end
    end
  end
end
|
||||
@@ -0,0 +1,213 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "base64"
|
||||
require "openssl"
|
||||
|
||||
require_relative "channel_hash"
|
||||
require_relative "protobuf"
|
||||
|
||||
module PotatoMesh
  module App
    module Meshtastic
      # Decrypt Meshtastic payloads with AES-CTR using Meshtastic nonce rules.
      module Cipher
        module_function

        DEFAULT_PSK_B64 = "AQ=="
        TEXT_MESSAGE_PORTNUM = 1
        # Number of characters required for full confidence scoring.
        CONFIDENCE_LENGTH_TARGET = 8.0

        # Decrypt an encrypted Meshtastic payload into UTF-8 text.
        #
        # @param cipher_b64 [String] base64-encoded encrypted payload.
        # @param packet_id [Integer] packet identifier used for the nonce.
        # @param from_id [String, nil] Meshtastic node identifier (e.g. "!9e95cf60").
        # @param from_num [Integer, nil] numeric node identifier override.
        # @param psk_b64 [String, nil] base64 PSK or alias.
        # @return [String, nil] decrypted text or nil when decryption fails.
        def decrypt_text(cipher_b64:, packet_id:, from_id: nil, from_num: nil, psk_b64: DEFAULT_PSK_B64)
          result = decrypt_data(
            cipher_b64: cipher_b64,
            packet_id: packet_id,
            from_id: from_id,
            from_num: from_num,
            psk_b64: psk_b64,
          )
          result ? result[:text] : nil
        end

        # Decrypt the Meshtastic data protobuf payload.
        #
        # @param cipher_b64 [String] base64-encoded encrypted payload.
        # @param packet_id [Integer] packet identifier used for the nonce.
        # @param from_id [String, nil] Meshtastic node identifier.
        # @param from_num [Integer, nil] numeric node identifier override.
        # @param psk_b64 [String, nil] base64 PSK or alias.
        # @return [Hash, nil] decrypted data payload details or nil when decryption fails.
        def decrypt_data(cipher_b64:, packet_id:, from_id: nil, from_num: nil, psk_b64: DEFAULT_PSK_B64)
          ciphertext = Base64.strict_decode64(cipher_b64)

          key = ChannelHash.expanded_key(psk_b64)
          # Only full AES-128/AES-256 keys are usable for decryption.
          return nil if key.nil? || ![16, 32].include?(key.bytesize)

          packet_value = normalize_packet_id(packet_id)
          from_value = normalize_node_num(from_id, from_num)
          return nil if packet_value.nil? || from_value.nil?

          plaintext = decrypt_aes_ctr(ciphertext, key, build_nonce(packet_value, from_value))
          return nil if plaintext.nil?

          parsed = Protobuf.parse_data(plaintext)
          return nil if parsed.nil?

          text = nil
          confidence = nil
          if parsed[:portnum] == TEXT_MESSAGE_PORTNUM
            candidate = parsed[:payload].dup.force_encoding("UTF-8")
            if candidate.valid_encoding? && !candidate.empty?
              text = candidate
              confidence = text_confidence(text)
            end
          end

          {
            portnum: parsed[:portnum],
            payload: parsed[:payload],
            text: text,
            decryption_confidence: confidence,
          }
        rescue ArgumentError, OpenSSL::Cipher::CipherError
          # Bad base64, malformed keys, or cipher failures all mean "not decryptable".
          nil
        end

        # Decrypt the Meshtastic data protobuf payload bytes.
        #
        # @param cipher_b64 [String] base64-encoded encrypted payload.
        # @param packet_id [Integer] packet identifier used for the nonce.
        # @param from_id [String, nil] Meshtastic node identifier.
        # @param from_num [Integer, nil] numeric node identifier override.
        # @param psk_b64 [String, nil] base64 PSK or alias.
        # @return [String, nil] payload bytes or nil when decryption fails.
        def decrypt_payload_bytes(cipher_b64:, packet_id:, from_id: nil, from_num: nil, psk_b64: DEFAULT_PSK_B64)
          result = decrypt_data(
            cipher_b64: cipher_b64,
            packet_id: packet_id,
            from_id: from_id,
            from_num: from_num,
            psk_b64: psk_b64,
          )
          result ? result[:payload] : nil
        end

        # Build the Meshtastic AES nonce from packet and node identifiers.
        #
        # Layout: packet id (8 bytes LE) + node number (4 bytes LE) + 4 zero bytes.
        #
        # @param packet_id [Integer] packet identifier.
        # @param from_num [Integer] numeric node identifier.
        # @return [String] 16-byte nonce.
        def build_nonce(packet_id, from_num)
          nonce = [packet_id].pack("Q<")
          nonce += [from_num].pack("L<")
          nonce + ("\x00" * 4)
        end

        # Decrypt data using AES-CTR with the derived nonce.
        #
        # @param ciphertext [String] encrypted payload bytes.
        # @param key [String] expanded AES key bytes.
        # @param nonce [String] 16-byte nonce.
        # @return [String] decrypted plaintext bytes.
        def decrypt_aes_ctr(ciphertext, key, nonce)
          algorithm = key.bytesize == 16 ? "aes-128-ctr" : "aes-256-ctr"
          aes = OpenSSL::Cipher.new(algorithm)
          aes.decrypt
          aes.key = key
          aes.iv = nonce
          aes.update(ciphertext) + aes.final
        end

        # Normalise the packet identifier into a non-negative integer.
        #
        # @param packet_id [Integer, nil] packet identifier.
        # @return [Integer, nil] validated packet id or nil when invalid.
        def normalize_packet_id(packet_id)
          case packet_id
          when nil
            nil
          when Integer
            packet_id >= 0 ? packet_id : nil
          when Numeric
            packet_id.negative? ? nil : packet_id.to_i
          else
            return nil unless packet_id.respond_to?(:to_s)

            candidate = packet_id.to_s.strip
            candidate.match?(/\A\d+\z/) ? candidate.to_i(10) : nil
          end
        end

        # Score the plausibility of decrypted text content.
        #
        # @param text [String] decrypted text candidate.
        # @return [Float] confidence score between 0.0 and 1.0.
        def text_confidence(text)
          return 0.0 unless text.is_a?(String) && !text.empty?

          total = text.length.to_f
          # Short strings carry less evidence; score ramps up to the target length.
          length_score = [total / CONFIDENCE_LENGTH_TARGET, 1.0].min
          control_ratio = text.scan(/[\p{Cc}\p{Cs}]/).length / total
          acceptable_ratio = text.scan(/[\p{L}\p{N}\p{P}\p{S}\p{Zs}\t\n\r]/).length / total

          (length_score * acceptable_ratio * (1.0 - control_ratio)).clamp(0.0, 1.0)
        end

        # Resolve the node number from any of the supported identifiers.
        #
        # @param from_id [String, nil] Meshtastic node identifier.
        # @param from_num [Integer, nil] numeric node identifier override.
        # @return [Integer, nil] node number or nil when invalid.
        def normalize_node_num(from_id, from_num)
          case from_num
          when Integer
            return from_num & 0xFFFFFFFF
          when Numeric
            return from_num.to_i & 0xFFFFFFFF
          end

          return nil if from_id.nil?

          candidate = from_id.to_s.strip
          return nil if candidate.empty?

          # Accept "!hex", "0xhex", or bare hex forms.
          hex = candidate.delete_prefix("!")
          hex = hex[2..] if hex.start_with?("0x", "0X")
          hex.match?(/\A[0-9A-Fa-f]+\z/) ? hex.to_i(16) & 0xFFFFFFFF : nil
        end
      end
    end
  end
end
|
||||
@@ -0,0 +1,120 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "json"
|
||||
require "open3"
|
||||
|
||||
module PotatoMesh
  module App
    module Meshtastic
      # Decode Meshtastic protobuf payloads via the Python helper script.
      module PayloadDecoder
        module_function

        # Environment variable that overrides the Python interpreter path.
        PYTHON_ENV_KEY = "MESHTASTIC_PYTHON"
        DEFAULT_PYTHON_RELATIVE = File.join("data", ".venv", "bin", "python")
        DEFAULT_DECODER_RELATIVE = File.join("data", "mesh_ingestor", "decode_payload.py")
        # Interpreter names probed on PATH when no explicit path is available.
        FALLBACK_PYTHON_NAMES = ["python3", "python"].freeze

        # Decode a protobuf payload using the Meshtastic helper.
        #
        # @param portnum [Integer] Meshtastic port number.
        # @param payload_b64 [String] base64-encoded payload bytes.
        # @return [Hash, nil] decoded payload hash or nil when decoding fails.
        def decode(portnum:, payload_b64:)
          return nil unless portnum && payload_b64

          decoder_path = decoder_script_path
          python_path = python_executable_path
          return nil unless decoder_path && python_path

          input = JSON.generate({ portnum: portnum, payload_b64: payload_b64 })
          # stderr is intentionally discarded; failures surface via exit status.
          stdout, _stderr, status = Open3.capture3(python_path, decoder_path, stdin_data: input)
          return nil unless status.success?

          parsed = JSON.parse(stdout)
          return nil unless parsed.is_a?(Hash)
          return nil if parsed["error"]

          parsed
        rescue JSON::ParserError, Errno::ENOENT, ArgumentError
          # Malformed JSON, a missing interpreter/script, or bad spawn arguments
          # all mean the payload cannot be decoded; report nil rather than raise.
          nil
        end

        # Resolve the configured Python executable for Meshtastic decoding.
        #
        # Preference order: MESHTASTIC_PYTHON env var, the repo-local venv
        # interpreter, then python3/python from PATH.
        #
        # @return [String, nil] python path or nil when missing.
        def python_executable_path
          configured = ENV[PYTHON_ENV_KEY]
          return configured if configured && !configured.strip.empty?

          candidate = File.expand_path(DEFAULT_PYTHON_RELATIVE, repo_root)
          return candidate if File.exist?(candidate)

          FALLBACK_PYTHON_NAMES.each do |name|
            found = find_executable(name)
            return found if found
          end

          nil
        end

        # Resolve the Meshtastic payload decoder script path.
        #
        # Checks the repository root first, then the web root.
        #
        # @return [String, nil] script path or nil when missing.
        def decoder_script_path
          repo_candidate = File.expand_path(DEFAULT_DECODER_RELATIVE, repo_root)
          return repo_candidate if File.exist?(repo_candidate)

          web_candidate = File.expand_path(DEFAULT_DECODER_RELATIVE, web_root)
          return web_candidate if File.exist?(web_candidate)

          nil
        end

        # Resolve the repository root directory from the application config.
        #
        # @return [String] absolute path to the repository root.
        def repo_root
          PotatoMesh::Config.repo_root
        end

        # Resolve the web application root directory from the application config.
        #
        # @return [String] absolute path to the web root.
        def web_root
          PotatoMesh::Config.web_root
        end

        # Locate an executable in PATH without invoking a subshell.
        #
        # @param name [String] executable name to resolve.
        # @return [String, nil] full path when found.
        def find_executable(name)
          ENV.fetch("PATH", "").split(File::PATH_SEPARATOR).each do |path|
            candidate = File.join(path, name)
            return candidate if File.file?(candidate) && File.executable?(candidate)
          end

          nil
        end

        private_class_method :find_executable
      end
    end
  end
end
|
||||
@@ -0,0 +1,140 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
  module App
    module Meshtastic
      # Minimal protobuf helpers for extracting payload bytes from Meshtastic data.
      module Protobuf
        module_function

        WIRE_TYPE_VARINT = 0
        WIRE_TYPE_64BIT = 1
        WIRE_TYPE_LENGTH_DELIMITED = 2
        WIRE_TYPE_32BIT = 5
        DATA_PORTNUM_FIELD = 1
        DATA_PAYLOAD_FIELD = 2

        # Extract a length-delimited field from a protobuf message.
        #
        # @param payload [String] raw protobuf-encoded bytes.
        # @param field_number [Integer] field to extract.
        # @return [String, nil] field bytes or nil when absent/invalid.
        def extract_field_bytes(payload, field_number)
          return nil if payload.nil? || field_number.nil?

          stream = payload.bytes
          cursor = 0

          while cursor < stream.length
            tag, cursor = read_varint(stream, cursor)
            return nil if tag.nil?

            # Low three bits select the wire type; the rest is the field number.
            case tag & 0x7
            when WIRE_TYPE_VARINT
              _skipped, cursor = read_varint(stream, cursor)
              return nil if cursor.nil?
            when WIRE_TYPE_64BIT
              cursor += 8
            when WIRE_TYPE_LENGTH_DELIMITED
              size, cursor = read_varint(stream, cursor)
              return nil if size.nil? || cursor + size > stream.length

              chunk = stream[cursor, size].pack("C*")
              cursor += size
              return chunk if (tag >> 3) == field_number
            when WIRE_TYPE_32BIT
              cursor += 4
            else
              # Deprecated group wire types and anything unknown abort parsing.
              return nil
            end
          end

          nil
        end

        # Parse a Meshtastic Data message for the port number and payload.
        #
        # @param payload [String] raw protobuf-encoded bytes.
        # @return [Hash, nil] parsed port number and payload bytes.
        def parse_data(payload)
          return nil if payload.nil?

          stream = payload.bytes
          cursor = 0
          portnum = nil
          inner_payload = nil

          while cursor < stream.length
            tag, cursor = read_varint(stream, cursor)
            return nil if tag.nil?

            field = tag >> 3

            case tag & 0x7
            when WIRE_TYPE_VARINT
              number, cursor = read_varint(stream, cursor)
              return nil if number.nil?

              portnum = number if field == DATA_PORTNUM_FIELD
            when WIRE_TYPE_64BIT
              cursor += 8
            when WIRE_TYPE_LENGTH_DELIMITED
              size, cursor = read_varint(stream, cursor)
              return nil if size.nil? || cursor + size > stream.length

              chunk = stream[cursor, size].pack("C*")
              cursor += size
              inner_payload = chunk if field == DATA_PAYLOAD_FIELD
            when WIRE_TYPE_32BIT
              cursor += 4
            else
              return nil
            end
          end

          # Both fields must be present for a usable Data message.
          return nil if portnum.nil? || inner_payload.nil?

          { portnum: portnum, payload: inner_payload }
        end

        # Read a protobuf varint from a byte array.
        #
        # @param bytes [Array<Integer>] byte stream.
        # @param index [Integer] read offset.
        # @return [Array(Integer, Integer), nil] value and new index or nil when invalid.
        def read_varint(bytes, index)
          accumulated = 0
          shift = 0

          while index < bytes.length
            current = bytes[index]
            index += 1
            accumulated |= (current & 0x7F) << shift
            # High bit clear marks the final byte of the varint.
            return [accumulated, index] if (current & 0x80).zero?

            shift += 7
            # Reject varints wider than 64 bits.
            return nil if shift > 63
          end

          nil
        end
      end
    end
  end
end
|
||||
@@ -0,0 +1,68 @@
|
||||
# Copyright © 2025-26 l5yth & contributors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
require_relative "channel_hash"
|
||||
require_relative "channel_names"
|
||||
|
||||
module PotatoMesh
  module App
    module Meshtastic
      # Resolve candidate channel names for a hashed channel index.
      module RainbowTable
        module_function

        # Cache of built rainbow tables keyed by the PSK string.
        @tables = {}

        # Lookup candidate channel names for a hashed channel index.
        #
        # @param index [Integer, nil] channel hash byte.
        # @param psk_b64 [String, nil] base64 PSK or alias.
        # @return [Array<String>] list of candidate names.
        def channel_names_for(index, psk_b64:)
          return [] unless index.is_a?(Integer)

          table_for(psk_b64)[index] || []
        end

        # Build or retrieve the cached rainbow table for the given PSK.
        #
        # @param psk_b64 [String, nil] base64 PSK or alias.
        # @return [Hash{Integer=>Array<String>}] mapping of hash bytes to names.
        def table_for(psk_b64)
          key = psk_b64.to_s
          @tables[key] ||= build_table(psk_b64)
        end

        # Build a hash-to-name mapping for the provided PSK.
        #
        # Uses a plain Hash without a default proc: the previous implementation
        # returned a Hash whose default proc inserted an empty array on every
        # miss, so lookups of unknown indices in +channel_names_for+ silently
        # grew the cached table (and made its `|| []` fallback dead code).
        #
        # @param psk_b64 [String, nil] base64 PSK or alias.
        # @return [Hash{Integer=>Array<String>}] mapping of hash bytes to names.
        def build_table(psk_b64)
          mapping = {}

          ChannelNames::CHANNEL_NAME_CANDIDATES.each do |name|
            hash = ChannelHash.channel_hash(name, psk_b64)
            next unless hash

            (mapping[hash] ||= []) << name
          end

          mapping
        end
      end
    end
  end
end
|
||||
@@ -20,6 +20,7 @@ module PotatoMesh
|
||||
MAX_QUERY_LIMIT = 1000
|
||||
DEFAULT_TELEMETRY_WINDOW_SECONDS = 86_400
|
||||
DEFAULT_TELEMETRY_BUCKET_SECONDS = 300
|
||||
TELEMETRY_ZERO_INVALID_COLUMNS = %w[battery_level voltage].freeze
|
||||
TELEMETRY_AGGREGATE_COLUMNS =
|
||||
%w[
|
||||
battery_level
|
||||
@@ -48,6 +49,9 @@ module PotatoMesh
|
||||
soil_moisture
|
||||
soil_temperature
|
||||
].freeze
|
||||
TELEMETRY_AGGREGATE_SCALERS = {
|
||||
"current" => 0.001,
|
||||
}.freeze
|
||||
|
||||
# Remove nil or empty values from an API response hash to reduce payload size
|
||||
# while preserving legitimate zero-valued measurements.
|
||||
@@ -78,6 +82,19 @@ module PotatoMesh
|
||||
end
|
||||
end
|
||||
|
||||
# Treat zero-valued telemetry measurements that are known to be invalid
|
||||
# (such as battery level or voltage) as missing data so they are omitted
|
||||
# from API responses. Metrics that can legitimately be zero will remain
|
||||
# untouched when routed through this helper.
|
||||
#
|
||||
# @param value [Numeric, nil] telemetry measurement.
|
||||
# @return [Numeric, nil] nil when the value is zero, otherwise the original value.
|
||||
def nil_if_zero(value)
  # A reading of exactly zero is treated as absent data for these metrics;
  # anything else (including nil and non-numeric values) passes through.
  zero_reading = value.respond_to?(:zero?) && value.zero?
  zero_reading ? nil : value
end
|
||||
|
||||
# Normalise a caller-provided limit to a sane, positive integer.
|
||||
#
|
||||
# @param limit [Object] value coerced to an integer.
|
||||
@@ -99,6 +116,17 @@ module PotatoMesh
|
||||
coerced
|
||||
end
|
||||
|
||||
# Normalise a caller-supplied timestamp for API pagination windows.
|
||||
#
|
||||
# @param since [Object] requested lower bound expressed as seconds since the epoch.
|
||||
# @param floor [Integer] minimum allowable timestamp used to clamp the value.
|
||||
# @return [Integer] non-negative timestamp greater than or equal to +floor+.
|
||||
def normalize_since_threshold(since, floor: 0)
  # Coerce the caller-supplied value; anything unparsable or negative
  # degrades to zero before the floor clamp is applied.
  lower_bound = coerce_integer(since)
  lower_bound = 0 if lower_bound.nil? || lower_bound.negative?
  lower_bound > floor ? lower_bound : floor
end
|
||||
|
||||
def node_reference_tokens(node_ref)
|
||||
parts = canonical_node_parts(node_ref)
|
||||
canonical_id, numeric_id = parts ? parts[0, 2] : [nil, nil]
|
||||
@@ -181,12 +209,20 @@ module PotatoMesh
|
||||
["(#{clauses.join(" OR ")})", params]
|
||||
end
|
||||
|
||||
def query_nodes(limit, node_ref: nil)
|
||||
# Fetch node state optionally scoped by identifier and timestamp.
|
||||
#
|
||||
# @param limit [Integer] maximum number of rows to return.
|
||||
# @param node_ref [String, Integer, nil] optional node reference to narrow results.
|
||||
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window for collections.
|
||||
# @return [Array<Hash>] compacted node rows suitable for API responses.
|
||||
def query_nodes(limit, node_ref: nil, since: 0)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
now = Time.now.to_i
|
||||
min_last_heard = now - PotatoMesh::Config.week_seconds
|
||||
since_floor = node_ref ? 0 : min_last_heard
|
||||
since_threshold = normalize_since_threshold(since, floor: since_floor)
|
||||
params = []
|
||||
where_clauses = []
|
||||
|
||||
@@ -197,7 +233,7 @@ module PotatoMesh
|
||||
params.concat(clause.last)
|
||||
else
|
||||
where_clauses << "last_heard >= ?"
|
||||
params << min_last_heard
|
||||
params << since_threshold
|
||||
end
|
||||
|
||||
if private_mode?
|
||||
@@ -225,7 +261,7 @@ module PotatoMesh
|
||||
.map { |value| coerce_integer(value) }
|
||||
.compact
|
||||
.max
|
||||
last_candidate && last_candidate >= min_last_heard
|
||||
last_candidate && last_candidate >= since_threshold
|
||||
end
|
||||
rows.each do |r|
|
||||
r["role"] ||= "CLIENT"
|
||||
@@ -245,6 +281,47 @@ module PotatoMesh
|
||||
db&.close
|
||||
end
|
||||
|
||||
# Fetch ingestor heartbeats with optional freshness filtering.
|
||||
#
|
||||
# @param limit [Integer] maximum number of ingestors to return.
|
||||
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window for collections.
|
||||
# @return [Array<Hash>] compacted ingestor rows suitable for API responses.
|
||||
def query_ingestors(limit, since: 0)
  # Clamp the caller-provided limit to the configured query bounds.
  limit = coerce_query_limit(limit)
  db = open_database(readonly: true)
  db.results_as_hash = true
  now = Time.now.to_i
  # Rolling one-week freshness window; `since` can only narrow it further
  # because normalize_since_threshold clamps to the floor.
  cutoff = now - PotatoMesh::Config.week_seconds
  since_threshold = normalize_since_threshold(since, floor: cutoff)
  sql = <<~SQL
    SELECT node_id, start_time, last_seen_time, version, lora_freq, modem_preset
    FROM ingestors
    WHERE last_seen_time >= ?
    ORDER BY last_seen_time DESC
    LIMIT ?
  SQL

  rows = db.execute(sql, [since_threshold, limit])
  rows.each do |row|
    # results_as_hash also exposes positional integer keys; drop them so the
    # API payload only contains the named columns.
    row.delete_if { |key, _| key.is_a?(Integer) }
    start_time = coerce_integer(row["start_time"])
    last_seen_time = coerce_integer(row["last_seen_time"])
    # Clamp timestamps that claim to be in the future down to "now".
    start_time = now if start_time && start_time > now
    last_seen_time = now if last_seen_time && last_seen_time > now
    # A heartbeat cannot have been seen before it started; repair inverted pairs.
    if start_time && last_seen_time && last_seen_time < start_time
      last_seen_time = start_time
    end
    row["start_time"] = start_time
    row["last_seen_time"] = last_seen_time
    # ISO-8601 companions are only emitted when the integer value survived coercion.
    row["start_time_iso"] = Time.at(start_time).utc.iso8601 if start_time
    row["last_seen_iso"] = Time.at(last_seen_time).utc.iso8601 if last_seen_time
  end

  rows.map { |row| compact_api_row(row) }
ensure
  # Always release the read-only handle, even when the query raises.
  db&.close
end
|
||||
|
||||
# Fetch chat messages with optional filtering.
|
||||
#
|
||||
# @param limit [Integer] maximum number of rows to return.
|
||||
@@ -254,8 +331,7 @@ module PotatoMesh
|
||||
# @return [Array<Hash>] compacted message rows safe for API responses.
|
||||
def query_messages(limit, node_ref: nil, include_encrypted: false, since: 0)
|
||||
limit = coerce_query_limit(limit)
|
||||
since_threshold = coerce_integer(since)
|
||||
since_threshold = 0 if since_threshold.nil? || since_threshold.negative?
|
||||
since_threshold = normalize_since_threshold(since, floor: 0)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
params = []
|
||||
@@ -281,7 +357,7 @@ module PotatoMesh
|
||||
SELECT m.id, m.rx_time, m.rx_iso, m.from_id, m.to_id, m.channel,
|
||||
m.portnum, m.text, m.encrypted, m.rssi, m.hop_limit,
|
||||
m.lora_freq, m.modem_preset, m.channel_name, m.snr,
|
||||
m.reply_id, m.emoji
|
||||
m.reply_id, m.emoji, m.decrypted, m.decryption_confidence
|
||||
FROM messages m
|
||||
SQL
|
||||
sql += " WHERE #{where_clauses.join(" AND ")}\n"
|
||||
@@ -295,6 +371,30 @@ module PotatoMesh
|
||||
r.delete_if { |key, _| key.is_a?(Integer) }
|
||||
r["reply_id"] = coerce_integer(r["reply_id"]) if r.key?("reply_id")
|
||||
r["emoji"] = string_or_nil(r["emoji"]) if r.key?("emoji")
|
||||
if string_or_nil(r["encrypted"])
|
||||
r.delete("portnum")
|
||||
end
|
||||
|
||||
if r.key?("decrypted")
|
||||
decrypted_raw = r["decrypted"]
|
||||
decrypted = case decrypted_raw
|
||||
when true, false
|
||||
decrypted_raw
|
||||
when Integer
|
||||
!decrypted_raw.zero?
|
||||
when String
|
||||
trimmed = decrypted_raw.strip
|
||||
!trimmed.empty? && trimmed != "0" && trimmed.casecmp("false") != 0
|
||||
else
|
||||
!!decrypted_raw
|
||||
end
|
||||
r["decrypted"] = decrypted
|
||||
r.delete("decryption_confidence") unless decrypted
|
||||
end
|
||||
|
||||
if r.key?("decryption_confidence") && !r["decryption_confidence"].nil?
|
||||
r["decryption_confidence"] = r["decryption_confidence"].to_f
|
||||
end
|
||||
if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.strip.empty?)
|
||||
raw = db.execute("SELECT * FROM messages WHERE id = ?", [r["id"]]).first
|
||||
debug_log(
|
||||
@@ -333,7 +433,13 @@ module PotatoMesh
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_positions(limit, node_ref: nil)
|
||||
# Fetch positions optionally scoped by node and timestamp.
|
||||
#
|
||||
# @param limit [Integer] maximum number of rows to return.
|
||||
# @param node_ref [String, Integer, nil] optional node reference to scope results.
|
||||
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
|
||||
# @return [Array<Hash>] compacted position rows suitable for API responses.
|
||||
def query_positions(limit, node_ref: nil, since: 0)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
@@ -341,8 +447,10 @@ module PotatoMesh
|
||||
where_clauses = []
|
||||
now = Time.now.to_i
|
||||
min_rx_time = now - PotatoMesh::Config.week_seconds
|
||||
since_floor = node_ref ? 0 : min_rx_time
|
||||
since_threshold = normalize_since_threshold(since, floor: since_floor)
|
||||
where_clauses << "COALESCE(rx_time, position_time, 0) >= ?"
|
||||
params << min_rx_time
|
||||
params << since_threshold
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
|
||||
@@ -384,7 +492,13 @@ module PotatoMesh
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_neighbors(limit, node_ref: nil)
|
||||
# Fetch neighbor relationships optionally scoped by node and timestamp.
|
||||
#
|
||||
# @param limit [Integer] maximum number of rows to return.
|
||||
# @param node_ref [String, Integer, nil] optional node reference to scope results.
|
||||
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window for collections.
|
||||
# @return [Array<Hash>] compacted neighbor rows suitable for API responses.
|
||||
def query_neighbors(limit, node_ref: nil, since: 0)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
@@ -392,8 +506,10 @@ module PotatoMesh
|
||||
where_clauses = []
|
||||
now = Time.now.to_i
|
||||
min_rx_time = now - PotatoMesh::Config.week_seconds
|
||||
since_floor = node_ref ? 0 : min_rx_time
|
||||
since_threshold = normalize_since_threshold(since, floor: since_floor)
|
||||
where_clauses << "COALESCE(rx_time, 0) >= ?"
|
||||
params << min_rx_time
|
||||
params << since_threshold
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["node_id", "neighbor_id"])
|
||||
@@ -424,7 +540,13 @@ module PotatoMesh
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_telemetry(limit, node_ref: nil)
|
||||
# Fetch telemetry packets optionally scoped by node and timestamp.
|
||||
#
|
||||
# @param limit [Integer] maximum number of rows to return.
|
||||
# @param node_ref [String, Integer, nil] optional node reference to scope results.
|
||||
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window for collections.
|
||||
# @return [Array<Hash>] compacted telemetry rows suitable for API responses.
|
||||
def query_telemetry(limit, node_ref: nil, since: 0)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
@@ -432,8 +554,10 @@ module PotatoMesh
|
||||
where_clauses = []
|
||||
now = Time.now.to_i
|
||||
min_rx_time = now - PotatoMesh::Config.week_seconds
|
||||
since_floor = node_ref ? 0 : min_rx_time
|
||||
since_threshold = normalize_since_threshold(since, floor: since_floor)
|
||||
where_clauses << "COALESCE(rx_time, telemetry_time, 0) >= ?"
|
||||
params << min_rx_time
|
||||
params << since_threshold
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
|
||||
@@ -470,8 +594,8 @@ module PotatoMesh
|
||||
r["rssi"] = coerce_integer(r["rssi"])
|
||||
r["bitfield"] = coerce_integer(r["bitfield"])
|
||||
r["snr"] = coerce_float(r["snr"])
|
||||
r["battery_level"] = coerce_float(r["battery_level"])
|
||||
r["voltage"] = coerce_float(r["voltage"])
|
||||
r["battery_level"] = sanitize_zero_invalid_metric("battery_level", coerce_float(r["battery_level"]))
|
||||
r["voltage"] = sanitize_zero_invalid_metric("voltage", coerce_float(r["voltage"]))
|
||||
r["channel_utilization"] = coerce_float(r["channel_utilization"])
|
||||
r["air_util_tx"] = coerce_float(r["air_util_tx"])
|
||||
r["uptime_seconds"] = coerce_integer(r["uptime_seconds"])
|
||||
@@ -479,7 +603,8 @@ module PotatoMesh
|
||||
r["relative_humidity"] = coerce_float(r["relative_humidity"])
|
||||
r["barometric_pressure"] = coerce_float(r["barometric_pressure"])
|
||||
r["gas_resistance"] = coerce_float(r["gas_resistance"])
|
||||
r["current"] = coerce_float(r["current"])
|
||||
current_ma = coerce_float(r["current"])
|
||||
r["current"] = current_ma.nil? ? nil : current_ma / 1000.0
|
||||
r["iaq"] = coerce_integer(r["iaq"])
|
||||
r["distance"] = coerce_float(r["distance"])
|
||||
r["lux"] = coerce_float(r["lux"])
|
||||
@@ -502,7 +627,13 @@ module PotatoMesh
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_telemetry_buckets(window_seconds:, bucket_seconds:)
|
||||
# Aggregate telemetry metrics into time buckets.
|
||||
#
|
||||
# @param window_seconds [Integer] duration expressed in seconds to include in the query.
|
||||
# @param bucket_seconds [Integer] size of each aggregation bucket in seconds.
|
||||
# @param since [Integer] unix timestamp threshold applied in addition to the requested window.
|
||||
# @return [Array<Hash>] aggregated telemetry metrics grouped by bucket start time.
|
||||
def query_telemetry_buckets(window_seconds:, bucket_seconds:, since: 0)
|
||||
window = coerce_integer(window_seconds) || DEFAULT_TELEMETRY_WINDOW_SECONDS
|
||||
window = DEFAULT_TELEMETRY_WINDOW_SECONDS if window <= 0
|
||||
bucket = coerce_integer(bucket_seconds) || DEFAULT_TELEMETRY_BUCKET_SECONDS
|
||||
@@ -512,6 +643,7 @@ module PotatoMesh
|
||||
db.results_as_hash = true
|
||||
now = Time.now.to_i
|
||||
min_timestamp = now - window
|
||||
since_threshold = normalize_since_threshold(since, floor: min_timestamp)
|
||||
bucket_expression = "((COALESCE(rx_time, telemetry_time) / ?) * ?)"
|
||||
select_clauses = [
|
||||
"#{bucket_expression} AS bucket_start",
|
||||
@@ -521,9 +653,10 @@ module PotatoMesh
|
||||
]
|
||||
|
||||
TELEMETRY_AGGREGATE_COLUMNS.each do |column|
|
||||
select_clauses << "AVG(#{column}) AS #{column}_avg"
|
||||
select_clauses << "MIN(#{column}) AS #{column}_min"
|
||||
select_clauses << "MAX(#{column}) AS #{column}_max"
|
||||
aggregate_source = telemetry_aggregate_source(column)
|
||||
select_clauses << "AVG(#{aggregate_source}) AS #{column}_avg"
|
||||
select_clauses << "MIN(#{aggregate_source}) AS #{column}_min"
|
||||
select_clauses << "MAX(#{aggregate_source}) AS #{column}_max"
|
||||
end
|
||||
|
||||
sql = <<~SQL
|
||||
@@ -536,7 +669,7 @@ module PotatoMesh
|
||||
ORDER BY bucket_start ASC
|
||||
LIMIT ?
|
||||
SQL
|
||||
params = [bucket, bucket, min_timestamp, MAX_QUERY_LIMIT]
|
||||
params = [bucket, bucket, since_threshold, MAX_QUERY_LIMIT]
|
||||
rows = db.execute(sql, params)
|
||||
rows.map do |row|
|
||||
bucket_start = coerce_integer(row["bucket_start"])
|
||||
@@ -549,8 +682,18 @@ module PotatoMesh
|
||||
avg = coerce_float(row["#{column}_avg"])
|
||||
min_value = coerce_float(row["#{column}_min"])
|
||||
max_value = coerce_float(row["#{column}_max"])
|
||||
scale = TELEMETRY_AGGREGATE_SCALERS[column]
|
||||
if scale
|
||||
avg *= scale unless avg.nil?
|
||||
min_value *= scale unless min_value.nil?
|
||||
max_value *= scale unless max_value.nil?
|
||||
end
|
||||
|
||||
metrics = {}
|
||||
avg = sanitize_zero_invalid_metric(column, avg)
|
||||
min_value = sanitize_zero_invalid_metric(column, min_value)
|
||||
max_value = sanitize_zero_invalid_metric(column, max_value)
|
||||
|
||||
metrics["avg"] = avg unless avg.nil?
|
||||
metrics["min"] = min_value unless min_value.nil?
|
||||
metrics["max"] = max_value unless max_value.nil?
|
||||
@@ -578,12 +721,51 @@ module PotatoMesh
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_traces(limit, node_ref: nil)
|
||||
# Normalise telemetry metrics that cannot legitimately be zero so API
|
||||
# consumers do not mistake absent readings for valid measurements. Values
|
||||
# for fields such as battery level and voltage are treated as missing data
|
||||
# when they are zero.
|
||||
#
|
||||
# @param column [String] telemetry metric name.
|
||||
# @param value [Numeric, nil] raw metric value.
|
||||
# @return [Numeric, nil] metric value or nil when zero is invalid.
|
||||
def sanitize_zero_invalid_metric(column, value)
|
||||
return nil_if_zero(value) if TELEMETRY_ZERO_INVALID_COLUMNS.include?(column)
|
||||
|
||||
value
|
||||
end
|
||||
|
||||
# Choose the SQL expression used to aggregate telemetry metrics. Metrics
|
||||
# that cannot legitimately be zero are wrapped in a NULLIF to ensure
|
||||
# invalid zero readings are ignored by aggregate functions such as AVG,
|
||||
# MIN, and MAX, aligning the database semantics with API-level
|
||||
# zero-as-missing handling.
|
||||
#
|
||||
# @param column [String] telemetry metric name.
|
||||
# @return [String] SQL fragment used in aggregate expressions.
|
||||
def telemetry_aggregate_source(column)
|
||||
return "NULLIF(#{column}, 0)" if TELEMETRY_ZERO_INVALID_COLUMNS.include?(column)
|
||||
|
||||
column
|
||||
end
|
||||
|
||||
# Fetch trace records optionally scoped by node and timestamp.
|
||||
#
|
||||
# @param limit [Integer] maximum number of rows to return.
|
||||
# @param node_ref [String, Integer, nil] optional node reference to scope results.
|
||||
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
|
||||
# @return [Array<Hash>] compacted trace rows suitable for API responses.
|
||||
def query_traces(limit, node_ref: nil, since: 0)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
params = []
|
||||
where_clauses = []
|
||||
now = Time.now.to_i
|
||||
min_rx_time = now - PotatoMesh::Config.trace_neighbor_window_seconds
|
||||
since_threshold = normalize_since_threshold(since, floor: min_rx_time)
|
||||
where_clauses << "COALESCE(rx_time, 0) >= ?"
|
||||
params << since_threshold
|
||||
|
||||
if node_ref
|
||||
tokens = node_reference_tokens(node_ref)
|
||||
|
||||
@@ -64,7 +64,7 @@ module PotatoMesh
|
||||
app.get "/api/nodes" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_nodes(limit).to_json
|
||||
query_nodes(limit, since: params["since"]).to_json
|
||||
end
|
||||
|
||||
app.get "/api/nodes/:id" do
|
||||
@@ -72,11 +72,17 @@ module PotatoMesh
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
rows = query_nodes(limit, node_ref: node_ref)
|
||||
rows = query_nodes(limit, node_ref: node_ref, since: params["since"])
|
||||
halt 404, { error: "not found" }.to_json if rows.empty?
|
||||
rows.first.to_json
|
||||
end
|
||||
|
||||
app.get "/api/ingestors" do
|
||||
content_type :json
|
||||
limit = coerce_query_limit(params["limit"])
|
||||
query_ingestors(limit, since: params["since"]).to_json
|
||||
end
|
||||
|
||||
app.get "/api/messages" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
@@ -105,7 +111,7 @@ module PotatoMesh
|
||||
app.get "/api/positions" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_positions(limit).to_json
|
||||
query_positions(limit, since: params["since"]).to_json
|
||||
end
|
||||
|
||||
app.get "/api/positions/:id" do
|
||||
@@ -113,13 +119,13 @@ module PotatoMesh
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_positions(limit, node_ref: node_ref).to_json
|
||||
query_positions(limit, node_ref: node_ref, since: params["since"]).to_json
|
||||
end
|
||||
|
||||
app.get "/api/neighbors" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_neighbors(limit).to_json
|
||||
query_neighbors(limit, since: params["since"]).to_json
|
||||
end
|
||||
|
||||
app.get "/api/neighbors/:id" do
|
||||
@@ -127,13 +133,13 @@ module PotatoMesh
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_neighbors(limit, node_ref: node_ref).to_json
|
||||
query_neighbors(limit, node_ref: node_ref, since: params["since"]).to_json
|
||||
end
|
||||
|
||||
app.get "/api/telemetry" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_telemetry(limit).to_json
|
||||
query_telemetry(limit, since: params["since"]).to_json
|
||||
end
|
||||
|
||||
app.get "/api/telemetry/aggregated" do
|
||||
@@ -164,7 +170,11 @@ module PotatoMesh
|
||||
halt 400, { error: "bucketSeconds too small for requested window" }.to_json
|
||||
end
|
||||
|
||||
query_telemetry_buckets(window_seconds: window_seconds, bucket_seconds: bucket_seconds).to_json
|
||||
query_telemetry_buckets(
|
||||
window_seconds: window_seconds,
|
||||
bucket_seconds: bucket_seconds,
|
||||
since: params["since"],
|
||||
).to_json
|
||||
end
|
||||
|
||||
app.get "/api/telemetry/:id" do
|
||||
@@ -172,13 +182,13 @@ module PotatoMesh
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_telemetry(limit, node_ref: node_ref).to_json
|
||||
query_telemetry(limit, node_ref: node_ref, since: params["since"]).to_json
|
||||
end
|
||||
|
||||
app.get "/api/traces" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_traces(limit).to_json
|
||||
query_traces(limit, since: params["since"]).to_json
|
||||
end
|
||||
|
||||
app.get "/api/traces/:id" do
|
||||
@@ -186,7 +196,7 @@ module PotatoMesh
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_traces(limit, node_ref: node_ref).to_json
|
||||
query_traces(limit, node_ref: node_ref, since: params["since"]).to_json
|
||||
end
|
||||
|
||||
app.get "/api/instances" do
|
||||
|
||||
@@ -65,6 +65,25 @@ module PotatoMesh
|
||||
db&.close
|
||||
end
|
||||
|
||||
app.post "/api/ingestors" do
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
payload = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
unless payload.is_a?(Hash)
|
||||
halt 400, { error: "invalid payload" }.to_json
|
||||
end
|
||||
db = open_database
|
||||
stored = upsert_ingestor(db, payload)
|
||||
halt 400, { error: "invalid payload" }.to_json unless stored
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
app.post "/api/instances" do
|
||||
content_type :json
|
||||
begin
|
||||
@@ -113,6 +132,7 @@ module PotatoMesh
|
||||
raw_private = payload.key?("isPrivate") ? payload["isPrivate"] : payload["is_private"]
|
||||
is_private = coerce_boolean(raw_private)
|
||||
signature = string_or_nil(payload["signature"])
|
||||
contact_link = string_or_nil(payload["contactLink"])
|
||||
|
||||
attributes = {
|
||||
id: id,
|
||||
@@ -126,6 +146,7 @@ module PotatoMesh
|
||||
longitude: longitude,
|
||||
last_update_time: last_update_time,
|
||||
is_private: is_private,
|
||||
contact_link: contact_link,
|
||||
}
|
||||
|
||||
if [attributes[:id], attributes[:domain], attributes[:pubkey], signature, attributes[:last_update_time]].any?(&:nil?)
|
||||
@@ -138,6 +159,10 @@ module PotatoMesh
|
||||
end
|
||||
|
||||
signature_valid = verify_instance_signature(attributes, signature, attributes[:pubkey])
|
||||
if !signature_valid && contact_link
|
||||
stripped_attributes = attributes.merge(contact_link: nil)
|
||||
signature_valid = verify_instance_signature(stripped_attributes, signature, attributes[:pubkey])
|
||||
end
|
||||
# Some remote peers sign payloads using a canonicalised lowercase
|
||||
# domain while still sending a mixed-case domain. Retry signature
|
||||
# verification with the original casing when the first attempt
|
||||
@@ -145,6 +170,10 @@ module PotatoMesh
|
||||
if !signature_valid && raw_domain && normalized_domain && raw_domain.casecmp?(normalized_domain) && raw_domain != normalized_domain
|
||||
alternate_attributes = attributes.merge(domain: raw_domain)
|
||||
signature_valid = verify_instance_signature(alternate_attributes, signature, attributes[:pubkey])
|
||||
if !signature_valid && contact_link
|
||||
stripped_alternate = alternate_attributes.merge(contact_link: nil)
|
||||
signature_valid = verify_instance_signature(stripped_alternate, signature, attributes[:pubkey])
|
||||
end
|
||||
end
|
||||
|
||||
unless signature_valid
|
||||
|
||||
@@ -186,6 +186,11 @@ module PotatoMesh
|
||||
render_root_view(:charts, view_mode: :charts)
|
||||
end
|
||||
|
||||
app.get %r{/federation/?} do
|
||||
halt 404 unless federation_enabled?
|
||||
render_root_view(:federation, view_mode: :federation)
|
||||
end
|
||||
|
||||
app.get "/nodes/:id" do
|
||||
node_ref = params.fetch("id", nil)
|
||||
reference_payload = build_node_detail_reference(node_ref)
|
||||
|
||||
@@ -14,6 +14,8 @@
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "timeout"
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
# WorkerPool executes submitted blocks using a bounded set of Ruby threads.
|
||||
@@ -124,8 +126,9 @@ module PotatoMesh
|
||||
#
|
||||
# @param size [Integer] number of worker threads to spawn.
|
||||
# @param max_queue [Integer, nil] optional upper bound on queued jobs.
|
||||
# @param task_timeout [Numeric, nil] optional per-task execution timeout.
|
||||
# @param name [String] prefix assigned to worker thread names.
|
||||
def initialize(size:, max_queue: nil, name: "worker-pool")
|
||||
def initialize(size:, max_queue: nil, task_timeout: nil, name: "worker-pool")
|
||||
raise ArgumentError, "size must be positive" unless size.is_a?(Integer) && size.positive?
|
||||
|
||||
@name = name
|
||||
@@ -133,6 +136,7 @@ module PotatoMesh
|
||||
@threads = []
|
||||
@stopped = false
|
||||
@mutex = Mutex.new
|
||||
@task_timeout = normalize_task_timeout(task_timeout)
|
||||
spawn_workers(size)
|
||||
end
|
||||
|
||||
@@ -192,23 +196,45 @@ module PotatoMesh
|
||||
worker = Thread.new do
|
||||
Thread.current.name = "#{@name}-#{index}" if Thread.current.respond_to?(:name=)
|
||||
Thread.current.report_on_exception = false if Thread.current.respond_to?(:report_on_exception=)
|
||||
# Daemon threads allow the process to exit even if a job is stuck.
|
||||
Thread.current.daemon = true if Thread.current.respond_to?(:daemon=)
|
||||
|
||||
loop do
|
||||
task, block = @queue.pop
|
||||
break if task.equal?(STOP_SIGNAL)
|
||||
|
||||
begin
|
||||
result = block.call
|
||||
result = if @task_timeout
|
||||
Timeout.timeout(@task_timeout, TaskTimeoutError, "task exceeded timeout") do
|
||||
block.call
|
||||
end
|
||||
else
|
||||
block.call
|
||||
end
|
||||
task.fulfill(result)
|
||||
rescue StandardError => e
|
||||
task.reject(e)
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@threads << worker
|
||||
end
|
||||
end
|
||||
|
||||
# Normalize the per-task timeout into a positive float value.
|
||||
#
|
||||
# @param task_timeout [Numeric, nil] candidate timeout value.
|
||||
# @return [Float, nil] positive timeout in seconds or nil when disabled.
|
||||
def normalize_task_timeout(task_timeout)
|
||||
return nil if task_timeout.nil?
|
||||
|
||||
value = Float(task_timeout)
|
||||
return nil unless value.positive?
|
||||
|
||||
value
|
||||
rescue ArgumentError, TypeError
|
||||
nil
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
@@ -32,6 +32,7 @@ module PotatoMesh
|
||||
DEFAULT_MAP_CENTER = "#{DEFAULT_MAP_CENTER_LAT},#{DEFAULT_MAP_CENTER_LON}"
|
||||
DEFAULT_CHANNEL = "#LongFast"
|
||||
DEFAULT_FREQUENCY = "915MHz"
|
||||
DEFAULT_MESHTASTIC_PSK_B64 = "AQ=="
|
||||
DEFAULT_CONTACT_LINK = "#potatomesh:dod.ngo"
|
||||
DEFAULT_MAX_DISTANCE_KM = 42.0
|
||||
DEFAULT_REMOTE_INSTANCE_CONNECT_TIMEOUT = 15
|
||||
@@ -42,6 +43,7 @@ module PotatoMesh
|
||||
DEFAULT_FEDERATION_WORKER_QUEUE_CAPACITY = 128
|
||||
DEFAULT_FEDERATION_TASK_TIMEOUT_SECONDS = 120
|
||||
DEFAULT_INITIAL_FEDERATION_DELAY_SECONDS = 2
|
||||
DEFAULT_FEDERATION_SEED_DOMAINS = %w[potatomesh.net potatomesh.jmrp.io mesh.qrp.ro].freeze
|
||||
|
||||
# Retrieve the configured API token used for authenticated requests.
|
||||
#
|
||||
@@ -157,6 +159,13 @@ module PotatoMesh
|
||||
7 * 24 * 60 * 60
|
||||
end
|
||||
|
||||
# Rolling retention window in seconds for trace and neighbor API queries.
|
||||
#
|
||||
# @return [Integer] seconds in twenty-eight days.
|
||||
def trace_neighbor_window_seconds
|
||||
28 * 24 * 60 * 60
|
||||
end
|
||||
|
||||
# Default upper bound for accepted JSON payload sizes.
|
||||
#
|
||||
# @return [Integer] byte ceiling for HTTP request bodies.
|
||||
@@ -175,7 +184,7 @@ module PotatoMesh
|
||||
#
|
||||
# @return [String] semantic version identifier.
|
||||
def version_fallback
|
||||
"0.5.6"
|
||||
"0.5.10"
|
||||
end
|
||||
|
||||
# Default refresh interval for frontend polling routines.
|
||||
@@ -409,7 +418,7 @@ module PotatoMesh
|
||||
#
|
||||
# @return [Array<String>] list of default seed domains.
|
||||
def federation_seed_domains
|
||||
["potatomesh.net"].freeze
|
||||
DEFAULT_FEDERATION_SEED_DOMAINS
|
||||
end
|
||||
|
||||
# Determine how often we broadcast federation announcements.
|
||||
@@ -436,6 +445,13 @@ module PotatoMesh
|
||||
fetch_string("SITE_NAME", "PotatoMesh Demo")
|
||||
end
|
||||
|
||||
# Retrieve the configured announcement banner copy.
|
||||
#
|
||||
# @return [String, nil] announcement string when configured.
|
||||
def announcement
|
||||
fetch_string("ANNOUNCEMENT", nil)
|
||||
end
|
||||
|
||||
# Retrieve the default radio channel label.
|
||||
#
|
||||
# @return [String] channel name from configuration.
|
||||
@@ -450,6 +466,13 @@ module PotatoMesh
|
||||
fetch_string("FREQUENCY", DEFAULT_FREQUENCY)
|
||||
end
|
||||
|
||||
# Retrieve the Meshtastic PSK used for decrypting channel messages.
|
||||
#
|
||||
# @return [String] base64-encoded PSK or alias.
|
||||
def meshtastic_psk_b64
|
||||
fetch_string("MESHTASTIC_PSK_B64", DEFAULT_MESHTASTIC_PSK_B64)
|
||||
end
|
||||
|
||||
# Parse the configured map centre coordinates.
|
||||
#
|
||||
# @return [Hash{Symbol=>Float}] latitude and longitude in decimal degrees.
|
||||
|
||||
@@ -199,6 +199,14 @@ module PotatoMesh
|
||||
sanitized_string(Config.site_name)
|
||||
end
|
||||
|
||||
# Retrieve the configured announcement banner copy and normalise blank values to nil.
|
||||
#
|
||||
# @return [String, nil] announcement copy or +nil+ when blank.
|
||||
def sanitized_announcement
|
||||
value = sanitized_string(Config.announcement)
|
||||
value.empty? ? nil : value
|
||||
end
|
||||
|
||||
# Retrieve the configured channel as a cleaned string.
|
||||
#
|
||||
# @return [String] trimmed configuration value.
|
||||
|
||||
Generated
+2
-2
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "potato-mesh",
|
||||
"version": "0.5.6",
|
||||
"version": "0.5.10",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "potato-mesh",
|
||||
"version": "0.5.6",
|
||||
"version": "0.5.10",
|
||||
"devDependencies": {
|
||||
"istanbul-lib-coverage": "^3.2.2",
|
||||
"istanbul-lib-report": "^3.0.1",
|
||||
|
||||
+1
-1
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "potato-mesh",
|
||||
"version": "0.5.6",
|
||||
"version": "0.5.10",
|
||||
"type": "module",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
|
||||
@@ -113,11 +113,9 @@ test('buildChatTabModel returns sorted nodes and channel buckets', () => {
|
||||
assert.deepEqual(secondaryChannel.entries.map(entry => entry.message.id), ['iso-ts', 'recent-alt']);
|
||||
});
|
||||
|
||||
test('buildChatTabModel always includes channel zero bucket', () => {
|
||||
test('buildChatTabModel skips channel buckets when there are no messages', () => {
|
||||
const model = buildChatTabModel({ nodes: [], messages: [], nowSeconds: NOW, windowSeconds: WINDOW });
|
||||
assert.equal(model.channels.length, 1);
|
||||
assert.equal(model.channels[0].index, 0);
|
||||
assert.equal(model.channels[0].entries.length, 0);
|
||||
assert.equal(model.channels.length, 0);
|
||||
});
|
||||
|
||||
test('buildChatTabModel falls back to numeric label when no metadata provided', () => {
|
||||
@@ -168,21 +166,45 @@ test('buildChatTabModel includes telemetry, position, and neighbor events', () =
|
||||
telemetry: [{ node_id: nodeId, rx_time: NOW - 30 }],
|
||||
positions: [{ node_id: nodeId, rx_time: NOW - 20 }],
|
||||
neighbors: [{ node_id: nodeId, neighbor_id: neighborId, rx_time: NOW - 10 }],
|
||||
traces: [{ id: 5_000, src: nodeId, hops: [neighborId], dest: '!charlie', rx_time: NOW - 5 }],
|
||||
messages: [],
|
||||
nowSeconds: NOW,
|
||||
windowSeconds: WINDOW
|
||||
});
|
||||
|
||||
assert.deepEqual(model.logEntries.map(entry => entry.type), [
|
||||
CHAT_LOG_ENTRY_TYPES.NODE_NEW,
|
||||
CHAT_LOG_ENTRY_TYPES.NODE_INFO,
|
||||
CHAT_LOG_ENTRY_TYPES.TELEMETRY,
|
||||
CHAT_LOG_ENTRY_TYPES.POSITION,
|
||||
CHAT_LOG_ENTRY_TYPES.NEIGHBOR
|
||||
]);
|
||||
const types = model.logEntries.map(entry => entry.type);
|
||||
assert.equal(types[0], CHAT_LOG_ENTRY_TYPES.NODE_NEW);
|
||||
assert.ok(types.includes(CHAT_LOG_ENTRY_TYPES.NODE_INFO));
|
||||
assert.ok(types.includes(CHAT_LOG_ENTRY_TYPES.TELEMETRY));
|
||||
assert.ok(types.includes(CHAT_LOG_ENTRY_TYPES.POSITION));
|
||||
assert.ok(types.includes(CHAT_LOG_ENTRY_TYPES.NEIGHBOR));
|
||||
assert.ok(types.includes(CHAT_LOG_ENTRY_TYPES.TRACE));
|
||||
assert.equal(model.logEntries[0].nodeId, nodeId);
|
||||
const lastEntry = model.logEntries[model.logEntries.length - 1];
|
||||
assert.equal(lastEntry.neighborId, neighborId);
|
||||
const neighborEntry = model.logEntries.find(entry => entry.type === CHAT_LOG_ENTRY_TYPES.NEIGHBOR);
|
||||
assert.ok(neighborEntry);
|
||||
assert.equal(neighborEntry.neighborId, neighborId);
|
||||
const traceEntry = model.logEntries.find(entry => entry.type === CHAT_LOG_ENTRY_TYPES.TRACE);
|
||||
assert.ok(traceEntry);
|
||||
assert.deepEqual(traceEntry.traceLabels, [nodeId, neighborId, '!charlie']);
|
||||
});
|
||||
|
||||
test('buildChatTabModel normalises numeric traceroute hops into canonical IDs', () => {
|
||||
const source = 0xabcdef01;
|
||||
const hops = ['0xABCDEF02', '!abcdef03', 123];
|
||||
const dest = 0xabcdef04;
|
||||
const model = buildChatTabModel({
|
||||
nodes: [],
|
||||
traces: [{ rx_time: NOW - 5, src: source, hops, dest }],
|
||||
nowSeconds: NOW,
|
||||
windowSeconds: WINDOW
|
||||
});
|
||||
const traceEntry = model.logEntries.find(entry => entry.type === CHAT_LOG_ENTRY_TYPES.TRACE);
|
||||
assert.ok(traceEntry);
|
||||
assert.equal(traceEntry.nodeId, '!abcdef01');
|
||||
assert.deepEqual(
|
||||
traceEntry.tracePath.map(hop => hop.id),
|
||||
['!abcdef01', '!abcdef02', '!abcdef03', '!0000007b', '!abcdef04']
|
||||
);
|
||||
});
|
||||
|
||||
test('buildChatTabModel merges dedicated encrypted log feed without altering channels', () => {
|
||||
|
||||
@@ -74,6 +74,18 @@ test('chatLogEntryMatchesQuery inspects neighbor node context', () => {
|
||||
assert.equal(chatLogEntryMatchesQuery(entry, query), true);
|
||||
});
|
||||
|
||||
test('chatLogEntryMatchesQuery inspects traceroute hop labels', () => {
|
||||
const entry = {
|
||||
type: CHAT_LOG_ENTRY_TYPES.TRACE,
|
||||
traceLabels: ['!alpha', '!bravo', '!charlie'],
|
||||
tracePath: [{ id: '!alpha' }, { id: '!bravo' }, { id: '!charlie' }]
|
||||
};
|
||||
const query = normaliseChatFilterQuery('bravo');
|
||||
assert.equal(chatLogEntryMatchesQuery(entry, query), true);
|
||||
const missQuery = normaliseChatFilterQuery('delta');
|
||||
assert.equal(chatLogEntryMatchesQuery(entry, missQuery), false);
|
||||
});
|
||||
|
||||
test('filterChatModel filters both log entries and channel messages', () => {
|
||||
const model = {
|
||||
logEntries: [
|
||||
|
||||
@@ -104,6 +104,7 @@ class MockElement {
|
||||
this.style = {};
|
||||
this.textContent = '';
|
||||
this.classList = new MockClassList();
|
||||
this.childNodes = [];
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -129,6 +130,113 @@ class MockElement {
|
||||
getAttribute(name) {
|
||||
return this.attributes.has(name) ? this.attributes.get(name) : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove an attribute from the element.
|
||||
*
|
||||
* @param {string} name Attribute identifier.
|
||||
* @returns {void}
|
||||
*/
|
||||
removeAttribute(name) {
|
||||
this.attributes.delete(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Append a child node to this element.
|
||||
*
|
||||
* @param {Object} node Child node to append.
|
||||
* @returns {Object} Appended node.
|
||||
*/
|
||||
appendChild(node) {
|
||||
this.childNodes.push(node);
|
||||
return node;
|
||||
}
|
||||
|
||||
/**
|
||||
* Replace all existing children with the provided nodes.
|
||||
*
|
||||
* @param {...Object} nodes Child nodes to set on the element.
|
||||
* @returns {void}
|
||||
*/
|
||||
replaceChildren(...nodes) {
|
||||
const expanded = [];
|
||||
nodes.forEach(node => {
|
||||
if (node && node.tagName === 'FRAGMENT' && Array.isArray(node.childNodes)) {
|
||||
expanded.push(...node.childNodes);
|
||||
} else {
|
||||
expanded.push(node);
|
||||
}
|
||||
});
|
||||
this.childNodes = expanded;
|
||||
}
|
||||
|
||||
/**
|
||||
* Serialize the element's children into a naive HTML string for test
|
||||
* assertions. This intentionally covers only the subset of markup produced
|
||||
* in unit tests.
|
||||
*
|
||||
* @returns {string} Serialized HTML content.
|
||||
*/
|
||||
get innerHTML() {
|
||||
return this.childNodes
|
||||
.map(node => {
|
||||
if (typeof node === 'string') return node;
|
||||
if (node && node.tagName) {
|
||||
const attrs = [];
|
||||
if (node.attributes.size) {
|
||||
node.attributes.forEach((value, key) => {
|
||||
attrs.push(`${key}="${value}"`);
|
||||
});
|
||||
}
|
||||
const classAttr = node.classList && node.classList._values && node.classList._values.size
|
||||
? `class="${Array.from(node.classList._values).join(' ')}"`
|
||||
: null;
|
||||
if (classAttr) attrs.push(classAttr);
|
||||
const children = node.innerHTML || '';
|
||||
return `<${node.tagName.toLowerCase()}${attrs.length ? ' ' + attrs.join(' ') : ''}>${children}</${node.tagName.toLowerCase()}>`;
|
||||
}
|
||||
return '';
|
||||
})
|
||||
.join('');
|
||||
}
|
||||
|
||||
/**
|
||||
* Setter to overwrite children from a raw HTML string in tests. This is a
|
||||
* minimal stub and only supports plain text content insertion.
|
||||
*
|
||||
* @param {string} value Raw HTML content.
|
||||
* @returns {void}
|
||||
*/
|
||||
set innerHTML(value) {
|
||||
this.childNodes = [String(value)];
|
||||
}
|
||||
|
||||
/**
|
||||
* Very small querySelectorAll implementation that supports ``.class`` lookups
|
||||
* used in unit tests.
|
||||
*
|
||||
* @param {string} selector CSS selector (class names only).
|
||||
* @returns {Array<MockElement>} Matching child nodes.
|
||||
*/
|
||||
querySelectorAll(selector) {
|
||||
if (!selector || typeof selector !== 'string') return [];
|
||||
const classMatch = selector.match(/^\.(.+)$/);
|
||||
if (!classMatch) return [];
|
||||
const className = classMatch[1];
|
||||
const matches = [];
|
||||
const visit = node => {
|
||||
if (node && node.classList && typeof node.classList.contains === 'function') {
|
||||
if (node.classList.contains(className)) {
|
||||
matches.push(node);
|
||||
}
|
||||
}
|
||||
if (node && Array.isArray(node.childNodes)) {
|
||||
node.childNodes.forEach(child => visit(child));
|
||||
}
|
||||
};
|
||||
visit(this);
|
||||
return matches;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -182,8 +290,9 @@ export function createDomEnvironment(options = {}) {
|
||||
documentListeners.delete(event);
|
||||
},
|
||||
dispatchEvent(event) {
|
||||
const handler = documentListeners.get(event);
|
||||
if (handler) handler();
|
||||
const key = typeof event === 'string' ? event : event?.type;
|
||||
const handler = documentListeners.get(key);
|
||||
if (handler) handler(event);
|
||||
},
|
||||
getElementById(id) {
|
||||
return registry.get(id) || null;
|
||||
@@ -193,6 +302,18 @@ export function createDomEnvironment(options = {}) {
|
||||
},
|
||||
createElement(tagName) {
|
||||
return new MockElement(tagName, registry);
|
||||
},
|
||||
createDocumentFragment() {
|
||||
const fragment = new MockElement('fragment', null);
|
||||
fragment.childNodes = [];
|
||||
fragment.appendChild = node => {
|
||||
fragment.childNodes.push(node);
|
||||
return node;
|
||||
};
|
||||
fragment.replaceChildren = (...nodes) => {
|
||||
fragment.childNodes = [...nodes];
|
||||
};
|
||||
return fragment;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -218,8 +339,9 @@ export function createDomEnvironment(options = {}) {
|
||||
windowListeners.delete(event);
|
||||
},
|
||||
dispatchEvent(event) {
|
||||
const handler = windowListeners.get(event);
|
||||
if (handler) handler();
|
||||
const key = typeof event === 'string' ? event : event?.type;
|
||||
const handler = windowListeners.get(key);
|
||||
if (handler) handler(event);
|
||||
},
|
||||
getComputedStyle(target) {
|
||||
if (typeof computedStyleImpl === 'function') {
|
||||
|
||||
@@ -0,0 +1,54 @@
|
||||
/*
|
||||
* Copyright © 2025-26 l5yth & contributors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { createDomEnvironment } from './dom-environment.js';
|
||||
|
||||
test('dom environment supports class queries and innerHTML setter', () => {
  const env = createDomEnvironment({ includeBody: true });
  const { document, createElement, cleanup } = env;

  // Class-based lookup through querySelectorAll.
  const wrapper = createElement('div');
  const tile = createElement('span');
  tile.classList.add('leaflet-tile');
  tile.setAttribute('data-test', 'ok');
  wrapper.appendChild(tile);

  const found = wrapper.querySelectorAll('.leaflet-tile');
  assert.equal(found.length, 1);
  assert.equal(found[0], tile);

  // innerHTML setter stores the raw markup as text.
  const target = createElement('div');
  target.innerHTML = '<b>hello</b>';
  assert.match(target.innerHTML, /hello/);

  // Attribute and class serialization in the innerHTML getter.
  const fragment = document.createDocumentFragment();
  fragment.replaceChildren(createElement('p'));
  const container = createElement('section');
  const decorated = createElement('span');
  decorated.setAttribute('data-id', '123');
  decorated.classList.add('foo');
  container.appendChild(decorated);
  assert.match(container.innerHTML, /data-id="123"/);
  assert.match(container.innerHTML, /class="foo"/);
  container.replaceChildren(createElement('div')); // cover non-fragment path
  container.childNodes.push({}); // cover empty serialization branch
  assert.ok(container.innerHTML.includes('<div'));

  cleanup();
});
|
||||
@@ -0,0 +1,659 @@
|
||||
/*
|
||||
* Copyright © 2025-26 l5yth & contributors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { createDomEnvironment } from './dom-environment.js';
|
||||
import { initializeFederationPage } from '../federation-page.js';
|
||||
import { roleColors } from '../role-helpers.js';
|
||||
|
||||
// Verifies map initialization (center/zoom), theme-driven tile filtering, and
// table rendering for the federation page, using hand-rolled Leaflet stubs.
test('federation map centers on configured coordinates and follows theme filters', async () => {
  const env = createDomEnvironment({ includeBody: true, bodyHasDarkClass: true });
  const { document, window, createElement, registerElement, cleanup } = env;

  // Page scaffolding the federation module looks up by id.
  const mapEl = createElement('div', 'map');
  registerElement('map', mapEl);
  const mapPanel = createElement('div', 'mapPanel');
  mapPanel.dataset.legendCollapsed = 'true';
  registerElement('mapPanel', mapPanel);
  const statusEl = createElement('div', 'status');
  registerElement('status', statusEl);
  const tableEl = createElement('table', 'instances');
  const tbodyEl = createElement('tbody');
  registerElement('instances', tableEl);

  const configPayload = {
    mapCenter: { lat: 10, lon: 20 },
    mapZoom: 7,
    tileFilters: { light: 'brightness(1)', dark: 'invert(1)' }
  };
  const configEl = createElement('div');
  configEl.setAttribute('data-app-config', JSON.stringify(configPayload));

  document.querySelector = selector => {
    if (selector === '[data-app-config]') return configEl;
    if (selector === '#instances tbody') return tbodyEl;
    return null;
  };

  // Tile layer/map stubs record the calls the page makes so we can assert on them.
  const tileContainer = createElement('div');
  const tilePane = createElement('div');
  const tileImage = createElement('img');
  tileImage.classList.add('leaflet-tile');
  tileContainer.appendChild(tileImage);
  tilePane.appendChild(tileImage);
  const mapSetViewCalls = [];
  const mapFitBoundsCalls = [];
  const circleMarkerCalls = [];
  const tileLayerStub = {
    addTo() {
      return this;
    },
    getContainer() {
      return tileContainer;
    },
    on(event, handler) {
      if (event === 'load') {
        this._onLoad = handler;
      }
    }
  };
  const mapStub = {
    setView(...args) {
      mapSetViewCalls.push(args);
    },
    on() {},
    getPane(name) {
      return name === 'tilePane' ? tilePane : null;
    },
    fitBounds(...args) {
      mapFitBoundsCalls.push(args);
    }
  };
  const leafletStub = {
    map() {
      return mapStub;
    },
    tileLayer() {
      return tileLayerStub;
    },
    layerGroup() {
      return {
        addLayer() {},
        addTo() {
          return this;
        }
      };
    },
    circleMarker(latlng, options) {
      circleMarkerCalls.push({ latlng, options });
      return {
        bindPopup() {
          return this;
        }
      };
    }
  };

  // Two instances: one recent with coordinates and a contact link, one stale
  // without coordinates or contact.
  const fetchImpl = async () => ({
    ok: true,
    json: async () => [
      {
        domain: 'alpha.mesh',
        contactLink: 'https://chat.alpha',
        version: '1.0.0',
        latitude: 10.12345,
        longitude: -20.98765,
        lastUpdateTime: Math.floor(Date.now() / 1000) - 90,
        nodesCount: 12
      },
      {
        domain: 'bravo.mesh',
        contactLink: null,
        version: '2.0.0',
        lastUpdateTime: Math.floor(Date.now() / 1000) - (2 * 86400),
        nodesCount: 2
      }
    ]
  });

  try {
    await initializeFederationPage({ config: configPayload, fetchImpl, leaflet: leafletStub });

    assert.deepEqual(mapSetViewCalls[0], [[10, 20], 7]);
    // Body has the dark class, so the dark tile filter applies everywhere.
    assert.equal(tileContainer.style.filter, 'invert(1)');
    assert.equal(tilePane.style.filter, 'invert(1)');
    assert.equal(tileImage.style.filter, 'invert(1)');

    // Switch to light theme and verify filters follow the themechange event.
    document.body.classList.remove('dark');
    document.documentElement.setAttribute('data-theme', 'light');
    window.dispatchEvent({ type: 'themechange', detail: { theme: 'light' } });
    assert.equal(tileContainer.style.filter, 'brightness(1)');
    assert.equal(tilePane.style.filter, 'brightness(1)');
    assert.equal(tileImage.style.filter, 'brightness(1)');

    // With no explicit theme the page falls back to the dark filter.
    document.documentElement.removeAttribute('data-theme');
    document.body.classList.remove('dark');
    window.dispatchEvent({ type: 'themechange', detail: { theme: null } });
    assert.equal(tileContainer.style.filter, 'invert(1)');

    const rows = tbodyEl.childNodes;
    assert.equal(rows.length, 2);
    const firstRowHtml = rows[0].innerHTML;
    assert.match(firstRowHtml, /alpha\.mesh/);
    assert.match(firstRowHtml, /https:\/\/chat\.alpha/);
    assert.match(firstRowHtml, /10\.12345/);
    assert.match(firstRowHtml, /-20\.98765/);
    assert.match(firstRowHtml, />12</);
    assert.match(firstRowHtml, /ago/);

    const secondRowHtml = rows[1].innerHTML;
    assert.match(secondRowHtml, /bravo\.mesh/);
    assert.match(secondRowHtml, /<em>—<\/em>/); // no contact link
    assert.match(secondRowHtml, /2\.0\.0/);
    assert.match(secondRowHtml, />2</);
    assert.match(secondRowHtml, /d ago/);
    assert.deepEqual(mapFitBoundsCalls[0][0], [[10.12345, -20.98765]]);
    assert.equal(circleMarkerCalls[0].options.fillColor, roleColors.CLIENT_HIDDEN);
  } catch (error) {
    // FIX: label matched the sibling sorting test; name this test's scenario.
    console.error('federation map test error', error);
    throw error;
  } finally {
    cleanup();
  }
});
|
||||
|
||||
// Exercises table sorting (name asc/desc with domain fallback), multi-line
// contact rendering, marker colouring by node count, and legend construction,
// all through hand-rolled Leaflet control/DomUtil stubs.
test('federation table sorting, contact rendering, and legend creation', async () => {
  const env = createDomEnvironment({ includeBody: true, bodyHasDarkClass: false });
  const { document, createElement, registerElement, cleanup } = env;

  // Page scaffolding looked up by id.
  const mapEl = createElement('div', 'map');
  registerElement('map', mapEl);
  const statusEl = createElement('div', 'status');
  registerElement('status', statusEl);

  const tableEl = createElement('table', 'instances');
  const tbodyEl = createElement('tbody');
  registerElement('instances', tableEl);
  tableEl.appendChild(tbodyEl);

  // Sortable column headers; handlers are captured rather than dispatched.
  const headerNameTh = createElement('th');
  const headerName = createElement('span');
  headerName.classList.add('sort-header');
  headerName.dataset.sortKey = 'name';
  headerName.dataset.sortLabel = 'Name';
  headerNameTh.appendChild(headerName);

  const headerDomainTh = createElement('th');
  const headerDomain = createElement('span');
  headerDomain.classList.add('sort-header');
  headerDomain.dataset.sortKey = 'domain';
  headerDomain.dataset.sortLabel = 'Domain';
  headerDomainTh.appendChild(headerDomain);

  const ths = [headerNameTh, headerDomainTh];
  const headers = [headerName, headerDomain];
  const headerHandlers = new Map();
  headers.forEach(header => {
    // Record listeners per header so the test can invoke them directly.
    header.addEventListener = (event, handler) => {
      const existing = headerHandlers.get(header) || {};
      existing[event] = handler;
      headerHandlers.set(header, existing);
    };
    header.closest = () => ths.find(th => th.childNodes.includes(header));
    header.querySelector = selector => {
      if (selector === '.sort-indicator') {
        const span = createElement('span');
        span.classList.add('sort-indicator');
        return span;
      }
      return null;
    };
  });

  tableEl.querySelectorAll = selector => {
    if (selector === 'thead .sort-header[data-sort-key]') return headers;
    if (selector === 'thead th') return ths;
    return [];
  };

  const configPayload = {
    mapCenter: { lat: 0, lon: 0 },
    mapZoom: 3,
    tileFilters: { light: 'none', dark: 'invert(1)' }
  };
  const configEl = createElement('div');
  configEl.setAttribute('data-app-config', JSON.stringify(configPayload));

  document.querySelector = selector => {
    if (selector === '[data-app-config]') return configEl;
    if (selector === '#instances tbody') return tbodyEl;
    return null;
  };

  // Call recorders for map interactions and legend controls.
  const legendContainers = [];
  const mapSetViewCalls = [];
  const mapFitBoundsCalls = [];
  const circleMarkerCalls = [];

  // Minimal L.DomUtil.create stand-in returning plain recording objects.
  const DomUtil = {
    create(tag, className, parent) {
      const el = {
        tagName: tag,
        className,
        children: [],
        style: {},
        textContent: '',
        setAttribute() {},
        appendChild(child) {
          this.children.push(child);
          return child;
        },
      };
      if (parent && parent.appendChild) parent.appendChild(el);
      return el;
    }
  };

  // L.control stand-in: runs onAdd on addTo and keeps the built container.
  const controlStub = () => {
    const ctrl = {
      onAdd: null,
      container: null,
      addTo(map) {
        this.container = this.onAdd ? this.onAdd(map) : null;
        legendContainers.push(this.container);
        return this;
      },
      getContainer() {
        return this.container;
      }
    };
    return ctrl;
  };

  const markersLayer = {
    layers: [],
    addLayer(marker) {
      this.layers.push(marker);
      return marker;
    },
    addTo() {
      return this;
    }
  };

  const mapStub = {
    addedControls: [],
    setView(...args) {
      mapSetViewCalls.push(args);
    },
    on() {},
    fitBounds(...args) {
      mapFitBoundsCalls.push(args);
    },
    addLayer(layer) {
      this.addedControls.push(layer);
      return layer;
    }
  };

  const leafletStub = {
    map() {
      return mapStub;
    },
    tileLayer() {
      return {
        addTo() {
          return this;
        },
        getContainer() {
          return null;
        },
        on() {}
      };
    },
    layerGroup() {
      return markersLayer;
    },
    circleMarker(latlng, options) {
      circleMarkerCalls.push({ latlng, options });
      return {
        bindPopup() {
          return this;
        },
        addTo() {
          return this;
        }
      };
    },
    control: controlStub,
    DomUtil
  };

  // Three instances, deliberately out of order, with varying node counts and
  // contact formats (multi-line URL+matrix, empty, mailto).
  const now = Math.floor(Date.now() / 1000);
  const fetchImpl = async () => ({
    ok: true,
    json: async () => [
      {
        domain: 'c.mesh',
        name: 'Charlie',
        contactLink: 'https://charlie.example\nmatrix:#c:mesh',
        version: '3.0.0',
        latitude: 1,
        longitude: 1,
        lastUpdateTime: now - 10,
        nodesCount: 0
      },
      {
        domain: 'b.mesh',
        contactLink: '',
        version: '2.0.0',
        latitude: 2,
        longitude: 2,
        lastUpdateTime: now - 60,
        nodesCount: 650
      },
      {
        domain: 'a.mesh',
        name: 'Alpha',
        contactLink: 'mailto:alpha@mesh',
        version: '1.0.0',
        latitude: 3,
        longitude: 3,
        lastUpdateTime: now - 30,
        nodesCount: 5
      }
    ]
  });

  try {
    await initializeFederationPage({ config: configPayload, fetchImpl, leaflet: leafletStub });

    // Default order (presumably most-recent first — c, a, b by lastUpdateTime).
    const rows = tbodyEl.childNodes.map(node => String(node.childNodes[0]));
    assert.match(rows[0], /c\.mesh/);
    assert.match(rows[0], /0</);
    assert.match(rows[0], /https:\/\/charlie\.example/);
    assert.match(rows[0], /matrix:#c:mesh/);
    assert.match(rows[1], /a\.mesh/);
    assert.match(rows[2], /b\.mesh/);

    // First click: ascending by name; unnamed b.mesh sorts last.
    const nameHandlers = headerHandlers.get(headerName);
    nameHandlers.click();
    const afterNameSort = tbodyEl.childNodes.map(node => String(node.childNodes[0]));
    assert.match(afterNameSort[0], /a\.mesh/);
    assert.match(afterNameSort[1], /c\.mesh/);
    assert.match(afterNameSort[2], /b\.mesh/);

    // Second click: descending by name, reflected in aria-sort on the <th>.
    nameHandlers.click();
    const descSort = tbodyEl.childNodes.map(node => String(node.childNodes[0]));
    assert.match(descSort[0], /c\.mesh/);
    assert.match(descSort[1], /a\.mesh/);
    assert.match(descSort[2], /b\.mesh/);
    assert.equal(headerName.closest().attributes.get('aria-sort'), 'descending');

    // Marker colours track node count tiers via roleColors.
    assert.equal(circleMarkerCalls[0].options.fillColor, roleColors.CLIENT_HIDDEN);
    assert.equal(circleMarkerCalls[1].options.fillColor, roleColors.REPEATER);

    assert.deepEqual(mapSetViewCalls[0], [[0, 0], 3]);
    assert.equal(mapFitBoundsCalls[0][0].length, 3);

    // Two controls are added: the legend itself and its toggle.
    assert.equal(legendContainers.length, 2);
    const legend = legendContainers.find(container => container.className.includes('legend--instances'));
    assert.ok(legend);
    assert.ok(legend.className.includes('legend-hidden'));
    const legendHeader = legend.children.find(child => child.className === 'legend-header');
    const legendTitle = legendHeader && Array.isArray(legendHeader.children)
      ? legendHeader.children.find(child => child.className === 'legend-title')
      : null;
    assert.ok(legendTitle);
    assert.equal(legendTitle.textContent, 'Active nodes');
    const legendToggle = legendContainers.find(container => container.className.includes('legend-toggle'));
    assert.ok(legendToggle);
  } finally {
    cleanup();
  }
});
|
||||
|
||||
// Verifies the legend toggle button and the desktop/mobile media-query
// listener flip the legend-hidden class as expected.
test('federation legend toggle respects media query changes', async () => {
  const env = createDomEnvironment({ includeBody: true, bodyHasDarkClass: false });
  // FIX: `window` was not destructured although it is assigned below
  // (`window.matchMedia = …`); sibling tests destructure it from env.
  const { document, window, createElement, registerElement, cleanup } = env;

  const mapEl = createElement('div', 'map');
  registerElement('map', mapEl);
  const mapPanel = createElement('div', 'mapPanel');
  mapPanel.setAttribute('data-legend-collapsed', 'false');
  registerElement('mapPanel', mapPanel);
  const statusEl = createElement('div', 'status');
  registerElement('status', statusEl);

  const tableEl = createElement('table', 'instances');
  const tbodyEl = createElement('tbody');
  registerElement('instances', tableEl);
  tableEl.appendChild(tbodyEl);

  const configPayload = {
    mapCenter: { lat: 0, lon: 0 },
    mapZoom: 3,
    tileFilters: { light: 'none', dark: 'invert(1)' }
  };
  const configEl = createElement('div');
  configEl.setAttribute('data-app-config', JSON.stringify(configPayload));

  document.querySelector = selector => {
    if (selector === '[data-app-config]') return configEl;
    if (selector === '#instances tbody') return tbodyEl;
    return null;
  };

  // Capture the media-query listener so the test can drive it manually.
  let mediaQueryHandler = null;
  window.matchMedia = () => ({
    matches: false,
    addListener(handler) {
      mediaQueryHandler = handler;
    }
  });

  const legendContainers = [];
  const legendButtons = [];

  // L.DomUtil.create stand-in with a classList.toggle good enough for the
  // legend show/hide logic; legend toggle buttons are collected on creation.
  const DomUtil = {
    create(tag, className, parent) {
      const classSet = new Set(className ? className.split(/\s+/).filter(Boolean) : []);
      const el = {
        tagName: tag,
        className,
        classList: {
          toggle(name, force) {
            const shouldAdd = typeof force === 'boolean' ? force : !classSet.has(name);
            if (shouldAdd) {
              classSet.add(name);
            } else {
              classSet.delete(name);
            }
            el.className = Array.from(classSet).join(' ');
          }
        },
        children: [],
        style: {},
        textContent: '',
        attributes: new Map(),
        setAttribute(name, value) {
          this.attributes.set(name, String(value));
        },
        appendChild(child) {
          this.children.push(child);
          return child;
        },
        addEventListener(event, handler) {
          if (event === 'click') {
            this._clickHandler = handler;
          }
        },
        querySelector() {
          return null;
        }
      };
      if (parent && parent.appendChild) parent.appendChild(el);
      if (className && className.includes('legend-toggle-button')) {
        legendButtons.push(el);
      }
      return el;
    }
  };

  // L.control stand-in: builds the container via onAdd and records it.
  const controlStub = () => {
    const ctrl = {
      onAdd: null,
      container: null,
      addTo(map) {
        this.container = this.onAdd ? this.onAdd(map) : null;
        legendContainers.push(this.container);
        return this;
      },
      getContainer() {
        return this.container;
      }
    };
    return ctrl;
  };

  const markersLayer = {
    addLayer() {
      return null;
    },
    addTo() {
      return this;
    }
  };

  const leafletStub = {
    map() {
      return {
        setView() {},
        on() {},
        fitBounds() {}
      };
    },
    tileLayer() {
      return {
        addTo() {
          return this;
        },
        getContainer() {
          return null;
        },
        on() {}
      };
    },
    layerGroup() {
      return markersLayer;
    },
    circleMarker() {
      return {
        bindPopup() {
          return this;
        }
      };
    },
    control: controlStub,
    DomUtil,
    DomEvent: {
      disableClickPropagation() {},
      disableScrollPropagation() {}
    }
  };

  // No instances: only the legend behaviour is under test.
  const fetchImpl = async () => ({
    ok: true,
    json: async () => []
  });

  try {
    await initializeFederationPage({ config: configPayload, fetchImpl, leaflet: leafletStub });

    // data-legend-collapsed="false" means the legend starts visible.
    const legend = legendContainers.find(container => container.className.includes('legend--instances'));
    assert.ok(legend);
    assert.ok(!legend.className.includes('legend-hidden'));

    // Clicking the toggle hides the legend.
    assert.equal(legendButtons.length, 1);
    legendButtons[0]._clickHandler?.({ preventDefault() {}, stopPropagation() {} });
    assert.ok(legend.className.includes('legend-hidden'));

    // A wide-viewport media-query event re-shows it.
    if (mediaQueryHandler) {
      mediaQueryHandler({ matches: false });
      assert.ok(!legend.className.includes('legend-hidden'));
    }
  } finally {
    cleanup();
  }
});
|
||||
|
||||
// Ensures a rejecting fetch implementation does not crash page initialization.
test('federation page tolerates fetch failures', async () => {
  const env = createDomEnvironment({ includeBody: true, bodyHasDarkClass: false });
  const { document, createElement, registerElement, cleanup } = env;

  const mapEl = createElement('div', 'map');
  registerElement('map', mapEl);
  const statusEl = createElement('div', 'status');
  registerElement('status', statusEl);
  const tableEl = createElement('table', 'instances');
  const tbodyEl = createElement('tbody');
  registerElement('instances', tableEl);
  const configEl = createElement('div');
  configEl.setAttribute('data-app-config', JSON.stringify({}));
  document.querySelector = selector => {
    if (selector === '[data-app-config]') return configEl;
    if (selector === '#instances tbody') return tbodyEl;
    return null;
  };

  // Inert Leaflet stubs: nothing here records, the test only checks survival.
  const leafletStub = {
    map() {
      return {
        setView() {},
        on() {},
        getPane() {
          return null;
        }
      };
    },
    tileLayer() {
      return {
        addTo() {
          return this;
        },
        getContainer() {
          return null;
        },
        on() {}
      };
    },
    layerGroup() {
      return { addLayer() {}, addTo() { return this; } };
    },
    circleMarker() {
      return { bindPopup() { return this; } };
    }
  };

  const fetchImpl = async () => {
    throw new Error('boom');
  };

  // FIX: run cleanup in a finally so the DOM environment is torn down even if
  // initialization unexpectedly rejects, matching the sibling tests.
  try {
    await initializeFederationPage({ config: {}, fetchImpl, leaflet: leafletStub });
  } finally {
    cleanup();
  }
});
|
||||
@@ -20,7 +20,7 @@ import { createDomEnvironment } from './dom-environment.js';
|
||||
|
||||
import { buildInstanceUrl, initializeInstanceSelector, __test__ } from '../instance-selector.js';
|
||||
|
||||
const { resolveInstanceLabel } = __test__;
|
||||
const { resolveInstanceLabel, updateFederationNavCount } = __test__;
|
||||
|
||||
function setupSelectElement(document) {
|
||||
const select = document.createElement('select');
|
||||
@@ -90,10 +90,29 @@ test('resolveInstanceLabel falls back to the domain when the name is missing', (
|
||||
test('buildInstanceUrl normalises domains into navigable HTTPS URLs', () => {
  // Table of raw input → expected normalised URL (null means rejected).
  const cases = [
    ['mesh.example', 'https://mesh.example'],
    [' https://mesh.example ', 'https://mesh.example'],
    ['https://mesh.example/path?query#fragment', 'https://mesh.example'],
    ['javascript:alert(1)', null],
    ['ftp://mesh.example', null],
    ['mesh.example:8080', 'https://mesh.example:8080'],
    ['mesh.example<script>', null],
    ['', null],
    [null, null],
  ];
  for (const [input, expected] of cases) {
    assert.equal(buildInstanceUrl(input), expected);
  }
});
|
||||
|
||||
test('buildInstanceUrl rejects malformed HTTP URLs safely', () => {
  // Capture console.warn output while feeding unparsable URLs.
  const warnings = [];
  const originalWarn = console.warn;
  console.warn = message => warnings.push(message);

  try {
    for (const candidate of ['http://[::1', 'https://bad host.example']) {
      assert.equal(buildInstanceUrl(candidate), null);
    }
    assert.ok(warnings.length >= 1);
  } finally {
    console.warn = originalWarn;
  }
});
|
||||
|
||||
test('initializeInstanceSelector populates options alphabetically and selects the configured domain', async () => {
|
||||
const env = createDomEnvironment();
|
||||
const select = setupSelectElement(env.document);
|
||||
@@ -172,3 +191,65 @@ test('initializeInstanceSelector navigates to the chosen instance domain', async
|
||||
env.cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
test('initializeInstanceSelector updates federation navigation labels with instance count', async () => {
  const env = createDomEnvironment();
  const select = setupSelectElement(env.document);

  // Navigation link the selector should annotate with the instance count.
  const federationLink = env.document.createElement('a');
  federationLink.classList.add('js-federation-nav');
  federationLink.textContent = 'Federation';
  env.document.body.appendChild(federationLink);

  const instances = [{ domain: 'alpha.mesh' }, { domain: 'beta.mesh' }];
  const fetchImpl = async () => ({
    ok: true,
    async json() {
      return instances;
    }
  });

  try {
    await initializeInstanceSelector({
      selectElement: select,
      fetchImpl,
      windowObject: env.window,
      documentObject: env.document
    });

    assert.equal(federationLink.textContent, 'Federation (2)');
  } finally {
    env.cleanup();
  }
});
|
||||
|
||||
test('updateFederationNavCount prefers stored labels and normalizes counts', () => {
  const env = createDomEnvironment();
  const link = env.document.createElement('a');
  link.classList.add('js-federation-nav');
  link.textContent = 'Federation';
  link.dataset.federationLabel = 'Community';
  env.document.body.appendChild(link);

  try {
    // Negative counts are clamped to zero and the dataset label wins over text.
    updateFederationNavCount({ documentObject: env.document, count: -3 });

    assert.equal(link.textContent, 'Community (0)');
  } finally {
    env.cleanup();
  }
});
|
||||
|
||||
test('updateFederationNavCount falls back to existing link text when no dataset label', () => {
  const env = createDomEnvironment();
  const link = env.document.createElement('a');
  link.classList.add('js-federation-nav');
  link.textContent = 'Federation (9)';
  env.document.body.appendChild(link);

  try {
    // Without a dataset label, the base text is recovered from the old
    // "Label (n)" content and the count is replaced.
    updateFederationNavCount({ documentObject: env.document, count: 4 });

    assert.equal(link.textContent, 'Federation (4)');
  } finally {
    env.cleanup();
  }
});
|
||||
|
||||
@@ -0,0 +1,41 @@
|
||||
/*
|
||||
* Copyright © 2025-26 l5yth & contributors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { resolveLegendVisibility } from '../map-legend-visibility.js';
|
||||
|
||||
// A requested default collapse wins regardless of the media query state.
test('resolveLegendVisibility hides when a default collapse is requested', () => {
  for (const mediaQueryMatches of [false, true]) {
    assert.equal(
      resolveLegendVisibility({ defaultCollapsed: true, mediaQueryMatches }),
      false
    );
  }
});
|
||||
|
||||
// Dashboard and map views suppress the legend even when nothing else does.
test('resolveLegendVisibility hides for dashboard and map views', () => {
  for (const viewMode of ['dashboard', 'map']) {
    const visible = resolveLegendVisibility({
      defaultCollapsed: false,
      mediaQueryMatches: false,
      viewMode
    });
    assert.equal(visible, false);
  }
});
|
||||
|
||||
// With no overrides the legend visibility is the inverse of the
// (mobile) media query match.
test('resolveLegendVisibility follows the media query when not forced', () => {
  const visibleOnDesktop = resolveLegendVisibility({ defaultCollapsed: false, mediaQueryMatches: false });
  assert.equal(visibleOnDesktop, true);
  const visibleOnMobile = resolveLegendVisibility({ defaultCollapsed: false, mediaQueryMatches: true });
  assert.equal(visibleOnMobile, false);
});
|
||||
@@ -0,0 +1,455 @@
|
||||
/*
|
||||
* Copyright © 2025-26 l5yth & contributors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { __test__, initializeMobileMenu } from '../mobile-menu.js';
|
||||
|
||||
const { createMobileMenuController, resolveFocusableElements } = __test__;
|
||||
|
||||
/**
 * Minimal DOMTokenList stand-in backed by a Set.
 *
 * @returns {{add: Function, remove: Function, contains: Function}}
 *   Object mirroring the classList API subset the tests exercise.
 */
function createClassList() {
  const tokens = new Set();
  const add = (...names) => {
    for (const name of names) tokens.add(name);
  };
  const remove = (...names) => {
    for (const name of names) tokens.delete(name);
  };
  const contains = name => tokens.has(name);
  return { add, remove, contains };
}
|
||||
|
||||
/**
 * Build a lightweight element stub recording attributes and listeners.
 *
 * Only the API surface the mobile-menu controller touches is implemented;
 * query methods default to empty results and are overridden per test.
 *
 * @param {string} tagName Tag name, upper-cased like real DOM nodes.
 * @param {string} initialId Optional id attribute to pre-populate.
 * @returns {Object} Element-like stub.
 */
function createElement(tagName = 'div', initialId = '') {
  const eventHandlers = new Map();
  const attributeStore = new Map();
  if (initialId) {
    attributeStore.set('id', String(initialId));
  }
  return {
    tagName: tagName.toUpperCase(),
    attributes: attributeStore,
    classList: createClassList(),
    dataset: {},
    hidden: false,
    parentNode: null,
    nextSibling: null,
    setAttribute(name, value) {
      attributeStore.set(name, String(value));
    },
    getAttribute(name) {
      if (!attributeStore.has(name)) return null;
      return attributeStore.get(name);
    },
    addEventListener(eventName, handler) {
      // Single handler per event type is enough for these tests.
      eventHandlers.set(eventName, handler);
    },
    dispatchEvent(event) {
      const eventName = typeof event === 'string' ? event : event?.type;
      const handler = eventHandlers.get(eventName);
      handler?.(event);
    },
    appendChild(node) {
      this.lastAppended = node;
      return node;
    },
    insertBefore(node, nextSibling) {
      this.lastInserted = { node, nextSibling };
      return node;
    },
    focus() {
      // Mirror focus onto the shared document stub for assertions.
      globalThis.document.activeElement = this;
    },
    querySelector() {
      return null;
    },
    querySelectorAll() {
      return [];
    }
  };
}
|
||||
|
||||
/**
 * Install a minimal document stub on globalThis.
 *
 * @returns {{documentStub: Object, registry: Map, cleanup: Function}}
 *   The stub, an id→element registry feeding getElementById, and a
 *   cleanup callback restoring the previous global document.
 */
function createDomStub() {
  const previousDocument = globalThis.document;
  const elementsById = new Map();
  const documentStub = {
    body: createElement('body'),
    activeElement: null,
    querySelectorAll: () => [],
    getElementById: id => elementsById.get(id) || null
  };
  globalThis.document = documentStub;
  return {
    documentStub,
    registry: elementsById,
    cleanup() {
      globalThis.document = previousDocument;
    }
  };
}
|
||||
|
||||
/**
 * Window stub whose matchMedia supports the modern addEventListener API.
 *
 * @param {boolean} matches Value every media query stub reports.
 * @returns {Object} Window-like stub with dispatch helpers.
 */
function createWindowStub(matches = true) {
  const windowHandlers = new Map();
  const mediaHandlers = new Map();
  return {
    matchMedia: () => ({
      matches,
      addEventListener: (eventName, handler) => {
        mediaHandlers.set(eventName, handler);
      }
    }),
    addEventListener(eventName, handler) {
      windowHandlers.set(eventName, handler);
    },
    dispatchEvent(event) {
      const eventName = typeof event === 'string' ? event : event?.type;
      const handler = windowHandlers.get(eventName);
      if (handler) handler(event);
    },
    // Fire the registered media-query 'change' handler, if any.
    dispatchMediaChange() {
      const handler = mediaHandlers.get('change');
      if (handler) handler();
    }
  };
}
|
||||
|
||||
/**
 * Window stub whose matchMedia only offers the deprecated addListener API,
 * exercising the controller's legacy media-query fallback path.
 *
 * @param {boolean} matches Value every media query stub reports.
 * @returns {Object} Window-like stub with a media-change dispatcher.
 */
function createWindowStubWithListener(matches = true) {
  const windowHandlers = new Map();
  let legacyMediaHandler = null;
  return {
    matchMedia: () => ({
      matches,
      addListener(handler) {
        legacyMediaHandler = handler;
      }
    }),
    addEventListener(eventName, handler) {
      windowHandlers.set(eventName, handler);
    },
    dispatchMediaChange() {
      legacyMediaHandler?.();
    }
  };
}
|
||||
|
||||
// End-to-end toggle flow: open via the toggle, close via a nav link or
// a dedicated close button, with aria-expanded kept in sync.
test('mobile menu toggles open state and aria-expanded', () => {
  const { documentStub, registry, cleanup } = createDomStub();
  const windowStub = createWindowStub(true);

  const toggle = createElement('button');
  const menuRoot = createElement('div');
  const panel = createElement('div');
  const dismissButton = createElement('button');
  const link = createElement('a');

  menuRoot.hidden = true;
  panel.classList.add('mobile-menu__panel');

  menuRoot.querySelector = selector =>
    selector === '.mobile-menu__panel' ? panel : null;
  menuRoot.querySelectorAll = selector => {
    if (selector === '[data-mobile-menu-close]') return [dismissButton];
    if (selector === 'a') return [link];
    return [];
  };
  panel.querySelectorAll = () => [dismissButton, link];

  registry.set('mobileMenuToggle', toggle);
  registry.set('mobileMenu', menuRoot);

  try {
    const controller = createMobileMenuController({
      documentObject: documentStub,
      windowObject: windowStub
    });

    controller.initialize();
    windowStub.dispatchMediaChange();

    // Opening reveals the menu, flags expansion, and marks the body.
    toggle.dispatchEvent({ type: 'click', preventDefault() {} });
    assert.equal(menuRoot.hidden, false);
    assert.equal(toggle.getAttribute('aria-expanded'), 'true');
    assert.equal(documentStub.body.classList.contains('menu-open'), true);

    // Navigation links close the menu again.
    link.dispatchEvent({ type: 'click' });
    assert.equal(menuRoot.hidden, true);

    // Close buttons keep it shut and reset aria-expanded.
    dismissButton.dispatchEvent({ type: 'click' });
    assert.equal(menuRoot.hidden, true);
    assert.equal(toggle.getAttribute('aria-expanded'), 'false');
  } finally {
    cleanup();
  }
});
|
||||
|
||||
// Escape, hashchange, and popstate must all dismiss an open menu while
// unrelated keys leave it open.
test('mobile menu closes on escape and route changes', () => {
  const { documentStub, registry, cleanup } = createDomStub();
  const windowStub = createWindowStub(true);

  const toggle = createElement('button');
  const menuRoot = createElement('div');
  const panel = createElement('div');
  const dismissButton = createElement('button');

  menuRoot.hidden = true;
  panel.classList.add('mobile-menu__panel');

  menuRoot.querySelector = selector =>
    selector === '.mobile-menu__panel' ? panel : null;
  menuRoot.querySelectorAll = selector =>
    selector === '[data-mobile-menu-close]' ? [dismissButton] : [];
  panel.querySelectorAll = () => [dismissButton];

  registry.set('mobileMenuToggle', toggle);
  registry.set('mobileMenu', menuRoot);

  try {
    const controller = createMobileMenuController({
      documentObject: documentStub,
      windowObject: windowStub
    });
    controller.initialize();

    // Escape while already closed stays closed.
    panel.dispatchEvent({ type: 'keydown', key: 'Escape', preventDefault() {} });
    assert.equal(menuRoot.hidden, true);

    toggle.dispatchEvent({ type: 'click', preventDefault() {} });
    assert.equal(menuRoot.hidden, false);

    // Non-escape keys leave the menu open.
    panel.dispatchEvent({ type: 'keydown', key: 'ArrowDown' });
    assert.equal(menuRoot.hidden, false);

    panel.dispatchEvent({ type: 'keydown', key: 'Escape', preventDefault() {} });
    assert.equal(menuRoot.hidden, true);

    // Route changes also dismiss the menu.
    toggle.dispatchEvent({ type: 'click', preventDefault() {} });
    windowStub.dispatchEvent({ type: 'hashchange' });
    assert.equal(menuRoot.hidden, true);

    toggle.dispatchEvent({ type: 'click', preventDefault() {} });
    windowStub.dispatchEvent({ type: 'popstate' });
    assert.equal(menuRoot.hidden, true);
  } finally {
    cleanup();
  }
});
|
||||
|
||||
// Tab at either end of the panel's focusable list must wrap around.
test('mobile menu traps focus within the panel', () => {
  const { documentStub, registry, cleanup } = createDomStub();
  const windowStub = createWindowStub(true);

  const toggle = createElement('button');
  const menuRoot = createElement('div');
  const panel = createElement('div');
  const firstFocusable = createElement('a');
  const lastFocusable = createElement('button');

  panel.classList.add('mobile-menu__panel');
  panel.querySelectorAll = () => [firstFocusable, lastFocusable];
  menuRoot.querySelector = selector =>
    selector === '.mobile-menu__panel' ? panel : null;
  menuRoot.querySelectorAll = () => [];

  registry.set('mobileMenuToggle', toggle);
  registry.set('mobileMenu', menuRoot);

  try {
    const controller = createMobileMenuController({
      documentObject: documentStub,
      windowObject: windowStub
    });
    controller.initialize();
    toggle.dispatchEvent({ type: 'click', preventDefault() {} });

    // Tab on the last focusable wraps forwards to the first.
    documentStub.activeElement = lastFocusable;
    panel.dispatchEvent({ type: 'keydown', key: 'Tab', preventDefault() {}, shiftKey: false });
    assert.equal(documentStub.activeElement, firstFocusable);

    // Shift+Tab on the first wraps backwards to the last.
    documentStub.activeElement = firstFocusable;
    panel.dispatchEvent({ type: 'keydown', key: 'Tab', preventDefault() {}, shiftKey: true });
    assert.equal(documentStub.activeElement, lastFocusable);
  } finally {
    cleanup();
  }
});
|
||||
|
||||
// aria-hidden nodes and nodes lacking a getAttribute method are dropped.
test('resolveFocusableElements filters out aria-hidden nodes', () => {
  const hiddenButton = createElement('button');
  hiddenButton.getAttribute = name => (name === 'aria-hidden' ? 'true' : null);
  const visibleLink = createElement('a');
  const plainNode = { tagName: 'DIV' };
  const container = {
    querySelectorAll: () => [hiddenButton, plainNode, visibleLink]
  };

  const focusable = resolveFocusableElements(container);
  assert.equal(focusable.length, 1);
  assert.equal(focusable[0], visibleLink);
});
|
||||
|
||||
// Null containers and containers without querySelectorAll yield [].
test('resolveFocusableElements handles empty containers', () => {
  for (const container of [null, {}]) {
    assert.deepEqual(resolveFocusableElements(container), []);
  }
});
|
||||
|
||||
// With no focusable children the panel itself takes focus on open, and
// the previously focused element is restored on close.
test('mobile menu focuses the panel when no focusables exist', () => {
  const { documentStub, registry, cleanup } = createDomStub();
  const windowStub = createWindowStub(true);

  const toggle = createElement('button');
  const menuRoot = createElement('div');
  const panel = createElement('div');
  const previouslyFocused = createElement('button');

  panel.classList.add('mobile-menu__panel');
  panel.querySelectorAll = () => [];
  menuRoot.querySelector = selector =>
    selector === '.mobile-menu__panel' ? panel : null;
  menuRoot.querySelectorAll = () => [];

  registry.set('mobileMenuToggle', toggle);
  registry.set('mobileMenu', menuRoot);
  documentStub.activeElement = previouslyFocused;

  try {
    const controller = createMobileMenuController({
      documentObject: documentStub,
      windowObject: windowStub
    });
    controller.initialize();

    toggle.dispatchEvent({ type: 'click', preventDefault() {} });
    assert.equal(documentStub.activeElement, panel);

    toggle.dispatchEvent({ type: 'click', preventDefault() {} });
    assert.equal(documentStub.activeElement, previouslyFocused);
  } finally {
    cleanup();
  }
});
|
||||
|
||||
// Browsers exposing only matchMedia().addListener must still drive
// layout syncs through the controller.
test('mobile menu registers legacy media query listeners', () => {
  const { documentStub, registry, cleanup } = createDomStub();
  const windowStub = createWindowStubWithListener(true);

  const toggle = createElement('button');
  const menuRoot = createElement('div');
  const panel = createElement('div');

  panel.classList.add('mobile-menu__panel');
  menuRoot.querySelector = selector =>
    selector === '.mobile-menu__panel' ? panel : null;
  menuRoot.querySelectorAll = () => [];

  registry.set('mobileMenuToggle', toggle);
  registry.set('mobileMenu', menuRoot);

  try {
    const controller = createMobileMenuController({
      documentObject: documentStub,
      windowObject: windowStub
    });
    controller.initialize();

    windowStub.dispatchMediaChange();
    assert.equal(toggle.getAttribute('aria-expanded'), 'false');
  } finally {
    cleanup();
  }
});
|
||||
|
||||
// When the toggle/menu nodes are absent every public method is a no-op
// instead of throwing.
test('mobile menu safely no-ops without required nodes', () => {
  const { documentStub, cleanup } = createDomStub();
  const windowStub = createWindowStub(true);

  try {
    const controller = createMobileMenuController({
      documentObject: documentStub,
      windowObject: windowStub
    });

    controller.initialize();
    controller.openMenu();
    controller.closeMenu();
    controller.syncLayout();
    assert.equal(documentStub.body.classList.contains('menu-open'), false);
  } finally {
    cleanup();
  }
});
|
||||
|
||||
// The public entry point should hand back a controller object.
test('initializeMobileMenu returns a controller', () => {
  const { documentStub, registry, cleanup } = createDomStub();
  const windowStub = createWindowStub(true);

  const toggle = createElement('button');
  const menuRoot = createElement('div');
  const panel = createElement('div');

  panel.classList.add('mobile-menu__panel');
  menuRoot.querySelector = selector =>
    selector === '.mobile-menu__panel' ? panel : null;
  menuRoot.querySelectorAll = () => [];

  registry.set('mobileMenuToggle', toggle);
  registry.set('mobileMenu', menuRoot);

  try {
    const controller = initializeMobileMenu({
      documentObject: documentStub,
      windowObject: windowStub
    });
    assert.equal(typeof controller.openMenu, 'function');
  } finally {
    cleanup();
  }
});
|
||||
@@ -405,6 +405,77 @@ test('renderTelemetryCharts renders condensed scatter charts when telemetry exis
|
||||
assert.equal(html.includes('node-detail__chart-point'), true);
|
||||
});
|
||||
|
||||
// Metrics past the default chart maxima should stretch the axis labels.
test('renderTelemetryCharts expands upper bounds when overflow metrics exceed defaults', () => {
  const nowMs = Date.UTC(2025, 0, 8, 12, 0, 0);
  const sampleTime = Math.floor(nowMs / 1000) - 120;
  const snapshot = {
    rx_time: sampleTime,
    device_metrics: {
      battery_level: 90,
      voltage: 7.2,
      current: 3.6,
      channel_utilization: 45,
      air_util_tx: 18,
    },
    environment_metrics: {
      temperature: 45,
      relative_humidity: 48,
      barometric_pressure: 1250,
      gas_resistance: 1200,
      iaq: 650,
    },
  };
  const node = { rawSources: { telemetry: { snapshots: [snapshot] } } };

  const html = renderTelemetryCharts(node, { nowMs });
  // Axis labels grow to cover the out-of-range values.
  assert.match(html, />7\.2<\/text>/);
  assert.match(html, />3\.6<\/text>/);
  assert.match(html, />45<\/text>/);
  assert.match(html, />650<\/text>/);
  assert.match(html, />1100<\/text>/);
});
|
||||
|
||||
// In-range metrics must not disturb the stock axis maxima.
test('renderTelemetryCharts keeps default bounds when metrics stay within limits', () => {
  const nowMs = Date.UTC(2025, 0, 8, 12, 0, 0);
  const sampleTime = Math.floor(nowMs / 1000) - 180;
  const snapshot = {
    rx_time: sampleTime,
    device_metrics: {
      battery_level: 70,
      voltage: 4.5,
      current: 1.5,
      channel_utilization: 35,
      air_util_tx: 15,
    },
    environment_metrics: {
      temperature: 25,
      relative_humidity: 50,
      barometric_pressure: 1015,
      gas_resistance: 1500,
      iaq: 200,
    },
  };
  const node = { rawSources: { telemetry: { snapshots: [snapshot] } } };

  const html = renderTelemetryCharts(node, { nowMs });
  assert.match(html, />6\.0<\/text>/);
  assert.match(html, />3\.0<\/text>/);
  assert.match(html, />40<\/text>/);
  assert.match(html, />500<\/text>/);
});
|
||||
|
||||
test('renderNodeDetailHtml composes the table, neighbors, and messages', () => {
|
||||
const html = renderNodeDetailHtml(
|
||||
{
|
||||
@@ -875,13 +946,19 @@ test('initializeNodeDetailPage reports an error when refresh fails', async () =>
|
||||
throw new Error('boom');
|
||||
};
|
||||
const renderShortHtml = short => `<span>${short}</span>`;
|
||||
const result = await initializeNodeDetailPage({
|
||||
document: documentStub,
|
||||
refreshImpl,
|
||||
renderShortHtml,
|
||||
});
|
||||
assert.equal(result, false);
|
||||
assert.equal(element.innerHTML.includes('Failed to load'), true);
|
||||
const originalError = console.error;
|
||||
console.error = () => {};
|
||||
try {
|
||||
const result = await initializeNodeDetailPage({
|
||||
document: documentStub,
|
||||
refreshImpl,
|
||||
renderShortHtml,
|
||||
});
|
||||
assert.equal(result, false);
|
||||
assert.equal(element.innerHTML.includes('Failed to load'), true);
|
||||
} finally {
|
||||
console.error = originalError;
|
||||
}
|
||||
});
|
||||
|
||||
test('initializeNodeDetailPage handles missing reference payloads', async () => {
|
||||
|
||||
@@ -62,6 +62,16 @@ test('normalizeNodeCollection applies canonical forms to all nodes', () => {
|
||||
assert.equal(nodes[1].air_util_tx, 5.5);
|
||||
});
|
||||
|
||||
// Role 12 — whether a string or a number — maps to CLIENT_BASE.
test('normalizeNodeSnapshot maps numeric roles to canonical identifiers', () => {
  const stringRoleNode = { role: '12', node_id: '!role' };
  const numericRoleNode = { role: 12, nodeId: '!number-role' };

  normalizeNodeCollection([stringRoleNode, numericRoleNode]);

  assert.equal(stringRoleNode.role, 'CLIENT_BASE');
  assert.equal(numericRoleNode.role, 'CLIENT_BASE');
});
|
||||
|
||||
test('normaliser helpers coerce primitive values consistently', () => {
|
||||
assert.equal(normalizeNumber('42.1'), 42.1);
|
||||
assert.equal(normalizeNumber('not-a-number'), null);
|
||||
|
||||
@@ -19,8 +19,13 @@ import assert from 'node:assert/strict';
|
||||
|
||||
import { buildTraceSegments, __testUtils } from '../trace-paths.js';
|
||||
|
||||
// Expose the trace-path helpers under test via the module's __testUtils hook.
// NOTE: a single destructure — the rendered diff previously left both the old
// and the new `const` statements in place, which redeclared
// coerceFiniteNumber/findNode/resolveNodeCoordinates/buildNodeIndex and was a
// SyntaxError (duplicate lexical bindings).
const {
  coerceFiniteNumber,
  findNode,
  resolveNodeCoordinates,
  canonicalNodeIdFromNumeric,
  buildNodeIndex
} = __testUtils;
|
||||
|
||||
test('buildTraceSegments connects source, hops, and destination when coordinates exist', () => {
|
||||
const traces = [
|
||||
@@ -43,6 +48,29 @@ test('buildTraceSegments connects source, hops, and destination when coordinates
|
||||
assert.equal(segments[0].color, 'color:ROUTER');
|
||||
assert.equal(segments[1].color, 'color:CLIENT');
|
||||
assert.equal(segments[0].rxTime, 1700);
|
||||
assert.deepEqual(
|
||||
segments[0].pathNodes.map(node => node.node_id),
|
||||
['2658361180', '19088743', '4242424242']
|
||||
);
|
||||
});
|
||||
|
||||
// Numeric src/hop/dest references must resolve through canonical !hex ids.
test('buildTraceSegments links traces to canonical node IDs when numeric references are provided', () => {
  const traces = [
    { id: 9_010, src: 0xbead_f00d, hops: [0xcafe_babe], dest: 0xfeed_c0de, rx_time: 1900 },
  ];
  const nodes = [
    { node_id: '!beadf00d', latitude: 0, longitude: 0, role: 'ROUTER' },
    { node_id: '!cafebabe', latitude: 1, longitude: 1, role: 'CLIENT' },
    { node_id: '!feedc0de', latitude: 2, longitude: 2, role: 'CLIENT' },
  ];

  const segments = buildTraceSegments(traces, nodes, { colorForNode: () => '#abcdef' });

  assert.equal(segments.length, 2);
  assert.deepEqual(segments.map(segment => segment.latlngs), [
    [[0, 0], [1, 1]],
    [[1, 1], [2, 2]],
  ]);
  for (const segment of segments) {
    assert.equal(segment.color, '#abcdef');
  }
});
|
||||
|
||||
test('buildTraceSegments drops paths through hops without locations', () => {
|
||||
@@ -98,13 +126,24 @@ test('helper utilities coerce values and locate nodes', () => {
|
||||
assert.equal(coerceFiniteNumber(null), null);
|
||||
assert.equal(coerceFiniteNumber(' '), null);
|
||||
assert.equal(coerceFiniteNumber('7'), 7);
|
||||
assert.equal(coerceFiniteNumber('!beadf00d'), 0xbeadf00d);
|
||||
assert.equal(coerceFiniteNumber('0x10'), 16);
|
||||
|
||||
const byId = new Map([['!id', { node_id: '!id', latitude: 1, longitude: 2 }]]);
|
||||
const byNum = new Map([[99, { node_id: '!other', latitude: 0, longitude: 0 }]]);
|
||||
const byId = new Map([
|
||||
['!id', { node_id: '!id', latitude: 1, longitude: 2 }],
|
||||
['!beadf00d', { node_id: '!beadf00d', latitude: 3, longitude: 4 }]
|
||||
]);
|
||||
const byNum = new Map([
|
||||
[99, { node_id: '!other', latitude: 0, longitude: 0 }],
|
||||
[0xbeadf00d, { node_id: '!beadf00d', latitude: 3, longitude: 4 }]
|
||||
]);
|
||||
assert.equal(findNode(byId, byNum, '!id').node_id, '!id');
|
||||
assert.equal(findNode(byId, byNum, 99).node_id, '!other');
|
||||
assert.equal(findNode(byId, new Map(), 0xbeadf00d).node_id, '!beadf00d');
|
||||
assert.equal(findNode(byId, byNum, 100), null);
|
||||
|
||||
assert.equal(canonicalNodeIdFromNumeric(0xbeadf00d), '!beadf00d');
|
||||
|
||||
const coords = resolveNodeCoordinates({ latitude: 5, longitude: 6, distance_km: 10 }, { limitDistance: true, maxDistanceKm: 15 });
|
||||
assert.deepEqual(coords, [5, 6]);
|
||||
const outOfRange = resolveNodeCoordinates({ latitude: 0, longitude: 0, distance_km: 20 }, { limitDistance: true, maxDistanceKm: 15 });
|
||||
|
||||
@@ -30,7 +30,8 @@ export const MAX_CHANNEL_INDEX = 9;
|
||||
* NODE_INFO: 'node-info',
|
||||
* TELEMETRY: 'telemetry',
|
||||
* POSITION: 'position',
|
||||
* NEIGHBOR: 'neighbor'
|
||||
* NEIGHBOR: 'neighbor',
|
||||
* TRACE: 'trace'
|
||||
* }}
|
||||
*/
|
||||
export const CHAT_LOG_ENTRY_TYPES = Object.freeze({
|
||||
@@ -39,6 +40,7 @@ export const CHAT_LOG_ENTRY_TYPES = Object.freeze({
|
||||
TELEMETRY: 'telemetry',
|
||||
POSITION: 'position',
|
||||
NEIGHBOR: 'neighbor',
|
||||
TRACE: 'trace',
|
||||
MESSAGE_ENCRYPTED: 'message-encrypted'
|
||||
});
|
||||
|
||||
@@ -63,13 +65,15 @@ function resolveSnapshotList(entry) {
|
||||
* Build a data model describing the content for chat tabs.
|
||||
*
|
||||
* Entries outside the recent activity window, encrypted messages, and
|
||||
* channels above {@link MAX_CHANNEL_INDEX} are filtered out.
|
||||
* channels above {@link MAX_CHANNEL_INDEX} are filtered out. Channel
|
||||
* buckets are only created when messages are present for that channel.
|
||||
*
|
||||
* @param {{
|
||||
* nodes?: Array<Object>,
|
||||
* telemetry?: Array<Object>,
|
||||
* positions?: Array<Object>,
|
||||
* neighbors?: Array<Object>,
|
||||
* traces?: Array<Object>,
|
||||
* messages?: Array<Object>,
|
||||
* logOnlyMessages?: Array<Object>,
|
||||
* nowSeconds: number,
|
||||
@@ -87,6 +91,7 @@ export function buildChatTabModel({
|
||||
telemetry = [],
|
||||
positions = [],
|
||||
neighbors = [],
|
||||
traces = [],
|
||||
messages = [],
|
||||
logOnlyMessages = [],
|
||||
nowSeconds,
|
||||
@@ -98,11 +103,29 @@ export function buildChatTabModel({
|
||||
const logEntries = [];
|
||||
const channelBuckets = new Map();
|
||||
const primaryChannelEnvLabel = normalisePrimaryChannelEnvLabel(primaryChannelFallbackLabel);
|
||||
const nodeById = new Map();
|
||||
const nodeByNum = new Map();
|
||||
const nodeInfoKeys = new Set();
|
||||
|
||||
const buildNodeInfoKey = (nodeId, nodeNum, ts) => `${nodeId ?? ''}:${nodeNum ?? ''}:${ts ?? ''}`;
|
||||
const recordNodeInfoEntry = (ts, nodeId, nodeNum) => {
|
||||
if (ts == null) return;
|
||||
const key = buildNodeInfoKey(nodeId, nodeNum, ts);
|
||||
if (nodeInfoKeys.has(key)) return;
|
||||
const node = nodeId && nodeById.has(nodeId)
|
||||
? nodeById.get(nodeId)
|
||||
: (nodeNum != null && nodeByNum.has(nodeNum) ? nodeByNum.get(nodeNum) : null);
|
||||
if (!node) return;
|
||||
nodeInfoKeys.add(key);
|
||||
logEntries.push({ ts, type: CHAT_LOG_ENTRY_TYPES.NODE_INFO, node, nodeId, nodeNum });
|
||||
};
|
||||
|
||||
for (const node of nodes || []) {
|
||||
if (!node) continue;
|
||||
const nodeId = normaliseNodeId(node);
|
||||
const nodeNum = normaliseNodeNum(node);
|
||||
if (nodeId) nodeById.set(nodeId, node);
|
||||
if (nodeNum != null) nodeByNum.set(nodeNum, node);
|
||||
const firstTs = resolveTimestampSeconds(node.first_heard ?? node.firstHeard, node.first_heard_iso ?? node.firstHeardIso);
|
||||
if (firstTs != null && firstTs >= cutoff) {
|
||||
logEntries.push({ ts: firstTs, type: CHAT_LOG_ENTRY_TYPES.NODE_NEW, node, nodeId, nodeNum });
|
||||
@@ -110,6 +133,7 @@ export function buildChatTabModel({
|
||||
const lastTs = resolveTimestampSeconds(node.last_heard ?? node.lastHeard, node.last_seen_iso ?? node.lastSeenIso);
|
||||
if (lastTs != null && lastTs >= cutoff) {
|
||||
logEntries.push({ ts: lastTs, type: CHAT_LOG_ENTRY_TYPES.NODE_INFO, node, nodeId, nodeNum });
|
||||
nodeInfoKeys.add(buildNodeInfoKey(nodeId, nodeNum, lastTs));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -125,6 +149,7 @@ export function buildChatTabModel({
|
||||
const nodeId = normaliseNodeId(snapshot);
|
||||
const nodeNum = normaliseNodeNum(snapshot);
|
||||
logEntries.push({ ts, type: CHAT_LOG_ENTRY_TYPES.TELEMETRY, telemetry: snapshot, nodeId, nodeNum });
|
||||
recordNodeInfoEntry(ts, nodeId, nodeNum);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -140,6 +165,7 @@ export function buildChatTabModel({
|
||||
const nodeId = normaliseNodeId(snapshot);
|
||||
const nodeNum = normaliseNodeNum(snapshot);
|
||||
logEntries.push({ ts, type: CHAT_LOG_ENTRY_TYPES.POSITION, position: snapshot, nodeId, nodeNum });
|
||||
recordNodeInfoEntry(ts, nodeId, nodeNum);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -153,9 +179,39 @@ export function buildChatTabModel({
|
||||
const nodeNum = normaliseNodeNum(snapshot);
|
||||
const neighborId = normaliseNeighborId(snapshot);
|
||||
logEntries.push({ ts, type: CHAT_LOG_ENTRY_TYPES.NEIGHBOR, neighbor: snapshot, nodeId, nodeNum, neighborId });
|
||||
recordNodeInfoEntry(ts, nodeId, nodeNum);
|
||||
}
|
||||
}
|
||||
|
||||
for (const trace of traces || []) {
|
||||
if (!trace) continue;
|
||||
const ts = resolveTimestampSeconds(trace.rx_time ?? trace.rxTime, trace.rx_iso ?? trace.rxIso);
|
||||
if (ts == null || ts < cutoff) continue;
|
||||
const path = buildTracePath(trace);
|
||||
if (path.length < 2) continue;
|
||||
const firstHop = path[0] || {};
|
||||
const traceLabels = path
|
||||
.map(hop => {
|
||||
if (!hop || typeof hop !== 'object') return null;
|
||||
const candidates = [hop.id, hop.raw];
|
||||
if (Number.isFinite(hop.num)) {
|
||||
candidates.push(String(hop.num));
|
||||
}
|
||||
return candidates.find(val => val != null && String(val).trim().length > 0) ?? null;
|
||||
})
|
||||
.filter(value => value != null && value !== '');
|
||||
logEntries.push({
|
||||
ts,
|
||||
type: CHAT_LOG_ENTRY_TYPES.TRACE,
|
||||
trace,
|
||||
tracePath: path,
|
||||
traceLabels,
|
||||
nodeId: firstHop.id ?? null,
|
||||
nodeNum: firstHop.num ?? null
|
||||
});
|
||||
recordNodeInfoEntry(ts, firstHop.id ?? null, firstHop.num ?? null);
|
||||
}
|
||||
|
||||
const encryptedLogEntries = [];
|
||||
const encryptedLogKeys = new Set();
|
||||
|
||||
@@ -255,26 +311,6 @@ export function buildChatTabModel({
|
||||
|
||||
logEntries.sort((a, b) => a.ts - b.ts);
|
||||
|
||||
let hasPrimaryBucket = false;
|
||||
for (const bucket of channelBuckets.values()) {
|
||||
if (bucket.index === 0) {
|
||||
hasPrimaryBucket = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!hasPrimaryBucket) {
|
||||
const bucketKey = '0';
|
||||
channelBuckets.set(bucketKey, {
|
||||
key: bucketKey,
|
||||
id: buildChannelTabId(bucketKey),
|
||||
index: 0,
|
||||
label: '0',
|
||||
entries: [],
|
||||
labelPriority: CHANNEL_LABEL_PRIORITY.INDEX,
|
||||
isPrimaryFallback: true
|
||||
});
|
||||
}
|
||||
|
||||
const channels = Array.from(channelBuckets.values()).sort((a, b) => {
|
||||
if (a.index !== b.index) {
|
||||
return a.index - b.index;
|
||||
@@ -345,10 +381,59 @@ function pickFirstPropertyValue(source, keys) {
|
||||
* @param {*} value Arbitrary payload candidate.
|
||||
* @returns {?string} Canonical node identifier.
|
||||
*/
|
||||
/**
 * Coerce an arbitrary node reference into a finite number.
 *
 * Accepts plain numbers, decimal strings, `0x…` hex strings, and canonical
 * `!hex` node identifiers; hex forms are returned as unsigned 32-bit values.
 *
 * @param {*} value Candidate reference.
 * @returns {?number} Finite number, or null when coercion fails.
 */
function coerceFiniteNumber(value) {
  if (value == null) return null;
  if (typeof value === 'number') {
    return Number.isFinite(value) ? value : null;
  }
  if (typeof value !== 'string') {
    const coerced = Number(value);
    return Number.isFinite(coerced) ? coerced : null;
  }
  const trimmed = value.trim();
  if (!trimmed) return null;
  if (trimmed.startsWith('!')) {
    // Canonical node id: "!"-prefixed hex digits only.
    const hexDigits = trimmed.slice(1);
    if (!/^[0-9A-Fa-f]+$/.test(hexDigits)) return null;
    const fromHex = Number.parseInt(hexDigits, 16);
    return Number.isFinite(fromHex) ? fromHex >>> 0 : null;
  }
  if (/^0[xX][0-9A-Fa-f]+$/.test(trimmed)) {
    const fromHex = Number.parseInt(trimmed, 16);
    return Number.isFinite(fromHex) ? fromHex >>> 0 : null;
  }
  const coerced = Number(trimmed);
  return Number.isFinite(coerced) ? coerced : null;
}
|
||||
|
||||
/**
 * Render a numeric node reference as a canonical "!xxxxxxxx" identifier.
 *
 * @param {*} ref Numeric node reference.
 * @returns {?string} Canonical 8-hex-digit identifier, or null when the
 *   input is not a finite number.
 */
function canonicalNodeIdFromNumeric(ref) {
  if (!Number.isFinite(ref)) return null;
  // >>> 0 normalises negatives/overflow into the unsigned 32-bit range.
  const hex = (ref >>> 0).toString(16);
  return `!${hex.padStart(8, '0')}`;
}
|
||||
|
||||
/**
 * Resolve a canonical node identifier from numbers, strings, or payloads.
 *
 * Numbers become "!xxxxxxxx" identifiers; strings are trimmed and, when
 * numeric/hex, canonicalised (otherwise returned as-is); objects are probed
 * for node_id/nodeId first and then node_num/nodeNum/num.
 *
 * Fix: the rendered merge had left the pre-refactor implementation's first
 * two statements (`if (!value || typeof value !== 'object') return null;`
 * and the early `return raw.trim()` form) above this logic, which made the
 * numeric/string handling below unreachable dead code. Those stale lines
 * are removed.
 *
 * @param {*} value Arbitrary node reference or payload.
 * @returns {?string} Canonical node identifier, or null.
 */
function normaliseNodeId(value) {
  if (value == null) return null;
  if (typeof value === 'number') {
    return canonicalNodeIdFromNumeric(value);
  }
  if (typeof value === 'string') {
    const trimmed = value.trim();
    if (!trimmed) return null;
    // Prefer the canonical "!hex" form when the string is numeric.
    const canonicalFromNumeric = canonicalNodeIdFromNumeric(coerceFiniteNumber(trimmed));
    return canonicalFromNumeric ?? trimmed;
  }
  if (typeof value !== 'object') return null;
  const rawId = value.node_id ?? value.nodeId ?? null;
  if (rawId != null) {
    const canonical = normaliseNodeId(rawId);
    if (canonical) return canonical;
  }
  const numericRef = value.node_num ?? value.nodeNum ?? value.num;
  const numericId = canonicalNodeIdFromNumeric(coerceFiniteNumber(numericRef));
  if (numericId) return numericId;
  return null;
}
|
||||
|
||||
/**
|
||||
@@ -366,6 +451,29 @@ function normaliseNeighborId(value) {
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
 * Build an ordered trace path of node identifiers and numeric references.
 *
 * The path is source first, then each hop in order, then destination;
 * null and empty-string entries are dropped.
 *
 * @param {Object} trace Trace payload.
 * @returns {Array<{id: ?string, num: ?number, raw: *}>} Ordered hop descriptors.
 */
function buildTracePath(trace) {
  const hopValues = Array.isArray(trace.hops) ? trace.hops : [];
  const orderedValues = [
    trace.src ?? trace.source ?? trace.from,
    ...hopValues,
    trace.dest ?? trace.destination ?? trace.to
  ];
  const path = [];
  for (const raw of orderedValues) {
    if (raw == null || raw === '') continue;
    path.push({
      id: normaliseNodeId(raw),
      num: normaliseNodeNum({ num: raw }),
      raw
    });
  }
  return path;
}
|
||||
|
||||
/**
 * Extract a finite node number from a payload when available.
 *
 * Accepts raw numbers, numeric strings (including `!hex`/`0x` forms via
 * coerceFiniteNumber), or objects carrying `node_num`/`nodeNum`/`num`.
 *
 * @param {*} value Candidate payload.
 * @returns {?number} Canonical numeric identifier, truncated to an integer.
 */
function normaliseNodeNum(value) {
  if (Number.isFinite(value)) {
    return Math.trunc(value);
  }
  // Object payloads: read the first available numeric property.
  const fromObject = value && typeof value === 'object'
    ? coerceFiniteNumber(value.node_num ?? value.nodeNum ?? value.num)
    : null;
  if (fromObject != null) {
    return Math.trunc(fromObject);
  }
  // Fall back to coercing the value itself (numeric strings, etc.).
  const parsed = coerceFiniteNumber(value);
  return parsed != null ? Math.trunc(parsed) : null;
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -110,6 +110,7 @@ export function chatLogEntryMatchesQuery(entry, query) {
|
||||
candidates.push(...collectSearchValues(entry.position));
|
||||
candidates.push(...collectSearchValues(entry.neighbor));
|
||||
candidates.push(...collectSearchValues(entry.neighborNode));
|
||||
candidates.push(...(Array.isArray(entry.traceLabels) ? entry.traceLabels : []));
|
||||
if (entry.nodeId) candidates.push(entry.nodeId);
|
||||
if (entry.nodeNum != null && entry.nodeNum !== '') candidates.push(entry.nodeNum);
|
||||
if (entry.neighborId) candidates.push(entry.neighborId);
|
||||
|
||||
@@ -0,0 +1,680 @@
|
||||
/*
|
||||
* Copyright © 2025-26 l5yth & contributors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import { readAppConfig } from './config.js';
|
||||
import { resolveLegendVisibility } from './map-legend-visibility.js';
|
||||
import { mergeConfig } from './settings.js';
|
||||
import { roleColors } from './role-helpers.js';
|
||||
|
||||
/**
 * Escape HTML special characters to prevent XSS.
 *
 * @param {string} str Raw string to escape.
 * @returns {string} Escaped string safe for HTML insertion.
 */
function escapeHtml(str) {
  if (typeof str !== 'string') return '';
  // Ampersand must be escaped first so later entities are not double-escaped.
  return str
    .replace(/&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;')
    .replace(/'/g, '&#39;');
}
|
||||
|
||||
/**
 * Format a coordinate value to fixed decimal places.
 *
 * @param {number|null|undefined} v Coordinate value.
 * @param {number} d Decimal places (default 5).
 * @returns {string} Formatted coordinate or empty string.
 */
function fmtCoords(v, d = 5) {
  if (v == null || v === '') return '';
  const numeric = Number(v);
  return Number.isFinite(numeric) ? numeric.toFixed(d) : '';
}
|
||||
|
||||
/**
 * Convert a Unix timestamp to a human-readable relative time string.
 *
 * @param {number|null|undefined} unixSec Unix timestamp in seconds.
 * @param {number} nowSec Current timestamp in seconds.
 * @returns {string} Relative time string or empty string.
 */
function timeAgo(unixSec, nowSec = Date.now() / 1000) {
  if (unixSec == null || unixSec === '') return '';
  const timestamp = Number(unixSec);
  if (!Number.isFinite(timestamp) || timestamp <= 0) return '';
  // Negative differences (future timestamps) clamp to "0s ago".
  const elapsed = Math.max(0, Math.floor(nowSec - timestamp));
  if (elapsed >= 86400) return `${Math.floor(elapsed / 86400)}d ago`;
  if (elapsed >= 3600) return `${Math.floor(elapsed / 3600)}h ago`;
  if (elapsed >= 60) return `${Math.floor(elapsed / 60)}m ago`;
  return `${elapsed}s ago`;
}
|
||||
|
||||
/**
 * Build a navigable URL for an instance domain.
 *
 * @param {string} domain Instance domain.
 * @returns {string|null} Navigable URL or null.
 */
function buildInstanceUrl(domain) {
  if (typeof domain !== 'string') return null;
  const trimmed = domain.trim();
  if (trimmed === '') return null;
  // Respect an explicit scheme; otherwise default to HTTPS.
  const hasScheme = /^https?:\/\//i.test(trimmed);
  return hasScheme ? trimmed : `https://${trimmed}`;
}
|
||||
|
||||
// Colour ramp for instance markers, bucketed by active-node count.
// The first stop whose `limit` exceeds the count wins (see colorForNodeCount);
// counts at or above the last limit fall back to DEFAULT_INSTANCE_COLOR.
const NODE_COUNT_COLOR_STOPS = [
  { limit: 100, color: roleColors.CLIENT_HIDDEN },
  { limit: 200, color: roleColors.SENSOR },
  { limit: 300, color: roleColors.TRACKER },
  { limit: 400, color: roleColors.CLIENT_MUTE },
  { limit: 500, color: roleColors.CLIENT },
  { limit: 600, color: roleColors.CLIENT_BASE },
  { limit: 700, color: roleColors.REPEATER },
  { limit: 800, color: roleColors.ROUTER_LATE },
  { limit: 900, color: roleColors.ROUTER }
];

// Fallback marker colour for invalid counts, missing stop colours, and
// counts beyond the last stop. '#3388ff' is Leaflet's default path colour.
const DEFAULT_INSTANCE_COLOR = roleColors.LOST_AND_FOUND || '#3388ff';
|
||||
|
||||
/**
 * Determine the marker colour for an instance based on its active node count.
 *
 * @param {*} count Raw node count value from the API.
 * @returns {string} CSS colour string.
 */
function colorForNodeCount(count) {
  const numeric = Number(count);
  const valid = Number.isFinite(numeric) && numeric >= 0;
  if (!valid) return DEFAULT_INSTANCE_COLOR;
  // Walk the ramp and take the first bucket the count falls under.
  for (const stop of NODE_COUNT_COLOR_STOPS) {
    if (numeric < stop.limit) {
      return stop.color || DEFAULT_INSTANCE_COLOR;
    }
  }
  return DEFAULT_INSTANCE_COLOR;
}
|
||||
|
||||
/**
 * Render arbitrary contact text while hyperlinking recognised URL-like segments.
 *
 * @param {*} contact Raw contact value from the API.
 * @returns {string} HTML markup safe for insertion.
 */
function renderContactHtml(contact) {
  if (typeof contact !== 'string') return '';
  const text = contact.trim();
  if (!text) return '';
  // Hyperlink http(s), mailto and matrix URIs; everything else is escaped text.
  const linkPattern = /(https?:\/\/[^\s]+|mailto:[^\s]+|matrix:[^\s]+)/gi;
  const segments = [];
  let cursor = 0;

  for (let match = linkPattern.exec(text); match !== null; match = linkPattern.exec(text)) {
    const before = text.slice(cursor, match.index);
    if (before) {
      segments.push(escapeHtml(before));
    }
    const safeUrl = escapeHtml(match[0]);
    segments.push(`<a href="${safeUrl}" target="_blank" rel="noopener noreferrer">${safeUrl}</a>`);
    cursor = match.index + match[0].length;
  }

  const tail = text.slice(cursor);
  if (tail) {
    segments.push(escapeHtml(tail));
  }

  // Preserve author line breaks in the rendered HTML.
  return segments.join('').replace(/\r?\n/g, '<br>');
}
|
||||
|
||||
/**
 * Convert a value into a finite number or null when invalid.
 *
 * @param {*} value Raw value to convert.
 * @returns {number|null} Finite number or null.
 */
function toFiniteNumber(value) {
  const numeric = Number(value);
  if (Number.isFinite(numeric)) return numeric;
  return null;
}
|
||||
|
||||
/**
 * Compare two string-like values ignoring case.
 *
 * @param {*} a Left-hand operand.
 * @param {*} b Right-hand operand.
 * @returns {number} Comparator result.
 */
function compareString(a, b) {
  // Non-strings are stringified (null/undefined become ''), then lowercased.
  const toKey = value =>
    (typeof value === 'string' ? value : String(value ?? '')).toLowerCase();
  return toKey(a).localeCompare(toKey(b));
}
|
||||
|
||||
/**
 * Compare two numeric values, pushing non-numeric values to the end.
 *
 * @param {*} a Left-hand operand.
 * @param {*} b Right-hand operand.
 * @returns {number} Comparator result.
 */
function compareNumber(a, b) {
  const left = toFiniteNumber(a);
  const right = toFiniteNumber(b);
  // Missing values sort after present ones.
  if (left == null) {
    return right == null ? 0 : 1;
  }
  if (right == null) return -1;
  if (left === right) return 0;
  return left < right ? -1 : 1;
}
|
||||
|
||||
/**
 * Determine whether a string-like value is present.
 *
 * @param {*} value Candidate value.
 * @returns {boolean} true when present.
 */
function hasStringValue(value) {
  if (value == null) return false;
  const text = typeof value === 'string' ? value : String(value);
  return text.trim().length > 0;
}
|
||||
|
||||
/**
 * Determine whether a value can be interpreted as a finite number.
 *
 * @param {*} value Candidate value.
 * @returns {boolean} true when present.
 */
function hasNumberValue(value) {
  return toFiniteNumber(value) !== null;
}
|
||||
|
||||
/**
 * Toggle the legend hidden class on a container element.
 *
 * Prefers the DOM classList API; falls back to string manipulation of
 * `className` for minimal element-like objects (e.g. in tests).
 *
 * @param {HTMLElement|{ classList?: { toggle?: Function }, className?: string }} container Legend container.
 * @param {boolean} hidden Whether the legend should be hidden.
 * @returns {void}
 */
function toggleLegendHiddenClass(container, hidden) {
  if (!container) return;
  const toggleFn = container.classList && container.classList.toggle;
  if (typeof toggleFn === 'function') {
    container.classList.toggle('legend-hidden', hidden);
    return;
  }
  if (typeof container.className !== 'string') return;
  const classes = container.className.split(/\s+/).filter(Boolean);
  const index = classes.indexOf('legend-hidden');
  if (hidden && index === -1) {
    classes.push('legend-hidden');
  } else if (!hidden && index !== -1) {
    classes.splice(index, 1);
  }
  container.className = classes.join(' ');
}
|
||||
|
||||
// OpenStreetMap France "HOT" tile server; {s}/{z}/{x}/{y} are Leaflet placeholders.
const TILE_LAYER_URL = 'https://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png';
|
||||
|
||||
/**
 * Initialize the federation page by fetching instances, rendering the map,
 * and populating the table.
 *
 * @param {{
 *   config?: object,
 *   fetchImpl?: typeof fetch,
 *   leaflet?: typeof L
 * }} [options] Optional overrides for testing.
 * @returns {Promise<void>}
 */
export async function initializeFederationPage(options = {}) {
  const rawConfig = options.config || readAppConfig();
  const config = mergeConfig(rawConfig);
  // NOTE(review): assigning a detached `fetch` reference can throw
  // "Illegal invocation" in some browsers when later called — consider
  // `options.fetchImpl || fetch.bind(globalThis)`. TODO confirm target browsers.
  const fetchImpl = options.fetchImpl || fetch;
  const leaflet = options.leaflet || (typeof window !== 'undefined' ? window.L : null);
  // DOM anchors for the map panel and instances table.
  const mapContainer = document.getElementById('map');
  const mapPanel = document.getElementById('mapPanel');
  const tableEl = document.getElementById('instances');
  const tableBody = document.querySelector('#instances tbody');
  const statusEl = document.getElementById('status');
  const sortHeaders = tableEl
    ? Array.from(tableEl.querySelectorAll('thead .sort-header[data-sort-key]'))
    : [];

  // Leaflet is optional: the table still renders when the map library is absent.
  const hasLeaflet =
    typeof leaflet === 'object' &&
    leaflet &&
    typeof leaflet.map === 'function' &&
    typeof leaflet.tileLayer === 'function';

  let map = null;
  let markersLayer = null;
  let tileLayer = null;
  let legendContainer = null;
  let legendToggleButton = null;
  let legendVisible = true;
  // Only an explicit data-legend-collapsed="false" keeps the legend open by default.
  const legendCollapsedValue = mapPanel ? mapPanel.getAttribute('data-legend-collapsed') : null;
  const legendDefaultCollapsed = legendCollapsedValue == null
    ? true
    : legendCollapsedValue.trim() !== 'false';
  // Per-column sorting configuration: value accessor, comparator,
  // presence test, and default sort direction for first click.
  const tableSorters = {
    name: { getValue: inst => inst.name ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
    domain: { getValue: inst => inst.domain ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
    contact: { getValue: inst => inst.contactLink ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
    version: { getValue: inst => inst.version ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
    channel: { getValue: inst => inst.channel ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
    frequency: { getValue: inst => inst.frequency ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
    nodesCount: {
      getValue: inst => toFiniteNumber(inst.nodesCount ?? inst.nodes_count),
      compare: compareNumber,
      hasValue: hasNumberValue,
      defaultDirection: 'desc'
    },
    latitude: { getValue: inst => toFiniteNumber(inst.latitude), compare: compareNumber, hasValue: hasNumberValue, defaultDirection: 'asc' },
    longitude: { getValue: inst => toFiniteNumber(inst.longitude), compare: compareNumber, hasValue: hasNumberValue, defaultDirection: 'asc' },
    lastUpdateTime: {
      getValue: inst => toFiniteNumber(inst.lastUpdateTime),
      compare: compareNumber,
      hasValue: hasNumberValue,
      defaultDirection: 'desc'
    }
  };
  // Most recently updated instances are shown first by default.
  let sortState = {
    key: 'lastUpdateTime',
    direction: tableSorters.lastUpdateTime ? tableSorters.lastUpdateTime.defaultDirection : 'desc'
  };

  /**
   * Sort instances using the active sort configuration.
   *
   * @param {Array<Object>} data Instance rows.
   * @returns {Array<Object>} sorted rows.
   */
  const sortInstancesData = data => {
    const sorter = tableSorters[sortState.key];
    if (!sorter) return Array.isArray(data) ? [...data] : [];
    const dir = sortState.direction === 'asc' ? 1 : -1;
    return [...(data || [])].sort((a, b) => {
      const aVal = sorter.getValue(a);
      const bVal = sorter.getValue(b);
      const aHas = sorter.hasValue ? sorter.hasValue(aVal) : hasStringValue(aVal);
      const bHas = sorter.hasValue ? sorter.hasValue(bVal) : hasStringValue(bVal);
      if (aHas && bHas) {
        return sorter.compare(aVal, bVal) * dir;
      }
      // Rows missing the sort value sink to the bottom regardless of direction.
      if (aHas) return -1;
      if (bHas) return 1;
      return 0;
    });
  };

  /**
   * Update the visual sort indicators for the active column.
   *
   * @returns {void}
   */
  const syncSortIndicators = () => {
    if (!tableEl || !sortHeaders.length) return;
    // Clear every header first, then mark only the active column.
    tableEl.querySelectorAll('thead th').forEach(th => th.removeAttribute('aria-sort'));
    sortHeaders.forEach(header => {
      header.removeAttribute('data-sort-active');
      const indicator = header.querySelector('.sort-indicator');
      if (indicator) indicator.textContent = '';
    });
    const active = sortHeaders.find(header => header.dataset.sortKey === sortState.key);
    if (!active) return;
    const indicator = active.querySelector('.sort-indicator');
    if (indicator) indicator.textContent = sortState.direction === 'asc' ? '▲' : '▼';
    active.setAttribute('data-sort-active', 'true');
    const th = active.closest('th');
    if (th) {
      th.setAttribute('aria-sort', sortState.direction === 'asc' ? 'ascending' : 'descending');
    }
  };

  /**
   * Render the instances table body with sorting applied.
   *
   * @param {Array<Object>} data Instance rows.
   * @param {number} nowSec Reference timestamp for relative time rendering.
   * @returns {void}
   */
  const renderTableRows = (data, nowSec) => {
    if (!tableBody) return;
    const frag = document.createDocumentFragment();
    const sorted = sortInstancesData(data);

    for (const instance of sorted) {
      const tr = document.createElement('tr');
      const url = buildInstanceUrl(instance.domain);
      // All dynamic values pass through escapeHtml before innerHTML insertion.
      const nameHtml = instance.name ? escapeHtml(instance.name) : '<em>—</em>';
      const domainHtml = url
        ? `<a href="${escapeHtml(url)}" target="_blank" rel="noopener">${escapeHtml(instance.domain || '')}</a>`
        : escapeHtml(instance.domain || '');
      const contactHtml = renderContactHtml(instance.contactLink);
      const nodesCountValue = toFiniteNumber(instance.nodesCount ?? instance.nodes_count);
      const nodesCountText = nodesCountValue == null ? '<em>—</em>' : escapeHtml(String(nodesCountValue));

      tr.innerHTML = `
        <td class="instances-col instances-col--name">${nameHtml}</td>
        <td class="instances-col instances-col--domain mono">${domainHtml}</td>
        <td class="instances-col instances-col--contact">${contactHtml || '<em>—</em>'}</td>
        <td class="instances-col instances-col--version mono">${escapeHtml(instance.version || '')}</td>
        <td class="instances-col instances-col--channel">${escapeHtml(instance.channel || '')}</td>
        <td class="instances-col instances-col--frequency">${escapeHtml(instance.frequency || '')}</td>
        <td class="instances-col instances-col--nodes mono">${nodesCountText}</td>
        <td class="instances-col instances-col--latitude mono">${fmtCoords(instance.latitude)}</td>
        <td class="instances-col instances-col--longitude mono">${fmtCoords(instance.longitude)}</td>
        <td class="instances-col instances-col--last-update mono">${timeAgo(instance.lastUpdateTime, nowSec)}</td>
      `;

      frag.appendChild(tr);
    }

    tableBody.replaceChildren(frag);
    syncSortIndicators();
  };

  /**
   * Update the pressed state of the legend visibility toggle button.
   *
   * @returns {void}
   */
  const updateLegendToggleState = () => {
    if (!legendToggleButton) return;
    const baseLabel = legendVisible ? 'Hide map legend' : 'Show map legend';
    const baseText = legendVisible ? 'Hide legend' : 'Show legend';
    legendToggleButton.setAttribute('aria-pressed', legendVisible ? 'true' : 'false');
    legendToggleButton.setAttribute('aria-label', baseLabel);
    legendToggleButton.textContent = baseText;
  };

  /**
   * Show or hide the map legend component.
   *
   * @param {boolean} visible Whether the legend should be displayed.
   * @returns {void}
   */
  const setLegendVisibility = visible => {
    legendVisible = Boolean(visible);
    if (legendContainer) {
      toggleLegendHiddenClass(legendContainer, !legendVisible);
      if (typeof legendContainer.setAttribute === 'function') {
        legendContainer.setAttribute('aria-hidden', legendVisible ? 'false' : 'true');
      }
    }
    updateLegendToggleState();
  };

  /**
   * Wire up click and keyboard handlers for sortable headers.
   *
   * @param {Function} rerender Callback to refresh the table.
   * @returns {void}
   */
  const attachSortHandlers = rerender => {
    if (!sortHeaders.length) return;
    const applySortKey = key => {
      if (!key) return;
      // Clicking the active column flips direction; a new column gets its default.
      if (sortState.key === key) {
        sortState = { key, direction: sortState.direction === 'asc' ? 'desc' : 'asc' };
      } else {
        const defaultDir = tableSorters[key]?.defaultDirection || 'asc';
        sortState = { key, direction: defaultDir };
      }
      rerender();
    };

    sortHeaders.forEach(header => {
      const key = header.dataset.sortKey;
      header.addEventListener('click', () => applySortKey(key));
      header.addEventListener('keydown', event => {
        // Enter and Space activate sorting for keyboard users.
        if (event.key === 'Enter' || event.key === ' ') {
          event.preventDefault();
          applySortKey(key);
        }
      });
    });
  };

  /**
   * Resolve the active theme based on the DOM state.
   *
   * @returns {'dark' | 'light'}
   */
  const resolveTheme = () => {
    if (document.body && document.body.classList.contains('dark')) return 'dark';
    const htmlTheme = document.documentElement?.getAttribute('data-theme');
    if (htmlTheme === 'dark' || htmlTheme === 'light') return htmlTheme;
    return 'dark';
  };

  /**
   * Apply the configured CSS filter to the active tile container.
   *
   * @returns {void}
   */
  const applyTileFilter = () => {
    if (!tileLayer) return;
    const theme = resolveTheme();
    const filterValue = theme === 'dark' ? config.tileFilters.dark : config.tileFilters.light;
    // Apply to the layer container, the tile pane, and every individual tile:
    // tiles loaded after the pane filter was set may otherwise miss it.
    const container =
      typeof tileLayer.getContainer === 'function' ? tileLayer.getContainer() : null;
    if (container && container.style) {
      container.style.filter = filterValue;
      container.style.webkitFilter = filterValue;
    }
    const tilePane = map && typeof map.getPane === 'function' ? map.getPane('tilePane') : null;
    if (tilePane && tilePane.style) {
      tilePane.style.filter = filterValue;
      tilePane.style.webkitFilter = filterValue;
    }
    const tileNodes = [];
    if (container && typeof container.querySelectorAll === 'function') {
      tileNodes.push(...container.querySelectorAll('.leaflet-tile'));
    }
    if (tilePane && typeof tilePane.querySelectorAll === 'function') {
      tileNodes.push(...tilePane.querySelectorAll('.leaflet-tile'));
    }
    tileNodes.forEach(tile => {
      if (tile && tile.style) {
        tile.style.filter = filterValue;
        tile.style.webkitFilter = filterValue;
      }
    });
  };

  // Initialize the map if Leaflet is available
  if (hasLeaflet && mapContainer) {
    const initialZoom = Number.isFinite(config.mapZoom) ? config.mapZoom : 5;
    map = leaflet.map(mapContainer, { worldCopyJump: true, attributionControl: false });
    map.setView([config.mapCenter.lat, config.mapCenter.lon], initialZoom);

    tileLayer = leaflet
      .tileLayer(TILE_LAYER_URL, {
        maxZoom: 19,
        className: 'map-tiles',
        crossOrigin: 'anonymous'
      })
      .addTo(map);

    // Re-apply the theme filter whenever a new batch of tiles finishes loading.
    tileLayer.on?.('load', applyTileFilter);
    applyTileFilter();

    window.addEventListener('themechange', applyTileFilter);
    markersLayer = leaflet.layerGroup().addTo(map);
  }

  // Fetch instances data
  let instances = [];
  try {
    const response = await fetchImpl('/api/instances', {
      headers: { Accept: 'application/json' },
      credentials: 'omit'
    });
    if (response.ok) {
      instances = await response.json();
    }
  } catch (err) {
    // Best-effort: an unreachable API leaves the page with zero instances.
    console.warn('Failed to fetch federation instances', err);
  }

  if (statusEl) {
    statusEl.textContent = `${instances.length} instances`;
    statusEl.classList.remove('pill--loading');
  }

  const nowSec = Date.now() / 1000;

  // Render map markers
  if (map && markersLayer && hasLeaflet && Array.isArray(instances)) {
    const bounds = [];
    const canRenderLegend =
      typeof leaflet.control === 'function' && leaflet.DomUtil && typeof leaflet.DomUtil.create === 'function';
    if (canRenderLegend) {
      // On narrow viewports the legend starts collapsed.
      const legendMediaQuery = typeof window !== 'undefined' && window.matchMedia
        ? window.matchMedia('(max-width: 1024px)')
        : null;
      const initialLegendVisible = resolveLegendVisibility({
        defaultCollapsed: legendDefaultCollapsed,
        mediaQueryMatches: legendMediaQuery ? legendMediaQuery.matches : false
      });
      legendVisible = initialLegendVisible;

      // Translate the colour ramp into labelled legend entries.
      const legendStops = NODE_COUNT_COLOR_STOPS.map((stop, index) => {
        const lower = index === 0 ? 0 : NODE_COUNT_COLOR_STOPS[index - 1].limit;
        const upper = stop.limit - 1;
        const label = index === 0 ? `< ${stop.limit} nodes` : `${lower}-${upper} nodes`;
        return { color: stop.color || DEFAULT_INSTANCE_COLOR, label };
      });
      const lastLimit = NODE_COUNT_COLOR_STOPS[NODE_COUNT_COLOR_STOPS.length - 1]?.limit || 900;
      legendStops.push({ color: DEFAULT_INSTANCE_COLOR, label: `≥ ${lastLimit} nodes` });

      const legend = leaflet.control({ position: 'bottomright' });
      legend.onAdd = function onAdd() {
        const container = leaflet.DomUtil.create('div', 'legend legend--instances');
        container.id = 'federationLegend';
        container.setAttribute('aria-label', 'Active nodes legend');
        container.setAttribute('role', 'region');
        container.setAttribute('aria-hidden', initialLegendVisible ? 'false' : 'true');
        toggleLegendHiddenClass(container, !initialLegendVisible);
        const header = leaflet.DomUtil.create('div', 'legend-header', container);
        const title = leaflet.DomUtil.create('span', 'legend-title', header);
        title.textContent = 'Active nodes';
        const items = leaflet.DomUtil.create('div', 'legend-items', container);
        legendStops.forEach(stop => {
          const item = leaflet.DomUtil.create('div', 'legend-item', items);
          item.setAttribute('aria-hidden', 'true');
          const swatch = leaflet.DomUtil.create('span', 'legend-swatch', item);
          swatch.style.background = stop.color;
          const label = leaflet.DomUtil.create('span', 'legend-label', item);
          label.textContent = stop.label;
        });
        legendContainer = container;
        return container;
      };
      legend.addTo(map);

      const legendToggleControl = leaflet.control({ position: 'bottomright' });
      legendToggleControl.onAdd = function onAdd() {
        const container = leaflet.DomUtil.create('div', 'leaflet-control legend-toggle');
        const button = leaflet.DomUtil.create('button', 'legend-toggle-button', container);
        button.type = 'button';
        button.setAttribute('aria-controls', 'federationLegend');
        button.addEventListener?.('click', event => {
          event.preventDefault();
          event.stopPropagation();
          setLegendVisibility(!legendVisible);
        });
        legendToggleButton = button;
        updateLegendToggleState();
        // Keep clicks/scrolls on the control from panning or zooming the map.
        if (leaflet.DomEvent && typeof leaflet.DomEvent.disableClickPropagation === 'function') {
          leaflet.DomEvent.disableClickPropagation(container);
        }
        if (leaflet.DomEvent && typeof leaflet.DomEvent.disableScrollPropagation === 'function') {
          leaflet.DomEvent.disableScrollPropagation(container);
        }
        return container;
      };
      legendToggleControl.addTo(map);

      setLegendVisibility(initialLegendVisible);
      if (legendMediaQuery) {
        const changeHandler = event => {
          // Only auto-toggle when the page did not request a collapsed default.
          if (legendDefaultCollapsed) return;
          setLegendVisibility(!event.matches);
        };
        if (typeof legendMediaQuery.addEventListener === 'function') {
          legendMediaQuery.addEventListener('change', changeHandler);
        } else if (typeof legendMediaQuery.addListener === 'function') {
          // Legacy Safari fallback.
          legendMediaQuery.addListener(changeHandler);
        }
      }
    }

    for (const instance of instances) {
      const lat = Number(instance.latitude);
      const lon = Number(instance.longitude);

      // Skip instances without usable coordinates.
      if (!Number.isFinite(lat) || !Number.isFinite(lon)) continue;

      bounds.push([lat, lon]);

      const name = instance.name || instance.domain || 'Unknown';
      const url = buildInstanceUrl(instance.domain);
      const nodeCountValue = toFiniteNumber(instance.nodesCount ?? instance.nodes_count);
      const popupLines = [
        url
          ? `<strong><a href="${escapeHtml(url)}" target="_blank" rel="noopener">${escapeHtml(name)}</a></strong>`
          : `<strong>${escapeHtml(name)}</strong>`,
        `<span class="mono">${escapeHtml(instance.domain || '')}</span>`,
        instance.channel ? `Channel: ${escapeHtml(instance.channel)}` : '',
        instance.frequency ? `Frequency: ${escapeHtml(instance.frequency)}` : '',
        instance.version ? `Version: ${escapeHtml(instance.version)}` : '',
        nodeCountValue != null ? `Active nodes (24h): ${escapeHtml(String(nodeCountValue))}` : ''
      ].filter(Boolean);

      const marker = leaflet.circleMarker([lat, lon], {
        radius: 9,
        fillColor: colorForNodeCount(nodeCountValue),
        color: '#000',
        weight: 1,
        opacity: 0.8,
        fillOpacity: 0.75
      });

      marker.bindPopup(popupLines.join('<br>'));
      markersLayer.addLayer(marker);
    }

    if (bounds.length > 0 && typeof map.fitBounds === 'function') {
      try {
        map.fitBounds(bounds, { padding: [50, 50], maxZoom: 10 });
      } catch (err) {
        console.warn('Failed to fit federation map bounds', err);
      }
    }
  }

  // Render table
  if (tableBody && Array.isArray(instances)) {
    attachSortHandlers(() => renderTableRows(instances, nowSec));
    renderTableRows(instances, nowSec);
  }
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user