Compare commits

..

93 Commits

Author SHA1 Message Date
l5y 546e009867 address missing id field ingestor bug (#469)
* address missing id field ingestor bug

* cover missing unit test vectors

* cover missing unit test vectors
2025-11-19 08:22:24 +01:00
l5y be46963744 merge secondary channels by name (#468)
* merge secondary channels by name

* cover missing unit test vectors
2025-11-18 18:33:02 +01:00
l5y 8f7adba65a rate limit host device telemetry (#467)
* rate limit host device telemetry

* Spec: add more unit tests
2025-11-18 18:04:40 +01:00
l5y e8b38ed65a add traceroutes to frontend (#466)
* add traceroutes to frontend

* Spec: add more unit tests
2025-11-18 13:12:14 +01:00
l5y 700fcef33f feat: implement traceroute app packet handling across the stack (#463)
* feat: implement traceroute app packet handling across the stack

* run linter

* tests: fix

* Spec: add more unit tests
2025-11-18 11:23:46 +01:00
l5y b23d864f1d Bump version and update changelog (#462)
* chore: bump version to 0.5.6 everywhere

* docs: update changelog

* chore: bump version to 0.5.6 everywhere
2025-11-16 17:38:41 +01:00
l5y e1d43cec57 Added comprehensive helper unit tests (#457)
* Added comprehensive helper unit tests

* run black
2025-11-16 16:47:57 +01:00
l5y cd7bced827 Added reaction-aware handling (#455) 2025-11-16 15:31:17 +01:00
l5y b298f2f22c env: add map zoom (#454)
* chore: bump version to 0.5.5 everywhere

* add MAP_ZOOM varibale

* run black
2025-11-16 12:57:47 +01:00
l5y 9304a99745 charts: render aggregated telemetry charts for all nodes (#453) 2025-11-15 17:09:55 +01:00
l5y 4a03e17886 nodes: render charts detail pages as overlay (#452) 2025-11-15 12:13:06 +01:00
l5y e502ddd436 fix telemetry parsing for charts (#451) 2025-11-14 21:18:37 +01:00
l5y 12f1801ed2 nodes: improve charts on detail pages (#450)
* nodes: add charts to detail pages

* nodes: improve charts on detail pages

* fix ignored packet debug loggin

* run rufo

* address review comments
2025-11-14 20:17:58 +01:00
l5y a6a63bf12e nodes: add charts to detail pages (#449) 2025-11-14 16:24:09 +01:00
l5y 631455237f Aggregate frontend snapshots across views (#447) 2025-11-13 22:02:42 +01:00
Alexkurd 382e2609c9 Remove added 1 if reply with emoji (#443)
In reply message.text contains emoji, and message.emoji is 1.
2025-11-13 21:15:35 +01:00
l5y 05efbc5f20 Refine node detail view layout (#442)
* Refine node detail view layout

* Refine node detail controls and formatting

* Improve node detail neighbor roles and message metadata

* Fix node detail neighbor metadata hydration
2025-11-13 19:59:07 +01:00
l5y 9a45430321 Enable map centering from node table coordinates (#439)
* Enable map centering from node table coordinates

* Replace node coordinate buttons with links
2025-11-13 17:23:35 +01:00
l5y cb843d5774 Add node detail route and page (#441) 2025-11-13 17:19:20 +01:00
l5y c823347175 Ensure nodeinfo patch runs before importing interfaces (#440) 2025-11-13 17:16:59 +01:00
l5y d87c0cc226 Filter zero-valued fields from API responses (#438)
* Filter zero-value fields from API responses

* Restore zero-valued API fields (#438)

* Clarify compact_api_row documentation
2025-11-13 17:10:46 +01:00
l5y 9c957a4a14 Add debug payload tracing and ignored packet logging (#437) 2025-11-13 17:06:35 +01:00
l5y 16442bab08 Tighten map auto-fit behaviour (#435) 2025-11-12 20:49:03 +01:00
l5y e479983d38 Fetch encrypted chat log entries for log tab (#434)
* Fetch encrypted chat log entries for log tab

* Guard log-only chat log merge from plaintext
2025-11-12 14:13:46 +01:00
l5y 70fca17230 Add encrypted filter to messages API (#432) 2025-11-12 12:46:34 +01:00
l5y 2107d6790d Guard NodeInfo handler against missing IDs (#426) (#431) 2025-11-12 12:39:36 +01:00
l5y 8823b7cb48 Add standalone full-screen map, chat, and nodes views (#429)
* Add dedicated full-screen dashboard views

* Simplify full-screen routes layout

* Restore refresh controls on full-screen views

* Polish standalone view layout

* Streamline standalone layouts
2025-11-12 11:38:26 +01:00
l5y e40c0d9078 Ensure chat history fetches full message limit (#428) 2025-11-11 22:33:30 +01:00
l5y 8b090cb238 Handle nodeinfo packets without identifiers (#426) (#427) 2025-11-11 20:45:32 +01:00
l5y 2bb8e3fd66 Chore: update license headers (#424) 2025-11-08 10:41:57 +01:00
l5y deb7263c3e Chore: bump version to 0.5.5 (#423) 2025-11-08 09:15:52 +00:00
l5y 3daadc4f68 handle naming when primary channel has a name (#422) 2025-11-08 09:44:41 +01:00
l5y 6b72b1b3da handle edge case when primary channel has a name (#421) 2025-11-07 21:39:26 +01:00
l5y 52486d82ad Add preset mode to logs (#420) 2025-11-07 17:56:27 +01:00
l5y 487d618e00 Parallelize federation tasks with worker pool (#419)
* Parallelize federation work with worker pool

* Handle worker pool shutdown fallback during federation announcements
2025-11-07 17:24:37 +01:00
l5y 9239805129 allow filtering chat and logs by node name (#417) 2025-11-07 15:55:11 +01:00
l5y 554b2abd82 gem: add erb as dependency removed from std (#416)
* gem: add erb as dependency removed from std

* Relax erb dependency for Ruby 3.3 compatibility
2025-11-07 15:11:05 +01:00
l5y 8bb98f65d6 implement support for replies and reactions app (#411)
* implement support for replies and reactions app

* Allow numeric reaction port packets

* allow reaction packets through mai channel filter
2025-11-06 20:58:35 +01:00
l5y 71c0f8b21e ingestor: ignore direct messages on default channel (#414)
* ingestor: ignore direct messages on default channel

* tests: run black formatter
2025-11-06 20:14:32 +01:00
l5y aa2bc68544 agents: add instructions (#410) 2025-11-03 22:23:20 +00:00
l5y a8394effdc display encrypted messages in frontend log window (#409)
* display encrypted messages in frontend log window

* render recipient by known node name short id
2025-11-03 22:51:20 +01:00
l5y e27d5ab53c Add chat log entries for telemetry, position, and neighbor events (#408)
* Add telemetry and neighbor chat log events

* Refine chat log highlights for telemetry and position updates

* Add emoji prefixes to chat log events

* Fix telemetry highlights and emoji styling

* Remove italic chat copy and drop zero-valued highlights

* address style and formatting issues
2025-11-03 12:33:02 +01:00
l5y 6af272c01f Handle missing instance domain outside production (#405) 2025-10-31 12:36:53 +01:00
l5y 03e2fe6a72 Add tabbed chat panel with channel grouping (#404)
* feat: add tabbed chat panel with channel grouping

* Handle ISO-only chat timestamps in dashboard renderer

* Remove redundant chat channel tag
2025-10-31 12:24:17 +01:00
l5y 87b4cd79e7 Normalize numeric client roles using Meshtastic CLI enums (#402)
* Normalize firmware client roles using CLI enums

* Prioritize CLI role lookup before protobuf fallbacks
2025-10-31 11:43:48 +01:00
l5y d94d75e605 Ensure Docker images publish versioned tags (#403) 2025-10-31 11:43:30 +01:00
l5y c965d05229 Document environment configuration variables (#400)
* Document environment configuration variables

* Escape sed replacements when updating .env values
2025-10-31 11:08:06 +01:00
l5y ba80fac36c Document federation refresh cadence (#401) 2025-10-31 11:05:08 +01:00
l5y 3c2c7611ee docs: document prometheus metrics (#399) 2025-10-31 11:04:20 +01:00
Nic Jansma 49e0f39ca9 Config: Read PROM_REPORT_IDS from environment (#398) 2025-10-29 09:22:33 +01:00
KenADev 625df7982d feat: Mesh-Ingestor: Ability to provide already-existing interface instance (#395)
* feat: Mesh-Ingestor: Ability to provide already-existing interface instance

* Prevent Signal-Registration if not main thread (causes exception)

* fix redundant ternary operator

---------

Co-authored-by: Ken Ahr <ken.a.iphone@googlemail.com>
2025-10-26 20:47:23 +01:00
KenADev 8eeb13166b fix: Ingestor: Fix error for non-existing datetime.UTC reference (#396)
Co-authored-by: Ken Ahr <ken.a.iphone@googlemail.com>
2025-10-26 20:46:31 +01:00
l5y 80645990cb Chore: bump version to 0.5.4 (#388)
Co-authored-by: l5yth <d220195275+l5yth@users.noreply.github.com>
2025-10-19 10:36:09 +00:00
l5y 96a3bb86e9 Add telemetry formatting module and overlay metrics (#387) 2025-10-19 12:13:32 +02:00
l5y 6775de3cca Prune blank values from API responses (#386) 2025-10-18 20:16:14 +02:00
l5y 8143fbd8f7 Add full support to telemetry schema and API (#385)
* feat: auto-upgrade telemetry schema

* Ensure numeric metrics fallback to valid values

* Format data processing numeric metric lookup
2025-10-18 15:19:33 +02:00
l5y cf3949ef95 Respect PORT environment override (#384) 2025-10-18 13:01:48 +02:00
l5y 32d9da2865 Add instance selector dropdown for federation deployments (#382)
* Add instance selector for federation regions

* Avoid HTML insertion when seeding instance selector
2025-10-18 10:53:26 +02:00
l5y 61e8c92f62 Harden federation announcements (#381) 2025-10-18 10:38:28 +02:00
l5y d954df6294 Ensure private mode disables federation (#380) 2025-10-18 09:48:40 +02:00
l5y 30d535bd43 Ensure private mode disables chat messaging (#378) 2025-10-17 22:47:54 +02:00
l5y d06aa42ab2 Respect FEDERATION flag for federation endpoints (#379) 2025-10-17 22:47:41 +02:00
l5y 108fc93ca1 Expose PRIVATE environment configuration (#377) 2025-10-17 22:43:42 +02:00
l5y 427479c1e6 Fix frontend coverage export for Codecov (#376)
* fix: export frontend coverage for codecov

* Merge V8 file coverages across workers
2025-10-17 22:43:23 +02:00
l5y ee05f312e8 Restrict instance API to recent updates (#374) 2025-10-17 22:17:49 +02:00
l5y c4193e38dc Document and expose federation configuration (#375) 2025-10-17 22:17:32 +02:00
l5y cb9b081606 Chore: bump version to 0.5.3 (#372) 2025-10-17 19:47:18 +00:00
l5y cc8fec6d05 Align theme and info controls (#371)
* Align theme and info controls

* design tweaks
2025-10-17 19:27:14 +00:00
l5y 01665b6e3a Fixes POST request 403 errors on instances behind Cloudflare proxy (#368)
* Add full headers to ingestor POST requests to avoid CF bans

* run black

* Guard Authorization header when token absent

---------

Co-authored-by: varna9000 <milen@aeroisk.com>
2025-10-16 22:29:04 +02:00
l5y 1898a99789 Delay initial federation announcements (#366) 2025-10-16 21:50:43 +02:00
l5y 3eefda9205 Ensure well-known document stays in sync (#365) 2025-10-16 21:43:11 +02:00
l5y a6ba9a8227 Guard federation DNS resolution against restricted networks (#362)
* Guard federation DNS resolution against restricted networks

* Pin federation HTTP clients to vetted IPs
2025-10-16 21:15:34 +02:00
l5y 7055444c4b Add federation ingestion limits and tests (#364) 2025-10-16 21:15:18 +02:00
l5y 4bfc0e25cb Prefer reported primary channel names (#363) 2025-10-16 20:35:24 +02:00
l5y 81335cbf7b Decouple messages API from node joins (#360) 2025-10-16 13:19:29 +02:00
l5y 76b57c08c6 Fix ingestor reconnection detection (#361) 2025-10-16 13:06:32 +02:00
l5y 926b5591b0 Harden instance domain validation (#359) 2025-10-16 10:51:34 +02:00
l5y 957e597004 Ensure INSTANCE_DOMAIN propagates to containers (#358) 2025-10-15 23:22:46 +02:00
l5y 68cfbf139f chore: bump version to 0.5.2 (#356)
Co-authored-by: l5yth <d220195275+l5yth@users.noreply.github.com>
2025-10-15 23:16:30 +02:00
l5y b2f4fcaaa5 Gracefully retry federation announcements over HTTP (#355) 2025-10-15 23:11:59 +02:00
l5y dc2fa9d247 Recursively ingest federated instances (#353)
* Recursively ingest federated instances

* Keep absent is_private nil during signature verification
2025-10-15 21:35:37 +02:00
l5y a32125996c Remove federation timeout environment overrides (#352) 2025-10-15 20:04:19 +02:00
l5y 506a1ab5f6 Close unrelated short info overlays when opening short info (#351)
* Close unrelated overlays when opening short info

* Ensure map overlays respect nested short overlay closing
2025-10-15 16:35:38 +00:00
l5y db7b67d859 Improve federation instance error diagnostics (#350) 2025-10-15 18:35:22 +02:00
l5y 49f08a7f75 Harden federation domain validation and tests (#347)
* Harden federation domain validation and tests

* Preserve domain casing for signature verification

* Forward sanitize helper keyword argument

* Handle mixed-case domains during signature verification
2025-10-15 18:14:31 +02:00
l5y b2d35d3edf Handle malformed instance records (#348) 2025-10-15 17:08:24 +02:00
l5y a9d618cdbc Fix ingestor device mounting for non-serial connections (#346)
* Adjust ingestor device handling

* Restore serial device permissions for ingestor
2025-10-15 16:52:37 +02:00
l5y 6a65abd2e3 Persist instance config assets across Docker restarts (#345) 2025-10-15 16:14:59 +02:00
l5y a3aef8cadd Add modem preset display to node overlay (#340)
* Add modem metadata line to node overlays

* Ensure modem metadata loads for all overlays
2025-10-14 20:59:47 +02:00
l5y cff89a8c88 Display message frequency and channel in chat log (#339)
* Display message frequency and channel in chat log

* Ensure chat prefixes display consistent metadata brackets

* Ensure chat prefixes show non-breaking frequency placeholder

* Adjust chat channel tag placement
2025-10-14 20:56:42 +02:00
l5y 26c1366412 Bump fallback version to v0.5.1 (#338) 2025-10-14 16:51:04 +00:00
l5y 28f5b49f4d docs: update changelog for 0.5.0 (#337) 2025-10-14 16:48:36 +00:00
l5y a46da284e5 Fix ingestor package layout in Docker image (#336) 2025-10-14 18:47:54 +02:00
161 changed files with 21847 additions and 1138 deletions
+14
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
coverage:
status:
project:
+14
View File
@@ -49,6 +49,12 @@ MAX_DISTANCE=42
# Matrix aliases (e.g. #meshtastic-berlin:matrix.org) will be linked via matrix.to automatically.
CONTACT_LINK='#potatomesh:dod.ngo'
# Enable or disable PotatoMesh federation features (1=enabled, 0=disabled)
FEDERATION=1
# Hide public mesh messages from unauthenticated visitors (1=hidden, 0=public)
PRIVATE=0
# =============================================================================
# ADVANCED SETTINGS
@@ -57,9 +63,17 @@ CONTACT_LINK='#potatomesh:dod.ngo'
# Debug mode (0=off, 1=on)
DEBUG=0
# Public domain name for this PotatoMesh instance
# Provide a hostname (with optional port) that resolves to the web service.
# Example: mesh.example.org or mesh.example.org:41447
INSTANCE_DOMAIN=mesh.example.org
# Docker image architecture (linux-amd64, linux-arm64, linux-armv7)
POTATOMESH_IMAGE_ARCH=linux-amd64
# Docker image tag (use "latest" for the newest release or pin to vX.Y)
POTATOMESH_IMAGE_TAG=latest
# Docker Compose networking profile
# Leave unset for Linux hosts (default host networking).
# Set to "bridge" on Docker Desktop (macOS/Windows) if host networking
+14
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: 2
updates:
- package-ecosystem: "ruby"
+14
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: "CodeQL Advanced"
on:
+30 -6
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Build and Push Docker Images
on:
@@ -56,12 +70,17 @@ jobs:
id: version
run: |
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
VERSION="${{ github.event.inputs.version }}"
RAW_VERSION="${{ github.event.inputs.version }}"
else
VERSION=${GITHUB_REF#refs/tags/v}
RAW_VERSION=${GITHUB_REF#refs/tags/}
fi
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "Published version: $VERSION"
STRIPPED_VERSION=${RAW_VERSION#v}
echo "version=$STRIPPED_VERSION" >> $GITHUB_OUTPUT
echo "version_with_v=v$STRIPPED_VERSION" >> $GITHUB_OUTPUT
echo "raw_version=$RAW_VERSION" >> $GITHUB_OUTPUT
echo "Published version: $STRIPPED_VERSION"
- name: Build and push ${{ matrix.service }} for ${{ matrix.architecture.name }}
uses: docker/build-push-action@v5
@@ -74,6 +93,7 @@ jobs:
tags: |
${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service }}-${{ matrix.architecture.name }}:latest
${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service }}-${{ matrix.architecture.name }}:${{ steps.version.outputs.version }}
${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service }}-${{ matrix.architecture.name }}:${{ steps.version.outputs.version_with_v }}
labels: |
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.description=PotatoMesh ${{ matrix.service == 'web' && 'Web Application' || 'Python Ingestor' }} for ${{ matrix.architecture.label }}
@@ -111,12 +131,15 @@ jobs:
- name: Extract version from tag
id: version
run: |
VERSION=${GITHUB_REF#refs/tags/v}
echo "version=$VERSION" >> $GITHUB_OUTPUT
RAW_VERSION=${GITHUB_REF#refs/tags/}
STRIPPED_VERSION=${RAW_VERSION#v}
echo "version=$STRIPPED_VERSION" >> $GITHUB_OUTPUT
echo "version_with_v=v$STRIPPED_VERSION" >> $GITHUB_OUTPUT
- name: Test web application (Linux AMD64)
run: |
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-amd64:${{ steps.version.outputs.version }}
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-amd64:${{ steps.version.outputs.version_with_v }}
docker run --rm -d --name web-test -p 41447:41447 \
-e API_TOKEN=test-token \
-e DEBUG=1 \
@@ -128,6 +151,7 @@ jobs:
- name: Test ingestor (Linux AMD64)
run: |
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:${{ steps.version.outputs.version }}
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:${{ steps.version.outputs.version_with_v }}
docker run --rm --name ingestor-test \
-e POTATOMESH_INSTANCE=http://localhost:41447 \
-e API_TOKEN=test-token \
+14
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: JavaScript
on:
+14
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Python
on:
+14
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Ruby
on:
+7
View File
@@ -69,3 +69,10 @@ ai_docs/
# Generated credentials for the instance
web/.config
# JavaScript dependencies
node_modules/
web/node_modules/
# Debug symbols
ignored.txt
+39
View File
@@ -0,0 +1,39 @@
# Repository Guidelines
Keep code well structured, modular, and not monolithic. If modules get too big, consider a submodule structure.
Make sure all tests pass for Python (`pytest`), Ruby (`rspec`), and JavaScript (`npm test`).
Make sure all code is properly inline documented (PDoc, RDoc, JSDoc, etc.). We do not want any undocumented code.
Make sure all code is 100% unit tested. We want all lines, units, and branches to be thoroughly covered by tests.
New source files should have Apache v2 license headers using the exact string `Copyright © 2025-26 l5yth & contributors`.
Run linters for Python (`black`) and Ruby (`rufo`) to ensure consistent code formatting.
## Project Structure & Module Organization
The repository splits runtime and ingestion logic. `web/` holds the Sinatra dashboard (Ruby code in `lib/potato_mesh`, views in `views/`, static bundles in `public/`).
`data/` hosts the Python Meshtastic ingestor plus migrations and CLI scripts. API fixtures and end-to-end harnesses live in `tests/`. Dockerfiles and compose files support containerized workflows.
## Build, Test, and Development Commands
Run dependency installs inside `web/`: `bundle install` for gems and `npm ci` for JavaScript tooling. Start the app with `cd web && API_TOKEN=dev ./app.sh` for local work or `bundle exec rackup -p 41447` when integrating elsewhere.
Prep ingestion with `python -m venv .venv && pip install -r data/requirements.txt`; `./data/mesh.sh` streams from live radios. `docker-compose -f docker-compose.dev.yml up` brings up the full stack.
## Coding Style & Naming Conventions
Use two-space indentation for Ruby and keep `# frozen_string_literal: true` at the top of new files. Keep Ruby classes/modules in `CamelCase`, filenames in `snake_case.rb`, and feature specs in `*_spec.rb`.
JavaScript follows ES modules under `public/assets/js`; co-locate components with `__tests__` folders and use kebab-case filenames. Format Ruby via `bundle exec rufo .` and Python via `black`. Skip committing generated coverage artifacts.
## Testing Guidelines
Ruby specs run with `cd web && bundle exec rspec`, producing SimpleCov output in `coverage/`. Front-end behaviour is verified through Node's test runner: `cd web && npm test` writes V8 coverage and JUnit XML under `reports/`.
The ingestion layer is guarded by `pytest -q tests/test_mesh.py`; leave fixtures in `tests/` untouched so CI can replay them. New features should ship with matching specs and updated integration checks.
## Commit & Pull Request Guidelines
Commits should stay imperative and reference issues the way history does (`Add chat log entries... (#408)`). Squash noisy work-in-progress commits before pushing. Pull requests need a concise summary, screenshots or curl traces for UI/API tweaks, and links to tracked issues. Paste the command output for the test suites you ran and mention configuration toggles (`API_TOKEN`, `PRIVATE`) reviewers must set.
## Security & Configuration Tips
Never commit real API tokens or `.sqlite` dumps; use `.env.local` files ignored by Git. Confirm env defaults (`API_TOKEN`, `INSTANCE_DOMAIN`, `PRIVATE`) before deploying, and set `FEDERATION=0` when staging private nodes. Review `PROMETHEUS.md` when exposing metrics so scrape endpoints stay internal.
+172 -3
View File
@@ -1,12 +1,181 @@
# CHANGELOG
## Unreleased
## v0.5.5
* Preserve legacy configuration assets when migrating to XDG directories.
* Added comprehensive helper unit tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/457>
* Added reaction-aware handling by @l5yth in <https://github.com/l5yth/potato-mesh/pull/455>
* Env: add map zoom by @l5yth in <https://github.com/l5yth/potato-mesh/pull/454>
* Charts: render aggregated telemetry charts for all nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/453>
* Nodes: render charts detail pages as overlay by @l5yth in <https://github.com/l5yth/potato-mesh/pull/452>
* Fix telemetry parsing for charts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/451>
* Nodes: improve charts on detail pages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/450>
* Nodes: add charts to detail pages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/449>
* Aggregate frontend snapshots across views by @l5yth in <https://github.com/l5yth/potato-mesh/pull/447>
* Remove added 1 if reply with emoji by @Alexkurd in <https://github.com/l5yth/potato-mesh/pull/443>
* Refine node detail view layout by @l5yth in <https://github.com/l5yth/potato-mesh/pull/442>
* Enable map centering from node table coordinates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/439>
* Add node detail route and page by @l5yth in <https://github.com/l5yth/potato-mesh/pull/441>
* Ensure Meshtastic nodeinfo patch runs before importing interfaces by @l5yth in <https://github.com/l5yth/potato-mesh/pull/440>
* Filter zero-valued fields from API responses by @l5yth in <https://github.com/l5yth/potato-mesh/pull/438>
* Add debug payload tracing and ignored packet logging by @l5yth in <https://github.com/l5yth/potato-mesh/pull/437>
* Tighten map auto-fit behaviour by @l5yth in <https://github.com/l5yth/potato-mesh/pull/435>
* Fetch encrypted chat log entries for log tab by @l5yth in <https://github.com/l5yth/potato-mesh/pull/434>
* Add encrypted filter to messages API by @l5yth in <https://github.com/l5yth/potato-mesh/pull/432>
* Guard NodeInfo handler against missing IDs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/431>
* Add standalone full-screen map, chat, and nodes views by @l5yth in <https://github.com/l5yth/potato-mesh/pull/429>
* Ensure chat history fetches full message limit by @l5yth in <https://github.com/l5yth/potato-mesh/pull/428>
* Fix ingestion of nodeinfo packets missing ids (#426) by @l5yth in <https://github.com/l5yth/potato-mesh/pull/427>
* Chore: update license headers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/424>
* Chore: bump version to 0.5.5 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/423>
## v0.5.4
* Handle naming when primary channel has a name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/422>
* Handle edge case when primary channel has a name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/421>
* Add preset mode to logs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/420>
* Parallelize federation tasks with worker pool by @l5yth in <https://github.com/l5yth/potato-mesh/pull/419>
* Allow filtering chat and logs by node name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/417>
* Gem: Add erb as dependency removed from std by @l5yth in <https://github.com/l5yth/potato-mesh/pull/416>
* Implement support for replies and reactions app by @l5yth in <https://github.com/l5yth/potato-mesh/pull/411>
* Ingestor: Ignore direct messages on default channel by @l5yth in <https://github.com/l5yth/potato-mesh/pull/414>
* Agents: Add instructions by @l5yth in <https://github.com/l5yth/potato-mesh/pull/410>
* Display encrypted messages in frontend log window by @l5yth in <https://github.com/l5yth/potato-mesh/pull/409>
* Add chat log entries for telemetry, position, and neighbor events by @l5yth in <https://github.com/l5yth/potato-mesh/pull/408>
* Handle missing instance domain outside production by @l5yth in <https://github.com/l5yth/potato-mesh/pull/405>
* Add tabbed chat panel with channel grouping by @l5yth in <https://github.com/l5yth/potato-mesh/pull/404>
* Normalize numeric client roles using Meshtastic CLI enums by @l5yth in <https://github.com/l5yth/potato-mesh/pull/402>
* Ensure Docker images publish versioned tags by @l5yth in <https://github.com/l5yth/potato-mesh/pull/403>
* Document environment configuration variables by @l5yth in <https://github.com/l5yth/potato-mesh/pull/400>
* Document federation refresh cadence by @l5yth in <https://github.com/l5yth/potato-mesh/pull/401>
* Add Prometheus monitoring documentation by @l5yth in <https://github.com/l5yth/potato-mesh/pull/399>
* Config: Read PROM_REPORT_IDS from environment by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/398>
* Feat: Mesh-Ingestor: Ability to provide already-existing interface instance by @KenADev in <https://github.com/l5yth/potato-mesh/pull/395>
* Fix: Mesh-Ingestor: Fix error for non-existing datetime.UTC reference by @KenADev in <https://github.com/l5yth/potato-mesh/pull/396>
* Chore: bump version to 0.5.4 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/388>
## v0.5.3
* Add telemetry formatting utilities and extend node overlay by @l5yth in <https://github.com/l5yth/potato-mesh/pull/387>
* Prune blank values from API responses by @l5yth in <https://github.com/l5yth/potato-mesh/pull/386>
* Add full support to telemetry schema and API by @l5yth in <https://github.com/l5yth/potato-mesh/pull/385>
* Respect PORT environment override by @l5yth in <https://github.com/l5yth/potato-mesh/pull/384>
* Add instance selector dropdown for federation deployments by @l5yth in <https://github.com/l5yth/potato-mesh/pull/382>
* Harden federation announcements by @l5yth in <https://github.com/l5yth/potato-mesh/pull/381>
* Ensure private mode disables federation by @l5yth in <https://github.com/l5yth/potato-mesh/pull/380>
* Ensure private mode disables chat messaging by @l5yth in <https://github.com/l5yth/potato-mesh/pull/378>
* Disable federation features when FEDERATION=0 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/379>
* Expose PRIVATE environment configuration across tooling by @l5yth in <https://github.com/l5yth/potato-mesh/pull/377>
* Fix frontend coverage export for Codecov by @l5yth in <https://github.com/l5yth/potato-mesh/pull/376>
* Restrict /api/instances results to recent records by @l5yth in <https://github.com/l5yth/potato-mesh/pull/374>
* Expose FEDERATION environment option across tooling by @l5yth in <https://github.com/l5yth/potato-mesh/pull/375>
* Chore: bump version to 0.5.3 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/372>
## v0.5.2
* Align theme and info controls by @l5yth in <https://github.com/l5yth/potato-mesh/pull/371>
* Fixes POST request 403 errors on instances behind Cloudflare proxy by @varna9000 in <https://github.com/l5yth/potato-mesh/pull/368>
* Delay initial federation announcements by @l5yth in <https://github.com/l5yth/potato-mesh/pull/366>
* Ensure well-known document stays in sync on startup by @l5yth in <https://github.com/l5yth/potato-mesh/pull/365>
* Guard federation DNS resolution against restricted networks by @l5yth in <https://github.com/l5yth/potato-mesh/pull/362>
* Add federation ingestion limits and tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/364>
* Prefer reported primary channel names by @l5yth in <https://github.com/l5yth/potato-mesh/pull/363>
* Decouple message API node hydration by @l5yth in <https://github.com/l5yth/potato-mesh/pull/360>
* Fix ingestor reconnection detection by @l5yth in <https://github.com/l5yth/potato-mesh/pull/361>
* Harden instance domain validation by @l5yth in <https://github.com/l5yth/potato-mesh/pull/359>
* Ensure INSTANCE_DOMAIN propagates to containers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/358>
* Chore: bump version to 0.5.2 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/356>
* Gracefully retry federation announcements over HTTP by @l5yth in <https://github.com/l5yth/potato-mesh/pull/355>
## v0.5.1
* Recursively ingest federated instances by @l5yth in <https://github.com/l5yth/potato-mesh/pull/353>
* Remove federation timeout environment overrides by @l5yth in <https://github.com/l5yth/potato-mesh/pull/352>
* Close unrelated short info overlays when opening short info by @l5yth in <https://github.com/l5yth/potato-mesh/pull/351>
* Improve federation instance error diagnostics by @l5yth in <https://github.com/l5yth/potato-mesh/pull/350>
* Harden federation domain validation and tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/347>
* Handle malformed instance records gracefully by @l5yth in <https://github.com/l5yth/potato-mesh/pull/348>
* Fix ingestor device mounting for non-serial connections by @l5yth in <https://github.com/l5yth/potato-mesh/pull/346>
* Ensure Docker deployments persist keyfile and well-known assets by @l5yth in <https://github.com/l5yth/potato-mesh/pull/345>
* Add modem preset display to node overlay by @l5yth in <https://github.com/l5yth/potato-mesh/pull/340>
* Display message frequency and channel in chat log by @l5yth in <https://github.com/l5yth/potato-mesh/pull/339>
* Bump fallback version string to v0.5.1 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/338>
* Docs: update changelog for 0.5.0 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/337>
* Fix ingestor docker import path by @l5yth in <https://github.com/l5yth/potato-mesh/pull/336>
## v0.5.0
* Add JavaScript configuration tests and coverage workflow
* Ensure node overlays appear above fullscreen map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/333>
* Adjust node table columns responsively by @l5yth in <https://github.com/l5yth/potato-mesh/pull/332>
* Add LoRa metadata fields to nodes and messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/331>
* Add channel metadata capture for message tagging by @l5yth in <https://github.com/l5yth/potato-mesh/pull/329>
* Capture radio metadata for ingestor payloads by @l5yth in <https://github.com/l5yth/potato-mesh/pull/327>
* Fix FrozenError when filtering node query results by @l5yth in <https://github.com/l5yth/potato-mesh/pull/324>
* Ensure frontend reports git-aware version strings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/321>
* Ensure web Docker image ships application sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/322>
* Refine stacked short info overlays on the map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/319>
* Refine environment configuration defaults by @l5yth in <https://github.com/l5yth/potato-mesh/pull/318>
* Fix legacy configuration migration to XDG directories by @l5yth in <https://github.com/l5yth/potato-mesh/pull/317>
* Adopt XDG base directories for app data and config by @l5yth in <https://github.com/l5yth/potato-mesh/pull/316>
* Refactor: streamline ingestor environment variables by @l5yth in <https://github.com/l5yth/potato-mesh/pull/314>
* Adjust map auto-fit padding and default zoom by @l5yth in <https://github.com/l5yth/potato-mesh/pull/315>
* Ensure APIs filter stale data and refresh node details from latest sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/312>
* Improve offline tile fallback initialization by @l5yth in <https://github.com/l5yth/potato-mesh/pull/307>
* Add fallback for offline tile rendering errors by @l5yth in <https://github.com/l5yth/potato-mesh/pull/306>
* Fix map auto-fit handling and add controller by @l5yth in <https://github.com/l5yth/potato-mesh/pull/311>
* Fix map initialization bounds and add coverage by @l5yth in <https://github.com/l5yth/potato-mesh/pull/305>
* Increase coverage for configuration and sanitizer helpers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/303>
* Add comprehensive theme and background front-end tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/302>
* Document sanitization and helper modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/301>
* Add in-repo Meshtastic protobuf stubs for tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/300>
* Handle CRL lookup failures during federation TLS by @l5yth in <https://github.com/l5yth/potato-mesh/pull/299>
* Ensure JavaScript workflow runs frontend tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/298>
* Unify structured logging across application and ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/296>
* Add Apache license headers to missing sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/297>
* Update workflows for ingestor, sinatra, and frontend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/295>
* Fix IPv6 instance domain canonicalization by @l5yth in <https://github.com/l5yth/potato-mesh/pull/294>
* Handle federation HTTPS CRL verification failures by @l5yth in <https://github.com/l5yth/potato-mesh/pull/293>
* Adjust federation announcement interval to eight hours by @l5yth in <https://github.com/l5yth/potato-mesh/pull/292>
* Restore modular app functionality by @l5yth in <https://github.com/l5yth/potato-mesh/pull/291>
* Refactor config and metadata helpers into PotatoMesh modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/290>
* Update default site configuration defaults by @l5yth in <https://github.com/l5yth/potato-mesh/pull/288>
* Add regression test for queue drain concurrency by @l5yth in <https://github.com/l5yth/potato-mesh/pull/287>
* Ensure Docker config directories are created for non-root user by @l5yth in <https://github.com/l5yth/potato-mesh/pull/286>
* Clarify numeric address requirement for network target parsing by @l5yth in <https://github.com/l5yth/potato-mesh/pull/285>
* Ensure mesh ingestor queue resets active flag when idle by @l5yth in <https://github.com/l5yth/potato-mesh/pull/284>
* Clarify BLE connection description in README by @l5yth in <https://github.com/l5yth/potato-mesh/pull/283>
* Configure web container for production mode by @l5yth in <https://github.com/l5yth/potato-mesh/pull/282>
* Normalize INSTANCE_DOMAIN configuration to require hostnames by @l5yth in <https://github.com/l5yth/potato-mesh/pull/280>
* Avoid blocking startup on federation announcements by @l5yth in <https://github.com/l5yth/potato-mesh/pull/281>
* Fix production Docker builds for web and ingestor images by @l5yth in <https://github.com/l5yth/potato-mesh/pull/279>
* Improve instance domain detection logic by @l5yth in <https://github.com/l5yth/potato-mesh/pull/278>
* Implement federation announcements and instances API by @l5yth in <https://github.com/l5yth/potato-mesh/pull/277>
* Fix federation signature handling and IP guard by @l5yth in <https://github.com/l5yth/potato-mesh/pull/276>
* Add persistent federation metadata endpoint by @l5yth in <https://github.com/l5yth/potato-mesh/pull/274>
* Add configurable instance domain with reverse DNS fallback by @l5yth in <https://github.com/l5yth/potato-mesh/pull/272>
* Document production deployment configuration by @l5yth in <https://github.com/l5yth/potato-mesh/pull/273>
* Add targeted API endpoints and expose version metadata by @l5yth in <https://github.com/l5yth/potato-mesh/pull/271>
* Prometheus metrics updates on startup and for position/telemetry by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/270>
* Add hourly reconnect handling for inactive mesh interface by @l5yth in <https://github.com/l5yth/potato-mesh/pull/267>
* Dockerfile fixes by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/268>
* Added prometheus /metrics endpoint by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/262>
* Add fullscreen toggle to map view by @l5yth in <https://github.com/l5yth/potato-mesh/pull/263>
* Relocate JS coverage export script into web directory by @l5yth in <https://github.com/l5yth/potato-mesh/pull/266>
* V0.4.0 version string in web UI by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/265>
* Add energy saving cycle to ingestor daemon by @l5yth in <https://github.com/l5yth/potato-mesh/pull/256>
* Chore: restore apache headers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/260>
* Docs: add matrix to readme by @l5yth in <https://github.com/l5yth/potato-mesh/pull/259>
* Force dark theme default based on sanitized cookie by @l5yth in <https://github.com/l5yth/potato-mesh/pull/252>
* Document mesh ingestor modules with PDoc-style docstrings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/255>
* Handle missing node IDs in Meshtastic nodeinfo packets by @l5yth in <https://github.com/l5yth/potato-mesh/pull/251>
* Document Ruby helper methods with RDoc comments by @l5yth in <https://github.com/l5yth/potato-mesh/pull/254>
* Add JSDoc documentation across client scripts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/253>
* Fix mesh ingestor telemetry and neighbor handling by @l5yth in <https://github.com/l5yth/potato-mesh/pull/249>
* Refactor front-end assets into external modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/245>
* Add tests for helper utilities and asset routes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/243>
* Docs: add ingestor inline docstrings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/244>
* Add comprehensive coverage tests for mesh ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/241>
* Add inline documentation to config helpers and frontend scripts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/240>
* Update changelog by @l5yth in <https://github.com/l5yth/potato-mesh/pull/238>
## v0.4.0
+37 -18
View File
@@ -13,13 +13,15 @@ will pull the latest release images for you.
## Images on GHCR
| Service | Image |
|----------|-------------------------------------------------------------------|
| Web UI | `ghcr.io/l5yth/potato-mesh-web-linux-amd64:latest` |
| Ingestor | `ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:latest` |
| Service | Image |
|----------|---------------------------------------------------------------------------------------------------------------|
| Web UI | `ghcr.io/l5yth/potato-mesh-web-linux-amd64:<tag>` (e.g. `latest`, `3.0`, or `v3.0`) |
| Ingestor | `ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:<tag>` (e.g. `latest`, `3.0`, or `v3.0`) |
Images are published for every tagged release. Replace `latest` with a
specific version tag if you prefer pinned deployments.
Images are published for every tagged release. Each build receives both semantic
version tags (for example `3.0`) and a matching `v`-prefixed tag (for example
`v3.0`). `latest` always points to the newest release, so pin one of the version
tags when you need a specific build.
## Configure environment
@@ -31,28 +33,45 @@ against the web API.
API_TOKEN=replace-with-a-strong-token
SITE_NAME=PotatoMesh Demo
CONNECTION=/dev/ttyACM0
INSTANCE_DOMAIN=mesh.example.org
```
Additional environment variables are optional:
- `CHANNEL`, `FREQUENCY`, `MAP_CENTER`, `MAX_DISTANCE`, and `CONTACT_LINK`
customise the UI.
- `POTATOMESH_INSTANCE` (defaults to `http://web:41447`) lets the ingestor post
to a remote PotatoMesh instance if you do not run both services together.
- `CONNECTION` overrides the default serial device or network endpoint used by
the ingestor.
- `CHANNEL_INDEX` selects the LoRa channel when using serial or Bluetooth
connections.
- `DEBUG` enables verbose logging across the stack.
| Variable | Default | Purpose |
| --- | --- | --- |
| `API_TOKEN` | _required_ | Shared secret used by the ingestor and API clients for authenticated `POST` requests. |
| `INSTANCE_DOMAIN` | _auto-detected_ | Public hostname (optionally with port) advertised by the web UI, metadata, and API responses. |
| `SITE_NAME` | `"PotatoMesh Demo"` | Title and branding surfaced in the web UI. |
| `CHANNEL` | `"#LongFast"` | Default LoRa channel label displayed on the dashboard. |
| `FREQUENCY` | `"915MHz"` | Default LoRa frequency description shown in the UI. |
| `CONTACT_LINK` | `"#potatomesh:dod.ngo"` | Chat link or Matrix room alias rendered in UI footers and overlays. |
| `MAP_CENTER` | `38.761944,-27.090833` | Latitude and longitude that centre the map view. |
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom (disables the auto-fit checkbox when set). |
| `MAX_DISTANCE` | `42` | Maximum relationship distance (km) before edges are hidden. |
| `DEBUG` | `0` | Enables verbose logging across services when set to `1`. |
| `FEDERATION` | `1` | Controls whether the instance announces itself and crawls peers (`1`) or stays isolated (`0`). |
| `PRIVATE` | `0` | Restricts public visibility and disables chat/message endpoints when set to `1`. |
| `CONNECTION` | `/dev/ttyACM0` | Serial device, TCP endpoint, or Bluetooth target used by the ingestor to reach the radio. |
The ingestor also respects supporting variables such as `POTATOMESH_INSTANCE`
(defaults to `http://web:41447`) for remote posting and `CHANNEL_INDEX` when
selecting a LoRa channel on serial or Bluetooth connections.
## Docker Compose file
Use the `docker-compose.yml` file provided in the repository (or download the
[raw file from GitHub](https://raw.githubusercontent.com/l5yth/potato-mesh/main/docker-compose.yml)).
It already references the published GHCR images, defines persistent volumes for
data and logs, and includes optional bridge-profile services for environments
that require classic port mapping. Place this file in the same directory as
your `.env` file so Compose can pick up both.
data, configuration, and logs, and includes optional bridge-profile services for
environments that require classic port mapping. Place this file in the same
directory as your `.env` file so Compose can pick up both.
The dedicated configuration volume binds to `/app/.config/potato-mesh` inside
the container. This path stores the instance private key and staged
`/.well-known/potato-mesh` documents. Because the volume persists independently
of container lifecycle events, generated credentials are not replaced on reboot
or re-deploy.
## Start the stack
+15
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This Dockerfile is kept for backward compatibility. The canonical build
# instructions live in `web/Dockerfile`; keep the two files in sync.
@@ -70,6 +84,7 @@ ENV APP_ENV=production \
CHANNEL="#LongFast" \
FREQUENCY="915MHz" \
MAP_CENTER="38.761944,-27.090833" \
MAP_ZOOM="" \
MAX_DISTANCE=42 \
CONTACT_LINK="#potatomesh:dod.ngo" \
DEBUG=0
+100
View File
@@ -0,0 +1,100 @@
# Prometheus Monitoring for PotatoMesh
PotatoMesh exposes runtime telemetry through a dedicated Prometheus endpoint so you can
observe message flow, node health, and geospatial metadata alongside the rest of your
infrastructure. This guide explains how the exporter is wired into the web
application, which metrics are available, and how to integrate the endpoint with a
Prometheus server.
## Runtime integration
The Sinatra application automatically loads the `prometheus-client` gem and mounts the
collector and exporter middlewares during boot. No additional configuration is
required to enable the `/metrics` endpoint—running the web application is enough to
serve Prometheus data on the same port as the dashboard. The middleware pair
collects default Rack statistics and publishes PotatoMesh-specific gauges and
counters, which are updated whenever the ingestors process new node records.
A background refresh is triggered during start-up via
`update_all_prometheus_metrics_from_nodes`, which seeds the gauges based on the latest
state in the database. Subsequent POST requests to the ingest APIs update each metric
in near real time.
## Selecting which nodes are exported
To avoid creating high-cardinality time series, PotatoMesh does not export per-node
metrics unless you opt in by providing node identifiers. Control this behaviour with
the `PROM_REPORT_IDS` environment variable:
- Leave the variable unset or blank to only export aggregate gauges such as the total
node count.
- Set `PROM_REPORT_IDS=*` to export metrics for every node in the database.
- Provide a comma-separated list (for example `PROM_REPORT_IDS=ABCD1234,EFGH5678`) to
expose metrics for specific nodes.
The selection applies to both the initial refresh and the incremental updates handled
by the ingest pipeline.
## Available metrics
| Metric name | Type | Labels | Description |
| --- | --- | --- | --- |
| `meshtastic_messages_total` | Counter | _none_ | Increments each time the ingest pipeline accepts a new message payload. |
| `meshtastic_nodes` | Gauge | _none_ | Tracks the number of nodes currently stored in the database. |
| `meshtastic_node` | Gauge | `node`, `short_name`, `long_name`, `hw_model`, `role` | Reports a node as present (value `1`) along with identity metadata. |
| `meshtastic_node_battery_level` | Gauge | `node` | Most recent battery percentage reported by the node. |
| `meshtastic_node_voltage` | Gauge | `node` | Most recent battery voltage reading. |
| `meshtastic_node_uptime_seconds` | Gauge | `node` | Uptime reported by the device in seconds. |
| `meshtastic_node_channel_utilization` | Gauge | `node` | Latest channel utilisation ratio supplied by the node. |
| `meshtastic_node_transmit_air_utilization` | Gauge | `node` | Proportion of on-air time spent transmitting. |
| `meshtastic_node_latitude` | Gauge | `node` | Latitude component of the last known position. |
| `meshtastic_node_longitude` | Gauge | `node` | Longitude component of the last known position. |
| `meshtastic_node_altitude` | Gauge | `node` | Altitude (in metres) of the last known position. |
All per-node gauges are only emitted for identifiers included in `PROM_REPORT_IDS`.
Some values require telemetry packets to be present—for example, devices must provide
metrics or positional updates before the related gauges appear.
## Accessing the `/metrics` endpoint
Once the application is running, query the exporter directly:
```bash
curl http://localhost:41447/metrics
```
Use any HTTP client capable of plain-text requests. Prometheus scrapers should target
the same URL. The endpoint returns data in the standard exposition format produced by
`prometheus-client`.
## Prometheus scrape configuration
Add a job to your Prometheus server configuration that points to the PotatoMesh
instance. This example polls an instance running locally on the default port every 15
seconds:
```yaml
scrape_configs:
- job_name: potatomesh
scrape_interval: 15s
static_configs:
- targets:
- localhost:41447
```
If your deployment requires authentication or runs behind a reverse proxy, configure
Prometheus to match your network topology (for example by adding basic authentication
credentials, custom headers, or TLS settings).
## Troubleshooting
- **No per-node metrics appear.** Ensure that `PROM_REPORT_IDS` is set and that the
specified nodes exist in the database. Set the value to `*` if you want to export
every node during initial validation.
- **Metrics look stale after a restart.** Confirm that the ingestor is still posting
telemetry. The exporter only reflects data stored in the PotatoMesh database.
- **Scrapes time out.** Verify that the Prometheus server can reach the PotatoMesh
HTTP port and that no reverse proxy is blocking the `/metrics` path.
With the endpoint configured, you can build Grafana dashboards or alerting rules to
keep track of community mesh health in real time.
+65 -31
View File
@@ -1,10 +1,11 @@
# 🥔 PotatoMesh
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/l5yth/potato-mesh/ruby.yml?branch=main)](https://github.com/l5yth/potato-mesh/actions)
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/l5yth/potato-mesh)](https://github.com/l5yth/potato-mesh/releases)
[![GitHub release](https://img.shields.io/github/v/release/l5yth/potato-mesh)](https://github.com/l5yth/potato-mesh/releases)
[![codecov](https://codecov.io/gh/l5yth/potato-mesh/branch/main/graph/badge.svg?token=FS7252JVZT)](https://codecov.io/gh/l5yth/potato-mesh)
[![Open-Source License](https://img.shields.io/github/license/l5yth/potato-mesh)](LICENSE)
[![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/l5yth/potato-mesh/issues)
[![Matrix Chat](https://img.shields.io/badge/matrix-%23potatomesh:dod.ngo-blue)](https://matrix.to/#/#potatomesh:dod.ngo)
A simple Meshtastic-powered node dashboard for your local community. _No MQTT clutter, just local LoRa aether._
@@ -24,7 +25,7 @@ Requires Ruby for the Sinatra web app and SQLite3 for the app's database.
```bash
pacman -S ruby sqlite3
gem install sinatra sqlite3 rackup puma rspec rack-test rufo
gem install sinatra sqlite3 rackup puma rspec rack-test rufo prometheus-client
cd ./web
bundle install
```
@@ -67,29 +68,23 @@ exec ruby app.rb -p 41447 -o 0.0.0.0
* Configure `INSTANCE_DOMAIN` with the public URL of your deployment so vanity
links and generated metadata resolve correctly.
### Configuration storage
PotatoMesh stores its runtime assets using the XDG base directory specification.
During startup the web application migrates existing configuration from
`web/.config` and `web/config` into the resolved `XDG_CONFIG_HOME` directory.
This preserves previously generated instance key material and
`/.well-known/potato-mesh` documents so upgrades do not create new credentials
unnecessarily. When XDG directories are not provided the application falls back
to the repository root.
The migrated key is written to `<XDG_CONFIG_HOME>/potato-mesh/keyfile` and the
well-known document is staged in
`<XDG_CONFIG_HOME>/potato-mesh/well-known/potato-mesh`.
The web app can be configured with environment variables (defaults shown):
* `SITE_NAME` - title and header shown in the UI (default: "PotatoMesh Demo")
* `CHANNEL` - default channel shown in the UI (default: "#LongFast")
* `FREQUENCY` - default frequency shown in the UI (default: "915MHz")
* `MAP_CENTER` - default map center coordinates (default: `38.761944,-27.090833`)
* `MAX_DISTANCE` - hide nodes farther than this distance from the center (default: `42`)
* `CONTACT_LINK` - chat link or Matrix alias for footer and overlay (default: `#potatomesh:dod.ngo`)
* `PRIVATE` - set to `1` to hide the chat UI, disable message APIs, and exclude hidden clients (default: unset)
| Variable | Default | Purpose |
| --- | --- | --- |
| `API_TOKEN` | _required_ | Shared secret that authorizes ingestors and API clients making `POST` requests. |
| `INSTANCE_DOMAIN` | _auto-detected_ | Public hostname (optionally with port) used for metadata, federation, and generated API links. |
| `SITE_NAME` | `"PotatoMesh Demo"` | Title and header displayed in the UI. |
| `CHANNEL` | `"#LongFast"` | Default channel name displayed in the UI. |
| `FREQUENCY` | `"915MHz"` | Default frequency description displayed in the UI. |
| `CONTACT_LINK` | `"#potatomesh:dod.ngo"` | Chat link or Matrix alias rendered in the footer and overlays. |
| `MAP_CENTER` | `38.761944,-27.090833` | Latitude and longitude that centre the map on load. |
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom applied on first load; disables auto-fit when provided. |
| `MAX_DISTANCE` | `42` | Maximum distance (km) before node relationships are hidden on the map. |
| `DEBUG` | `0` | Set to `1` for verbose logging in the web and ingestor services. |
| `FEDERATION` | `1` | Set to `1` to announce your instance and crawl peers, or `0` to disable federation. Private mode overrides this. |
| `PRIVATE` | `0` | Set to `1` to hide the chat UI, disable message APIs, and exclude hidden clients from public listings. |
| `CONNECTION` | `/dev/ttyACM0` | Serial device, TCP endpoint, or Bluetooth target used by the ingestor to reach the Meshtastic radio. |
The application derives SEO-friendly document titles, descriptions, and social
preview tags from these existing configuration values and reuses the bundled
@@ -98,9 +93,38 @@ logo for Open Graph and Twitter cards.
Example:
```bash
SITE_NAME="PotatoMesh Demo" MAP_CENTER=38.761944,-27.090833 MAX_DISTANCE=42 CONTACT_LINK="#potatomesh:dod.ngo" ./app.sh
SITE_NAME="PotatoMesh Demo" MAP_CENTER=38.761944,-27.090833 MAP_ZOOM=11 MAX_DISTANCE=42 CONTACT_LINK="#potatomesh:dod.ngo" ./app.sh
```
### Configuration & Storage
PotatoMesh stores its runtime assets using the XDG base directory specification.
When XDG directories are not provided the application falls back
to the repository root.
The key is written to `$XDG_CONFIG_HOME/potato-mesh/keyfile` and the
well-known document is staged in
`$XDG_CONFIG_HOME/potato-mesh/well-known/potato-mesh`.
The database can be found in `$XDG_DATA_HOME/potato-mesh`.
### Federation
PotatoMesh instances can optionally federate by publishing signed metadata and
discovering peers. Federation is enabled by default and controlled with the
`FEDERATION` environment variable. Set `FEDERATION=1` (default) to announce your
instance, respond to remote crawlers, and crawl the wider network. Set
`FEDERATION=0` to keep your deployment isolated—federation requests will be
ignored and the ingestor will skip discovery tasks. Private mode still takes
precedence; when `PRIVATE=1`, federation features remain disabled regardless of
the `FEDERATION` value.
When federation is enabled, PotatoMesh automatically refreshes entries from
known peers every eight hours to keep the directory current. Instances that
stop responding are considered stale and are removed from the web frontend after
72 hours, ensuring visitors only see active deployments in the public
directory.
### API
The web app contains an API:
@@ -110,7 +134,9 @@ The web app contains an API:
* GET `/api/messages?limit=100` - returns the latest 100 messages (disabled when `PRIVATE=1`)
* GET `/api/telemetry?limit=100` - returns the latest 100 telemetry records
* GET `/api/neighbors?limit=100` - returns the latest 100 neighbor tuples
* GET `/metrics`- prometheus endpoint
* GET `/api/instances` - returns known potato-mesh instances in other locations
* GET `/metrics` - metrics for the Prometheus endpoint
* GET `/version` - information about the potato-mesh instance
* POST `/api/nodes` - upserts nodes provided as JSON object mapping node ids to node data (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/positions` - appends positions provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/messages` - appends messages provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`; disabled when `PRIVATE=1`)
@@ -119,6 +145,12 @@ The web app contains an API:
The `API_TOKEN` environment variable must be set to a non-empty value and match the token supplied in the `Authorization` header for `POST` requests.
### Observability
PotatoMesh ships with a Prometheus exporter mounted at `/metrics`. Consult
[`PROMETHEUS.md`](./PROMETHEUS.md) for deployment guidance, metric details, and
scrape configuration examples.
## Python Ingestor
The web app is not meant to be run locally connected to a Meshtastic node but rather
@@ -162,21 +194,23 @@ interface. `CONNECTION` also accepts Bluetooth device addresses (e.g.,
## Demos
* <https://potatomesh.net/>
* <https://vrs.kdd2105.ru/>
* <https://potatomesh.stratospire.com/>
* <https://es1tem.uk/>
Post your nodes here:
* <https://github.com/l5yth/potato-mesh/discussions/258>
## Docker
Docker images are published on Github for each release:
```bash
docker pull ghcr.io/l5yth/potato-mesh/web:latest
docker pull ghcr.io/l5yth/potato-mesh/web:latest # newest release
docker pull ghcr.io/l5yth/potato-mesh/web:v3.0 # pinned historical release
docker pull ghcr.io/l5yth/potato-mesh/ingestor:latest
```
See the [Docker guide](DOCKER.md) for more details and custome deployment instructions.
Set `POTATOMESH_IMAGE_TAG` in your `.env` (or environment) to deploy a specific
tagged release with Docker Compose. See the [Docker guide](DOCKER.md) for more
details and custom deployment instructions.
## License
+77 -3
View File
@@ -1,5 +1,5 @@
#!/bin/bash
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -56,10 +56,14 @@ read_with_default() {
update_env() {
local key="$1"
local value="$2"
local escaped_value
# Escape characters that would break the sed replacement delimiter or introduce backreferences
escaped_value=$(printf '%s' "$value" | sed -e 's/[&|]/\\&/g')
if grep -q "^$key=" .env; then
# Update existing value
sed -i.bak "s/^$key=.*/$key=$value/" .env
sed -i.bak "s|^$key=.*|$key=$escaped_value|" .env
else
# Add new value
echo "$key=$value" >> .env
@@ -70,16 +74,24 @@ update_env() {
SITE_NAME=$(grep "^SITE_NAME=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "PotatoMesh Demo")
CHANNEL=$(grep "^CHANNEL=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "#LongFast")
FREQUENCY=$(grep "^FREQUENCY=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "915MHz")
FEDERATION=$(grep "^FEDERATION=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "1")
PRIVATE=$(grep "^PRIVATE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "0")
MAP_CENTER=$(grep "^MAP_CENTER=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "38.761944,-27.090833")
MAP_ZOOM=$(grep "^MAP_ZOOM=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
MAX_DISTANCE=$(grep "^MAX_DISTANCE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "42")
CONTACT_LINK=$(grep "^CONTACT_LINK=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "#potatomesh:dod.ngo")
API_TOKEN=$(grep "^API_TOKEN=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
POTATOMESH_IMAGE_ARCH=$(grep "^POTATOMESH_IMAGE_ARCH=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "linux-amd64")
POTATOMESH_IMAGE_TAG=$(grep "^POTATOMESH_IMAGE_TAG=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "latest")
INSTANCE_DOMAIN=$(grep "^INSTANCE_DOMAIN=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
DEBUG=$(grep "^DEBUG=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "0")
CONNECTION=$(grep "^CONNECTION=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "/dev/ttyACM0")
echo "📍 Location Settings"
echo "-------------------"
read_with_default "Site Name (your mesh network name)" "$SITE_NAME" SITE_NAME
read_with_default "Map Center (lat,lon)" "$MAP_CENTER" MAP_CENTER
read_with_default "Default map zoom (leave blank to auto-fit)" "$MAP_ZOOM" MAP_ZOOM
read_with_default "Max Distance (km)" "$MAX_DISTANCE" MAX_DISTANCE
echo ""
@@ -92,12 +104,44 @@ echo ""
echo "💬 Optional Settings"
echo "-------------------"
read_with_default "Chat link or Matrix room (optional)" "$CONTACT_LINK" CONTACT_LINK
read_with_default "Debug logging (1=enabled, 0=disabled)" "$DEBUG" DEBUG
echo ""
echo "🤝 Federation Settings"
echo "----------------------"
echo "Federation shares instance metadata with other PotatoMesh deployments."
echo "Set to 1 to enable discovery or 0 to keep your instance isolated."
read_with_default "Enable federation (1=yes, 0=no)" "$FEDERATION" FEDERATION
echo ""
echo "🙈 Privacy Settings"
echo "-------------------"
echo "Private mode hides public mesh messages from unauthenticated visitors."
echo "Set to 1 to hide public feeds or 0 to keep them visible."
read_with_default "Enable private mode (1=yes, 0=no)" "$PRIVATE" PRIVATE
echo ""
echo "🛠 Docker Settings"
echo "------------------"
echo "Specify the Docker image architecture for your host (linux-amd64, linux-arm64, linux-armv7)."
read_with_default "Docker image architecture" "$POTATOMESH_IMAGE_ARCH" POTATOMESH_IMAGE_ARCH
echo "Enter the Docker image tag to deploy (use 'latest' for the newest release or pin a version such as v3.0)."
read_with_default "Docker image tag (latest, vX.Y, etc.)" "$POTATOMESH_IMAGE_TAG" POTATOMESH_IMAGE_TAG
echo ""
echo "🔌 Ingestor Connection"
echo "----------------------"
echo "Define how the mesh ingestor connects to your Meshtastic device."
echo "Use serial devices like /dev/ttyACM0, TCP endpoints such as tcp://host:port,"
echo "or Bluetooth addresses when supported."
read_with_default "Connection target" "$CONNECTION" CONNECTION
echo ""
echo "🌐 Domain Settings"
echo "------------------"
echo "Provide the public hostname that clients should use to reach this PotatoMesh instance."
echo "Leave blank to allow automatic detection via reverse DNS."
read_with_default "Instance domain (e.g. mesh.example.org)" "$INSTANCE_DOMAIN" INSTANCE_DOMAIN
echo ""
echo "🔐 Security Settings"
@@ -138,10 +182,25 @@ update_env "SITE_NAME" "\"$SITE_NAME\""
update_env "CHANNEL" "\"$CHANNEL\""
update_env "FREQUENCY" "\"$FREQUENCY\""
update_env "MAP_CENTER" "\"$MAP_CENTER\""
if [ -n "$MAP_ZOOM" ]; then
update_env "MAP_ZOOM" "$MAP_ZOOM"
else
sed -i.bak '/^MAP_ZOOM=.*/d' .env
fi
update_env "MAX_DISTANCE" "$MAX_DISTANCE"
update_env "CONTACT_LINK" "\"$CONTACT_LINK\""
update_env "DEBUG" "$DEBUG"
update_env "API_TOKEN" "$API_TOKEN"
update_env "POTATOMESH_IMAGE_ARCH" "$POTATOMESH_IMAGE_ARCH"
update_env "POTATOMESH_IMAGE_TAG" "$POTATOMESH_IMAGE_TAG"
update_env "FEDERATION" "$FEDERATION"
update_env "PRIVATE" "$PRIVATE"
update_env "CONNECTION" "$CONNECTION"
if [ -n "$INSTANCE_DOMAIN" ]; then
update_env "INSTANCE_DOMAIN" "$INSTANCE_DOMAIN"
else
sed -i.bak '/^INSTANCE_DOMAIN=.*/d' .env
fi
# Migrate legacy connection settings and ensure defaults exist
if grep -q "^MESH_SERIAL=" .env; then
@@ -170,12 +229,27 @@ echo ""
echo "📋 Your settings:"
echo " Site Name: $SITE_NAME"
echo " Map Center: $MAP_CENTER"
if [ -n "$MAP_ZOOM" ]; then
echo " Map Zoom: $MAP_ZOOM"
else
echo " Map Zoom: Auto-fit"
fi
echo " Max Distance: ${MAX_DISTANCE}km"
echo " Channel: $CHANNEL"
echo " Frequency: $FREQUENCY"
echo " Chat: ${CONTACT_LINK:-'Not set'}"
echo " Debug Logging: ${DEBUG}"
echo " Connection: ${CONNECTION}"
echo " API Token: ${API_TOKEN:0:8}..."
echo " Docker Image Arch: $POTATOMESH_IMAGE_ARCH"
echo " Docker Image Tag: $POTATOMESH_IMAGE_TAG"
echo " Private Mode: ${PRIVATE}"
echo " Instance Domain: ${INSTANCE_DOMAIN:-'Auto-detected'}"
if [ "${FEDERATION:-1}" = "0" ]; then
echo " Federation: Disabled"
else
echo " Federation: Enabled"
fi
echo ""
echo "🚀 You can now start PotatoMesh with:"
echo " docker-compose up -d"
+17 -4
View File
@@ -1,4 +1,17 @@
# syntax=docker/dockerfile:1.6
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ARG TARGETOS=linux
ARG PYTHON_VERSION=3.12.6
@@ -26,7 +39,7 @@ RUN set -eux; \
python -m pip install --no-cache-dir -r requirements.txt; \
apk del .build-deps
COPY data/ .
COPY data /app/data
RUN addgroup -S potatomesh && \
adduser -S potatomesh -G potatomesh && \
adduser potatomesh dialout && \
@@ -40,7 +53,7 @@ ENV CONNECTION=/dev/ttyACM0 \
POTATOMESH_INSTANCE="" \
API_TOKEN=""
CMD ["python", "mesh.py"]
CMD ["python", "-m", "data.mesh"]
# Windows production image
FROM python:${PYTHON_VERSION}-windowsservercore-ltsc2022 AS production-windows
@@ -55,7 +68,7 @@ WORKDIR /app
COPY data/requirements.txt ./
RUN python -m pip install --no-cache-dir -r requirements.txt
COPY data/ .
COPY data /app/data
USER ContainerUser
@@ -65,6 +78,6 @@ ENV CONNECTION=/dev/ttyACM0 \
POTATOMESH_INSTANCE="" \
API_TOKEN=""
CMD ["python", "mesh.py"]
CMD ["python", "-m", "data.mesh"]
FROM production-${TARGETOS} AS production
+6 -1
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,3 +17,8 @@
The ``data.mesh`` module exposes helpers for reading Meshtastic node and
message information before forwarding it to the accompanying web application.
"""
VERSION = "0.5.6"
"""Semantic version identifier shared with the dashboard and front-end."""
__version__ = VERSION
+1 -1
View File
@@ -1,4 +1,4 @@
-- Copyright (C) 2025 l5yth
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
+1 -1
View File
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+1 -2
View File
@@ -1,6 +1,5 @@
#!/usr/bin/env bash
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+1 -1
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+27 -12
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -78,17 +78,36 @@ def _iter_channel_objects(channels_obj: Any) -> Iterator[Any]:
def _primary_channel_name() -> str | None:
"""Return the name to use for the primary channel when available."""
"""Return the fallback name to use for the primary channel when needed."""
preset = getattr(config, "MODEM_PRESET", None)
if isinstance(preset, str) and preset.strip():
return preset
return preset.strip()
env_name = os.environ.get("CHANNEL", "").strip()
if env_name:
return env_name
return None
def _extract_channel_name(settings_obj: Any) -> str | None:
"""Normalise the configured channel name extracted from ``settings_obj``."""
if settings_obj is None:
return None
if isinstance(settings_obj, dict):
candidate = settings_obj.get("name")
else:
candidate = getattr(settings_obj, "name", None)
if isinstance(candidate, str):
candidate = candidate.strip()
if candidate:
return candidate
return None
def _normalize_role(role: Any) -> int | None:
"""Convert a channel role descriptor into an integer value."""
@@ -122,27 +141,23 @@ def _channel_tuple(channel_obj: Any) -> tuple[int, str] | None:
role_value = _normalize_role(getattr(channel_obj, "role", None))
if role_value == _ROLE_PRIMARY:
channel_index = 0
channel_name = _primary_channel_name()
channel_name = _extract_channel_name(getattr(channel_obj, "settings", None))
if channel_name is None:
channel_name = _primary_channel_name()
elif role_value == _ROLE_SECONDARY:
raw_index = getattr(channel_obj, "index", None)
try:
channel_index = int(raw_index)
except Exception:
channel_index = None
settings = getattr(channel_obj, "settings", None)
channel_name = getattr(settings, "name", None) if settings else None
channel_name = _extract_channel_name(getattr(channel_obj, "settings", None))
else:
return None
if not isinstance(channel_index, int):
return None
if isinstance(channel_name, str):
channel_name = channel_name.strip()
else:
channel_name = None
if not channel_name:
if not isinstance(channel_name, str) or not channel_name:
return None
return channel_index, channel_name
+1 -1
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+64 -12
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -34,6 +34,7 @@ _RECEIVE_TOPICS = (
"meshtastic.receive.NODEINFO_APP",
"meshtastic.receive.NEIGHBORINFO_APP",
"meshtastic.receive.TEXT_MESSAGE_APP",
"meshtastic.receive.REACTION_APP",
"meshtastic.receive.TELEMETRY_APP",
)
@@ -167,7 +168,46 @@ def _is_ble_interface(iface_obj) -> bool:
return "ble_interface" in module_name
def main() -> None:
def _connected_state(candidate) -> bool | None:
"""Return the connection state advertised by ``candidate``.
Parameters:
candidate: Attribute returned from ``iface.isConnected`` on a
Meshtastic interface. The value may be a boolean, a callable that
yields a boolean, or a :class:`threading.Event` instance.
Returns:
``True`` when the interface is believed to be connected, ``False``
when it appears disconnected, and ``None`` when the state cannot be
determined from the provided attribute.
"""
if candidate is None:
return None
if isinstance(candidate, threading.Event):
return candidate.is_set()
is_set_method = getattr(candidate, "is_set", None)
if callable(is_set_method):
try:
return bool(is_set_method())
except Exception:
return None
if callable(candidate):
try:
return bool(candidate())
except Exception:
return None
try:
return bool(candidate)
except Exception: # pragma: no cover - defensive guard
return None
def main(existing_interface=None) -> None:
"""Run the mesh ingestion daemon until interrupted."""
subscribed = _subscribe_receive_topics()
@@ -179,7 +219,7 @@ def main() -> None:
topics=subscribed,
)
iface = None
iface = existing_interface
resolved_target = None
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
@@ -215,8 +255,9 @@ def main() -> None:
return
stop.set()
signal.signal(signal.SIGINT, handle_sigint)
signal.signal(signal.SIGTERM, handle_sigterm)
if threading.current_thread() == threading.main_thread():
signal.signal(signal.SIGINT, handle_sigint)
signal.signal(signal.SIGTERM, handle_sigterm)
target = config.INSTANCE or "(no POTATOMESH_INSTANCE)"
configured_port = config.CONNECTION
@@ -243,6 +284,9 @@ def main() -> None:
active_candidate = resolved_target
interfaces._ensure_radio_metadata(iface)
interfaces._ensure_channel_metadata(iface)
handlers.register_host_node_id(
interfaces._extract_host_node_id(iface)
)
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
initial_snapshot_sent = False
if not announced_target and resolved_target:
@@ -411,13 +455,20 @@ def main() -> None:
connected_attr = getattr(iface, "isConnected", None)
believed_disconnected = False
if callable(connected_attr):
try:
believed_disconnected = not bool(connected_attr())
except Exception:
believed_disconnected = False
elif connected_attr is not None:
believed_disconnected = not bool(connected_attr)
connected_state = _connected_state(connected_attr)
if connected_state is None:
if callable(connected_attr):
try:
believed_disconnected = not bool(connected_attr())
except Exception:
believed_disconnected = False
elif connected_attr is not None:
try:
believed_disconnected = not bool(connected_attr)
except Exception: # pragma: no cover - defensive guard
believed_disconnected = False
else:
believed_disconnected = not connected_state
should_reconnect = believed_disconnected or (
inactivity_elapsed >= inactivity_reconnect_secs
@@ -468,5 +519,6 @@ __all__ = [
"_node_items_snapshot",
"_subscribe_receive_topics",
"_is_ble_interface",
"_connected_state",
"main",
]
+594 -5
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,11 +17,69 @@
from __future__ import annotations
import base64
import contextlib
import importlib
import json
import math
import sys
import threading
import time
from collections.abc import Mapping
from datetime import datetime, timezone
from pathlib import Path
from . import channels, config, queue
_IGNORED_PACKET_LOG_PATH = Path(__file__).resolve().parents[2] / "ignored.txt"
"""Filesystem path that stores ignored packets when debugging."""
_IGNORED_PACKET_LOCK = threading.Lock()
"""Lock guarding writes to :data:`_IGNORED_PACKET_LOG_PATH`."""
_HOST_TELEMETRY_INTERVAL_SECS = 60 * 60
"""Minimum interval between accepted host telemetry packets."""
_host_node_id: str | None = None
"""Canonical ``!xxxxxxxx`` identifier for the connected host device."""
_host_telemetry_last_rx: int | None = None
"""Receive timestamp of the last accepted host telemetry packet."""
def _ignored_packet_default(value: object) -> object:
"""Return a JSON-serialisable representation for ignored packet data."""
if isinstance(value, (list, tuple, set)):
return list(value)
if isinstance(value, bytes):
return base64.b64encode(value).decode("ascii")
if isinstance(value, Mapping):
return {
str(key): _ignored_packet_default(sub_value)
for key, sub_value in value.items()
}
return str(value)
def _record_ignored_packet(packet: Mapping | object, *, reason: str) -> None:
"""Persist packet details to :data:`ignored.txt` during debugging."""
if not config.DEBUG:
return
timestamp = datetime.now(timezone.utc).isoformat()
entry = {
"timestamp": timestamp,
"reason": reason,
"packet": _ignored_packet_default(packet),
}
payload = json.dumps(entry, ensure_ascii=False, sort_keys=True)
with _IGNORED_PACKET_LOCK:
_IGNORED_PACKET_LOG_PATH.parent.mkdir(parents=True, exist_ok=True)
with _IGNORED_PACKET_LOG_PATH.open("a", encoding="utf-8") as handle:
handle.write(f"{payload}\n")
from .serialization import (
_canonical_node_id,
_coerce_float,
@@ -42,6 +100,50 @@ from .serialization import (
)
def register_host_node_id(node_id: str | None) -> None:
"""Record the canonical identifier for the connected host device.
Parameters:
node_id: Identifier reported by the connected device. ``None`` clears
the current host assignment.
"""
global _host_node_id, _host_telemetry_last_rx
canonical = _canonical_node_id(node_id)
_host_node_id = canonical
_host_telemetry_last_rx = None
if canonical:
config._debug_log(
"Registered host device node id",
context="handlers.host_device",
host_node_id=canonical,
)
def host_node_id() -> str | None:
"""Return the canonical identifier for the connected host device."""
return _host_node_id
def _mark_host_telemetry_seen(rx_time: int) -> None:
"""Update the last receive time for the host telemetry window."""
global _host_telemetry_last_rx
_host_telemetry_last_rx = rx_time
def _host_telemetry_suppressed(rx_time: int) -> tuple[bool, int]:
"""Return suppression state and minutes remaining for host telemetry."""
if _host_telemetry_last_rx is None:
return False, 0
remaining_secs = (_host_telemetry_last_rx + _HOST_TELEMETRY_INTERVAL_SECS) - rx_time
if remaining_secs <= 0:
return False, 0
return True, int(math.ceil(remaining_secs / 60.0))
def _radio_metadata_fields() -> dict[str, object]:
"""Return the shared radio metadata fields for payload enrichment."""
@@ -324,6 +426,132 @@ def base64_payload(payload_bytes: bytes | None) -> str | None:
return base64.b64encode(payload_bytes).decode("ascii")
def _normalize_trace_hops(hops_value) -> list[int]:
"""Coerce hop entries to integers while preserving order."""
if hops_value is None:
return []
hop_entries = hops_value if isinstance(hops_value, list) else [hops_value]
normalized: list[int] = []
for hop in hop_entries:
hop_value = hop
if isinstance(hop, Mapping):
hop_value = _first(hop, "node_id", "nodeId", "id", "num", default=None)
canonical = _canonical_node_id(hop_value)
hop_id = _node_num_from_id(canonical or hop_value)
if hop_id is None:
hop_id = _coerce_int(hop_value)
if hop_id is not None:
normalized.append(hop_id)
return normalized
def store_traceroute_packet(packet: Mapping, decoded: Mapping) -> None:
"""Persist traceroute details and hop path to the API."""
traceroute_section = (
decoded.get("traceroute") if isinstance(decoded, Mapping) else None
)
request_id = _coerce_int(
_first(
traceroute_section,
"requestId",
"request_id",
default=_first(decoded, "req", "requestId", "request_id", default=None),
)
)
pkt_id = _coerce_int(_first(packet, "id", "packet_id", "packetId", default=None))
if pkt_id is None:
pkt_id = request_id
rx_time = _coerce_int(_first(packet, "rxTime", "rx_time", default=time.time()))
if rx_time is None:
rx_time = int(time.time())
src = _coerce_int(
_first(
decoded,
"src",
"source",
default=_first(packet, "fromId", "from_id", "from", default=None),
)
)
dest = _coerce_int(
_first(
decoded,
"dest",
"destination",
default=_first(packet, "toId", "to_id", "to", default=None),
)
)
metrics = traceroute_section if isinstance(traceroute_section, Mapping) else {}
rssi = _coerce_int(
_first(metrics, "rssi", default=_first(packet, "rssi", "rx_rssi", "rxRssi"))
)
snr = _coerce_float(
_first(metrics, "snr", default=_first(packet, "snr", "rx_snr", "rxSnr"))
)
elapsed_ms = _coerce_int(
_first(metrics, "elapsed_ms", "latency_ms", "latencyMs", default=None)
)
hop_candidates = (
_first(metrics, "hops", default=None),
_first(metrics, "path", default=None),
_first(metrics, "route", default=None),
_first(decoded, "hops", default=None),
_first(decoded, "path", default=None),
(
_first(traceroute_section, "route", default=None)
if isinstance(traceroute_section, Mapping)
else None
),
)
hops: list[int] = []
seen_hops: set[int] = set()
for candidate in hop_candidates:
for hop in _normalize_trace_hops(candidate):
if hop in seen_hops:
continue
seen_hops.add(hop)
hops.append(hop)
if pkt_id is None and request_id is None and not hops:
_record_ignored_packet(packet, reason="traceroute-missing-identifiers")
return
payload = {
"id": pkt_id,
"request_id": request_id,
"src": src,
"dest": dest,
"rx_time": rx_time,
"rx_iso": _iso(rx_time),
"hops": hops,
"rssi": rssi,
"snr": snr,
"elapsed_ms": elapsed_ms,
}
_queue_post_json(
"/api/traces",
_apply_radio_metadata(payload),
priority=queue._TRACE_POST_PRIORITY,
)
if config.DEBUG:
config._debug_log(
"Queued traceroute payload",
context="handlers.store_traceroute_packet",
request_id=request_id,
src=src,
dest=dest,
hop_count=len(hops),
)
def store_telemetry_packet(packet: Mapping, decoded: Mapping) -> None:
"""Persist telemetry metrics extracted from a packet.
@@ -360,6 +588,19 @@ def store_telemetry_packet(packet: Mapping, decoded: Mapping) -> None:
rx_time = int(time.time())
rx_iso = _iso(rx_time)
host_id = host_node_id()
if host_id is not None and node_id == host_id:
suppressed, minutes_remaining = _host_telemetry_suppressed(rx_time)
if suppressed:
config._debug_log(
"Suppressed host telemetry update",
context="handlers.store_telemetry",
host_node_id=host_id,
minutes_remaining=minutes_remaining,
)
return
_mark_host_telemetry_seen(rx_time)
telemetry_time = _coerce_int(_first(telemetry_section, "time", default=None))
channel = _coerce_int(_first(decoded, "channel", default=None))
@@ -460,6 +701,189 @@ def store_telemetry_packet(packet: Mapping, decoded: Mapping) -> None:
)
)
current = _coerce_float(
_first(
telemetry_section,
"current",
"deviceMetrics.current",
"deviceMetrics.current_ma",
"deviceMetrics.currentMa",
"environmentMetrics.current",
default=None,
)
)
gas_resistance = _coerce_float(
_first(
telemetry_section,
"gasResistance",
"gas_resistance",
"environmentMetrics.gasResistance",
"environmentMetrics.gas_resistance",
default=None,
)
)
iaq = _coerce_int(
_first(
telemetry_section,
"iaq",
"environmentMetrics.iaq",
"environmentMetrics.iaqIndex",
"environmentMetrics.iaq_index",
default=None,
)
)
distance = _coerce_float(
_first(
telemetry_section,
"distance",
"environmentMetrics.distance",
"environmentMetrics.range",
"environmentMetrics.rangeMeters",
default=None,
)
)
lux = _coerce_float(
_first(
telemetry_section,
"lux",
"environmentMetrics.lux",
"environmentMetrics.illuminance",
default=None,
)
)
white_lux = _coerce_float(
_first(
telemetry_section,
"whiteLux",
"white_lux",
"environmentMetrics.whiteLux",
"environmentMetrics.white_lux",
default=None,
)
)
ir_lux = _coerce_float(
_first(
telemetry_section,
"irLux",
"ir_lux",
"environmentMetrics.irLux",
"environmentMetrics.ir_lux",
default=None,
)
)
uv_lux = _coerce_float(
_first(
telemetry_section,
"uvLux",
"uv_lux",
"environmentMetrics.uvLux",
"environmentMetrics.uv_lux",
"environmentMetrics.uvIndex",
default=None,
)
)
wind_direction = _coerce_int(
_first(
telemetry_section,
"windDirection",
"wind_direction",
"environmentMetrics.windDirection",
"environmentMetrics.wind_direction",
default=None,
)
)
wind_speed = _coerce_float(
_first(
telemetry_section,
"windSpeed",
"wind_speed",
"environmentMetrics.windSpeed",
"environmentMetrics.wind_speed",
"environmentMetrics.windSpeedMps",
default=None,
)
)
wind_gust = _coerce_float(
_first(
telemetry_section,
"windGust",
"wind_gust",
"environmentMetrics.windGust",
"environmentMetrics.wind_gust",
default=None,
)
)
wind_lull = _coerce_float(
_first(
telemetry_section,
"windLull",
"wind_lull",
"environmentMetrics.windLull",
"environmentMetrics.wind_lull",
default=None,
)
)
weight = _coerce_float(
_first(
telemetry_section,
"weight",
"environmentMetrics.weight",
"environmentMetrics.mass",
default=None,
)
)
radiation = _coerce_float(
_first(
telemetry_section,
"radiation",
"environmentMetrics.radiation",
"environmentMetrics.radiationLevel",
default=None,
)
)
rainfall_1h = _coerce_float(
_first(
telemetry_section,
"rainfall1h",
"rainfall_1h",
"environmentMetrics.rainfall1h",
"environmentMetrics.rainfall_1h",
"environmentMetrics.rainfallOneHour",
default=None,
)
)
rainfall_24h = _coerce_float(
_first(
telemetry_section,
"rainfall24h",
"rainfall_24h",
"environmentMetrics.rainfall24h",
"environmentMetrics.rainfall_24h",
"environmentMetrics.rainfallTwentyFourHour",
default=None,
)
)
soil_moisture = _coerce_int(
_first(
telemetry_section,
"soilMoisture",
"soil_moisture",
"environmentMetrics.soilMoisture",
"environmentMetrics.soil_moisture",
default=None,
)
)
soil_temperature = _coerce_float(
_first(
telemetry_section,
"soilTemperature",
"soil_temperature",
"environmentMetrics.soilTemperature",
"environmentMetrics.soil_temperature",
default=None,
)
)
telemetry_payload = {
"id": pkt_id,
"node_id": node_id,
@@ -494,6 +918,42 @@ def store_telemetry_packet(packet: Mapping, decoded: Mapping) -> None:
telemetry_payload["relative_humidity"] = relative_humidity
if barometric_pressure is not None:
telemetry_payload["barometric_pressure"] = barometric_pressure
if current is not None:
telemetry_payload["current"] = current
if gas_resistance is not None:
telemetry_payload["gas_resistance"] = gas_resistance
if iaq is not None:
telemetry_payload["iaq"] = iaq
if distance is not None:
telemetry_payload["distance"] = distance
if lux is not None:
telemetry_payload["lux"] = lux
if white_lux is not None:
telemetry_payload["white_lux"] = white_lux
if ir_lux is not None:
telemetry_payload["ir_lux"] = ir_lux
if uv_lux is not None:
telemetry_payload["uv_lux"] = uv_lux
if wind_direction is not None:
telemetry_payload["wind_direction"] = wind_direction
if wind_speed is not None:
telemetry_payload["wind_speed"] = wind_speed
if wind_gust is not None:
telemetry_payload["wind_gust"] = wind_gust
if wind_lull is not None:
telemetry_payload["wind_lull"] = wind_lull
if weight is not None:
telemetry_payload["weight"] = weight
if radiation is not None:
telemetry_payload["radiation"] = radiation
if rainfall_1h is not None:
telemetry_payload["rainfall_1h"] = rainfall_1h
if rainfall_24h is not None:
telemetry_payload["rainfall_24h"] = rainfall_24h
if soil_moisture is not None:
telemetry_payload["soil_moisture"] = soil_moisture
if soil_temperature is not None:
telemetry_payload["soil_temperature"] = soil_temperature
_queue_post_json(
"/api/telemetry",
@@ -817,6 +1277,40 @@ def store_packet_dict(packet: Mapping) -> None:
store_telemetry_packet(packet, decoded)
return
traceroute_section = (
decoded.get("traceroute") if isinstance(decoded, Mapping) else None
)
traceroute_port_ints: set[int] = set()
for module_name in (
"meshtastic.portnums_pb2",
"meshtastic.protobuf.portnums_pb2",
):
module = sys.modules.get(module_name)
if module is None:
with contextlib.suppress(ModuleNotFoundError):
module = importlib.import_module(module_name)
if module is None:
continue
portnum_enum = getattr(module, "PortNum", None)
value_lookup = getattr(portnum_enum, "Value", None) if portnum_enum else None
if callable(value_lookup):
with contextlib.suppress(Exception):
candidate = _coerce_int(value_lookup("TRACEROUTE_APP"))
if candidate is not None:
traceroute_port_ints.add(candidate)
constant_value = getattr(module, "TRACEROUTE_APP", None)
candidate = _coerce_int(constant_value)
if candidate is not None:
traceroute_port_ints.add(candidate)
if (
portnum == "TRACEROUTE_APP"
or (portnum_int is not None and portnum_int in traceroute_port_ints)
or isinstance(traceroute_section, Mapping)
):
store_traceroute_packet(packet, decoded)
return
if portnum in {"5", "NODEINFO_APP"}:
store_nodeinfo_packet(packet, decoded)
return
@@ -832,14 +1326,84 @@ def store_packet_dict(packet: Mapping) -> None:
store_neighborinfo_packet(packet, decoded)
return
text = _first(decoded, "payload.text", "text", default=None)
text = _first(decoded, "payload.text", "text", "data.text", default=None)
encrypted = _first(decoded, "payload.encrypted", "encrypted", default=None)
if encrypted is None:
encrypted = _first(packet, "encrypted", default=None)
if not text and not encrypted:
return
reply_id_raw = _first(
decoded,
"payload.replyId",
"payload.reply_id",
"data.replyId",
"data.reply_id",
"replyId",
"reply_id",
default=None,
)
reply_id = _coerce_int(reply_id_raw)
emoji_raw = _first(
decoded,
"payload.emoji",
"data.emoji",
"emoji",
default=None,
)
emoji = None
if emoji_raw is not None:
try:
emoji_text = str(emoji_raw)
except Exception:
emoji_text = None
else:
emoji_text = emoji_text.strip()
if emoji_text:
emoji = emoji_text
if portnum and portnum not in {"1", "TEXT_MESSAGE_APP"}:
allowed_port_values = {"1", "TEXT_MESSAGE_APP", "REACTION_APP"}
allowed_port_ints = {1}
reaction_port_candidates: set[int] = set()
for module_name in (
"meshtastic.portnums_pb2",
"meshtastic.protobuf.portnums_pb2",
):
module = sys.modules.get(module_name)
if module is None:
with contextlib.suppress(ModuleNotFoundError):
module = importlib.import_module(module_name)
if module is None:
continue
portnum_enum = getattr(module, "PortNum", None)
value_lookup = getattr(portnum_enum, "Value", None) if portnum_enum else None
if callable(value_lookup):
with contextlib.suppress(Exception):
candidate = _coerce_int(value_lookup("REACTION_APP"))
if candidate is not None:
reaction_port_candidates.add(candidate)
constant_value = getattr(module, "REACTION_APP", None)
candidate = _coerce_int(constant_value)
if candidate is not None:
reaction_port_candidates.add(candidate)
for candidate in reaction_port_candidates:
allowed_port_ints.add(candidate)
allowed_port_values.add(str(candidate))
is_reaction_packet = portnum == "REACTION_APP" or (
reply_id is not None and emoji is not None
)
if is_reaction_packet and portnum_int is not None:
allowed_port_ints.add(portnum_int)
allowed_port_values.add(str(portnum_int))
if portnum and portnum not in allowed_port_values:
if portnum_int not in allowed_port_ints:
_record_ignored_packet(packet, reason="unsupported-port")
return
encrypted_flag = _is_encrypted_flag(encrypted)
if not any([text, encrypted_flag, emoji is not None, reply_id is not None]):
_record_ignored_packet(packet, reason="no-message-payload")
return
channel = _first(decoded, "channel", default=None)
@@ -852,6 +1416,7 @@ def store_packet_dict(packet: Mapping) -> None:
pkt_id = _first(packet, "id", "packet_id", "packetId", default=None)
if pkt_id is None:
_record_ignored_packet(packet, reason="missing-packet-id")
return
rx_time = int(_first(packet, "rxTime", "rx_time", default=time.time()))
from_id = _first(packet, "fromId", "from_id", "from", default=None)
@@ -874,6 +1439,26 @@ def store_packet_dict(packet: Mapping) -> None:
encrypted_flag = _is_encrypted_flag(encrypted)
to_id_normalized = str(to_id).strip() if to_id is not None else ""
if (
not is_reaction_packet
and channel == 0
and not encrypted_flag
and to_id_normalized
and to_id_normalized.lower() != "^all"
):
if config.DEBUG:
config._debug_log(
"Skipped direct message on primary channel",
context="handlers.store_packet_dict",
from_id=_canonical_node_id(from_id) or from_id,
to_id=_canonical_node_id(to_id) or to_id,
channel=channel,
)
_record_ignored_packet(packet, reason="skipped-direct-message")
return
message_payload = {
"id": int(pkt_id),
"rx_time": rx_time,
@@ -887,6 +1472,8 @@ def store_packet_dict(packet: Mapping) -> None:
"snr": float(snr) if snr is not None else None,
"rssi": int(rssi) if rssi is not None else None,
"hop_limit": int(hop) if hop is not None else None,
"reply_id": reply_id,
"emoji": emoji,
}
channel_name_value = None
@@ -972,8 +1559,10 @@ def on_receive(packet, interface) -> None:
__all__ = [
"_queue_post_json",
"host_node_id",
"last_packet_monotonic",
"on_receive",
"register_host_node_id",
"store_neighborinfo_packet",
"store_nodeinfo_packet",
"store_packet_dict",
+318 -42
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,18 +16,177 @@
from __future__ import annotations
import contextlib
import glob
import importlib
import ipaddress
import re
import sys
import urllib.parse
from collections.abc import Mapping
from typing import TYPE_CHECKING, Any
from meshtastic.serial_interface import SerialInterface
from meshtastic.tcp_interface import TCPInterface
try: # pragma: no cover - dependency optional in tests
import meshtastic # type: ignore
except Exception: # pragma: no cover - dependency optional in tests
meshtastic = None # type: ignore[assignment]
from . import channels, config, serialization
def _ensure_mapping(value) -> Mapping | None:
"""Return ``value`` as a mapping when conversion is possible."""
if isinstance(value, Mapping):
return value
if hasattr(value, "__dict__") and isinstance(value.__dict__, Mapping):
return value.__dict__
with contextlib.suppress(Exception):
converted = serialization._node_to_dict(value)
if isinstance(converted, Mapping):
return converted
return None
def _candidate_node_id(mapping: Mapping | None) -> str | None:
"""Extract a canonical node identifier from ``mapping`` when present."""
if mapping is None:
return None
primary_keys = (
"id",
"userId",
"user_id",
"fromId",
"from_id",
"from",
"nodeId",
"node_id",
"nodeNum",
"node_num",
"num",
)
for key in primary_keys:
with contextlib.suppress(Exception):
node_id = serialization._canonical_node_id(mapping.get(key))
if node_id:
return node_id
user_section = _ensure_mapping(mapping.get("user"))
if user_section is not None:
for key in ("id", "userId", "user_id", "num", "nodeNum", "node_num"):
with contextlib.suppress(Exception):
node_id = serialization._canonical_node_id(user_section.get(key))
if node_id:
return node_id
decoded_section = _ensure_mapping(mapping.get("decoded"))
if decoded_section is not None:
node_id = _candidate_node_id(decoded_section)
if node_id:
return node_id
payload_section = _ensure_mapping(mapping.get("payload"))
if payload_section is not None:
node_id = _candidate_node_id(payload_section)
if node_id:
return node_id
for key in ("packet", "meta", "info"):
node_id = _candidate_node_id(_ensure_mapping(mapping.get(key)))
if node_id:
return node_id
for value in mapping.values():
if isinstance(value, (list, tuple)):
for item in value:
node_id = _candidate_node_id(_ensure_mapping(item))
if node_id:
return node_id
else:
node_id = _candidate_node_id(_ensure_mapping(value))
if node_id:
return node_id
return None
def _extract_host_node_id(iface) -> str | None:
"""Return the canonical node identifier for the connected host device."""
if iface is None:
return None
def _as_mapping(candidate) -> Mapping | None:
mapping = _ensure_mapping(candidate)
if mapping is not None:
return mapping
if callable(candidate):
with contextlib.suppress(Exception):
return _ensure_mapping(candidate())
return None
candidates: list[Mapping] = []
for attr in ("myInfo", "my_node_info", "myNodeInfo", "my_node", "localNode"):
mapping = _as_mapping(getattr(iface, attr, None))
if mapping is None:
continue
candidates.append(mapping)
nested_info = _ensure_mapping(mapping.get("info"))
if nested_info:
candidates.append(nested_info)
for mapping in candidates:
node_id = _candidate_node_id(mapping)
if node_id:
return node_id
for key in ("myNodeNum", "my_node_num", "myNodeId", "my_node_id"):
node_id = serialization._canonical_node_id(mapping.get(key))
if node_id:
return node_id
node_id = serialization._canonical_node_id(getattr(iface, "myNodeNum", None))
if node_id:
return node_id
return None
def _normalise_nodeinfo_packet(packet) -> dict | None:
"""Return a dictionary view of ``packet`` with a guaranteed ``id`` when known."""
mapping = _ensure_mapping(packet)
if mapping is None:
return None
try:
normalised: dict = dict(mapping)
except Exception:
try:
normalised = {key: mapping[key] for key in mapping}
except Exception:
return None
node_id = _candidate_node_id(normalised)
if node_id and normalised.get("id") != node_id:
normalised["id"] = node_id
decoded_section = _ensure_mapping(normalised.get("decoded"))
if decoded_section is not None:
decoded_dict = dict(decoded_section)
user_section = _ensure_mapping(decoded_dict.get("user"))
if user_section is not None:
user_dict = dict(user_section)
if node_id and user_dict.get("id") != node_id:
user_dict["id"] = node_id
decoded_dict["user"] = user_dict
normalised["decoded"] = decoded_dict
return normalised
if TYPE_CHECKING: # pragma: no cover - import only used for type checking
from meshtastic.ble_interface import BLEInterface as _BLEInterface
@@ -37,50 +196,46 @@ BLEInterface = None
def _patch_meshtastic_nodeinfo_handler() -> None:
"""Ensure Meshtastic nodeinfo packets always include an ``id`` field."""
try:
import meshtastic # type: ignore
except Exception: # pragma: no cover - dependency optional in tests
module = sys.modules.get("meshtastic", meshtastic)
if module is None:
with contextlib.suppress(Exception):
module = importlib.import_module("meshtastic")
if module is None:
return
globals()["meshtastic"] = module
original = getattr(meshtastic, "_onNodeInfoReceive", None)
original = getattr(module, "_onNodeInfoReceive", None)
if not callable(original):
return
if getattr(original, "_potato_mesh_safe_wrapper", False):
return
mesh_interface_module = getattr(module, "mesh_interface", None)
if mesh_interface_module is None:
with contextlib.suppress(Exception):
mesh_interface_module = importlib.import_module("meshtastic.mesh_interface")
safe_callback = original
if not getattr(original, "_potato_mesh_safe_wrapper", False):
safe_callback = _build_safe_nodeinfo_callback(original)
module._onNodeInfoReceive = safe_callback
if (
mesh_interface_module is not None
and getattr(mesh_interface_module, "_onNodeInfoReceive", None) is original
):
mesh_interface_module._onNodeInfoReceive = safe_callback
_patch_protocol_nodeinfo_callback(module, original, safe_callback)
_patch_protocol_nodeinfo_callback(mesh_interface_module, original, safe_callback)
_patch_nodeinfo_handler_class(mesh_interface_module, module)
def _build_safe_nodeinfo_callback(original):
"""Return a wrapper that injects a missing ``id`` before dispatching."""
def _safe_on_node_info_receive(iface, packet): # type: ignore[override]
candidate_mapping: Mapping | None = None
if isinstance(packet, Mapping):
candidate_mapping = packet
elif hasattr(packet, "__dict__") and isinstance(packet.__dict__, Mapping):
candidate_mapping = packet.__dict__
node_id = None
if candidate_mapping is not None:
node_id = serialization._canonical_node_id(candidate_mapping.get("id"))
if node_id is None:
user_section = candidate_mapping.get("user")
if isinstance(user_section, Mapping):
node_id = serialization._canonical_node_id(user_section.get("id"))
if node_id is None:
for key in ("fromId", "from_id", "from", "num", "nodeId", "node_id"):
node_id = serialization._canonical_node_id(
candidate_mapping.get(key)
)
if node_id:
break
if node_id:
if not isinstance(candidate_mapping, dict):
try:
candidate_mapping = dict(candidate_mapping)
except Exception:
candidate_mapping = {
k: candidate_mapping[k] for k in candidate_mapping
}
if candidate_mapping.get("id") != node_id:
candidate_mapping["id"] = node_id
packet = candidate_mapping
normalised = _normalise_nodeinfo_packet(packet)
if normalised is not None:
packet = normalised
try:
return original(iface, packet)
@@ -90,12 +245,132 @@ def _patch_meshtastic_nodeinfo_handler() -> None:
raise
_safe_on_node_info_receive._potato_mesh_safe_wrapper = True # type: ignore[attr-defined]
meshtastic._onNodeInfoReceive = _safe_on_node_info_receive
return _safe_on_node_info_receive
def _replace_known_protocol_callback(protocol, replacement):
"""Return ``protocol`` with ``onReceive`` set to ``replacement``."""
replacer = getattr(protocol, "_replace", None)
if callable(replacer):
try:
return replacer(onReceive=replacement)
except Exception:
pass
protocol_cls = getattr(protocol, "__class__", None)
try:
return protocol_cls(
getattr(protocol, "name", None),
getattr(protocol, "protobufFactory", None),
replacement,
)
except Exception:
return protocol
def _patch_protocol_nodeinfo_callback(module, original, replacement) -> None:
"""Swap the NodeInfo protocol callback to ``replacement`` when needed."""
if module is None or replacement is None:
return
protocols = getattr(module, "protocols", None)
if not isinstance(protocols, Mapping):
return
portnums = getattr(module, "portnums_pb2", None)
portnum_enum = getattr(portnums, "PortNum", None)
try:
nodeinfo_key = getattr(portnum_enum, "NODEINFO_APP")
except Exception:
nodeinfo_key = None
for key, protocol in list(protocols.items()):
on_receive = getattr(protocol, "onReceive", None)
if key == nodeinfo_key or on_receive is original:
protocols[key] = _replace_known_protocol_callback(protocol, replacement)
def _update_nodeinfo_handler_aliases(original, replacement) -> None:
"""Ensure Meshtastic modules reference the patched ``NodeInfoHandler``."""
for module_name, module in list(sys.modules.items()):
if not module_name.startswith("meshtastic"):
continue
existing = getattr(module, "NodeInfoHandler", None)
if existing is original:
setattr(module, "NodeInfoHandler", replacement)
def _patch_nodeinfo_handler_class(
mesh_interface_module, meshtastic_module=None
) -> None:
"""Wrap ``NodeInfoHandler.onReceive`` to normalise packets before callbacks."""
if mesh_interface_module is None:
return
handler_class = getattr(mesh_interface_module, "NodeInfoHandler", None)
if handler_class is None:
return
if getattr(handler_class, "_potato_mesh_safe_wrapper", False):
return
original_on_receive = getattr(handler_class, "onReceive", None)
if not callable(original_on_receive):
return
class _SafeNodeInfoHandler(handler_class): # type: ignore[misc]
"""Subclass that guards against missing node identifiers."""
def onReceive(self, iface, packet): # type: ignore[override]
normalised = _normalise_nodeinfo_packet(packet)
if normalised is not None:
packet = normalised
try:
return super().onReceive(iface, packet)
except KeyError as exc: # pragma: no cover - defensive only
if exc.args and exc.args[0] == "id":
return None
raise
_SafeNodeInfoHandler.__name__ = handler_class.__name__
_SafeNodeInfoHandler.__qualname__ = getattr(
handler_class, "__qualname__", handler_class.__name__
)
_SafeNodeInfoHandler.__module__ = getattr(
handler_class, "__module__", mesh_interface_module.__name__
)
_SafeNodeInfoHandler.__doc__ = getattr(
handler_class, "__doc__", _SafeNodeInfoHandler.__doc__
)
_SafeNodeInfoHandler._potato_mesh_safe_wrapper = True # type: ignore[attr-defined]
setattr(mesh_interface_module, "NodeInfoHandler", _SafeNodeInfoHandler)
if meshtastic_module is None:
meshtastic_module = globals().get("meshtastic")
if meshtastic_module is not None:
existing_top = getattr(meshtastic_module, "NodeInfoHandler", None)
if existing_top is handler_class:
setattr(meshtastic_module, "NodeInfoHandler", _SafeNodeInfoHandler)
_update_nodeinfo_handler_aliases(handler_class, _SafeNodeInfoHandler)
_patch_meshtastic_nodeinfo_handler()
try: # pragma: no cover - optional dependency may be unavailable
from meshtastic.serial_interface import SerialInterface # type: ignore
except Exception: # pragma: no cover - optional dependency may be unavailable
SerialInterface = None # type: ignore[assignment]
try: # pragma: no cover - optional dependency may be unavailable
from meshtastic.tcp_interface import TCPInterface # type: ignore
except Exception: # pragma: no cover - optional dependency may be unavailable
TCPInterface = None # type: ignore[assignment]
def _patch_meshtastic_ble_receive_loop() -> None:
"""Prevent ``UnboundLocalError`` crashes in Meshtastic's BLE reader."""
@@ -601,6 +876,7 @@ __all__ = [
"NoAvailableMeshInterface",
"_ensure_channel_metadata",
"_ensure_radio_metadata",
"_extract_host_node_id",
"_DummySerialInterface",
"_DEFAULT_TCP_PORT",
"_DEFAULT_TCP_TARGET",
+82 -6
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,12 +22,60 @@ import json
import threading
import urllib.request
from dataclasses import dataclass, field
from typing import Callable, Iterable, Tuple
from typing import Callable, Iterable, Mapping, Tuple
from . import config
def _stringify_payload_value(value: object) -> str:
"""Return a stable string representation for ``value``."""
if isinstance(value, Mapping):
try:
return json.dumps(
{
str(key): value[key]
for key in sorted(value, key=lambda item: str(item))
},
sort_keys=True,
ensure_ascii=False,
default=str,
)
except Exception: # pragma: no cover - defensive guard
return str(value)
if isinstance(value, (list, tuple)):
try:
return json.dumps(list(value), ensure_ascii=False, default=str)
except Exception: # pragma: no cover - defensive guard
return str(value)
if isinstance(value, set):
try:
return json.dumps(sorted(value, key=str), ensure_ascii=False, default=str)
except Exception: # pragma: no cover - defensive guard
return str(value)
if isinstance(value, bytes):
return json.dumps(value.decode("utf-8", "replace"), ensure_ascii=False)
if isinstance(value, str):
return json.dumps(value, ensure_ascii=False)
return str(value)
def _payload_key_value_pairs(payload: Mapping[str, object]) -> str:
"""Serialise ``payload`` into ``key=value`` pairs for debug logs."""
pairs: list[str] = []
for key in sorted(payload):
try:
formatted = _stringify_payload_value(payload[key])
except Exception: # pragma: no cover - defensive guard
formatted = str(payload[key])
pairs.append(f"{key}={formatted}")
return " ".join(pairs)
_MESSAGE_POST_PRIORITY = 10
_NEIGHBOR_POST_PRIORITY = 20
_TRACE_POST_PRIORITY = 25
_POSITION_POST_PRIORITY = 30
_TELEMETRY_POST_PRIORITY = 40
_NODE_POST_PRIORITY = 50
@@ -72,11 +120,25 @@ def _post_json(
return
url = f"{instance}{path}"
data = json.dumps(payload).encode("utf-8")
req = urllib.request.Request(
url, data=data, headers={"Content-Type": "application/json"}
)
# Add full headers to avoid Cloudflare blocks on instances behind cloudflare proxy
headers = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
"Accept": "application/json",
"Accept-Language": "en-US,en;q=0.9",
"Origin": f"{instance}",
"Referer": f"{instance}",
}
if api_token:
req.add_header("Authorization", f"Bearer {api_token}")
headers["Authorization"] = f"Bearer {api_token}"
req = urllib.request.Request(
url,
data=data,
headers=headers,
)
try:
with urllib.request.urlopen(req, timeout=10) as resp:
resp.read()
@@ -159,6 +221,19 @@ def _queue_post_json(
if send is None:
send = _post_json
if config.DEBUG:
formatted_payload = (
_payload_key_value_pairs(payload)
if isinstance(payload, Mapping)
else str(payload)
)
config._debug_log(
f"Forwarding payload to API: {formatted_payload}",
context="queue.queue_post_json",
path=path,
priority=priority,
)
_enqueue_post_json(path, payload, priority, state=state)
with state.lock:
if state.active:
@@ -187,6 +262,7 @@ __all__ = [
"_NEIGHBOR_POST_PRIORITY",
"_NODE_POST_PRIORITY",
"_POSITION_POST_PRIORITY",
"_TRACE_POST_PRIORITY",
"_TELEMETRY_POST_PRIORITY",
"_clear_post_queue",
"_drain_post_queue",
+165 -2
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -22,6 +22,8 @@ from __future__ import annotations
import base64
import dataclasses
import enum
import importlib
import json
import math
import time
@@ -31,6 +33,18 @@ from google.protobuf.json_format import MessageToDict
from google.protobuf.message import DecodeError
from google.protobuf.message import Message as ProtoMessage
_CLI_ROLE_MODULE_NAMES: tuple[str, ...] = (
"meshtastic.cli.common",
"meshtastic.cli.roles",
"meshtastic.cli.enums",
"meshtastic_cli.common",
"meshtastic_cli.roles",
)
"""Possible module paths that may expose the Meshtastic CLI role enum."""
_CLI_ROLE_LOOKUP: dict[int, str] | None = None
"""Cached mapping of CLI role identifiers to their textual names."""
def _get(obj, key, default=None):
"""Return ``obj[key]`` or ``getattr(obj, key)`` when available.
@@ -49,6 +63,96 @@ def _get(obj, key, default=None):
return getattr(obj, key, default)
def _reset_cli_role_cache() -> None:
"""Clear the cached CLI role lookup mapping.
The helper is primarily used by tests to ensure deterministic behaviour
when substituting stub CLI modules.
Returns:
``None``. The next lookup will trigger a fresh import attempt.
"""
global _CLI_ROLE_LOOKUP
_CLI_ROLE_LOOKUP = None
def _load_cli_role_lookup() -> dict[int, str]:
"""Return a mapping of role identifiers from the Meshtastic CLI.
The Meshtastic CLI exposes extended role enums that may include entries
absent from the protobuf definition shipped with the firmware. This
helper lazily imports the CLI module when present and extracts the
available role names so that numeric values received from the firmware can
be normalised into human-friendly strings.
Returns:
Mapping of integer role identifiers to their canonical string names.
"""
global _CLI_ROLE_LOOKUP
if _CLI_ROLE_LOOKUP is not None:
return _CLI_ROLE_LOOKUP
lookup: dict[int, str] = {}
def _from_candidate(candidate) -> dict[int, str]:
mapping: dict[int, str] = {}
if isinstance(candidate, enum.EnumMeta):
for member in candidate: # pragma: no branch - Enum iteration deterministic
try:
mapping[int(member.value)] = str(member.name)
except Exception: # pragma: no cover - defensive guard
continue
return mapping
members = getattr(candidate, "__members__", None)
if isinstance(members, Mapping):
for name, member in members.items():
value = getattr(member, "value", None)
if isinstance(value, (int, enum.IntEnum)):
try:
mapping[int(value)] = str(name)
except Exception: # pragma: no cover - defensive
continue
if mapping:
return mapping
if isinstance(candidate, Mapping):
for key, value in candidate.items():
try:
key_int = int(key)
except Exception: # pragma: no cover - defensive
continue
mapping[key_int] = str(value)
return mapping
for module_name in _CLI_ROLE_MODULE_NAMES:
try:
module = importlib.import_module(module_name)
except Exception: # pragma: no cover - optional dependency
continue
candidates = []
for attr_name in ("Role", "Roles", "ClientRole", "ClientRoles"):
candidate = getattr(module, attr_name, None)
if candidate is not None:
candidates.append(candidate)
for candidate in candidates:
mapping = _from_candidate(candidate)
if not mapping:
continue
lookup.update(mapping)
if lookup:
break
_CLI_ROLE_LOOKUP = {
key: value.strip().upper()
for key, value in lookup.items()
if isinstance(value, str) and value.strip()
}
return _CLI_ROLE_LOOKUP
def _node_to_dict(n) -> dict:
"""Convert ``n`` into a JSON-serialisable mapping.
@@ -99,6 +203,57 @@ def _node_to_dict(n) -> dict:
return _convert(n)
def _normalize_user_role(value) -> str | None:
"""Return a canonical role string for ``value`` when possible.
Parameters:
value: Raw role descriptor emitted by the Meshtastic firmware or
decoded JSON payloads.
Returns:
Uppercase role string or ``None`` if the value cannot be resolved.
"""
if value is None:
return None
if isinstance(value, str):
cleaned = value.strip()
if not cleaned:
return None
return cleaned.upper()
numeric = _coerce_int(value)
if numeric is None:
return None
role_name = None
cli_lookup = _load_cli_role_lookup()
role_name = cli_lookup.get(numeric)
if not role_name:
try: # pragma: no branch - minimal control flow
from meshtastic.protobuf import mesh_pb2
role_name = mesh_pb2.User.Role.Name(numeric)
except Exception: # pragma: no cover - depends on protobuf version
role_name = None
if not role_name:
try:
from meshtastic.protobuf import config_pb2
role_name = config_pb2.Config.DeviceConfig.Role.Name(numeric)
except Exception: # pragma: no cover - depends on protobuf version
role_name = None
if role_name:
return role_name.strip().upper()
return str(numeric)
def upsert_payload(node_id, node) -> dict:
"""Return the payload expected by ``/api/nodes`` upsert requests.
@@ -120,7 +275,7 @@ def _iso(ts: int | float) -> str:
import datetime
return (
datetime.datetime.fromtimestamp(int(ts), datetime.UTC)
datetime.datetime.fromtimestamp(int(ts), datetime.timezone.utc)
.isoformat()
.replace("+00:00", "Z")
)
@@ -587,6 +742,11 @@ def _nodeinfo_user_dict(node_info, decoded_user):
if canonical:
user_dict = dict(user_dict)
user_dict["id"] = canonical
role_value = user_dict.get("role")
normalized_role = _normalize_user_role(role_value)
if normalized_role and normalized_role != role_value:
user_dict = dict(user_dict)
user_dict["role"] = normalized_role
return user_dict
@@ -594,6 +754,8 @@ __all__ = [
"_canonical_node_id",
"_coerce_float",
"_coerce_int",
"_load_cli_role_lookup",
"_normalize_user_role",
"_decode_nodeinfo_payload",
"_extract_payload_bytes",
"_first",
@@ -606,6 +768,7 @@ __all__ = [
"_nodeinfo_position_dict",
"_nodeinfo_user_dict",
"_pkt_to_dict",
"_reset_cli_role_cache",
"DecodeError",
"MessageToDict",
"ProtoMessage",
+5 -2
View File
@@ -1,4 +1,4 @@
-- Copyright (C) 2025 l5yth
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@@ -27,7 +27,9 @@ CREATE TABLE IF NOT EXISTS messages (
hop_limit INTEGER,
lora_freq INTEGER,
modem_preset TEXT,
channel_name TEXT
channel_name TEXT,
reply_id INTEGER,
emoji TEXT
);
CREATE INDEX IF NOT EXISTS idx_messages_rx_time ON messages(rx_time);
@@ -35,3 +37,4 @@ CREATE INDEX IF NOT EXISTS idx_messages_from_id ON messages(from_id);
CREATE INDEX IF NOT EXISTS idx_messages_to_id ON messages(to_id);
CREATE INDEX IF NOT EXISTS idx_messages_channel ON messages(channel);
CREATE INDEX IF NOT EXISTS idx_messages_portnum ON messages(portnum);
CREATE INDEX IF NOT EXISTS idx_messages_reply_id ON messages(reply_id);
@@ -1,3 +1,17 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- Add support for encrypted messages to the existing schema.
BEGIN;
ALTER TABLE messages ADD COLUMN encrypted TEXT;
@@ -1,4 +1,4 @@
-- Copyright (C) 2025 l5yth
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@@ -11,8 +11,9 @@
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
--
-- Extend the nodes and messages tables with LoRa metadata columns.
BEGIN;
ALTER TABLE nodes ADD COLUMN lora_freq INTEGER;
ALTER TABLE nodes ADD COLUMN modem_preset TEXT;
@@ -0,0 +1,36 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- Extend the telemetry table with additional environment metrics.
BEGIN;
ALTER TABLE telemetry ADD COLUMN gas_resistance REAL;
ALTER TABLE telemetry ADD COLUMN current REAL;
ALTER TABLE telemetry ADD COLUMN iaq INTEGER;
ALTER TABLE telemetry ADD COLUMN distance REAL;
ALTER TABLE telemetry ADD COLUMN lux REAL;
ALTER TABLE telemetry ADD COLUMN white_lux REAL;
ALTER TABLE telemetry ADD COLUMN ir_lux REAL;
ALTER TABLE telemetry ADD COLUMN uv_lux REAL;
ALTER TABLE telemetry ADD COLUMN wind_direction INTEGER;
ALTER TABLE telemetry ADD COLUMN wind_speed REAL;
ALTER TABLE telemetry ADD COLUMN weight REAL;
ALTER TABLE telemetry ADD COLUMN wind_gust REAL;
ALTER TABLE telemetry ADD COLUMN wind_lull REAL;
ALTER TABLE telemetry ADD COLUMN radiation REAL;
ALTER TABLE telemetry ADD COLUMN rainfall_1h REAL;
ALTER TABLE telemetry ADD COLUMN rainfall_24h REAL;
ALTER TABLE telemetry ADD COLUMN soil_moisture INTEGER;
ALTER TABLE telemetry ADD COLUMN soil_temperature REAL;
COMMIT;
@@ -0,0 +1,21 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- Extend the messages table to capture reply relationships and emoji reactions.
BEGIN;
ALTER TABLE messages ADD COLUMN reply_id INTEGER;
ALTER TABLE messages ADD COLUMN emoji TEXT;
CREATE INDEX IF NOT EXISTS idx_messages_reply_id ON messages(reply_id);
COMMIT;
+1 -1
View File
@@ -1,4 +1,4 @@
-- Copyright (C) 2025 l5yth
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
+1 -1
View File
@@ -1,4 +1,4 @@
-- Copyright (C) 2025 l5yth
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
+1 -1
View File
@@ -1,4 +1,4 @@
-- Copyright (C) 2025 l5yth
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
+20 -2
View File
@@ -1,4 +1,4 @@
-- Copyright (C) 2025 l5yth
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@@ -35,7 +35,25 @@ CREATE TABLE IF NOT EXISTS telemetry (
uptime_seconds INTEGER,
temperature REAL,
relative_humidity REAL,
barometric_pressure REAL
barometric_pressure REAL,
gas_resistance REAL,
current REAL,
iaq INTEGER,
distance REAL,
lux REAL,
white_lux REAL,
ir_lux REAL,
uv_lux REAL,
wind_direction INTEGER,
wind_speed REAL,
weight REAL,
wind_gust REAL,
wind_lull REAL,
radiation REAL,
rainfall_1h REAL,
rainfall_24h REAL,
soil_moisture INTEGER,
soil_temperature REAL
);
CREATE INDEX IF NOT EXISTS idx_telemetry_rx_time ON telemetry(rx_time);
+38
View File
@@ -0,0 +1,38 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
CREATE TABLE IF NOT EXISTS traces (
id INTEGER PRIMARY KEY,
request_id INTEGER,
src INTEGER,
dest INTEGER,
rx_time INTEGER NOT NULL,
rx_iso TEXT NOT NULL,
rssi INTEGER,
snr REAL,
elapsed_ms INTEGER
);
CREATE TABLE IF NOT EXISTS trace_hops (
id INTEGER PRIMARY KEY,
trace_id INTEGER NOT NULL,
hop_index INTEGER NOT NULL,
node_id INTEGER NOT NULL,
FOREIGN KEY(trace_id) REFERENCES traces(id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_traces_rx_time ON traces(rx_time);
CREATE INDEX IF NOT EXISTS idx_traces_request ON traces(request_id);
CREATE INDEX IF NOT EXISTS idx_trace_hops_trace ON trace_hops(trace_id);
CREATE INDEX IF NOT EXISTS idx_trace_hops_node ON trace_hops(node_id);
+20
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Development overrides for docker-compose.yml
services:
web:
@@ -6,6 +20,7 @@ services:
volumes:
- ./web:/app
- ./data:/app/.local/share/potato-mesh
- ./.config/potato-mesh:/app/.config/potato-mesh
- /app/vendor/bundle
web-bridge:
@@ -14,6 +29,7 @@ services:
volumes:
- ./web:/app
- ./data:/app/.local/share/potato-mesh
- ./.config/potato-mesh:/app/.config/potato-mesh
- /app/vendor/bundle
ports:
- "41447:41447"
@@ -25,7 +41,9 @@ services:
volumes:
- ./data:/app
- ./data:/app/.local/share/potato-mesh
- ./.config/potato-mesh:/app/.config/potato-mesh
- /app/.local
- /dev:/dev
ingestor-bridge:
environment:
@@ -33,4 +51,6 @@ services:
volumes:
- ./data:/app
- ./data:/app/.local/share/potato-mesh
- ./.config/potato-mesh:/app/.config/potato-mesh
- /app/.local
- /dev:/dev
+14
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Production overrides for docker-compose.yml
services:
web:
+32 -4
View File
@@ -1,5 +1,19 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
x-web-base: &web-base
image: ghcr.io/l5yth/potato-mesh-web-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:latest
image: ghcr.io/l5yth/potato-mesh-web-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:${POTATOMESH_IMAGE_TAG:-latest}
environment:
APP_ENV: ${APP_ENV:-production}
RACK_ENV: ${RACK_ENV:-production}
@@ -7,13 +21,18 @@ x-web-base: &web-base
CHANNEL: ${CHANNEL:-#LongFast}
FREQUENCY: ${FREQUENCY:-915MHz}
MAP_CENTER: ${MAP_CENTER:-38.761944,-27.090833}
MAP_ZOOM: ${MAP_ZOOM:-""}
MAX_DISTANCE: ${MAX_DISTANCE:-42}
CONTACT_LINK: ${CONTACT_LINK:-#potatomesh:dod.ngo}
FEDERATION: ${FEDERATION:-1}
PRIVATE: ${PRIVATE:-0}
API_TOKEN: ${API_TOKEN}
INSTANCE_DOMAIN: ${INSTANCE_DOMAIN}
DEBUG: ${DEBUG:-0}
command: ["ruby", "app.rb", "-p", "41447", "-o", "0.0.0.0"]
volumes:
- potatomesh_data:/app/.local/share/potato-mesh
- potatomesh_config:/app/.config/potato-mesh
- potatomesh_logs:/app/logs
restart: unless-stopped
deploy:
@@ -26,18 +45,25 @@ x-web-base: &web-base
cpus: '0.25'
x-ingestor-base: &ingestor-base
image: ghcr.io/l5yth/potato-mesh-ingestor-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:latest
image: ghcr.io/l5yth/potato-mesh-ingestor-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:${POTATOMESH_IMAGE_TAG:-latest}
environment:
CONNECTION: ${CONNECTION:-/dev/ttyACM0}
CHANNEL_INDEX: ${CHANNEL_INDEX:-0}
POTATOMESH_INSTANCE: ${POTATOMESH_INSTANCE:-http://web:41447}
API_TOKEN: ${API_TOKEN}
INSTANCE_DOMAIN: ${INSTANCE_DOMAIN}
DEBUG: ${DEBUG:-0}
FEDERATION: ${FEDERATION:-1}
PRIVATE: ${PRIVATE:-0}
volumes:
- potatomesh_data:/app/.local/share/potato-mesh
- potatomesh_config:/app/.config/potato-mesh
- potatomesh_logs:/app/logs
devices:
- ${CONNECTION:-/dev/ttyACM0}:${CONNECTION:-/dev/ttyACM0}
- /dev:/dev
device_cgroup_rules:
- 'c 166:* rwm' # ttyACM devices
- 'c 188:* rwm' # ttyUSB devices
- 'c 4:* rwm' # ttyS devices
privileged: false
restart: unless-stopped
deploy:
@@ -85,6 +111,8 @@ services:
volumes:
potatomesh_data:
driver: local
potatomesh_config:
driver: local
potatomesh_logs:
driver: local
BIN
View File
Binary file not shown.

After

Width:  |  Height:  |  Size: 202 KiB

+1 -2
View File
@@ -1,6 +1,5 @@
#!/usr/bin/env python3
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+1 -1
View File
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+2 -1
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal Meshtastic protobuf stubs for isolated unit testing."""
from __future__ import annotations
+54 -3
View File
@@ -12,12 +12,31 @@
"battery_level": 101,
"bitfield": 1,
"payload_b64": "DTVr0mgSFQhlFQIrh0AdJb8YPyXYFSA9KJTPEg==",
"current": 0.0715,
"gas_resistance": 1456.0,
"iaq": 83,
"distance": 12.5,
"lux": 100.25,
"white_lux": 64.5,
"ir_lux": 12.75,
"uv_lux": 1.6,
"wind_direction": 270,
"wind_speed": 5.9,
"wind_gust": 7.4,
"wind_lull": 4.8,
"weight": 32.7,
"radiation": 0.45,
"rainfall_1h": 0.18,
"rainfall_24h": 1.42,
"soil_moisture": 3100,
"soil_temperature": 18.9,
"device_metrics": {
"batteryLevel": 101,
"voltage": 4.224,
"channelUtilization": 0.59666663,
"airUtilTx": 0.03908333,
"uptimeSeconds": 305044
"uptimeSeconds": 305044,
"current": 0.0715
},
"raw": {
"device_metrics": {
@@ -43,7 +62,24 @@
"environment_metrics": {
"temperature": 21.98,
"relativeHumidity": 39.475586,
"barometricPressure": 1017.8353
"barometricPressure": 1017.8353,
"gasResistance": 1456.0,
"iaq": 83,
"distance": 12.5,
"lux": 100.25,
"whiteLux": 64.5,
"irLux": 12.75,
"uvLux": 1.6,
"windDirection": 270,
"windSpeed": 5.9,
"windGust": 7.4,
"windLull": 4.8,
"weight": 32.7,
"radiation": 0.45,
"rainfall1h": 0.18,
"rainfall24h": 1.42,
"soilMoisture": 3100,
"soilTemperature": 18.9
},
"raw": {
"environment_metrics": {
@@ -70,7 +106,22 @@
"voltage": 3.92,
"channel_utilization": 0.284,
"air_util_tx": 0.051,
"uptime_seconds": 86400
"uptime_seconds": 86400,
"current": 0.033
},
"environment_metrics": {
"temperature": 19.5,
"relative_humidity": 48.2,
"barometric_pressure": 1013.1,
"distance": 7.25,
"lux": 75.5,
"whiteLux": 40.0,
"windDirection": 180,
"windSpeed": 4.3,
"weight": 28.4,
"rainfall24h": 0.75,
"soilMoisture": 2850,
"soilTemperature": 17.1
},
"local_stats": {
"numPacketsTx": 1280,
+216
View File
@@ -0,0 +1,216 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional tests that exercise defensive helpers and interfaces."""
import importlib
import sys
import types
from pathlib import Path
from types import SimpleNamespace
import pytest
REPO_ROOT = Path(__file__).resolve().parents[1]
if str(REPO_ROOT) not in sys.path:
sys.path.insert(0, str(REPO_ROOT))
from data.mesh_ingestor import channels, config, interfaces, queue, serialization
@pytest.fixture(autouse=True)
def reset_state(monkeypatch):
"""Ensure mutable singletons are cleaned up between tests."""
repo_root = Path(__file__).resolve().parents[1]
monkeypatch.syspath_prepend(str(repo_root))
channels._reset_channel_cache()
yield
channels._reset_channel_cache()
importlib.reload(config)
def test_config_module_port_aliases(monkeypatch):
"""Ensure the config module keeps CONNECTION and PORT in sync."""
reloaded = importlib.reload(config)
monkeypatch.setattr(reloaded, "CONNECTION", "dev-tty", raising=False)
reloaded.PORT = "new-port"
assert reloaded.CONNECTION == "new-port"
assert reloaded.PORT == "new-port"
def test_queue_stringification_and_ordering():
"""Exercise queue payload formatting and priority ordering."""
mapping_payload = {"b": 1, "a": 2}
assert queue._stringify_payload_value(mapping_payload).startswith('{"a"')
assert queue._stringify_payload_value([1, 2, 3]).startswith("[1")
assert queue._stringify_payload_value({1, 2}).replace(" ", "") in ("[1,2]", "[2,1]")
assert queue._stringify_payload_value(b"bytes") == '"bytes"'
assert queue._stringify_payload_value("text") == '"text"'
pairs = queue._payload_key_value_pairs(mapping_payload)
assert pairs.split(" ") == ["a=2", "b=1"]
state = queue.QueueState()
order = []
queue._enqueue_post_json("/low", {"x": 1}, priority=90, state=state)
queue._enqueue_post_json("/high", {"x": 2}, priority=10, state=state)
state.active = True
queue._drain_post_queue(
state=state, send=lambda path, payload: order.append((path, payload["x"]))
)
assert order == [("/high", 2), ("/low", 1)]
assert state.active is False
assert state.queue == []
def test_channels_iterator_and_capture(monkeypatch):
"""Verify channel helpers normalise roles and cache primary/secondary entries."""
channels._reset_channel_cache()
class StubSettings:
def __init__(self, name):
self.name = name
class PrimaryChannel:
def __init__(self):
self.role = "PRIMARY"
self.settings = StubSettings("Alpha")
class SecondaryChannel:
def __init__(self, index, name):
self.role = "SECONDARY"
self.index = index
self.settings = StubSettings(name)
class Container:
def __len__(self):
return 2
def __getitem__(self, idx):
if idx == 0:
return PrimaryChannel()
if idx == 1:
return SecondaryChannel(5, "Bravo")
raise IndexError
class StubLocalNode:
def __init__(self):
self.channels = Container()
class StubIface:
def __init__(self):
self.localNode = StubLocalNode()
def waitForConfig(self):
return True
channels.capture_from_interface(StubIface())
assert channels.channel_mappings() == ((0, "Alpha"), (5, "Bravo"))
assert channels.channel_name(5) == "Bravo"
assert list(channels._iter_channel_objects({"0": "zero"})) == ["zero"]
def test_candidate_node_id_and_normaliser():
"""Ensure node identifiers are found inside nested payloads."""
nested = {
"payload": {"meta": {"user": {"id": "0x42"}}},
"decoded": {"from": "!0000002a"},
}
node_id = interfaces._candidate_node_id(nested)
assert node_id == "!0000002a"
packet = {"user": {"id": "!0000002a"}, "userId": None}
normalised = interfaces._normalise_nodeinfo_packet(packet)
assert normalised["id"] == "!0000002a"
assert normalised["user"]["id"] == "!0000002a"
def test_safe_nodeinfo_wrapper_handles_missing_id():
"""Cover the KeyError guard and wrapper marker."""
called = {}
def original(_iface, _packet):
called["ran"] = True
raise KeyError("id")
wrapper = interfaces._build_safe_nodeinfo_callback(original)
result = wrapper(SimpleNamespace(), {"anything": 1})
assert called["ran"] is True
assert result is None
assert getattr(wrapper, "_potato_mesh_safe_wrapper")
def test_patch_nodeinfo_handler_class(monkeypatch):
"""Ensure NodeInfoHandler subclasses normalise packets with missing ids."""
class DummyHandler:
def __init__(self):
self.calls = []
def onReceive(self, iface, packet):
self.calls.append(packet)
return packet.get("id")
mesh_interface = types.SimpleNamespace(
NodeInfoHandler=DummyHandler, __name__="meshtastic.mesh_interface"
)
interfaces._patch_nodeinfo_handler_class(mesh_interface)
handler_cls = mesh_interface.NodeInfoHandler
handler = handler_cls()
iface = SimpleNamespace()
packet = {"user": {"id": "abcd"}}
result = handler.onReceive(iface, packet)
assert result == serialization._canonical_node_id("abcd")
assert handler.calls[0]["id"] == serialization._canonical_node_id("abcd")
def test_region_frequency_and_resolution_helpers():
"""Cover enum name parsing for LoRa region frequency."""
class EnumValue:
def __init__(self, name):
self.name = name
class EnumType:
def __init__(self):
self.values_by_number = {1: EnumValue("REGION_915")}
class FieldDesc:
def __init__(self):
self.enum_type = EnumType()
class Descriptor:
def __init__(self):
self.fields_by_name = {"region": FieldDesc()}
class LoraMessage:
def __init__(self, region):
self.region = region
self.DESCRIPTOR = Descriptor()
freq = interfaces._region_frequency(LoraMessage(1))
assert freq == 915
class LocalConfig:
def __init__(self, lora):
self.lora = lora
lora_msg = LoraMessage(1)
resolved = interfaces._resolve_lora_message(LocalConfig(lora_msg))
assert resolved is lora_msg
+877 -12
View File
File diff suppressed because it is too large Load Diff
+69
View File
@@ -0,0 +1,69 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensure version identifiers stay synchronised across all packages."""
from __future__ import annotations
import json
import re
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parents[1]
if str(REPO_ROOT) not in sys.path:
sys.path.insert(0, str(REPO_ROOT))
import data
def _ruby_fallback_version() -> str:
config_path = REPO_ROOT / "web" / "lib" / "potato_mesh" / "config.rb"
contents = config_path.read_text(encoding="utf-8")
inside = False
for line in contents.splitlines():
stripped = line.strip()
if stripped.startswith("def version_fallback"):
inside = True
continue
if inside and stripped == "end":
break
if inside:
literal = re.search(r"['\"](?P<version>[^'\"]+)['\"]", stripped)
if literal:
return literal.group("version")
raise AssertionError("Unable to locate version_fallback definition in config.rb")
def _javascript_package_version() -> str:
package_path = REPO_ROOT / "web" / "package.json"
data = json.loads(package_path.read_text(encoding="utf-8"))
version = data.get("version")
if isinstance(version, str):
return version
raise AssertionError("package.json does not expose a string version")
def test_version_identifiers_match_across_languages() -> None:
"""Guard against version drift between Python, Ruby, and JavaScript."""
python_version = getattr(data, "__version__", None)
assert (
isinstance(python_version, str) and python_version
), "data.__version__ missing"
ruby_version = _ruby_fallback_version()
javascript_version = _javascript_package_version()
assert python_version == ruby_version == javascript_version
+1 -2
View File
@@ -1,6 +1,5 @@
#!/usr/bin/env bash
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+14
View File
@@ -1,4 +1,17 @@
# syntax=docker/dockerfile:1.6
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Main application builder stage
FROM ruby:3.3-alpine AS builder
@@ -78,6 +91,7 @@ ENV RACK_ENV=production \
CHANNEL="#LongFast" \
FREQUENCY="915MHz" \
MAP_CENTER="38.761944,-27.090833" \
MAP_ZOOM="" \
MAX_DISTANCE=42 \
CONTACT_LINK="#potatomesh:dod.ngo" \
DEBUG=0
+2 -1
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,6 +15,7 @@
source "https://rubygems.org"
gem "sinatra", "~> 4.0"
gem "erb", "~> 4.0"
gem "sqlite3", "~> 1.7"
gem "rackup", "~> 2.2"
gem "puma", "~> 7.0"
+2
View File
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
+2 -3
View File
@@ -1,6 +1,5 @@
#!/usr/bin/env bash
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -18,4 +17,4 @@ set -euo pipefail
bundle install
exec ruby app.rb -p 41447 -o 0.0.0.0
exec bundle exec ruby app.rb -p 41447 -o 0.0.0.0
+35 -3
View File
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -43,11 +45,13 @@ require_relative "application/errors"
require_relative "application/database"
require_relative "application/networking"
require_relative "application/identity"
require_relative "application/worker_pool"
require_relative "application/federation"
require_relative "application/prometheus"
require_relative "application/queries"
require_relative "application/data_processing"
require_relative "application/filesystem"
require_relative "application/instances"
require_relative "application/routes/api"
require_relative "application/routes/ingest"
require_relative "application/routes/root"
@@ -59,6 +63,7 @@ module PotatoMesh
extend App::Networking
extend App::Identity
extend App::Federation
extend App::Instances
extend App::Prometheus
extend App::Queries
extend App::DataProcessing
@@ -69,6 +74,7 @@ module PotatoMesh
include App::Networking
include App::Identity
include App::Federation
include App::Instances
include App::Prometheus
include App::Queries
include App::DataProcessing
@@ -97,18 +103,37 @@ module PotatoMesh
logger.level = PotatoMesh::Config.debug? ? Logger::DEBUG : Logger::WARN
end
# Determine the port the application should listen on.
# Determine the port the application should listen on by honouring the
# conventional +PORT+ environment variable used by hosting platforms. Any
# non-numeric or out-of-range values fall back to the provided default to
# keep the application bootable in misconfigured environments.
#
# @param default_port [Integer] fallback port when ENV['PORT'] is absent or invalid.
# @param default_port [Integer] fallback port when +ENV['PORT']+ is absent or invalid.
# @return [Integer] port number for the HTTP server.
def self.resolve_port(default_port: DEFAULT_PORT)
default_port
raw_port = ENV["PORT"]
return default_port if raw_port.nil?
trimmed = raw_port.to_s.strip
return default_port if trimmed.empty?
begin
port = Integer(trimmed, 10)
rescue ArgumentError
return default_port
end
return default_port unless port.positive?
return default_port unless PotatoMesh::Sanitizer.valid_port?(trimmed)
port
end
configure do
set :public_folder, File.expand_path("../../public", __dir__)
set :views, File.expand_path("../../views", __dir__)
set :federation_thread, nil
set :federation_worker_pool, nil
set :port, resolve_port
set :bind, DEFAULT_BIND_ADDRESS
@@ -132,6 +157,12 @@ module PotatoMesh
ensure_self_instance_record!
update_all_prometheus_metrics_from_nodes
if federation_enabled?
ensure_federation_worker_pool!
else
shutdown_federation_worker_pool!
end
if federation_announcements_active?
start_initial_federation_announcement!
start_federation_announcer!
@@ -166,6 +197,7 @@ SELF_INSTANCE_ID = PotatoMesh::Application::SELF_INSTANCE_ID unless defined?(SEL
PotatoMesh::App::Networking,
PotatoMesh::App::Identity,
PotatoMesh::App::Federation,
PotatoMesh::App::Instances,
PotatoMesh::App::Prometheus,
PotatoMesh::App::Queries,
PotatoMesh::App::DataProcessing,
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -731,6 +733,88 @@ module PotatoMesh
end
end
# Resolve a telemetry metric from the provided data sources.
#
# @param key_map [Hash{Symbol=>Array<String>}] ordered mapping of source names to candidate keys.
# @param sources [Hash{Symbol=>Hash}] data structures to search for metric values.
# @param type [Symbol] coercion strategy, ``:float`` or ``:integer``.
# @return [Numeric, nil] coerced metric value or nil when no candidates exist.
def resolve_numeric_metric(key_map, sources, type)
key_map.each do |source, keys|
next if keys.nil? || keys.empty?
data = sources[source]
next unless data.is_a?(Hash)
keys.each do |name|
next if name.nil?
key = name.to_s
value = if data.key?(key)
data[key]
else
sym_key = key.to_sym
data.key?(sym_key) ? data[sym_key] : nil
end
next if value.nil?
coerced = case type
when :float
coerce_float(value)
when :integer
coerce_integer(value)
else
value
end
return coerced unless coerced.nil?
end
end
nil
end
private :resolve_numeric_metric
# Normalise a traceroute hop entry to a numeric node identifier.
#
# @param hop [Object] raw hop entry from the payload.
# @return [Integer, nil] coerced node ID or nil when the value is unusable.
def coerce_trace_node_id(hop)
case hop
when Integer
return hop
when Numeric
return hop.to_i
when String
trimmed = hop.strip
return nil if trimmed.empty?
return Integer(trimmed, 10) if trimmed.match?(/\A-?\d+\z/)
parts = canonical_node_parts(trimmed)
return parts[1] if parts
when Hash
candidate = hop["node_id"] || hop[:node_id] || hop["id"] || hop[:id] || hop["num"] || hop[:num]
return coerce_trace_node_id(candidate)
end
nil
rescue ArgumentError
nil
end
# Extract hop identifiers from a traceroute payload preserving order.
#
# @param hops_value [Object] raw hops array or path collection.
# @return [Array<Integer>] ordered list of coerced hop identifiers.
def normalize_trace_hops(hops_value)
return [] if hops_value.nil?
hop_entries = hops_value.is_a?(Array) ? hops_value : [hops_value]
hop_entries.filter_map { |entry| coerce_trace_node_id(entry) }
end
def insert_telemetry(db, payload)
return unless payload.is_a?(Hash)
@@ -776,54 +860,285 @@ module PotatoMesh
environment_metrics = normalize_json_object(payload["environment_metrics"] || payload["environmentMetrics"])
environment_metrics ||= normalize_json_object(telemetry_section["environmentMetrics"]) if telemetry_section&.key?("environmentMetrics")
fetch_metric = lambda do |map, *names|
next nil unless map.is_a?(Hash)
names.each do |name|
next unless name
key = name.to_s
return map[key] if map.key?(key)
end
nil
sources = {
payload: payload,
telemetry: telemetry_section,
device: device_metrics,
environment: environment_metrics,
}
metric_definitions = [
[
"battery_level",
:float,
{
payload: %w[battery_level batteryLevel],
telemetry: %w[batteryLevel],
device: %w[battery_level batteryLevel],
environment: %w[battery_level batteryLevel],
},
],
[
"voltage",
:float,
{
payload: %w[voltage],
telemetry: %w[voltage],
device: %w[voltage],
environment: %w[voltage],
},
],
[
"channel_utilization",
:float,
{
payload: %w[channel_utilization channelUtilization],
telemetry: %w[channelUtilization],
device: %w[channel_utilization channelUtilization],
},
],
[
"air_util_tx",
:float,
{
payload: %w[air_util_tx airUtilTx],
telemetry: %w[airUtilTx],
device: %w[air_util_tx airUtilTx],
},
],
[
"uptime_seconds",
:integer,
{
payload: %w[uptime_seconds uptimeSeconds],
telemetry: %w[uptimeSeconds],
device: %w[uptime_seconds uptimeSeconds],
},
],
[
"temperature",
:float,
{
payload: %w[temperature temperatureC tempC],
telemetry: %w[temperature temperatureC tempC],
environment: %w[temperature temperatureC temperature_c tempC],
},
],
[
"relative_humidity",
:float,
{
payload: %w[relative_humidity relativeHumidity humidity],
telemetry: %w[relative_humidity relativeHumidity humidity],
environment: %w[relative_humidity relativeHumidity humidity],
},
],
[
"barometric_pressure",
:float,
{
payload: %w[barometric_pressure barometricPressure pressure],
telemetry: %w[barometric_pressure barometricPressure pressure],
environment: %w[barometric_pressure barometricPressure pressure],
},
],
[
"gas_resistance",
:float,
{
payload: %w[gas_resistance gasResistance],
telemetry: %w[gas_resistance gasResistance],
environment: %w[gas_resistance gasResistance],
},
],
[
"current",
:float,
{
payload: %w[current current_ma currentMa],
telemetry: %w[current current_ma currentMa],
device: %w[current current_ma currentMa],
environment: %w[current],
},
],
[
"iaq",
:integer,
{
payload: %w[iaq iaqIndex iaq_index],
telemetry: %w[iaq iaqIndex iaq_index],
environment: %w[iaq iaqIndex iaq_index],
},
],
[
"distance",
:float,
{
payload: %w[distance range rangeMeters],
telemetry: %w[distance range rangeMeters],
environment: %w[distance range rangeMeters],
},
],
[
"lux",
:float,
{
payload: %w[lux illuminance lightLux],
telemetry: %w[lux illuminance lightLux],
environment: %w[lux illuminance lightLux],
},
],
[
"white_lux",
:float,
{
payload: %w[white_lux whiteLux],
telemetry: %w[white_lux whiteLux],
environment: %w[white_lux whiteLux],
},
],
[
"ir_lux",
:float,
{
payload: %w[ir_lux irLux],
telemetry: %w[ir_lux irLux],
environment: %w[ir_lux irLux],
},
],
[
"uv_lux",
:float,
{
payload: %w[uv_lux uvLux uvIndex],
telemetry: %w[uv_lux uvLux uvIndex],
environment: %w[uv_lux uvLux uvIndex],
},
],
[
"wind_direction",
:integer,
{
payload: %w[wind_direction windDirection],
telemetry: %w[wind_direction windDirection],
environment: %w[wind_direction windDirection],
},
],
[
"wind_speed",
:float,
{
payload: %w[wind_speed windSpeed windSpeedMps],
telemetry: %w[wind_speed windSpeed windSpeedMps],
environment: %w[wind_speed windSpeed windSpeedMps],
},
],
[
"weight",
:float,
{
payload: %w[weight mass],
telemetry: %w[weight mass],
environment: %w[weight mass],
},
],
[
"wind_gust",
:float,
{
payload: %w[wind_gust windGust],
telemetry: %w[wind_gust windGust],
environment: %w[wind_gust windGust],
},
],
[
"wind_lull",
:float,
{
payload: %w[wind_lull windLull],
telemetry: %w[wind_lull windLull],
environment: %w[wind_lull windLull],
},
],
[
"radiation",
:float,
{
payload: %w[radiation radiationLevel],
telemetry: %w[radiation radiationLevel],
environment: %w[radiation radiationLevel],
},
],
[
"rainfall_1h",
:float,
{
payload: %w[rainfall_1h rainfall1h rainfallOneHour],
telemetry: %w[rainfall_1h rainfall1h rainfallOneHour],
environment: %w[rainfall_1h rainfall1h rainfallOneHour],
},
],
[
"rainfall_24h",
:float,
{
payload: %w[rainfall_24h rainfall24h rainfallTwentyFourHour],
telemetry: %w[rainfall_24h rainfall24h rainfallTwentyFourHour],
environment: %w[rainfall_24h rainfall24h rainfallTwentyFourHour],
},
],
[
"soil_moisture",
:integer,
{
payload: %w[soil_moisture soilMoisture],
telemetry: %w[soil_moisture soilMoisture],
environment: %w[soil_moisture soilMoisture],
},
],
[
"soil_temperature",
:float,
{
payload: %w[soil_temperature soilTemperature],
telemetry: %w[soil_temperature soilTemperature],
environment: %w[soil_temperature soilTemperature],
},
],
]
metric_values = {}
metric_definitions.each do |column, type, key_map|
value = resolve_numeric_metric(key_map, sources, type)
metric_values[column] = value unless value.nil?
end
battery_level = payload.key?("battery_level") ? payload["battery_level"] : nil
battery_level = coerce_float(battery_level)
battery_level ||= coerce_float(fetch_metric.call(device_metrics, :battery_level, :batteryLevel))
voltage = payload.key?("voltage") ? payload["voltage"] : nil
voltage = coerce_float(voltage)
voltage ||= coerce_float(fetch_metric.call(device_metrics, :voltage))
channel_utilization = payload.key?("channel_utilization") ? payload["channel_utilization"] : nil
channel_utilization ||= payload["channelUtilization"] if payload.key?("channelUtilization")
channel_utilization = coerce_float(channel_utilization)
channel_utilization ||= coerce_float(fetch_metric.call(device_metrics, :channel_utilization, :channelUtilization))
air_util_tx = payload.key?("air_util_tx") ? payload["air_util_tx"] : nil
air_util_tx ||= payload["airUtilTx"] if payload.key?("airUtilTx")
air_util_tx = coerce_float(air_util_tx)
air_util_tx ||= coerce_float(fetch_metric.call(device_metrics, :air_util_tx, :airUtilTx))
uptime_seconds = payload.key?("uptime_seconds") ? payload["uptime_seconds"] : nil
uptime_seconds ||= payload["uptimeSeconds"] if payload.key?("uptimeSeconds")
uptime_seconds = coerce_integer(uptime_seconds)
uptime_seconds ||= coerce_integer(fetch_metric.call(device_metrics, :uptime_seconds, :uptimeSeconds))
temperature = payload.key?("temperature") ? payload["temperature"] : nil
temperature = coerce_float(temperature)
temperature ||= coerce_float(fetch_metric.call(environment_metrics, :temperature, :temperatureC, :temperature_c, :tempC))
relative_humidity = payload.key?("relative_humidity") ? payload["relative_humidity"] : nil
relative_humidity ||= payload["relativeHumidity"] if payload.key?("relativeHumidity")
relative_humidity ||= payload["humidity"] if payload.key?("humidity")
relative_humidity = coerce_float(relative_humidity)
relative_humidity ||= coerce_float(fetch_metric.call(environment_metrics, :relative_humidity, :relativeHumidity, :humidity))
barometric_pressure = payload.key?("barometric_pressure") ? payload["barometric_pressure"] : nil
barometric_pressure ||= payload["barometricPressure"] if payload.key?("barometricPressure")
barometric_pressure ||= payload["pressure"] if payload.key?("pressure")
barometric_pressure = coerce_float(barometric_pressure)
barometric_pressure ||= coerce_float(fetch_metric.call(environment_metrics, :barometric_pressure, :barometricPressure, :pressure))
battery_level = metric_values["battery_level"]
voltage = metric_values["voltage"]
channel_utilization = metric_values["channel_utilization"]
air_util_tx = metric_values["air_util_tx"]
uptime_seconds = metric_values["uptime_seconds"]
temperature = metric_values["temperature"]
relative_humidity = metric_values["relative_humidity"]
barometric_pressure = metric_values["barometric_pressure"]
gas_resistance = metric_values["gas_resistance"]
current = metric_values["current"]
iaq = metric_values["iaq"]
distance = metric_values["distance"]
lux = metric_values["lux"]
white_lux = metric_values["white_lux"]
ir_lux = metric_values["ir_lux"]
uv_lux = metric_values["uv_lux"]
wind_direction = metric_values["wind_direction"]
wind_speed = metric_values["wind_speed"]
weight = metric_values["weight"]
wind_gust = metric_values["wind_gust"]
wind_lull = metric_values["wind_lull"]
radiation = metric_values["radiation"]
rainfall_1h = metric_values["rainfall_1h"]
rainfall_24h = metric_values["rainfall_24h"]
soil_moisture = metric_values["soil_moisture"]
soil_temperature = metric_values["soil_temperature"]
row = [
telemetry_id,
@@ -849,13 +1164,33 @@ module PotatoMesh
temperature,
relative_humidity,
barometric_pressure,
gas_resistance,
current,
iaq,
distance,
lux,
white_lux,
ir_lux,
uv_lux,
wind_direction,
wind_speed,
weight,
wind_gust,
wind_lull,
radiation,
rainfall_1h,
rainfall_24h,
soil_moisture,
soil_temperature,
]
placeholders = Array.new(row.length, "?").join(",")
with_busy_retry do
db.execute <<~SQL, row
INSERT INTO telemetry(id,node_id,node_num,from_id,to_id,rx_time,rx_iso,telemetry_time,channel,portnum,hop_limit,snr,rssi,bitfield,payload_b64,
battery_level,voltage,channel_utilization,air_util_tx,uptime_seconds,temperature,relative_humidity,barometric_pressure)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
battery_level,voltage,channel_utilization,air_util_tx,uptime_seconds,temperature,relative_humidity,barometric_pressure,gas_resistance,current,iaq,distance,lux,white_lux,ir_lux,uv_lux,wind_direction,wind_speed,weight,wind_gust,wind_lull,radiation,rainfall_1h,rainfall_24h,soil_moisture,soil_temperature)
VALUES (#{placeholders})
ON CONFLICT(id) DO UPDATE SET
node_id=COALESCE(excluded.node_id,telemetry.node_id),
node_num=COALESCE(excluded.node_num,telemetry.node_num),
@@ -878,7 +1213,25 @@ module PotatoMesh
uptime_seconds=COALESCE(excluded.uptime_seconds,telemetry.uptime_seconds),
temperature=COALESCE(excluded.temperature,telemetry.temperature),
relative_humidity=COALESCE(excluded.relative_humidity,telemetry.relative_humidity),
barometric_pressure=COALESCE(excluded.barometric_pressure,telemetry.barometric_pressure)
barometric_pressure=COALESCE(excluded.barometric_pressure,telemetry.barometric_pressure),
gas_resistance=COALESCE(excluded.gas_resistance,telemetry.gas_resistance),
current=COALESCE(excluded.current,telemetry.current),
iaq=COALESCE(excluded.iaq,telemetry.iaq),
distance=COALESCE(excluded.distance,telemetry.distance),
lux=COALESCE(excluded.lux,telemetry.lux),
white_lux=COALESCE(excluded.white_lux,telemetry.white_lux),
ir_lux=COALESCE(excluded.ir_lux,telemetry.ir_lux),
uv_lux=COALESCE(excluded.uv_lux,telemetry.uv_lux),
wind_direction=COALESCE(excluded.wind_direction,telemetry.wind_direction),
wind_speed=COALESCE(excluded.wind_speed,telemetry.wind_speed),
weight=COALESCE(excluded.weight,telemetry.weight),
wind_gust=COALESCE(excluded.wind_gust,telemetry.wind_gust),
wind_lull=COALESCE(excluded.wind_lull,telemetry.wind_lull),
radiation=COALESCE(excluded.radiation,telemetry.radiation),
rainfall_1h=COALESCE(excluded.rainfall_1h,telemetry.rainfall_1h),
rainfall_24h=COALESCE(excluded.rainfall_24h,telemetry.rainfall_24h),
soil_moisture=COALESCE(excluded.soil_moisture,telemetry.soil_moisture),
soil_temperature=COALESCE(excluded.soil_temperature,telemetry.soil_temperature)
SQL
end
@@ -891,6 +1244,74 @@ module PotatoMesh
})
end
# Persist a traceroute observation and its hop path.
#
# The trace row is keyed by the packet identifier when available; hop
# entries are fully replaced on every ingest so the stored path always
# mirrors the most recent observation.
#
# @param db [SQLite3::Database] open database handle.
# @param payload [Hash] traceroute payload as produced by the ingestor.
# @return [void]
def insert_trace(db, payload)
  return unless payload.is_a?(Hash)

  # Prefer the explicit packet id, then any trace id, then the request id.
  trace_identifier = coerce_integer(payload["id"] || payload["packet_id"] || payload["packetId"])
  trace_identifier ||= coerce_integer(payload["trace_id"])
  request_id = coerce_integer(payload["request_id"] || payload["req"])
  trace_identifier ||= request_id

  now = Time.now.to_i
  rx_time = coerce_integer(payload["rx_time"])
  # Clamp missing or future receive times to "now" so ordering stays sane.
  rx_time = now if rx_time.nil? || rx_time > now
  rx_iso = string_or_nil(payload["rx_iso"]) || Time.at(rx_time).utc.iso8601

  metrics = normalize_json_object(payload["metrics"])
  src = coerce_integer(payload["src"] || payload["source"] || payload["from"])
  dest = coerce_integer(payload["dest"] || payload["destination"] || payload["to"])
  # Use safe navigation consistently: the elapsed_ms lookups below already
  # treat metrics as possibly nil, so the rssi/snr lookups must as well or a
  # payload without a metrics object would raise NoMethodError here.
  rssi = coerce_integer(payload["rssi"]) || coerce_integer(metrics&.[]("rssi"))
  snr = coerce_float(payload["snr"]) || coerce_float(metrics&.[]("snr"))
  elapsed_ms = coerce_integer(
    payload["elapsed_ms"] ||
      payload["latency_ms"] ||
      metrics&.[]("elapsed_ms") ||
      metrics&.[]("latency_ms") ||
      metrics&.[]("latencyMs"),
  )

  hops_value = payload.key?("hops") ? payload["hops"] : payload["path"]
  hops = normalize_trace_hops(hops_value)

  # Every node referenced by the trace must exist and be marked as seen.
  all_nodes = [src, dest, *hops].compact.uniq
  all_nodes.each do |node|
    ensure_unknown_node(db, node, node, heard_time: rx_time)
    touch_node_last_seen(db, node, node, rx_time: rx_time, source: :trace)
  end

  with_busy_retry do
    db.execute <<~SQL, [trace_identifier, request_id, src, dest, rx_time, rx_iso, rssi, snr, elapsed_ms]
      INSERT INTO traces(id, request_id, src, dest, rx_time, rx_iso, rssi, snr, elapsed_ms)
      VALUES(?,?,?,?,?,?,?,?,?)
      ON CONFLICT(id) DO UPDATE SET
      request_id=COALESCE(excluded.request_id,traces.request_id),
      src=COALESCE(excluded.src,traces.src),
      dest=COALESCE(excluded.dest,traces.dest),
      rx_time=excluded.rx_time,
      rx_iso=excluded.rx_iso,
      rssi=COALESCE(excluded.rssi,traces.rssi),
      snr=COALESCE(excluded.snr,traces.snr),
      elapsed_ms=COALESCE(excluded.elapsed_ms,traces.elapsed_ms)
    SQL
    # Fall back to the rowid SQLite assigned when no identifier was supplied.
    trace_id = trace_identifier || db.last_insert_row_id
    return unless trace_id
    # Replace the hop list wholesale so stale hops never linger.
    db.execute("DELETE FROM trace_hops WHERE trace_id = ?", [trace_id])
    hops.each_with_index do |hop_id, index|
      db.execute(
        "INSERT INTO trace_hops(trace_id, hop_index, node_id) VALUES(?,?,?)",
        [trace_id, index, hop_id],
      )
    end
  end
end
def insert_message(db, message)
return unless message.is_a?(Hash)
@@ -949,6 +1370,8 @@ module PotatoMesh
lora_freq = coerce_integer(message["lora_freq"] || message["loraFrequency"])
modem_preset = string_or_nil(message["modem_preset"] || message["modemPreset"])
channel_name = string_or_nil(message["channel_name"] || message["channelName"])
reply_id = coerce_integer(message["reply_id"] || message["replyId"])
emoji = string_or_nil(message["emoji"])
row = [
msg_id,
@@ -966,11 +1389,13 @@ module PotatoMesh
lora_freq,
modem_preset,
channel_name,
reply_id,
emoji,
]
with_busy_retry do
existing = db.get_first_row(
"SELECT from_id, to_id, encrypted, lora_freq, modem_preset, channel_name FROM messages WHERE id = ?",
"SELECT from_id, to_id, encrypted, lora_freq, modem_preset, channel_name, reply_id, emoji FROM messages WHERE id = ?",
[msg_id],
)
if existing
@@ -1021,6 +1446,19 @@ module PotatoMesh
updates["channel_name"] = channel_name if should_update
end
unless reply_id.nil?
existing_reply = existing.is_a?(Hash) ? existing["reply_id"] : existing[6]
updates["reply_id"] = reply_id if existing_reply != reply_id
end
if emoji
existing_emoji = existing.is_a?(Hash) ? existing["emoji"] : existing[7]
existing_emoji_str = existing_emoji&.to_s
should_update = existing_emoji_str.nil? || existing_emoji_str.strip.empty?
should_update ||= existing_emoji != emoji
updates["emoji"] = emoji if should_update
end
unless updates.empty?
assignments = updates.keys.map { |column| "#{column} = ?" }.join(", ")
db.execute("UPDATE messages SET #{assignments} WHERE id = ?", updates.values + [msg_id])
@@ -1030,8 +1468,8 @@ module PotatoMesh
begin
db.execute <<~SQL, row
INSERT INTO messages(id,rx_time,rx_iso,from_id,to_id,channel,portnum,text,encrypted,snr,rssi,hop_limit,lora_freq,modem_preset,channel_name)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
INSERT INTO messages(id,rx_time,rx_iso,from_id,to_id,channel,portnum,text,encrypted,snr,rssi,hop_limit,lora_freq,modem_preset,channel_name,reply_id,emoji)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
SQL
rescue SQLite3::ConstraintException
fallback_updates = {}
@@ -1041,6 +1479,8 @@ module PotatoMesh
fallback_updates["lora_freq"] = lora_freq unless lora_freq.nil?
fallback_updates["modem_preset"] = modem_preset if modem_preset
fallback_updates["channel_name"] = channel_name if channel_name
fallback_updates["reply_id"] = reply_id unless reply_id.nil?
fallback_updates["emoji"] = emoji if emoji
unless fallback_updates.empty?
assignments = fallback_updates.keys.map { |column| "#{column} = ?" }.join(", ")
db.execute("UPDATE messages SET #{assignments} WHERE id = ?", fallback_updates.values + [msg_id])
+71 -3
View File
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -15,6 +17,30 @@
module PotatoMesh
module App
module Database
# Column definitions required for environment telemetry support. Each
# entry pairs the column name with the SQL type used when backfilling
# legacy databases that pre-date the extended telemetry schema.
#
# NOTE: the order of entries only affects the order in which missing
# columns are added via ALTER TABLE during schema upgrades; it does not
# need to match the base telemetry schema.
TELEMETRY_COLUMN_DEFINITIONS = [
  ["gas_resistance", "REAL"],
  ["current", "REAL"],
  ["iaq", "INTEGER"],
  ["distance", "REAL"],
  ["lux", "REAL"],
  ["white_lux", "REAL"],
  ["ir_lux", "REAL"],
  ["uv_lux", "REAL"],
  ["wind_direction", "INTEGER"],
  ["wind_speed", "REAL"],
  ["weight", "REAL"],
  ["wind_gust", "REAL"],
  ["wind_lull", "REAL"],
  ["radiation", "REAL"],
  ["rainfall_1h", "REAL"],
  ["rainfall_24h", "REAL"],
  ["soil_moisture", "INTEGER"],
  ["soil_temperature", "REAL"],
].freeze
# Open a connection to the application database applying common pragmas.
#
# @param readonly [Boolean] whether to open the database in read-only mode.
@@ -55,10 +81,10 @@ module PotatoMesh
return false unless File.exist?(PotatoMesh::Config.db_path)
db = open_database(readonly: true)
required = %w[nodes messages positions telemetry neighbors instances]
required = %w[nodes messages positions telemetry neighbors instances traces trace_hops]
tables =
db.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages','positions','telemetry','neighbors','instances')",
"SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages','positions','telemetry','neighbors','instances','traces','trace_hops')",
).flatten
(required - tables).empty?
rescue SQLite3::Exception
@@ -73,7 +99,7 @@ module PotatoMesh
def init_db
FileUtils.mkdir_p(File.dirname(PotatoMesh::Config.db_path))
db = open_database
%w[nodes messages positions telemetry neighbors instances].each do |schema|
%w[nodes messages positions telemetry neighbors instances traces].each do |schema|
sql_file = File.expand_path("../../../../data/#{schema}.sql", __dir__)
db.execute_batch(File.read(sql_file))
end
@@ -114,11 +140,53 @@ module PotatoMesh
db.execute("ALTER TABLE messages ADD COLUMN channel_name TEXT")
end
unless message_columns.include?("reply_id")
db.execute("ALTER TABLE messages ADD COLUMN reply_id INTEGER")
message_columns << "reply_id"
end
unless message_columns.include?("emoji")
db.execute("ALTER TABLE messages ADD COLUMN emoji TEXT")
message_columns << "emoji"
end
reply_index_exists =
db.get_first_value(
"SELECT COUNT(*) FROM sqlite_master WHERE type='index' AND name='idx_messages_reply_id'",
).to_i > 0
unless reply_index_exists
db.execute("CREATE INDEX IF NOT EXISTS idx_messages_reply_id ON messages(reply_id)")
end
tables = db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='instances'").flatten
if tables.empty?
sql_file = File.expand_path("../../../../data/instances.sql", __dir__)
db.execute_batch(File.read(sql_file))
end
telemetry_tables =
db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='telemetry'").flatten
if telemetry_tables.empty?
telemetry_schema = File.expand_path("../../../../data/telemetry.sql", __dir__)
db.execute_batch(File.read(telemetry_schema))
end
telemetry_columns = db.execute("PRAGMA table_info(telemetry)").map { |row| row[1] }
TELEMETRY_COLUMN_DEFINITIONS.each do |name, type|
next if telemetry_columns.include?(name)
db.execute("ALTER TABLE telemetry ADD COLUMN #{name} #{type}")
telemetry_columns << name
end
trace_tables =
db.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name IN ('traces','trace_hops')",
).flatten
unless trace_tables.include?("traces") && trace_tables.include?("trace_hops")
traces_schema = File.expand_path("../../../../data/traces.sql", __dir__)
db.execute_batch(File.read(traces_schema))
end
rescue SQLite3::SQLException, Errno::ENOENT => e
warn_log(
"Failed to apply schema upgrade",
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
+638 -37
View File
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -15,13 +17,47 @@
module PotatoMesh
module App
module Federation
# Resolve the canonical domain for the running instance.
#
# @return [String, nil] sanitized instance domain or nil outside production.
# @raise [RuntimeError] when the domain cannot be determined in production.
def self_instance_domain
  domain = sanitize_instance_domain(app_constant(:INSTANCE_DOMAIN))
  return domain if domain

  # Production demands a resolvable domain; elsewhere we log and carry on.
  raise "INSTANCE_DOMAIN could not be determined" if production_environment?

  debug_log(
    "INSTANCE_DOMAIN unavailable; skipping self instance domain",
    context: "federation.instances",
    app_env: string_or_nil(ENV["APP_ENV"]),
    rack_env: string_or_nil(ENV["RACK_ENV"]),
    source: app_constant(:INSTANCE_DOMAIN_SOURCE),
  )
  nil
end
# Determine whether the local instance should persist its own record.
#
# @param domain [String, nil] candidate domain for the running instance.
# @return [Array(Boolean, String, nil)] tuple containing a decision flag and an optional reason.
def self_instance_registration_decision(domain)
  origin = app_constant(:INSTANCE_DOMAIN_SOURCE)
  if origin != :environment
    [false, "INSTANCE_DOMAIN source is #{origin}"]
  elsif !(candidate = sanitize_instance_domain(domain))
    [false, "INSTANCE_DOMAIN missing or invalid"]
  else
    address = ip_from_domain(candidate)
    if address && restricted_ip_address?(address)
      [false, "INSTANCE_DOMAIN resolves to restricted IP"]
    else
      [true, nil]
    end
  end
end
def self_instance_attributes
domain = self_instance_domain
last_update = latest_node_update_timestamp || Time.now.to_i
@@ -68,43 +104,130 @@ module PotatoMesh
def ensure_self_instance_record!
attributes = self_instance_attributes
signature = sign_instance_attributes(attributes)
db = open_database
upsert_instance_record(db, attributes, signature)
debug_log(
"Registered self instance record",
context: "federation.instances",
domain: attributes[:domain],
instance_id: attributes[:id],
)
db = nil
allowed, reason = self_instance_registration_decision(attributes[:domain])
if allowed
db = open_database
upsert_instance_record(db, attributes, signature)
debug_log(
"Registered self instance record",
context: "federation.instances",
domain: attributes[:domain],
instance_id: attributes[:id],
)
else
debug_log(
"Skipped self instance registration",
context: "federation.instances",
domain: attributes[:domain],
reason: reason,
)
end
[attributes, signature]
ensure
db&.close
end
# Retrieve or initialize the worker pool servicing federation jobs.
#
# @return [PotatoMesh::App::WorkerPool, nil] active worker pool or nil when disabled.
def federation_worker_pool
ensure_federation_worker_pool!
end
# Ensure the federation worker pool exists when federation remains enabled.
#
# @return [PotatoMesh::App::WorkerPool, nil] active worker pool if created.
def ensure_federation_worker_pool!
return nil unless federation_enabled?
existing = settings.respond_to?(:federation_worker_pool) ? settings.federation_worker_pool : nil
return existing if existing&.alive?
pool = PotatoMesh::App::WorkerPool.new(
size: PotatoMesh::Config.federation_worker_pool_size,
max_queue: PotatoMesh::Config.federation_worker_queue_capacity,
name: "potato-mesh-fed",
)
at_exit do
begin
pool.shutdown(timeout: PotatoMesh::Config.federation_task_timeout_seconds)
rescue StandardError
# Suppress shutdown errors during interpreter teardown.
end
end
set(:federation_worker_pool, pool) if respond_to?(:set)
pool
end
# Shutdown and clear the federation worker pool if present.
#
# @return [void]
def shutdown_federation_worker_pool!
existing = settings.respond_to?(:federation_worker_pool) ? settings.federation_worker_pool : nil
return unless existing
begin
existing.shutdown(timeout: PotatoMesh::Config.federation_task_timeout_seconds)
rescue StandardError => e
warn_log(
"Failed to shut down federation worker pool",
context: "federation",
error_class: e.class.name,
error_message: e.message,
)
ensure
set(:federation_worker_pool, nil) if respond_to?(:set)
end
end
def federation_target_domains(self_domain)
domains = Set.new
normalized_self = sanitize_instance_domain(self_domain)&.downcase
ordered = []
seen = Set.new
PotatoMesh::Config.federation_seed_domains.each do |seed|
sanitized = sanitize_instance_domain(seed)
domains << sanitized.downcase if sanitized
sanitized = sanitize_instance_domain(seed)&.downcase
next unless sanitized
next if normalized_self && sanitized == normalized_self
next if seen.include?(sanitized)
ordered << sanitized
seen << sanitized
end
db = open_database(readonly: true)
db.results_as_hash = false
rows = with_busy_retry { db.execute("SELECT domain FROM instances WHERE domain IS NOT NULL AND TRIM(domain) != ''") }
rows.flatten.compact.each do |raw_domain|
sanitized = sanitize_instance_domain(raw_domain)
domains << sanitized.downcase if sanitized
cutoff = Time.now.to_i - PotatoMesh::Config.week_seconds
rows = with_busy_retry do
db.execute(
"SELECT domain, last_update_time FROM instances WHERE domain IS NOT NULL AND TRIM(domain) != ''",
)
end
if self_domain
domains.delete(self_domain.downcase)
rows.each do |row|
raw_domain = row[0]
last_update_time = coerce_integer(row[1])
next unless last_update_time && last_update_time >= cutoff
sanitized = sanitize_instance_domain(raw_domain)&.downcase
next unless sanitized
next if normalized_self && sanitized == normalized_self
next if seen.include?(sanitized)
ordered << sanitized
seen << sanitized
end
domains.to_a
ordered
rescue SQLite3::Exception
domains =
PotatoMesh::Config.federation_seed_domains.map do |seed|
sanitize_instance_domain(seed)&.downcase
end.compact
self_domain ? domains.reject { |domain| domain == self_domain.downcase } : domains
fallback = PotatoMesh::Config.federation_seed_domains.filter_map do |seed|
candidate = sanitize_instance_domain(seed)&.downcase
next if normalized_self && candidate == normalized_self
candidate
end
fallback.uniq
ensure
db&.close
end
@@ -112,12 +235,13 @@ module PotatoMesh
def announce_instance_to_domain(domain, payload_json)
return false unless domain && !domain.empty?
https_failures = []
instance_uri_candidates(domain, "/api/instances").each do |uri|
begin
http = build_remote_http_client(uri)
response = http.start do |connection|
request = Net::HTTP::Post.new(uri)
request["Content-Type"] = "application/json"
request = build_federation_http_request(Net::HTTP::Post, uri)
request.body = payload_json
connection.request(request)
end
@@ -137,16 +261,51 @@ module PotatoMesh
status: response.code,
)
rescue StandardError => e
warn_log(
"Federation announcement raised exception",
metadata = {
context: "federation.announce",
target: uri.to_s,
error_class: e.class.name,
error_message: e.message,
}
if uri.scheme == "https" && https_connection_refused?(e)
debug_log(
"HTTPS federation announcement failed, retrying with HTTP",
**metadata,
)
https_failures << metadata
next
end
warn_log(
"Federation announcement raised exception",
**metadata,
)
end
end
https_failures.each do |metadata|
warn_log(
"Federation announcement raised exception",
**metadata,
)
end
false
end
# Determine whether an HTTPS announcement failure should fall back to HTTP.
#
# Walks the exception's cause chain recursively looking for a refused
# TCP connection anywhere in the chain.
#
# @param error [StandardError] failure raised while attempting HTTPS.
# @return [Boolean] true when the error corresponds to a refused TCP connection.
def https_connection_refused?(error)
  return false if error.nil?
  return true if error.is_a?(Errno::ECONNREFUSED)

  nested = error.respond_to?(:cause) ? error.cause : nil
  https_connection_refused?(nested)
end
@@ -156,9 +315,39 @@ module PotatoMesh
attributes, signature = ensure_self_instance_record!
payload_json = JSON.generate(instance_announcement_payload(attributes, signature))
domains = federation_target_domains(attributes[:domain])
pool = federation_worker_pool
scheduled = []
domains.each do |domain|
if pool
begin
task = pool.schedule do
announce_instance_to_domain(domain, payload_json)
end
scheduled << [domain, task]
next
rescue PotatoMesh::App::WorkerPool::QueueFullError
warn_log(
"Skipped asynchronous federation announcement",
context: "federation.announce",
domain: domain,
reason: "worker queue saturated",
)
rescue PotatoMesh::App::WorkerPool::ShutdownError
warn_log(
"Worker pool unavailable, falling back to synchronous announcement",
context: "federation.announce",
domain: domain,
)
pool = nil
end
end
announce_instance_to_domain(domain, payload_json)
end
wait_for_federation_tasks(scheduled)
unless domains.empty?
debug_log(
"Federation announcement cycle complete",
@@ -168,7 +357,42 @@ module PotatoMesh
end
end
# Wait for scheduled federation tasks to complete while logging failures.
#
# Timeouts and generic task failures are logged at warn level; neither
# aborts the loop, so every scheduled task is waited on.
#
# @param scheduled [Array<(String, PotatoMesh::App::WorkerPool::Task)>] pairs of domains and tasks.
# @return [void]
def wait_for_federation_tasks(scheduled)
  return if scheduled.empty?

  deadline = PotatoMesh::Config.federation_task_timeout_seconds
  scheduled.each do |domain, task|
    task.wait(timeout: deadline)
  rescue PotatoMesh::App::WorkerPool::TaskTimeoutError => e
    warn_log(
      "Federation announcement task timed out",
      context: "federation.announce",
      domain: domain,
      timeout: deadline,
      error_class: e.class.name,
      error_message: e.message,
    )
  rescue StandardError => e
    warn_log(
      "Federation announcement task failed",
      context: "federation.announce",
      domain: domain,
      error_class: e.class.name,
      error_message: e.message,
    )
  end
end
def start_federation_announcer!
# Federation broadcasts must not execute when federation support is disabled.
return nil unless federation_enabled?
existing = settings.federation_thread
return existing if existing&.alive?
@@ -192,12 +416,20 @@ module PotatoMesh
thread
end
# Launch a background thread responsible for the first federation broadcast.
#
# @return [Thread, nil] the thread handling the initial announcement.
def start_initial_federation_announcement!
# Skip the initial broadcast entirely when federation is disabled.
return nil unless federation_enabled?
existing = settings.respond_to?(:initial_federation_thread) ? settings.initial_federation_thread : nil
return existing if existing&.alive?
thread = Thread.new do
begin
delay = PotatoMesh::Config.initial_federation_delay_seconds
Kernel.sleep(delay) if delay.positive?
announce_instance_to_all_domains
rescue StandardError => e
warn_log(
@@ -257,7 +489,8 @@ module PotatoMesh
def perform_instance_http_request(uri)
http = build_remote_http_client(uri)
http.start do |connection|
response = connection.request(Net::HTTP::Get.new(uri))
request = build_federation_http_request(Net::HTTP::Get, uri)
response = connection.request(request)
case response
when Net::HTTPSuccess
response.body
@@ -266,7 +499,56 @@ module PotatoMesh
end
end
rescue StandardError => e
raise InstanceFetchError, e.message
raise_instance_fetch_error(e)
end
# Build an HTTP request decorated with the headers required for federation peers.
#
# @param request_class [Class<Net::HTTPRequest>] HTTP request class such as {Net::HTTP::Get}.
# @param uri [URI::Generic] target URI describing the remote endpoint.
# @return [Net::HTTPRequest] configured HTTP request including standard headers.
def build_federation_http_request(request_class, uri)
  request_class.new(uri).tap do |req|
    req["User-Agent"] = federation_user_agent_header
    req["Accept"] = "application/json"
    # Only verbs that may carry a body (POST, PUT, ...) get a content type.
    req["Content-Type"] = "application/json" if req.request_body_permitted?
  end
end
# Compose the User-Agent string used when communicating with federation peers.
#
# @return [String] descriptive identifier for PotatoMesh federation requests.
def federation_user_agent_header
  raw_version = app_constant(:APP_VERSION).to_s
  version = raw_version.empty? ? "unknown" : raw_version
  agent = "PotatoMesh/#{version}"
  domain = sanitize_instance_domain(app_constant(:INSTANCE_DOMAIN), downcase: true)
  if domain && !domain.empty?
    # Advertise the instance URL alongside the version when a domain is known.
    "#{agent} (+https://#{domain})"
  else
    agent
  end
end
# Build a human readable error message for a failed instance request.
#
# @param error [StandardError] failure raised while performing the request.
# @return [String] description including the error class when necessary.
def instance_fetch_error_message(error)
  # Anonymous classes have a nil name; fall back to their string form.
  label = error.class.name || error.class.to_s
  detail = error.message.to_s.strip
  if detail.empty?
    label
  elsif detail.include?(label)
    detail
  else
    "#{label}: #{detail}"
  end
end
# Raise an InstanceFetchError that preserves the original context.
#
# @param error [StandardError] failure raised while performing the request.
# @return [void]
def raise_instance_fetch_error(error)
  replacement = InstanceFetchError.new(instance_fetch_error_message(error))
  # Re-use the original backtrace so logs point at the true failure site.
  replacement.set_backtrace(error.backtrace)
  raise replacement
end
def fetch_instance_json(domain, path)
@@ -284,14 +566,306 @@ module PotatoMesh
[nil, errors]
end
# Parse a remote federation instance payload into canonical attributes.
#
# @param payload [Hash] JSON object describing a remote instance.
# @return [Array<(Hash, String), String>] tuple containing the attribute
#   hash and signature when valid or a failure reason when invalid.
def remote_instance_attributes_from_payload(payload)
  unless payload.is_a?(Hash)
    return [nil, nil, "instance payload is not an object"]
  end
  # Required fields: id, domain, pubkey and signature must all be present,
  # each reported with its own rejection reason for diagnosability.
  id = string_or_nil(payload["id"])
  return [nil, nil, "missing instance id"] unless id
  domain = sanitize_instance_domain(payload["domain"])
  return [nil, nil, "missing instance domain"] unless domain
  pubkey = sanitize_public_key_pem(payload["pubkey"])
  return [nil, nil, "missing instance public key"] unless pubkey
  signature = string_or_nil(payload["signature"])
  return [nil, nil, "missing instance signature"] unless signature
  # Accept both camelCase and snake_case spellings of the privacy flag;
  # camelCase wins when both keys are present.
  private_value = if payload.key?("isPrivate")
    payload["isPrivate"]
  else
    payload["is_private"]
  end
  private_flag = coerce_boolean(private_value)
  if private_flag.nil?
    # Fall back to a numeric reading (0 => public, non-zero => private).
    numeric_flag = coerce_integer(private_value)
    private_flag = !numeric_flag.to_i.zero? if numeric_flag
  end
  attributes = {
    id: id,
    domain: domain,
    pubkey: pubkey,
    name: string_or_nil(payload["name"]),
    version: string_or_nil(payload["version"]),
    channel: string_or_nil(payload["channel"]),
    frequency: string_or_nil(payload["frequency"]),
    latitude: coerce_float(payload["latitude"]),
    longitude: coerce_float(payload["longitude"]),
    last_update_time: coerce_integer(payload["lastUpdateTime"]),
    is_private: private_flag,
  }
  [attributes, signature, nil]
rescue StandardError => e
  # Surface unexpected parsing failures as a rejection reason rather than
  # letting them escape and abort the surrounding crawl.
  [nil, nil, e.message]
end
# Enqueue a federation crawl for the supplied domain using the worker pool.
#
# @param domain [String] sanitized remote domain to crawl.
# @param per_response_limit [Integer, nil] maximum entries processed per response.
# @param overall_limit [Integer, nil] maximum unique domains visited.
# @return [Boolean] true when the crawl was scheduled successfully.
def enqueue_federation_crawl(domain, per_response_limit:, overall_limit:)
  pool = federation_worker_pool
  unless pool
    # No pool means federation is disabled; skip quietly at debug level.
    debug_log(
      "Skipped remote instance crawl",
      context: "federation.instances",
      domain: domain,
      reason: "federation disabled",
    )
    return false
  end
  # Resolve the application class whether called on the class itself or on
  # an instance, so the scheduled block can open its own database handle.
  application = is_a?(Class) ? self : self.class
  pool.schedule do
    db = application.open_database
    begin
      application.ingest_known_instances_from!(
        db,
        domain,
        per_response_limit: per_response_limit,
        overall_limit: overall_limit,
      )
    ensure
      # Always release the per-task database handle.
      db&.close
    end
  end
  true
rescue PotatoMesh::App::WorkerPool::QueueFullError
  warn_log(
    "Skipped remote instance crawl",
    context: "federation.instances",
    domain: domain,
    reason: "worker queue saturated",
  )
  false
rescue PotatoMesh::App::WorkerPool::ShutdownError
  warn_log(
    "Skipped remote instance crawl",
    context: "federation.instances",
    domain: domain,
    reason: "worker pool shut down",
  )
  false
end
# Recursively ingest federation records exposed by the supplied domain.
#
# @param db [SQLite3::Database] open database connection used for writes.
# @param domain [String] remote domain to crawl for federation records.
# @param visited [Set<String>] domains processed during this crawl.
# @param per_response_limit [Integer, nil] maximum entries processed per response.
# @param overall_limit [Integer, nil] maximum unique domains visited.
# @return [Set<String>] updated set of visited domains.
def ingest_known_instances_from!(
  db,
  domain,
  visited: nil,
  per_response_limit: nil,
  overall_limit: nil
)
  sanitized = sanitize_instance_domain(domain)
  return visited || Set.new unless sanitized
  visited ||= Set.new
  # Apply configured defaults when the caller did not supply limits.
  overall_limit ||= PotatoMesh::Config.federation_max_domains_per_crawl
  per_response_limit ||= PotatoMesh::Config.federation_max_instances_per_response
  if overall_limit && overall_limit.positive? && visited.size >= overall_limit
    debug_log(
      "Skipped remote instance crawl due to crawl limit",
      context: "federation.instances",
      domain: sanitized,
      limit: overall_limit,
    )
    return visited
  end
  # Each domain is crawled at most once per recursion tree.
  return visited if visited.include?(sanitized)
  visited << sanitized
  payload, metadata = fetch_instance_json(sanitized, "/api/instances")
  unless payload.is_a?(Array)
    warn_log(
      "Failed to load remote federation instances",
      context: "federation.instances",
      domain: sanitized,
      reason: Array(metadata).map(&:to_s).join("; "),
    )
    return visited
  end
  processed_entries = 0
  payload.each do |entry|
    # Stop once either the per-response or the overall crawl budget is spent.
    if per_response_limit && per_response_limit.positive? && processed_entries >= per_response_limit
      debug_log(
        "Skipped remote instance entry due to response limit",
        context: "federation.instances",
        domain: sanitized,
        limit: per_response_limit,
      )
      break
    end
    if overall_limit && overall_limit.positive? && visited.size >= overall_limit
      debug_log(
        "Skipped remote instance entry due to crawl limit",
        context: "federation.instances",
        domain: sanitized,
        limit: overall_limit,
      )
      break
    end
    processed_entries += 1
    attributes, signature, reason = remote_instance_attributes_from_payload(entry)
    unless attributes && signature
      warn_log(
        "Discarded remote instance entry",
        context: "federation.instances",
        domain: sanitized,
        reason: reason || "invalid payload",
      )
      next
    end
    # Private instances opt out of federation listings entirely.
    if attributes[:is_private]
      debug_log(
        "Skipped private remote instance",
        context: "federation.instances",
        domain: attributes[:domain],
      )
      next
    end
    # Only accept records whose signature matches the advertised public key.
    unless verify_instance_signature(attributes, signature, attributes[:pubkey])
      warn_log(
        "Discarded remote instance entry",
        context: "federation.instances",
        domain: attributes[:domain],
        reason: "invalid signature",
      )
      next
    end
    attributes[:is_private] = false if attributes[:is_private].nil?
    # Require reachable, fresh node data before persisting the instance.
    remote_nodes, node_metadata = fetch_instance_json(attributes[:domain], "/api/nodes")
    unless remote_nodes
      warn_log(
        "Failed to load remote node data",
        context: "federation.instances",
        domain: attributes[:domain],
        reason: Array(node_metadata).map(&:to_s).join("; "),
      )
      next
    end
    fresh, freshness_reason = validate_remote_nodes(remote_nodes)
    unless fresh
      warn_log(
        "Discarded remote instance entry",
        context: "federation.instances",
        domain: attributes[:domain],
        reason: freshness_reason || "stale node data",
      )
      next
    end
    begin
      upsert_instance_record(db, attributes, signature)
      # Recurse into the newly discovered domain, sharing the visited set
      # and limits so the whole crawl stays within budget.
      ingest_known_instances_from!(
        db,
        attributes[:domain],
        visited: visited,
        per_response_limit: per_response_limit,
        overall_limit: overall_limit,
      )
    rescue ArgumentError => e
      warn_log(
        "Failed to persist remote instance",
        context: "federation.instances",
        domain: attributes[:domain],
        error_class: e.class.name,
        error_message: e.message,
      )
    end
  end
  visited
end
# Resolve the host component of a remote URI and ensure the destination is
# safe for federation HTTP requests.
#
# The method performs a DNS lookup using Addrinfo to capture every
# available address for the supplied URI host. The resulting addresses are
# converted to {IPAddr} objects for consistent inspection via
# {restricted_ip_address?}. When all resolved addresses fall within
# restricted ranges, the method raises an ArgumentError so callers can
# abort the federation request before contacting the remote endpoint.
#
# @param uri [URI::Generic] remote endpoint candidate.
# @return [Array<IPAddr>] list of resolved, unrestricted IP addresses.
# @raise [ArgumentError] when +uri.host+ is missing or blank, or resolves
#   solely to restricted addresses.
def resolve_remote_ip_addresses(uri)
  host = uri&.host
  # Treat an empty host string the same as a missing one so the caller gets
  # a clear ArgumentError instead of an opaque resolver failure.
  raise ArgumentError, "URI missing host" if host.nil? || host.empty?
  addrinfo_records = Addrinfo.getaddrinfo(host, nil, Socket::AF_UNSPEC, Socket::SOCK_STREAM)
  addresses = addrinfo_records.filter_map do |addr|
    begin
      IPAddr.new(addr.ip_address)
    rescue IPAddr::InvalidAddressError
      nil
    end
  end
  # De-duplicate by family + textual form; the resolver may repeat entries.
  unique_addresses = addresses.uniq { |ip| [ip.family, ip.to_s] }
  unrestricted_addresses = unique_addresses.reject { |ip| restricted_ip_address?(ip) }
  if unique_addresses.any? && unrestricted_addresses.empty?
    raise ArgumentError, "restricted domain"
  end
  unrestricted_addresses
end
# Build an HTTP client configured for communication with a remote instance.
#
# @param uri [URI::Generic] target URI describing the remote endpoint.
# @return [Net::HTTP] HTTP client ready to execute the request.
def build_remote_http_client(uri)
remote_addresses = resolve_remote_ip_addresses(uri)
http = Net::HTTP.new(uri.host, uri.port)
if http.respond_to?(:ipaddr=) && remote_addresses.any?
http.ipaddr = remote_addresses.first.to_s
end
http.open_timeout = PotatoMesh::Config.remote_instance_http_timeout
http.read_timeout = PotatoMesh::Config.remote_instance_http_timeout
http.read_timeout = PotatoMesh::Config.remote_instance_read_timeout
http.use_ssl = uri.scheme == "https"
return http unless http.use_ssl?
@@ -435,14 +1009,13 @@ module PotatoMesh
latest = nodes.filter_map do |node|
next unless node.is_a?(Hash)
timestamps = []
timestamps << coerce_integer(node["last_heard"])
timestamps << coerce_integer(node["position_time"])
timestamps << coerce_integer(node["first_heard"])
timestamps.compact.max
last_heard_values = []
last_heard_values << coerce_integer(node["last_heard"])
last_heard_values << coerce_integer(node["lastHeard"])
last_heard_values.compact.max
end.compact.max
return [false, "missing recent node updates"] unless latest
return [false, "missing last_heard data"] unless latest
cutoff = Time.now.to_i - PotatoMesh::Config.remote_instance_max_node_age
return [false, "node data is stale"] if latest < cutoff
@@ -451,6 +1024,34 @@ module PotatoMesh
end
def upsert_instance_record(db, attributes, signature)
sanitized_domain = sanitize_instance_domain(attributes[:domain])
raise ArgumentError, "invalid domain" unless sanitized_domain
ip = ip_from_domain(sanitized_domain)
if ip && restricted_ip_address?(ip)
raise ArgumentError, "restricted domain"
end
normalized_domain = sanitized_domain
existing_id = with_busy_retry do
db.get_first_value(
"SELECT id FROM instances WHERE domain = ?",
normalized_domain,
)
end
if existing_id && existing_id != attributes[:id]
with_busy_retry do
db.execute("DELETE FROM instances WHERE id = ?", existing_id)
end
debug_log(
"Removed conflicting instance by domain",
context: "federation.instances",
domain: normalized_domain,
replaced_id: existing_id,
incoming_id: attributes[:id],
)
end
sql = <<~SQL
INSERT INTO instances (
id, domain, pubkey, name, version, channel, frequency,
@@ -472,7 +1073,7 @@ module PotatoMesh
params = [
attributes[:id],
attributes[:domain],
normalized_domain,
attributes[:pubkey],
attributes[:name],
attributes[:version],
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
+80 -4
View File
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -53,9 +55,10 @@ module PotatoMesh
# Proxy for {PotatoMesh::Sanitizer.sanitize_instance_domain}.
#
# @param value [Object] candidate domain string.
# @param downcase [Boolean] whether to force lowercase normalisation.
# @return [String, nil] canonical domain or nil.
def sanitize_instance_domain(value)
PotatoMesh::Sanitizer.sanitize_instance_domain(value)
def sanitize_instance_domain(value, downcase: true)
PotatoMesh::Sanitizer.sanitize_instance_domain(value, downcase: downcase)
end
# Proxy for {PotatoMesh::Sanitizer.instance_domain_host}.
@@ -119,9 +122,11 @@ module PotatoMesh
lat: PotatoMesh::Config.map_center_lat,
lon: PotatoMesh::Config.map_center_lon,
},
mapZoom: PotatoMesh::Config.map_zoom,
maxDistanceKm: PotatoMesh::Config.max_distance_km,
tileFilters: PotatoMesh::Config.tile_filters,
instanceDomain: app_constant(:INSTANCE_DOMAIN),
instancesFeatureEnabled: federation_enabled? && !private_mode?,
}
end
@@ -154,6 +159,67 @@ module PotatoMesh
PotatoMesh::Meta.formatted_distance_km(distance)
end
# Build the canonical node detail path for the supplied identifier.
#
# @param identifier [String, nil] node identifier in ``!xxxx`` notation.
# @return [String, nil] detail path including the canonical ``!`` prefix.
def node_detail_path(identifier)
  ident = string_or_nil(identifier)
  return nil if ident.nil? || ident.empty?
  trimmed = ident.strip
  return nil if trimmed.empty?
  # Strip a single leading "!" so the prefix is applied exactly once below.
  body = trimmed.delete_prefix("!")
  return nil if body.empty?
  "/nodes/!#{Rack::Utils.escape_path(body)}"
end
# Present a version string with a leading ``v`` when missing to keep
# UI labels consistent across tagged and fallback builds.
#
# @param version [String, nil] raw application version string.
# @return [String, nil] version string prefixed with ``v`` when needed.
def display_version(version)
  label = version.to_s.strip
  return nil if label.empty?
  label.start_with?("v") ? label : "v#{label}"
end
# Render a linked long name pointing to the node detail page.
#
# @param long_name [String] display name for the node.
# @param identifier [String, nil] canonical node identifier.
# @param css_class [String, nil] optional CSS class applied to the anchor.
# @return [String] escaped HTML snippet.
def node_long_name_link(long_name, identifier, css_class: "node-long-link")
  text = string_or_nil(long_name)
  return "" unless text
  escaped_text = Rack::Utils.escape_html(text)
  href = node_detail_path(identifier)
  # Without a resolvable detail path, fall back to plain escaped text.
  return escaped_text if href.nil?
  attrs = +""
  attrs << %( class="#{css_class}") if css_class
  attrs << %( href="#{href}")
  attrs << %( data-node-detail-link="true")
  node_id = canonical_node_identifier(identifier)
  attrs << %( data-node-id="#{Rack::Utils.escape_html(node_id)}") if node_id
  %(<a#{attrs}>#{escaped_text}</a>)
end
# Normalise a node identifier by ensuring the canonical ``!`` prefix.
#
# @param identifier [String, nil] raw identifier string.
# @return [String, nil] canonical identifier or ``nil`` when unavailable.
def canonical_node_identifier(identifier)
  trimmed = string_or_nil(identifier)&.strip
  return nil if trimmed.nil? || trimmed.empty?
  trimmed.start_with?("!") ? trimmed : "!#{trimmed}"
end
# Generate the meta description used in SEO tags.
#
# @return [String] combined descriptive sentence.
@@ -322,7 +388,7 @@ module PotatoMesh
#
# @return [Boolean] true when PRIVATE=1.
def private_mode?
ENV["PRIVATE"] == "1"
PotatoMesh::Config.private_mode_enabled?
end
# Identify whether the Rack environment corresponds to the test suite.
@@ -332,11 +398,21 @@ module PotatoMesh
ENV["RACK_ENV"] == "test"
end
# Determine whether the application is running in a production environment.
#
# @return [Boolean] true when APP_ENV or RACK_ENV resolves to "production".
def production_environment?
  %w[APP_ENV RACK_ENV].any? do |key|
    string_or_nil(ENV[key])&.downcase == "production"
  end
end
# Determine whether federation features should be active.
#
# @return [Boolean] true when federation configuration allows it.
def federation_enabled?
ENV.fetch("FEDERATION", "1") != "0" && !private_mode?
PotatoMesh::Config.federation_enabled?
end
# Determine whether federation announcements should run asynchronously.
+19 -10
View File
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -170,11 +172,13 @@ module PotatoMesh
# @return [Array(String, String)] pair of JSON output and base64 signature.
def build_well_known_document
last_update = latest_node_update_timestamp
domain_value = sanitize_instance_domain(app_constant(:INSTANCE_DOMAIN))
payload = {
publicKey: app_constant(:INSTANCE_PUBLIC_KEY_PEM),
name: sanitized_site_name,
version: app_constant(:APP_VERSION),
domain: app_constant(:INSTANCE_DOMAIN),
domain: domain_value,
lastUpdate: last_update,
}
@@ -193,24 +197,31 @@ module PotatoMesh
[json_output, signature]
end
# Regenerate the well-known document when the on-disk copy is stale.
# Regenerate the well-known document when it is stale or when the existing
# content no longer matches the current instance configuration.
#
# @return [void]
def refresh_well_known_document_if_stale
FileUtils.mkdir_p(well_known_directory)
path = well_known_file_path
now = Time.now
json_output, signature = build_well_known_document
expected_contents = json_output.end_with?("\n") ? json_output : "#{json_output}\n"
needs_update = true
if File.exist?(path)
current_contents = File.binread(path)
mtime = File.mtime(path)
if (now - mtime) < PotatoMesh::Config.well_known_refresh_interval
return
if current_contents == expected_contents &&
(now - mtime) < PotatoMesh::Config.well_known_refresh_interval
needs_update = false
end
end
json_output, signature = build_well_known_document
return unless needs_update
File.open(path, File::WRONLY | File::CREAT | File::TRUNC, 0o644) do |file|
file.write(json_output)
file.write("\n") unless json_output.end_with?("\n")
file.write(expected_contents)
end
debug_log(
@@ -236,9 +247,7 @@ module PotatoMesh
return nil unless File.exist?(PotatoMesh::Config.db_path)
db = open_database(readonly: true)
value = db.get_first_value(
"SELECT MAX(COALESCE(last_heard, first_heard, position_time)) FROM nodes",
)
value = db.get_first_value("SELECT MAX(last_heard) FROM nodes")
value&.to_i
rescue SQLite3::Exception
nil
@@ -0,0 +1,210 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
  module App
    # Helper methods for maintaining and presenting instance records.
    module Instances
      # Remove duplicate instance records grouped by their canonical domain name
      # while favouring the most recent entry.
      #
      # Rows are loaded as positional arrays: [rowid, domain, last_update_time].
      # The newest record per canonical domain (highest last_update_time, ties
      # broken by rowid) is kept; all others are deleted. When the keeper's
      # stored domain differs from the canonical form, it is rewritten.
      # SQLite errors are logged rather than raised so callers are best-effort.
      #
      # @return [void]
      def clean_duplicate_instances!
        db = open_database
        rows = with_busy_retry do
          db.execute(
            <<~SQL
              SELECT rowid, domain, last_update_time
              FROM instances
              WHERE domain IS NOT NULL AND TRIM(domain) != ''
            SQL
          )
        end
        # Group by the canonical lowercase domain; rows whose domain cannot be
        # sanitised fall into the nil bucket and are skipped below.
        grouped = rows.group_by do |row|
          sanitize_instance_domain(row[1])&.downcase
        rescue StandardError
          nil
        end
        deletions = []
        updates = {}
        grouped.each do |canonical_domain, entries|
          next if canonical_domain.nil?
          next if entries.size <= 1
          # Sort ascending by (timestamp, rowid) so the last entry is the keeper;
          # missing timestamps sort first via the -1 sentinel.
          sorted_entries = entries.sort_by do |entry|
            timestamp = coerce_integer(entry[2]) || -1
            [timestamp, entry[0].to_i]
          end
          keeper = sorted_entries.last
          next unless keeper
          deletions.concat(sorted_entries[0...-1].map { |entry| entry[0].to_i })
          current_domain = entries.find { |entry| entry[0] == keeper[0] }&.[](1)
          # Rewrite the keeper's domain when its stored form is not canonical.
          if canonical_domain && current_domain != canonical_domain
            updates[keeper[0].to_i] = canonical_domain
          end
          removed_count = sorted_entries.length - 1
          warn_log(
            "Removed duplicate instance records",
            context: "instances.cleanup",
            domain: canonical_domain,
            removed: removed_count,
          ) if removed_count.positive?
        end
        # Apply all deletions in a single statement with bound placeholders.
        unless deletions.empty?
          placeholders = Array.new(deletions.size, "?").join(",")
          with_busy_retry do
            db.execute("DELETE FROM instances WHERE rowid IN (#{placeholders})", deletions)
          end
        end
        updates.each do |rowid, canonical_domain|
          with_busy_retry do
            db.execute("UPDATE instances SET domain = ? WHERE rowid = ?", [canonical_domain, rowid])
          end
        end
      rescue SQLite3::Exception => e
        warn_log(
          "Failed to clean duplicate instances",
          context: "instances.cleanup",
          error_class: e.class.name,
          error_message: e.message,
        )
      ensure
        db&.close
      end

      # Normalise and validate an instance database row for API presentation.
      #
      # Required fields are id, domain, and pubkey; rows missing any of them
      # (or any row that raises while being normalised) are logged and dropped.
      # Nil-valued keys are removed from the returned payload.
      #
      # @param row [Hash] raw database row with string keys.
      # @return [Hash, nil] cleaned hash or +nil+ when the row is discarded.
      def normalize_instance_row(row)
        unless row.is_a?(Hash)
          warn_log(
            "Discarded malformed instance row",
            context: "instances.normalize",
            reason: "row not hash",
          )
          return nil
        end
        id = string_or_nil(row["id"])
        domain = sanitize_instance_domain(row["domain"])&.downcase
        pubkey = sanitize_public_key_pem(row["pubkey"])
        signature = string_or_nil(row["signature"])
        last_update_time = coerce_integer(row["last_update_time"])
        is_private_raw = row["is_private"]
        # Accept boolean-ish values first, then fall back to numeric forms
        # (SQLite stores the flag as 0/1); default to public when unknown.
        private_flag = coerce_boolean(is_private_raw)
        if private_flag.nil?
          numeric_private = coerce_integer(is_private_raw)
          private_flag = !numeric_private.to_i.zero? if numeric_private
        end
        private_flag = false if private_flag.nil?
        if id.nil? || domain.nil? || pubkey.nil?
          warn_log(
            "Discarded malformed instance row",
            context: "instances.normalize",
            instance_id: row["id"],
            domain: row["domain"],
            reason: "missing required fields",
          )
          return nil
        end
        payload = {
          "id" => id,
          "domain" => domain,
          "pubkey" => pubkey,
          "name" => string_or_nil(row["name"]),
          "version" => string_or_nil(row["version"]),
          "channel" => string_or_nil(row["channel"]),
          "frequency" => string_or_nil(row["frequency"]),
          "latitude" => coerce_float(row["latitude"]),
          "longitude" => coerce_float(row["longitude"]),
          "lastUpdateTime" => last_update_time,
          "isPrivate" => private_flag,
          "signature" => signature,
        }
        payload.reject { |_, value| value.nil? }
      rescue StandardError => e
        warn_log(
          "Failed to normalise instance row",
          context: "instances.normalize",
          instance_id: row.respond_to?(:[]) ? row["id"] : nil,
          domain: row.respond_to?(:[]) ? row["domain"] : nil,
          error_class: e.class.name,
          error_message: e.message,
        )
        nil
      end

      # Fetch all instance rows ready to be served by the API while handling
      # malformed rows gracefully. The dataset is restricted to records updated
      # within the rolling window defined by PotatoMesh::Config.week_seconds.
      #
      # Duplicates are cleaned first, and the freshness window is enforced both
      # in SQL and again per normalised row. SQLite failures yield an empty
      # list instead of raising.
      #
      # @return [Array<Hash>] list of cleaned instance payloads.
      def load_instances_for_api
        clean_duplicate_instances!
        db = open_database(readonly: true)
        db.results_as_hash = true
        now = Time.now.to_i
        min_last_update_time = now - PotatoMesh::Config.week_seconds
        sql = <<~SQL
          SELECT id, domain, pubkey, name, version, channel, frequency,
                 latitude, longitude, last_update_time, is_private, signature
          FROM instances
          WHERE domain IS NOT NULL AND TRIM(domain) != ''
            AND pubkey IS NOT NULL AND TRIM(pubkey) != ''
            AND last_update_time IS NOT NULL AND last_update_time >= ?
          ORDER BY LOWER(domain)
        SQL
        rows = with_busy_retry do
          db.execute(sql, min_last_update_time)
        end
        rows.each_with_object([]) do |row, memo|
          normalized = normalize_instance_row(row)
          next unless normalized
          # Re-check the freshness window on the normalised value in case the
          # raw column and the coerced integer disagree.
          last_update_time = normalized["lastUpdateTime"]
          next unless last_update_time.is_a?(Integer) && last_update_time >= min_last_update_time
          memo << normalized
        end
      rescue SQLite3::Exception => e
        warn_log(
          "Failed to load instance records",
          context: "instances.load",
          error_class: e.class.name,
          error_message: e.message,
        )
        []
      ensure
        db&.close
      end
    end
  end
end
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -62,6 +64,22 @@ module PotatoMesh
candidate = "#{candidate_host}:#{port}" if port_required?(uri, trimmed)
end
ipv6_with_port = candidate.match(/\A(?<address>.+):(?<port>\d+)\z/)
if ipv6_with_port
address = ipv6_with_port[:address]
port = ipv6_with_port[:port]
literal = ipv6_literal?(address)
if literal && PotatoMesh::Sanitizer.valid_port?(port)
candidate = "[#{literal}]:#{port}"
else
ipv6_literal = ipv6_literal?(candidate)
candidate = "[#{ipv6_literal}]" if ipv6_literal
end
else
ipv6_literal = ipv6_literal?(candidate)
candidate = "[#{ipv6_literal}]" if ipv6_literal
end
sanitized = sanitize_instance_domain(candidate)
unless sanitized
raise "INSTANCE_DOMAIN must be a bare hostname (optionally with a port) without schemes or paths: #{raw.inspect}"
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
+152 -81
View File
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -17,6 +19,35 @@ module PotatoMesh
module Queries
MAX_QUERY_LIMIT = 1000
# Remove nil or empty values from an API response hash to reduce payload size
# while preserving legitimate zero-valued measurements.
# Integer keys emitted by SQLite are ignored because the JSON representation
# only exposes symbolic keys. Strings containing only whitespace are treated
# as empty to mirror sanitisation elsewhere in the application, and any other
# objects responding to `empty?` are dropped when they contain no data.
#
# @param row [Hash] raw database row to compact.
# @return [Hash] cleaned hash without blank values.
def compact_api_row(row)
  return {} unless row.is_a?(Hash)
  cleaned = {}
  row.each do |key, value|
    next if key.is_a?(Integer) || value.nil?
    if value.is_a?(String)
      # Keep the original (untrimmed) string; only blank strings are dropped.
      cleaned[key] = value unless value.strip.empty?
    elsif !(value.respond_to?(:empty?) && value.empty?)
      # Keeps 0 and false; drops empty arrays/hashes and similar containers.
      cleaned[key] = value
    end
  end
  cleaned
end
# Normalise a caller-provided limit to a sane, positive integer.
#
# @param limit [Object] value coerced to an integer.
@@ -79,7 +110,7 @@ module PotatoMesh
cleaned_strings = string_values.compact.map(&:to_s).map(&:strip).reject(&:empty?).uniq
cleaned_numbers = numeric_values.compact.map do |value|
begin
Integer(value, 10)
value.is_a?(String) ? Integer(value, 10) : Integer(value)
rescue ArgumentError, TypeError
nil
end
@@ -179,22 +210,29 @@ module PotatoMesh
pb = r["precision_bits"]
r["precision_bits"] = pb.to_i if pb
end
rows
rows.map { |row| compact_api_row(row) }
ensure
db&.close
end
def query_messages(limit, node_ref: nil)
def query_messages(limit, node_ref: nil, include_encrypted: false)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
params = []
where_clauses = ["COALESCE(TRIM(m.encrypted), '') = ''"]
where_clauses = [
"(COALESCE(TRIM(m.text), '') != '' OR COALESCE(TRIM(m.encrypted), '') != '' OR m.reply_id IS NOT NULL OR COALESCE(TRIM(m.emoji), '') != '')",
]
include_encrypted = !!include_encrypted
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
where_clauses << "m.rx_time >= ?"
params << min_rx_time
unless include_encrypted
where_clauses << "COALESCE(TRIM(m.encrypted), '') = ''"
end
if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["m.from_id", "m.to_id"])
return [] unless clause
@@ -205,34 +243,9 @@ module PotatoMesh
sql = <<~SQL
SELECT m.id, m.rx_time, m.rx_iso, m.from_id, m.to_id, m.channel,
m.portnum, m.text, m.encrypted, m.rssi, m.hop_limit,
m.lora_freq AS msg_lora_freq, m.modem_preset AS msg_modem_preset,
m.channel_name AS msg_channel_name, m.snr AS msg_snr,
n.node_id AS node_node_id, n.num AS node_num,
n.short_name AS node_short_name, n.long_name AS node_long_name,
n.macaddr AS node_macaddr, n.hw_model AS node_hw_model,
n.role AS node_role, n.public_key AS node_public_key,
n.is_unmessagable AS node_is_unmessagable,
n.is_favorite AS node_is_favorite,
n.hops_away AS node_hops_away, n.snr AS node_snr,
n.last_heard AS node_last_heard, n.first_heard AS node_first_heard,
n.battery_level AS node_battery_level, n.voltage AS node_voltage,
n.channel_utilization AS node_channel_utilization,
n.air_util_tx AS node_air_util_tx,
n.uptime_seconds AS node_uptime_seconds,
n.position_time AS node_position_time,
n.location_source AS node_location_source,
n.precision_bits AS node_precision_bits,
n.latitude AS node_latitude, n.longitude AS node_longitude,
n.altitude AS node_altitude,
n.lora_freq AS node_lora_freq, n.modem_preset AS node_modem_preset
m.lora_freq, m.modem_preset, m.channel_name, m.snr,
m.reply_id, m.emoji
FROM messages m
LEFT JOIN nodes n ON (
m.from_id IS NOT NULL AND TRIM(m.from_id) <> '' AND (
m.from_id = n.node_id OR (
m.from_id GLOB '[0-9]*' AND CAST(m.from_id AS INTEGER) = n.num
)
)
)
SQL
sql += " WHERE #{where_clauses.join(" AND ")}\n"
sql += <<~SQL
@@ -243,56 +256,21 @@ module PotatoMesh
rows = db.execute(sql, params)
rows.each do |r|
r.delete_if { |key, _| key.is_a?(Integer) }
r["lora_freq"] = r.delete("msg_lora_freq")
r["modem_preset"] = r.delete("msg_modem_preset")
r["channel_name"] = r.delete("msg_channel_name")
snr_value = r.delete("msg_snr")
if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.empty?)
r["reply_id"] = coerce_integer(r["reply_id"]) if r.key?("reply_id")
r["emoji"] = string_or_nil(r["emoji"]) if r.key?("emoji")
if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.strip.empty?)
raw = db.execute("SELECT * FROM messages WHERE id = ?", [r["id"]]).first
debug_log(
"Message join produced empty sender",
"Message query produced empty sender",
context: "queries.messages",
stage: "before_join",
stage: "raw_row",
row: raw,
)
debug_log(
"Message join produced empty sender",
context: "queries.messages",
stage: "after_join",
row: r,
)
end
node = {}
r.keys.grep(/^node_/).each do |k|
attribute = k.delete_prefix("node_")
node[attribute] = r.delete(k)
end
r["snr"] = snr_value
references = [r["from_id"]].compact
if references.any? && (node["node_id"].nil? || node["node_id"].to_s.empty?)
lookup_keys = []
canonical = normalize_node_id(db, r["from_id"])
lookup_keys << canonical if canonical
raw_ref = r["from_id"].to_s.strip
lookup_keys << raw_ref unless raw_ref.empty?
lookup_keys << raw_ref.to_i if raw_ref.match?(/\A[0-9]+\z/)
fallback = nil
lookup_keys.uniq.each do |ref|
sql = ref.is_a?(Integer) ? "SELECT * FROM nodes WHERE num = ?" : "SELECT * FROM nodes WHERE node_id = ?"
fallback = db.get_first_row(sql, [ref])
break if fallback
end
if fallback
fallback.each do |key, value|
next unless key.is_a?(String)
node[key] = value if node[key].nil?
end
end
end
node["role"] = "CLIENT" if node.key?("role") && (node["role"].nil? || node["role"].to_s.empty?)
r["node"] = node
canonical_from_id = string_or_nil(node["node_id"]) || string_or_nil(normalize_node_id(db, r["from_id"]))
canonical_from_id = string_or_nil(normalize_node_id(db, r["from_id"]))
node_id = canonical_from_id || string_or_nil(r["from_id"])
if canonical_from_id
raw_from_id = string_or_nil(r["from_id"])
if raw_from_id.nil? || raw_from_id.match?(/\A[0-9]+\z/)
@@ -302,16 +280,18 @@ module PotatoMesh
end
end
if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.empty?)
r["node_id"] = node_id if node_id
if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.strip.empty?)
debug_log(
"Message row missing sender after processing",
"Message query produced empty sender",
context: "queries.messages",
stage: "after_processing",
stage: "after_normalization",
row: r,
)
end
end
rows
rows.map { |row| compact_api_row(row) }
ensure
db&.close
end
@@ -362,7 +342,7 @@ module PotatoMesh
r["pdop"] = coerce_float(r["pdop"])
r["snr"] = coerce_float(r["snr"])
end
rows
rows.map { |row| compact_api_row(row) }
ensure
db&.close
end
@@ -402,7 +382,7 @@ module PotatoMesh
r["rx_iso"] = Time.at(rx_time).utc.iso8601 if rx_time
r["snr"] = coerce_float(r["snr"])
end
rows
rows.map { |row| compact_api_row(row) }
ensure
db&.close
end
@@ -461,8 +441,99 @@ module PotatoMesh
r["temperature"] = coerce_float(r["temperature"])
r["relative_humidity"] = coerce_float(r["relative_humidity"])
r["barometric_pressure"] = coerce_float(r["barometric_pressure"])
r["gas_resistance"] = coerce_float(r["gas_resistance"])
r["current"] = coerce_float(r["current"])
r["iaq"] = coerce_integer(r["iaq"])
r["distance"] = coerce_float(r["distance"])
r["lux"] = coerce_float(r["lux"])
r["white_lux"] = coerce_float(r["white_lux"])
r["ir_lux"] = coerce_float(r["ir_lux"])
r["uv_lux"] = coerce_float(r["uv_lux"])
r["wind_direction"] = coerce_integer(r["wind_direction"])
r["wind_speed"] = coerce_float(r["wind_speed"])
r["weight"] = coerce_float(r["weight"])
r["wind_gust"] = coerce_float(r["wind_gust"])
r["wind_lull"] = coerce_float(r["wind_lull"])
r["radiation"] = coerce_float(r["radiation"])
r["rainfall_1h"] = coerce_float(r["rainfall_1h"])
r["rainfall_24h"] = coerce_float(r["rainfall_24h"])
r["soil_moisture"] = coerce_integer(r["soil_moisture"])
r["soil_temperature"] = coerce_float(r["soil_temperature"])
end
rows
rows.map { |row| compact_api_row(row) }
ensure
db&.close
end
# Fetch recent traceroute records, newest first, together with the ordered
# list of intermediate hop node numbers for each trace.
#
# @param limit [Object] maximum row count, normalised via coerce_query_limit.
# @param node_ref [Object, nil] optional node reference; when present, only
#   traces whose source, destination, or any recorded hop matches one of the
#   reference's numeric tokens are returned. A reference that yields no
#   numeric tokens produces an empty result.
# @return [Array<Hash>] compacted trace rows; each row may carry a "hops"
#   array of hop node ids ordered by hop_index.
def query_traces(limit, node_ref: nil)
  limit = coerce_query_limit(limit)
  db = open_database(readonly: true)
  db.results_as_hash = true
  params = []
  where_clauses = []
  if node_ref
    tokens = node_reference_tokens(node_ref)
    numeric_values = tokens[:numeric_values]
    if numeric_values.empty?
      return []
    end
    placeholders = Array.new(numeric_values.length, "?").join(", ")
    candidate_clauses = []
    candidate_clauses << "src IN (#{placeholders})"
    candidate_clauses << "dest IN (#{placeholders})"
    candidate_clauses << "id IN (SELECT trace_id FROM trace_hops WHERE node_id IN (#{placeholders}))"
    where_clauses << "(#{candidate_clauses.join(" OR ")})"
    # The three IN clauses above each bind the same token list, so the
    # parameter array must contain it three times.
    3.times { params.concat(numeric_values) }
  end
  sql = <<~SQL
    SELECT id, request_id, src, dest, rx_time, rx_iso, rssi, snr, elapsed_ms
    FROM traces
  SQL
  sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
  sql += <<~SQL
    ORDER BY rx_time DESC
    LIMIT ?
  SQL
  params << limit
  rows = db.execute(sql, params)
  # Load all hops for the selected traces in one query and bucket them by
  # trace id, preserving hop_index order from the ORDER BY.
  trace_ids = rows.map { |row| coerce_integer(row["id"]) }.compact
  hops_by_trace = Hash.new { |hash, key| hash[key] = [] }
  unless trace_ids.empty?
    placeholders = Array.new(trace_ids.length, "?").join(", ")
    hop_rows =
      db.execute(
        "SELECT trace_id, hop_index, node_id FROM trace_hops WHERE trace_id IN (#{placeholders}) ORDER BY trace_id, hop_index",
        trace_ids,
      )
    hop_rows.each do |hop|
      trace_id = coerce_integer(hop["trace_id"])
      node_id = coerce_integer(hop["node_id"])
      next unless trace_id && node_id
      hops_by_trace[trace_id] << node_id
    end
  end
  rows.each do |r|
    rx_time = coerce_integer(r["rx_time"])
    r["rx_time"] = rx_time if rx_time
    # Backfill rx_iso from rx_time only when the stored value is blank.
    r["rx_iso"] = Time.at(rx_time).utc.iso8601 if rx_time && string_or_nil(r["rx_iso"]).nil?
    r["request_id"] = coerce_integer(r["request_id"])
    r["src"] = coerce_integer(r["src"])
    r["dest"] = coerce_integer(r["dest"])
    r["rssi"] = coerce_integer(r["rssi"])
    r["snr"] = coerce_float(r["snr"])
    r["elapsed_ms"] = coerce_integer(r["elapsed_ms"])
    trace_id = coerce_integer(r["id"])
    if trace_id && hops_by_trace.key?(trace_id)
      r["hops"] = hops_by_trace[trace_id]
    end
  end
  # Strip blank/nil values before handing rows to the API layer.
  rows.map { |row| compact_api_row(row) }
ensure
  db&.close
end
+33 -36
View File
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -16,7 +18,16 @@ module PotatoMesh
module App
module Routes
module Api
# Register read-only API endpoints that expose cached mesh data and
# instance metadata. Invoked by Sinatra during extension registration.
#
# @param app [Sinatra::Base] application instance receiving the routes.
# @return [void]
def self.registered(app)
app.before "/api/messages*" do
halt 404 if private_mode?
end
app.get "/version" do
content_type :json
last_update = latest_node_update_timestamp
@@ -67,19 +78,19 @@ module PotatoMesh
end
app.get "/api/messages" do
halt 404 if private_mode?
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_messages(limit).to_json
include_encrypted = coerce_boolean(params["encrypted"]) || false
query_messages(limit, include_encrypted: include_encrypted).to_json
end
app.get "/api/messages/:id" do
halt 404 if private_mode?
content_type :json
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_messages(limit, node_ref: node_ref).to_json
include_encrypted = coerce_boolean(params["encrypted"]) || false
query_messages(limit, node_ref: node_ref, include_encrypted: include_encrypted).to_json
end
app.get "/api/positions" do
@@ -124,42 +135,28 @@ module PotatoMesh
query_telemetry(limit, node_ref: node_ref).to_json
end
app.get "/api/traces" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_traces(limit).to_json
end
app.get "/api/traces/:id" do
content_type :json
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_traces(limit, node_ref: node_ref).to_json
end
app.get "/api/instances" do
# Prevent the federation catalog from being exposed when federation is disabled.
halt 404 unless federation_enabled?
content_type :json
ensure_self_instance_record!
db = open_database(readonly: true)
db.results_as_hash = true
rows = with_busy_retry do
db.execute(
<<~SQL,
SELECT id, domain, pubkey, name, version, channel, frequency,
latitude, longitude, last_update_time, is_private, signature
FROM instances
WHERE domain IS NOT NULL AND TRIM(domain) != ''
AND pubkey IS NOT NULL AND TRIM(pubkey) != ''
ORDER BY LOWER(domain)
SQL
)
end
payload = rows.map do |row|
{
"id" => row["id"],
"domain" => row["domain"],
"pubkey" => row["pubkey"],
"name" => row["name"],
"version" => row["version"],
"channel" => row["channel"],
"frequency" => row["frequency"],
"latitude" => row["latitude"],
"longitude" => row["longitude"],
"lastUpdateTime" => row["last_update_time"]&.to_i,
"isPrivate" => row["is_private"].to_i == 1,
"signature" => row["signature"],
}.reject { |_, value| value.nil? }
end
payload = load_instances_for_api
JSON.generate(payload)
ensure
db&.close
end
end
end
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -16,6 +18,11 @@ module PotatoMesh
module App
module Routes
module Ingest
# Register ingest endpoints used by the Python collector to persist
# nodes, messages, and federation announcements.
#
# @param app [Sinatra::Base] application instance receiving the routes.
# @return [void]
def self.registered(app)
app.post "/api/nodes" do
require_token!
@@ -40,7 +47,6 @@ module PotatoMesh
end
app.post "/api/messages" do
halt 404 if private_mode?
require_token!
content_type :json
begin
@@ -84,7 +90,18 @@ module PotatoMesh
end
id = string_or_nil(payload["id"]) || string_or_nil(payload["instanceId"])
domain = sanitize_instance_domain(payload["domain"])
raw_domain_input = payload["domain"]
raw_domain = sanitize_instance_domain(raw_domain_input, downcase: false)
normalized_domain = raw_domain && sanitize_instance_domain(raw_domain)
unless raw_domain && normalized_domain
warn_log(
"Instance registration rejected",
context: "ingest.register",
domain: string_or_nil(raw_domain_input),
reason: "invalid domain",
)
halt 400, { error: "invalid domain" }.to_json
end
pubkey = sanitize_public_key_pem(payload["pubkey"])
name = string_or_nil(payload["name"])
version = string_or_nil(payload["version"])
@@ -99,7 +116,7 @@ module PotatoMesh
attributes = {
id: id,
domain: domain,
domain: normalized_domain,
pubkey: pubkey,
name: name,
version: version,
@@ -120,11 +137,21 @@ module PotatoMesh
halt 400, { error: "missing required fields" }.to_json
end
unless verify_instance_signature(attributes, signature, attributes[:pubkey])
signature_valid = verify_instance_signature(attributes, signature, attributes[:pubkey])
# Some remote peers sign payloads using a canonicalised lowercase
# domain while still sending a mixed-case domain. Retry signature
# verification with the original casing when the first attempt
# fails to maximise interoperability.
if !signature_valid && raw_domain && normalized_domain && raw_domain.casecmp?(normalized_domain) && raw_domain != normalized_domain
alternate_attributes = attributes.merge(domain: raw_domain)
signature_valid = verify_instance_signature(alternate_attributes, signature, attributes[:pubkey])
end
unless signature_valid
warn_log(
"Instance registration rejected",
context: "ingest.register",
domain: attributes[:domain],
domain: raw_domain || attributes[:domain],
reason: "invalid signature",
)
halt 400, { error: "invalid signature" }.to_json
@@ -152,6 +179,22 @@ module PotatoMesh
halt 400, { error: "restricted domain" }.to_json
end
begin
resolve_remote_ip_addresses(URI.parse("https://#{attributes[:domain]}"))
rescue ArgumentError => e
warn_log(
"Instance registration rejected",
context: "ingest.register",
domain: attributes[:domain],
reason: "restricted domain",
error_message: e.message,
)
halt 400, { error: "restricted domain" }.to_json
rescue SocketError
# DNS lookups that fail to resolve are handled later when the
# registration flow attempts to contact the remote instance.
end
well_known, well_known_meta = fetch_instance_json(attributes[:domain], "/.well-known/potato-mesh")
unless well_known
details_list = Array(well_known_meta).map(&:to_s)
@@ -204,11 +247,17 @@ module PotatoMesh
db = open_database
upsert_instance_record(db, attributes, signature)
enqueued = enqueue_federation_crawl(
attributes[:domain],
per_response_limit: PotatoMesh::Config.federation_max_instances_per_response,
overall_limit: PotatoMesh::Config.federation_max_domains_per_crawl,
)
debug_log(
"Registered remote instance",
context: "ingest.register",
domain: attributes[:domain],
instance_id: attributes[:id],
crawl_enqueued: enqueued,
)
status 201
{ status: "registered" }.to_json
@@ -272,6 +321,25 @@ module PotatoMesh
ensure
db&.close
end
app.post "/api/traces" do
require_token!
content_type :json
begin
data = JSON.parse(read_json_body)
rescue JSON::ParserError
halt 400, { error: "invalid JSON" }.to_json
end
trace_packets = data.is_a?(Array) ? data : [data]
halt 400, { error: "too many traces" }.to_json if trace_packets.size > 1000
db = open_database
trace_packets.each do |packet|
insert_trace(db, packet)
end
{ status: "ok" }.to_json
ensure
db&.close
end
end
end
end
+171 -25
View File
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -16,7 +18,137 @@ module PotatoMesh
module App
module Routes
module Root
module Helpers
# Determine the initial theme from the request cookie and persist
# sanitised values back to the client to avoid invalid states.
#
# @return [String] normalised theme value ('dark' or 'light').
def resolve_initial_theme
raw_theme = request.cookies["theme"]
theme = %w[dark light].include?(raw_theme) ? raw_theme : "dark"
if raw_theme != theme
response.set_cookie(
"theme",
value: theme,
path: "/",
max_age: 60 * 60 * 24 * 7,
same_site: :lax,
)
end
theme
end
# Render a dashboard-oriented ERB template within the shared layout.
#
# @param template [Symbol] identifier for the ERB template.
# @param view_mode [Symbol, String] logical view identifier for CSS hooks.
# @param extra_locals [Hash] additional locals merged into the rendering context.
# @return [String] rendered ERB output.
def render_root_view(template, view_mode: :dashboard, extra_locals: {})
meta = meta_configuration
config = frontend_app_config
theme = resolve_initial_theme
view_mode_sym = view_mode.respond_to?(:to_sym) ? view_mode.to_sym : view_mode
base_locals = {
site_name: meta[:name],
meta_title: meta[:title],
meta_name: meta[:name],
meta_description: meta[:description],
channel: sanitized_channel,
frequency: sanitized_frequency,
map_center_lat: PotatoMesh::Config.map_center_lat,
map_center_lon: PotatoMesh::Config.map_center_lon,
max_distance_km: PotatoMesh::Config.max_distance_km,
contact_link: sanitized_contact_link,
contact_link_url: sanitized_contact_link_url,
version: display_version(app_constant(:APP_VERSION)),
private_mode: private_mode?,
federation_enabled: federation_enabled?,
refresh_interval_seconds: PotatoMesh::Config.refresh_interval_seconds,
app_config_json: JSON.generate(config),
initial_theme: theme,
current_view_mode: view_mode_sym,
map_zoom: PotatoMesh::Config.map_zoom,
}
sanitized_locals = extra_locals.is_a?(Hash) ? extra_locals : {}
merged_locals = base_locals.merge(sanitized_locals)
erb template, layout: :"layouts/app", locals: merged_locals
end
# Remove keys with +nil+ values from the provided hash, returning a
# shallow copy. Hash#compact is only available in newer Ruby
# versions; this helper keeps behaviour consistent across supported
# releases.
#
# @param value [Hash, nil] collection subject to filtering.
# @return [Hash] hash excluding +nil+ values.
def reject_nil_values(value)
return {} unless value.is_a?(Hash)
value.each_with_object({}) do |(key, entry), memo|
memo[key] = entry unless entry.nil?
end
end
# Assemble the payload embedded into the node detail view. The
# payload provides a canonical identifier alongside any cached node,
# telemetry, or position rows that may already exist in the
# database. When no persisted data is available the method returns
# +nil+ so the caller can surface a 404 error.
#
# @param node_ref [Object] raw node identifier from the request.
# @return [Hash, nil] structured node reference payload or nil when
# the node cannot be located.
def build_node_detail_reference(node_ref)
tokens = canonical_node_parts(node_ref)
search_ref = tokens ? tokens.first : node_ref
node_row = query_nodes(1, node_ref: search_ref).first
telemetry_row = query_telemetry(1, node_ref: search_ref).first
position_row = query_positions(1, node_ref: search_ref).first
candidates = [node_row, telemetry_row, position_row].compact
return nil if candidates.empty?
canonical_id = string_or_nil(node_row&.fetch("node_id", nil))
canonical_id ||= string_or_nil(telemetry_row&.fetch("node_id", nil))
canonical_id ||= string_or_nil(position_row&.fetch("node_id", nil))
canonical_id ||= string_or_nil(tokens&.fetch(0, nil))
if canonical_id
canonical_id = canonical_id.start_with?("!") ? canonical_id : "!#{canonical_id}"
end
return nil unless canonical_id
numeric_id = coerce_integer(node_row&.fetch("num", nil))
numeric_id ||= coerce_integer(telemetry_row&.fetch("node_num", nil))
numeric_id ||= coerce_integer(position_row&.fetch("node_num", nil))
numeric_id ||= tokens&.fetch(1, nil)
short_id = string_or_nil(node_row&.fetch("short_name", nil))
short_id ||= string_or_nil(telemetry_row&.fetch("short_name", nil))
short_id ||= string_or_nil(position_row&.fetch("short_name", nil))
short_id ||= tokens&.fetch(2, nil)
fallback_row = node_row || telemetry_row || position_row
fallback = fallback_row ? compact_api_row(fallback_row) : nil
telemetry = telemetry_row ? compact_api_row(telemetry_row) : nil
position = position_row ? compact_api_row(position_row) : nil
{
"nodeId" => canonical_id,
"nodeNum" => numeric_id,
"shortId" => short_id,
"fallback" => fallback,
"telemetry" => telemetry,
"position" => position,
}
end
end
def self.registered(app)
app.helpers Helpers
app.get "/favicon.ico" do
cache_control :public, max_age: PotatoMesh::Config.week_seconds
ico_path = File.join(settings.public_folder, "favicon.ico")
@@ -39,33 +171,47 @@ module PotatoMesh
end
app.get "/" do
meta = meta_configuration
config = frontend_app_config
render_root_view(:index, view_mode: :dashboard)
end
raw_theme = request.cookies["theme"]
theme = %w[dark light].include?(raw_theme) ? raw_theme : "dark"
if raw_theme != theme
response.set_cookie("theme", value: theme, path: "/", max_age: 60 * 60 * 24 * 7, same_site: :lax)
end
app.get %r{/map/?} do
render_root_view(:map, view_mode: :map)
end
erb :index, locals: {
site_name: meta[:name],
meta_title: meta[:title],
meta_name: meta[:name],
meta_description: meta[:description],
channel: sanitized_channel,
frequency: sanitized_frequency,
map_center_lat: PotatoMesh::Config.map_center_lat,
map_center_lon: PotatoMesh::Config.map_center_lon,
max_distance_km: PotatoMesh::Config.max_distance_km,
contact_link: sanitized_contact_link,
contact_link_url: sanitized_contact_link_url,
version: app_constant(:APP_VERSION),
private_mode: private_mode?,
refresh_interval_seconds: PotatoMesh::Config.refresh_interval_seconds,
app_config_json: JSON.generate(config),
initial_theme: theme,
}
app.get %r{/chat/?} do
render_root_view(:chat, view_mode: :chat)
end
app.get %r{/charts/?} do
render_root_view(:charts, view_mode: :charts)
end
app.get "/nodes/:id" do
node_ref = params.fetch("id", nil)
reference_payload = build_node_detail_reference(node_ref)
halt 404, "Not Found" unless reference_payload
fallback = reference_payload["fallback"] || {}
short_name = string_or_nil(fallback["short_name"]) || reference_payload["shortId"]
long_name = string_or_nil(fallback["long_name"])
role = string_or_nil(fallback["role"])
canonical_id = string_or_nil(reference_payload["nodeId"])
render_root_view(
:node_detail,
view_mode: :node_detail,
extra_locals: {
node_reference_json: JSON.generate(reject_nil_values(reference_payload)),
node_page_short_name: short_name,
node_page_long_name: long_name,
node_page_role: role,
node_page_identifier: canonical_id,
},
)
end
app.get %r{/nodes/?} do
render_root_view(:nodes, view_mode: :nodes)
end
app.get "/metrics" do
@@ -0,0 +1,214 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
  module App
    # WorkerPool executes submitted blocks using a bounded set of Ruby threads.
    #
    # The pool enforces an upper bound on queued tasks, surfaces errors raised
    # by jobs, and supports graceful shutdown during application teardown.
    class WorkerPool
      # Raised when the worker pool queue has reached its configured capacity.
      class QueueFullError < StandardError; end

      # Raised when a task fails to complete before the requested timeout.
      class TaskTimeoutError < StandardError; end

      # Raised when scheduling occurs after the pool has been shut down.
      class ShutdownError < StandardError; end

      # Internal structure responsible for coordinating task completion.
      #
      # A Task behaves like a minimal promise: exactly one of #fulfill or
      # #reject takes effect (later calls are ignored) and #wait blocks
      # callers until the outcome is available.
      class Task
        # @return [Object, nil] value produced by the task block when available.
        attr_reader :value

        # @return [StandardError, nil] error raised by the task block when set.
        attr_reader :error

        def initialize
          @mutex = Mutex.new
          @condition = ConditionVariable.new
          @complete = false
          @value = nil
          @error = nil
        end

        # Mark the task as completed successfully.
        #
        # @param result [Object] value produced by the job.
        # @return [void]
        def fulfill(result)
          @mutex.synchronize do
            # First completion wins; ignore duplicate settle attempts.
            return if @complete
            @complete = true
            @value = result
            # Broadcast (not signal) so every thread blocked in #wait wakes.
            @condition.broadcast
          end
        end

        # Mark the task as failed with the provided error.
        #
        # @param failure [StandardError] exception raised while executing the job.
        # @return [void]
        def reject(failure)
          @mutex.synchronize do
            return if @complete
            @complete = true
            @error = failure
            @condition.broadcast
          end
        end

        # Wait for the task to complete, raising any stored failure.
        #
        # @param timeout [Numeric, nil] optional timeout in seconds.
        # @return [Object] the value produced by the job when successful.
        # @raise [TaskTimeoutError] when the timeout elapses prior to completion.
        # @raise [StandardError] when the job raised an exception.
        def wait(timeout: nil)
          # Track an absolute deadline so spurious wakeups shrink the
          # remaining wait instead of restarting it.
          deadline = timeout && monotonic_now + timeout
          @mutex.synchronize do
            until @complete
              if deadline
                remaining = deadline - monotonic_now
                raise TaskTimeoutError, "task deadline exceeded" if remaining <= 0
                @condition.wait(@mutex, remaining)
              else
                @condition.wait(@mutex)
              end
            end
            raise @error if @error
            @value
          end
        end

        # Check whether the task has finished executing.
        #
        # @return [Boolean] true when the task is complete.
        def complete?
          @mutex.synchronize { @complete }
        end

        private

        # Monotonic clock avoids deadline drift when the wall clock changes.
        def monotonic_now
          Process.clock_gettime(Process::CLOCK_MONOTONIC)
        end
      end

      # Unique sentinel pushed onto the queue to tell one worker to exit.
      STOP_SIGNAL = Object.new

      # @return [Array<Thread>] threads created to service the pool.
      attr_reader :threads

      # Initialize a worker pool using the supplied configuration.
      #
      # @param size [Integer] number of worker threads to spawn.
      # @param max_queue [Integer, nil] optional upper bound on queued jobs.
      # @param name [String] prefix assigned to worker thread names.
      def initialize(size:, max_queue: nil, name: "worker-pool")
        raise ArgumentError, "size must be positive" unless size.is_a?(Integer) && size.positive?
        @name = name
        # SizedQueue enforces the queue bound; plain Queue is unbounded.
        @queue = max_queue ? SizedQueue.new(max_queue) : Queue.new
        @threads = []
        @stopped = false
        @mutex = Mutex.new
        spawn_workers(size)
      end

      # Determine whether the worker pool is still accepting work.
      #
      # @return [Boolean] true when the pool remains active.
      def alive?
        @mutex.synchronize { !@stopped }
      end

      # Submit a block of work for asynchronous execution.
      #
      # @yieldreturn [Object] result produced by the job block.
      # @return [Task] task tracking the asynchronous execution.
      # @raise [QueueFullError] when the queue cannot accept additional work.
      # @raise [ShutdownError] when the pool is no longer active.
      def schedule(&block)
        raise ArgumentError, "block required" unless block
        task = Task.new
        @mutex.synchronize do
          raise ShutdownError, "worker pool has been shut down" if @stopped
          begin
            # Non-blocking push (second arg true): a full SizedQueue raises
            # ThreadError instead of blocking while we hold @mutex.
            @queue.push([task, block], true)
          rescue ThreadError => e
            raise QueueFullError, e.message
          end
        end
        task
      end

      # Stop accepting work and wait for the worker threads to finish.
      #
      # Pending jobs already queued are drained before the stop sentinels are
      # consumed, so shutdown is graceful rather than abortive.
      #
      # @param timeout [Numeric, nil] seconds to wait for each worker to exit.
      # @return [void]
      def shutdown(timeout: nil)
        threads = nil
        @mutex.synchronize do
          # Idempotent: subsequent calls return without touching the queue.
          return if @stopped
          @stopped = true
          threads = @threads.dup
        end
        # One sentinel per worker; each worker exits after consuming one.
        threads.each { @queue << STOP_SIGNAL }
        threads.each { |thread| thread.join(timeout) }
      end

      private

      def spawn_workers(size)
        size.times do |index|
          worker = Thread.new do
            Thread.current.name = "#{@name}-#{index}" if Thread.current.respond_to?(:name=)
            # Errors are delivered through Task#reject; suppress duplicate
            # interpreter-level exception reports where supported.
            Thread.current.report_on_exception = false if Thread.current.respond_to?(:report_on_exception=)
            loop do
              # Destructuring STOP_SIGNAL yields task == STOP_SIGNAL, block == nil.
              task, block = @queue.pop
              break if task.equal?(STOP_SIGNAL)
              begin
                result = block.call
                task.fulfill(result)
              rescue StandardError => e
                task.reject(e)
              end
            end
          end
          @threads << worker
        end
      end
    end
  end
end
+172 -5
View File
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -32,6 +34,50 @@ module PotatoMesh
DEFAULT_FREQUENCY = "915MHz"
DEFAULT_CONTACT_LINK = "#potatomesh:dod.ngo"
DEFAULT_MAX_DISTANCE_KM = 42.0
DEFAULT_REMOTE_INSTANCE_CONNECT_TIMEOUT = 15
DEFAULT_REMOTE_INSTANCE_READ_TIMEOUT = 60
DEFAULT_FEDERATION_MAX_INSTANCES_PER_RESPONSE = 64
DEFAULT_FEDERATION_MAX_DOMAINS_PER_CRAWL = 256
DEFAULT_FEDERATION_WORKER_POOL_SIZE = 4
DEFAULT_FEDERATION_WORKER_QUEUE_CAPACITY = 128
DEFAULT_FEDERATION_TASK_TIMEOUT_SECONDS = 120
DEFAULT_INITIAL_FEDERATION_DELAY_SECONDS = 2
# Retrieve the configured API token used for authenticated requests.
#
# @return [String, nil] API token when provided, otherwise nil.
def api_token
fetch_string("API_TOKEN", nil)
end
# Retrieve an explicit instance domain override when present.
#
# @return [String, nil] hostname or host:port pair supplied via ENV.
def instance_domain
fetch_string("INSTANCE_DOMAIN", nil)
end
# Determine whether private mode should be activated.
#
# @return [Boolean] true when PRIVATE=1 in the environment.
def private_mode_enabled?
value = ENV.fetch("PRIVATE", "0")
value.to_s.strip == "1"
end
# Determine whether federation features are permitted for the instance.
#
# Federation is disabled when ``PRIVATE=1`` regardless of the
# ``FEDERATION`` environment variable to ensure a private deployment does
# not announce itself or crawl peers.
#
# @return [Boolean] true when federation should remain active.
def federation_enabled?
return false if private_mode_enabled?
value = ENV.fetch("FEDERATION", "1")
value.to_s.strip != "0"
end
# Resolve the absolute path to the web application root directory.
#
@@ -129,7 +175,7 @@ module PotatoMesh
#
# @return [String] semantic version identifier.
def version_fallback
"v0.5.0"
"0.5.6"
end
# Default refresh interval for frontend polling routines.
@@ -174,7 +220,7 @@ module PotatoMesh
#
# @return [String] comma separated list of report IDs.
def prom_report_ids
""
fetch_string("PROM_REPORT_IDS", "")
end
# Transform Prometheus report identifiers into a cleaned array.
@@ -269,11 +315,80 @@ module PotatoMesh
"rsa-sha256"
end
# Timeout used when querying remote instances during federation.
# Connection timeout used when establishing federation HTTP sockets.
#
# @return [Integer] HTTP timeout in seconds.
# The timeout can be customised with the REMOTE_INSTANCE_CONNECT_TIMEOUT
# environment variable to accommodate slower or distant federation peers.
#
# @return [Integer] connect timeout in seconds.
def remote_instance_http_timeout
5
fetch_positive_integer(
"REMOTE_INSTANCE_CONNECT_TIMEOUT",
DEFAULT_REMOTE_INSTANCE_CONNECT_TIMEOUT,
)
end
# Read timeout used when streaming federation HTTP responses.
#
# The timeout can be customised with the REMOTE_INSTANCE_READ_TIMEOUT
# environment variable to accommodate slower or distant federation peers.
#
# @return [Integer] read timeout in seconds.
def remote_instance_read_timeout
fetch_positive_integer(
"REMOTE_INSTANCE_READ_TIMEOUT",
DEFAULT_REMOTE_INSTANCE_READ_TIMEOUT,
)
end
# Limit the number of remote instances processed from a single response.
#
# @return [Integer] maximum entries processed per /api/instances payload.
def federation_max_instances_per_response
fetch_positive_integer(
"FEDERATION_MAX_INSTANCES_PER_RESPONSE",
DEFAULT_FEDERATION_MAX_INSTANCES_PER_RESPONSE,
)
end
# Limit the total number of distinct domains crawled during one ingestion.
#
# @return [Integer] maximum unique domains visited per crawl.
def federation_max_domains_per_crawl
fetch_positive_integer(
"FEDERATION_MAX_DOMAINS_PER_CRAWL",
DEFAULT_FEDERATION_MAX_DOMAINS_PER_CRAWL,
)
end
# Determine the worker pool size used for federation tasks.
#
# @return [Integer] number of worker threads dedicated to federation jobs.
def federation_worker_pool_size
fetch_positive_integer(
"FEDERATION_WORKERS",
DEFAULT_FEDERATION_WORKER_POOL_SIZE,
)
end
# Determine the queue capacity for pending federation jobs.
#
# @return [Integer] maximum number of queued tasks before rejecting work.
def federation_worker_queue_capacity
fetch_positive_integer(
"FEDERATION_WORK_QUEUE",
DEFAULT_FEDERATION_WORKER_QUEUE_CAPACITY,
)
end
# Determine the timeout applied when awaiting federation worker tasks.
#
# @return [Integer] seconds to wait for asynchronous jobs to complete.
def federation_task_timeout_seconds
fetch_positive_integer(
"FEDERATION_TASK_TIMEOUT",
DEFAULT_FEDERATION_TASK_TIMEOUT_SECONDS,
)
end
# Maximum acceptable age for remote node data.
@@ -304,6 +419,16 @@ module PotatoMesh
8 * 60 * 60
end
# Determine the grace period before sending the initial federation announcement.
#
# @return [Integer] seconds to wait before the first broadcast cycle.
def initial_federation_delay_seconds
fetch_positive_integer(
"INITIAL_FEDERATION_DELAY_SECONDS",
DEFAULT_INITIAL_FEDERATION_DELAY_SECONDS,
)
end
# Retrieve the configured site name for presentation.
#
# @return [String] human friendly site label.
@@ -352,6 +477,20 @@ module PotatoMesh
map_center[:lon]
end
# Retrieve an explicit map zoom override when provided.
#
# @return [Float, nil] positive zoom value or +nil+ when unset.
def map_zoom
raw = fetch_string("MAP_ZOOM", nil)
return nil unless raw
zoom = Float(raw, exception: false)
return nil unless zoom
return nil unless zoom.positive?
zoom
end
# Maximum straight-line distance between nodes before relationships are
# hidden.
#
@@ -371,6 +510,13 @@ module PotatoMesh
fetch_string("CONTACT_LINK", DEFAULT_CONTACT_LINK)
end
# Retrieve the configured connection target for the ingestor service.
#
# @return [String] serial device, TCP endpoint, or Bluetooth target.
def connection_target
fetch_string("CONNECTION", "/dev/ttyACM0")
end
# Determine the best URL to represent the configured contact link.
#
# @return [String, nil] absolute URL when derivable, otherwise nil.
@@ -415,6 +561,27 @@ module PotatoMesh
trimmed.empty? ? default : trimmed
end
# Fetch and validate integer based configuration flags.
#
# @param key [String] environment variable to read.
# @param default [Integer] fallback value when unset or invalid.
# @return [Integer] positive integer sourced from configuration.
def fetch_positive_integer(key, default)
value = ENV[key]
return default if value.nil?
trimmed = value.strip
return default if trimmed.empty?
begin
parsed = Integer(trimmed, 10)
rescue ArgumentError
return default
end
parsed.positive? ? parsed : default
end
# Resolve the effective XDG directory honoring environment overrides.
#
# @param env_key [String] name of the environment variable to inspect.
+14
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
require "logger"
+2
View File
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
+96 -3
View File
@@ -1,3 +1,5 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -38,11 +40,17 @@ module PotatoMesh
end
# Ensure a value is a valid instance domain according to RFC 1035/3986
# rules. This rejects whitespace, path separators, and trailing dots.
# rules. Hostnames must include at least one dot-separated label and a
# top-level domain containing an alphabetic character. Literal IP
# addresses must be provided in standard dotted decimal form or enclosed in
# brackets when IPv6 notation is used. Optional ports must fall within the
# valid TCP/UDP range. Any opaque identifiers, URIs, or malformed hosts are
# rejected.
#
# @param value [String, Object, nil] candidate domain name.
# @param downcase [Boolean] whether to force the result to lowercase.
# @return [String, nil] canonical domain value or +nil+ when invalid.
def sanitize_instance_domain(value)
def sanitize_instance_domain(value, downcase: true)
host = string_or_nil(value)
return nil unless host
@@ -51,7 +59,92 @@ module PotatoMesh
return nil if trimmed.empty?
return nil if trimmed.match?(%r{[\s/\\@]})
trimmed
if trimmed.start_with?("[")
match = trimmed.match(/\A\[(?<address>[^\]]+)\](?::(?<port>\d+))?\z/)
return nil unless match
address = match[:address]
port = match[:port]
return nil if port && !valid_port?(port)
begin
IPAddr.new(address)
rescue IPAddr::InvalidAddressError
return nil
end
sanitized_address = downcase ? address.downcase : address
return "[#{sanitized_address}]#{port ? ":#{port}" : ""}"
end
domain = trimmed
port = nil
if domain.include?(":")
host_part, port_part = domain.split(":", 2)
return nil if host_part.nil? || host_part.empty?
return nil unless port_part && port_part.match?(/\A\d+\z/)
return nil unless valid_port?(port_part)
return nil if port_part.include?(":")
domain = host_part
port = port_part
end
unless valid_hostname?(domain) || valid_ipv4_literal?(domain)
return nil
end
sanitized_domain = downcase ? domain.downcase : domain
port ? "#{sanitized_domain}:#{port}" : sanitized_domain
end
# Determine whether the supplied hostname conforms to RFC 1035 label
# requirements and includes a valid top-level domain.
#
# @param hostname [String] host component without any port information.
# @return [Boolean] true when the hostname is valid.
def valid_hostname?(hostname)
return false if hostname.length > 253
labels = hostname.split(".")
return false if labels.length < 2
return false unless labels.all? { |label| valid_hostname_label?(label) }
top_level = labels.last
top_level.match?(/[a-z]/i)
end
# Validate a single hostname label ensuring the first and last characters
# are alphanumeric and that no unsupported symbols are present.
#
# @param label [String] hostname component between dots.
# @return [Boolean] true when the label is valid.
def valid_hostname_label?(label)
return false if label.empty?
return false if label.length > 63
label.match?(/\A[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\z/i)
end
# Validate whether a candidate represents a dotted decimal IPv4 literal.
#
# @param address [String] IP address string without port information.
# @return [Boolean] true when the address is a valid IPv4 literal.
def valid_ipv4_literal?(address)
return false unless address.match?(/\A\d{1,3}(?:\.\d{1,3}){3}\z/)
address.split(".").all? { |octet| octet.to_i.between?(0, 255) }
end
# Determine whether a port string represents a valid TCP/UDP port.
#
# @param port [String] numeric port representation.
# @return [Boolean] true when the port falls within the acceptable range.
def valid_port?(port)
value = port.to_i
value.positive? && value <= 65_535
end
# Extract the host component from a potentially bracketed domain literal.
+163 -2
View File
@@ -1,12 +1,173 @@
{
"name": "potato-mesh",
"version": "0.5.0",
"version": "0.5.6",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "potato-mesh",
"version": "0.5.0"
"version": "0.5.6",
"devDependencies": {
"istanbul-lib-coverage": "^3.2.2",
"istanbul-lib-report": "^3.0.1",
"istanbul-reports": "^3.2.0",
"v8-to-istanbul": "^9.3.0"
}
},
"node_modules/@jridgewell/resolve-uri": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
"integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/sourcemap-codec": {
"version": "1.5.5",
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
"integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
"dev": true,
"license": "MIT"
},
"node_modules/@jridgewell/trace-mapping": {
"version": "0.3.31",
"resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
"integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jridgewell/resolve-uri": "^3.1.0",
"@jridgewell/sourcemap-codec": "^1.4.14"
}
},
"node_modules/@types/istanbul-lib-coverage": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz",
"integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==",
"dev": true,
"license": "MIT"
},
"node_modules/convert-source-map": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
"integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
"dev": true,
"license": "MIT"
},
"node_modules/has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/html-escaper": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz",
"integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==",
"dev": true,
"license": "MIT"
},
"node_modules/istanbul-lib-coverage": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz",
"integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==",
"dev": true,
"license": "BSD-3-Clause",
"engines": {
"node": ">=8"
}
},
"node_modules/istanbul-lib-report": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz",
"integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"istanbul-lib-coverage": "^3.0.0",
"make-dir": "^4.0.0",
"supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/istanbul-reports": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz",
"integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"html-escaper": "^2.0.0",
"istanbul-lib-report": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/make-dir": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz",
"integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==",
"dev": true,
"license": "MIT",
"dependencies": {
"semver": "^7.5.3"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semver": {
"version": "7.7.3",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
"integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
"dev": true,
"license": "ISC",
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"license": "MIT",
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/v8-to-istanbul": {
"version": "9.3.0",
"resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz",
"integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==",
"dev": true,
"license": "ISC",
"dependencies": {
"@jridgewell/trace-mapping": "^0.3.12",
"@types/istanbul-lib-coverage": "^2.0.1",
"convert-source-map": "^2.0.0"
},
"engines": {
"node": ">=10.12.0"
}
}
}
}
+7 -1
View File
@@ -1,9 +1,15 @@
{
"name": "potato-mesh",
"version": "0.5.0",
"version": "0.5.6",
"type": "module",
"private": true,
"scripts": {
"test": "mkdir -p reports coverage && NODE_V8_COVERAGE=coverage node --test --experimental-test-coverage --test-reporter=spec --test-reporter-destination=stdout --test-reporter=junit --test-reporter-destination=reports/javascript-junit.xml && node ./scripts/export-coverage.js"
},
"devDependencies": {
"istanbul-lib-coverage": "^3.2.2",
"istanbul-lib-report": "^3.0.1",
"istanbul-reports": "^3.2.0",
"v8-to-istanbul": "^9.3.0"
}
}
@@ -0,0 +1,149 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import {
fetchAggregatedTelemetry,
initializeChartsPage,
buildMovingAverageSeries,
} from '../charts-page.js';
/**
 * Build a minimal Fetch-API-like response stub for the tests.
 *
 * @param {number} status - HTTP status code the stub should report.
 * @param {*} body - Value resolved by the async `json()` method.
 * @returns {{ok: boolean, status: number, json: function(): Promise<*>}}
 */
function createResponse(status, body) {
  // Mirror the Fetch spec: `ok` is true only for 2xx statuses.
  const ok = status >= 200 && status < 300;
  const json = async () => body;
  return { ok, status, json };
}
// Verifies the aggregated telemetry fetcher targets the documented API
// endpoint with a 1000-entry limit and returns the parsed JSON array.
test('fetchAggregatedTelemetry requests the latest 1000 telemetry entries', async () => {
const requests = [];
const fetchImpl = async url => {
requests.push(url);
return createResponse(200, [{ rx_time: 1_700_000_000, node_id: '!demo' }]);
};
const snapshots = await fetchAggregatedTelemetry({ fetchImpl });
assert.equal(requests.length, 1);
assert.equal(requests[0], '/api/telemetry?limit=1000');
assert.equal(Array.isArray(snapshots), true);
assert.equal(snapshots[0].node_id, '!demo');
});
// Verifies the fetcher rejects when no fetch implementation is supplied
// and when the endpoint answers with a non-2xx status.
test('fetchAggregatedTelemetry validates fetch availability and response codes', async () => {
await assert.rejects(() => fetchAggregatedTelemetry({ fetchImpl: null }), /fetch implementation/i);
const fetchImpl = async () => createResponse(503, []);
await assert.rejects(() => fetchAggregatedTelemetry({ fetchImpl }), /Failed to fetch telemetry/);
});
// Happy path: the charts page renders into #chartsPage and passes a
// 24-hour window plus a moving-average line reducer to the renderer.
test('initializeChartsPage renders the telemetry charts when snapshots are available', async () => {
const container = { innerHTML: '' };
const documentStub = {
getElementById(id) {
return id === 'chartsPage' ? container : null;
},
};
const fetchImpl = async () => createResponse(200, [{ rx_time: 1_700_000_000, temperature: 22.5 }]);
let receivedOptions = null;
const renderCharts = (node, options) => {
receivedOptions = options;
return '<section class="node-detail__charts">Charts</section>';
};
const result = await initializeChartsPage({ document: documentStub, fetchImpl, renderCharts });
assert.equal(result, true);
assert.equal(container.innerHTML.includes('node-detail__charts'), true);
assert.ok(receivedOptions);
// 86_400_000 ms = 24 hours; the reducer must be callable on point lists.
assert.equal(receivedOptions.chartOptions.windowMs, 86_400_000);
assert.equal(typeof receivedOptions.chartOptions.lineReducer, 'function');
const average = receivedOptions.chartOptions.lineReducer(
[
{ timestamp: 0, value: 0 },
{ timestamp: 1_800_000, value: 10 },
{ timestamp: 3_600_000, value: 20 },
],
);
assert.equal(Array.isArray(average), true);
});
// A throwing fetch implementation surfaces an inline error message and
// makes the initializer report failure.
test('initializeChartsPage shows an error message when fetching fails', async () => {
const container = { innerHTML: '' };
const documentStub = {
getElementById() {
return container;
},
};
const fetchImpl = async () => {
throw new Error('network');
};
const renderCharts = () => '<section>unused</section>';
const result = await initializeChartsPage({ document: documentStub, fetchImpl, renderCharts });
assert.equal(result, false);
assert.equal(container.innerHTML.includes('Failed to load telemetry charts.'), true);
});
// A missing container aborts quietly; an empty snapshot list still counts
// as success but shows an "unavailable" status instead of charts.
test('initializeChartsPage handles missing containers and empty telemetry snapshots', async () => {
const documentMissing = { getElementById() { return null; } };
const noneResult = await initializeChartsPage({ document: documentMissing });
assert.equal(noneResult, false);
const container = { innerHTML: '' };
const documentStub = {
getElementById() {
return container;
},
};
const fetchImpl = async () => createResponse(200, []);
const renderCharts = () => '';
const result = await initializeChartsPage({ document: documentStub, fetchImpl, renderCharts });
assert.equal(result, true);
assert.equal(container.innerHTML.includes('Telemetry snapshots are unavailable.'), true);
});
// When the renderer produces no markup the same "unavailable" status is
// shown, even though snapshots were present.
test('initializeChartsPage shows a status when rendering produces no markup', async () => {
const container = { innerHTML: '' };
const documentStub = {
getElementById() {
return container;
},
};
const fetchImpl = async () => createResponse(200, [{ rx_time: 1_700_000_000 }]);
const renderCharts = () => '';
const result = await initializeChartsPage({ document: documentStub, fetchImpl, renderCharts });
assert.equal(result, true);
assert.equal(container.innerHTML.includes('Telemetry snapshots are unavailable.'), true);
});
// A document object without getElementById violates the contract and
// must reject rather than fail silently.
test('initializeChartsPage validates the document contract', async () => {
await assert.rejects(() => initializeChartsPage({ document: {} }), /getElementById/);
});
// With a one-hour window the mean at each point covers that point plus any
// earlier points within the window, so the expected values are 0, (0+30)/2,
// (0+30+60)/3 = 30, and (30+60+90)/3 = 60.
test('buildMovingAverageSeries computes a rolling mean across the window', () => {
const points = [
{ timestamp: 0, value: 0 },
{ timestamp: 30 * 60 * 1000, value: 30 },
{ timestamp: 60 * 60 * 1000, value: 60 },
{ timestamp: 90 * 60 * 1000, value: 90 },
];
const averages = buildMovingAverageSeries(points, 60 * 60 * 1000);
assert.equal(averages.length, points.length);
assert.equal(Math.round(averages[0].value), 0);
assert.equal(Math.round(averages[1].value), 15);
assert.equal(Math.round(averages[2].value), 30);
assert.equal(Math.round(averages[3].value), 60);
});
@@ -0,0 +1,180 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import {
extractChatMessageMetadata,
formatChatMessagePrefix,
formatChatChannelTag,
formatChatPresetTag,
formatNodeAnnouncementPrefix,
__test__
} from '../chat-format.js';
// Unpack internal helpers exported solely for unit testing through the
// module's `__test__` escape hatch; they are not part of the public API.
const {
firstNonNull,
normalizeString,
normalizeFrequency,
normalizeFrequencySlot,
FREQUENCY_PLACEHOLDER,
resolveModemPresetCandidate,
normalizePresetString,
abbreviatePreset,
derivePresetInitials,
normalizePresetSlot,
PRESET_PLACEHOLDER
} = __test__;
// Explicit region_frequency/channel_name fields take precedence over the
// LoRa fallbacks, and the channel name is trimmed.
test('extractChatMessageMetadata prefers explicit region_frequency and channel_name', () => {
const payload = {
region_frequency: 868,
channel_name: ' Test Channel ',
lora_freq: 915,
channelName: 'Ignored'
};
const result = extractChatMessageMetadata(payload);
assert.deepEqual(result, { frequency: '868', channelName: 'Test Channel', presetCode: null });
});
// Without explicit fields, lora_freq/channelName/modem_preset are used and
// the preset is abbreviated to a two-letter code.
test('extractChatMessageMetadata falls back to LoRa metadata', () => {
const payload = {
lora_freq: 915,
channelName: 'SpecChannel',
modem_preset: 'MediumFast'
};
const result = extractChatMessageMetadata(payload);
assert.deepEqual(result, { frequency: '915', channelName: 'SpecChannel', presetCode: 'MF' });
});
// Nullish payloads yield an all-null metadata record, never a throw.
test('extractChatMessageMetadata returns null metadata for invalid input', () => {
assert.deepEqual(extractChatMessageMetadata(null), { frequency: null, channelName: null, presetCode: null });
assert.deepEqual(extractChatMessageMetadata(undefined), { frequency: null, channelName: null, presetCode: null });
});
// Modem presets may live on a nested `node` payload and are still found.
test('extractChatMessageMetadata inspects nested node payloads for modem presets', () => {
const payload = {
node: {
modem_preset: 'ShortTurbo'
}
};
const result = extractChatMessageMetadata(payload);
assert.equal(result.presetCode, 'ST');
});
// firstNonNull treats '' as a valid value; only null/undefined are skipped.
test('firstNonNull returns the first non-null candidate', () => {
assert.equal(firstNonNull(null, undefined, '', 'value'), '');
assert.equal(firstNonNull(undefined, null), null);
});
// normalizeString trims, stringifies finite numbers, and rejects blanks
// and non-finite numbers.
test('normalizeString trims strings and rejects empties', () => {
assert.equal(normalizeString(' Spec '), 'Spec');
assert.equal(normalizeString(' '), null);
assert.equal(normalizeString(123), '123');
assert.equal(normalizeString(Number.POSITIVE_INFINITY), null);
});
// normalizeFrequency accepts numbers and strings (stripping a trailing
// unit), passes through non-numeric strings, and rejects negatives.
test('normalizeFrequency handles numeric and string inputs', () => {
assert.equal(normalizeFrequency(915), '915');
assert.equal(normalizeFrequency(868.125), '868.125');
assert.equal(normalizeFrequency(' 868MHz '), '868');
assert.equal(normalizeFrequency('n/a'), 'n/a');
assert.equal(normalizeFrequency(-5), null);
assert.equal(normalizeFrequency(null), null);
});
// Message prefixes always contain both brackets; a missing or empty
// frequency renders the placeholder instead of an empty slot.
test('formatChatMessagePrefix preserves bracket placeholders', () => {
assert.equal(
formatChatMessagePrefix({ timestamp: '11:46:48', frequency: '868' }),
'[11:46:48][868]'
);
assert.equal(
formatChatMessagePrefix({ timestamp: '16:19:19', frequency: null }),
`[16:19:19][${FREQUENCY_PLACEHOLDER}]`
);
assert.equal(
formatChatMessagePrefix({ timestamp: '09:00:00', frequency: '' }),
`[09:00:00][${FREQUENCY_PLACEHOLDER}]`
);
});
// Channel tags wrap the name in brackets; missing names render as `[]`.
test('formatChatChannelTag wraps channel names after the short name slot', () => {
assert.equal(
formatChatChannelTag({ channelName: 'TEST' }),
'[TEST]'
);
assert.equal(
formatChatChannelTag({ channelName: '' }),
'[]'
);
assert.equal(
formatChatChannelTag({ channelName: null }),
'[]'
);
});
// Preset tags fall back to the preset placeholder when no code is known.
test('formatChatPresetTag renders preset hints with placeholders', () => {
assert.equal(formatChatPresetTag({ presetCode: 'MF' }), '[MF]');
assert.equal(formatChatPresetTag({ presetCode: null }), `[${PRESET_PLACEHOLDER}]`);
});
// Node announcements share the timestamp+frequency prefix format.
test('formatNodeAnnouncementPrefix includes optional frequency bracket', () => {
assert.equal(
formatNodeAnnouncementPrefix({ timestamp: '12:34:56', frequency: '868' }),
'[12:34:56][868]'
);
assert.equal(
formatNodeAnnouncementPrefix({ timestamp: '01:02:03', frequency: null }),
`[01:02:03][${FREQUENCY_PLACEHOLDER}]`
);
});
// normalizeFrequencySlot maps every missing value to the placeholder.
test('normalizeFrequencySlot returns placeholder when frequency is missing', () => {
assert.equal(normalizeFrequencySlot(null), FREQUENCY_PLACEHOLDER);
assert.equal(normalizeFrequencySlot(''), FREQUENCY_PLACEHOLDER);
assert.equal(normalizeFrequencySlot(undefined), FREQUENCY_PLACEHOLDER);
assert.equal(normalizeFrequencySlot('915'), '915');
});
// Preset candidates are also resolved from camelCase nested node fields.
test('resolveModemPresetCandidate walks nested payloads', () => {
const nested = { node: { modemPreset: 'LongFast' } };
assert.equal(resolveModemPresetCandidate(nested), 'LongFast');
});
// normalizePresetString trims and rejects blank or nullish input.
test('normalizePresetString trims strings and ignores empties', () => {
assert.equal(normalizePresetString(' MediumSlow '), 'MediumSlow');
assert.equal(normalizePresetString(' '), null);
assert.equal(normalizePresetString(null), null);
});
// abbreviatePreset yields two-letter codes; single letters get a '?'.
test('abbreviatePreset maps known presets to codes', () => {
assert.equal(abbreviatePreset('VeryLongSlow'), 'VL');
assert.equal(abbreviatePreset('customPreset'), 'CP');
assert.equal(abbreviatePreset('X'), 'X?');
});
// derivePresetInitials splits on spaces or camelCase segment boundaries.
test('derivePresetInitials falls back to segmented tokens', () => {
assert.equal(derivePresetInitials('Long Moderate'), 'LM');
assert.equal(derivePresetInitials('ShortTurbo'), 'ST');
assert.equal(derivePresetInitials('Z'), 'Z?');
});
// normalizePresetSlot upper-cases codes and substitutes the placeholder.
test('normalizePresetSlot enforces placeholders and uppercase output', () => {
assert.equal(normalizePresetSlot('mf'), 'MF');
assert.equal(normalizePresetSlot(''), PRESET_PLACEHOLDER);
assert.equal(normalizePresetSlot(null), PRESET_PLACEHOLDER);
});
@@ -0,0 +1,100 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { formatPositionHighlights, formatTelemetryHighlights } from '../chat-log-highlights.js';
// Telemetry highlights format numeric metrics with units, rounded to one
// decimal place.
test('formatTelemetryHighlights includes formatted numeric metrics', () => {
const highlights = formatTelemetryHighlights({
temperature: 21.44,
relative_humidity: 54.27,
});
assert.deepEqual(highlights, [
{ label: 'Temperature', value: '21.4°C' },
{ label: 'Humidity', value: '54.3%' },
]);
});
// A non-zero nested device_metrics value wins over a zero top-level field.
test('formatTelemetryHighlights prefers nested telemetry when top-level values are stale', () => {
const highlights = formatTelemetryHighlights({
channel_utilization: 0,
device_metrics: { channelUtilization: 0.561 },
});
assert.deepEqual(highlights, [
{ label: 'Channel Util', value: '0.561%' },
]);
});
// Position highlights cover coordinates plus movement and fix metadata,
// with five-decimal coordinates and a rounded heading.
test('formatPositionHighlights renders coordinate and movement data', () => {
const highlights = formatPositionHighlights({
latitude: 52.1234567,
longitude: 13.7654321,
altitude: 150.5,
accuracy: 3.2,
speed: 1.234,
heading: 181.6,
satellites: 7,
});
assert.deepEqual(highlights, [
{ label: 'Lat', value: '52.12346' },
{ label: 'Lon', value: '13.76543' },
{ label: 'Alt', value: '150.5m' },
{ label: 'Accuracy', value: '3.2m' },
{ label: 'Speed', value: '1.2 m/s' },
{ label: 'Heading', value: '182°' },
{ label: 'Sats', value: '7' },
]);
});
// Integer microdegree fields (latitude_i/longitude_i) on a nested
// position payload are scaled into decimal degrees.
test('formatPositionHighlights normalises integer microdegree fields', () => {
const highlights = formatPositionHighlights({
position: {
latitude_i: 52_123_456,
longitude_i: 13_765_432,
},
});
assert.deepEqual(highlights.slice(0, 2), [
{ label: 'Lat', value: '52.12346' },
{ label: 'Lon', value: '13.76543' },
]);
});
// Missing payloads yield empty highlight lists rather than throwing.
test('formatters return empty arrays when payloads are missing', () => {
assert.deepEqual(formatTelemetryHighlights(null), []);
assert.deepEqual(formatPositionHighlights(undefined), []);
assert.deepEqual(formatPositionHighlights({}), []);
});
// Zero coordinates are legitimate and kept; zero-valued movement metrics
// are dropped from the output.
test('formatPositionHighlights omits zero-valued movement metrics while keeping coordinates', () => {
const highlights = formatPositionHighlights({
latitude: 0,
longitude: 0,
altitude: 0,
speed: '0',
accuracy: 0,
});
assert.deepEqual(highlights, [
{ label: 'Lat', value: '0.00000' },
{ label: 'Lon', value: '0.00000' },
]);
});
@@ -0,0 +1,328 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import {
CHAT_LOG_ENTRY_TYPES,
buildChatTabModel,
MAX_CHANNEL_INDEX,
normaliseChannelIndex,
normaliseChannelName,
resolveTimestampSeconds
} from '../chat-log-tabs.js';
// Fixed reference clock (seconds) shared by every fixture and assertion.
const NOW = 1_000_000;
// Recency window passed to buildChatTabModel throughout these tests.
const WINDOW = 60 * 60; // one hour
// Node fixtures: one node heard inside the window, one heard just outside
// it, and one whose first-heard time is only available as an ISO string.
function fixtureNodes() {
return [
{ id: 'recent-node', first_heard: NOW - 120 },
{ id: 'stale-node', first_heard: NOW - WINDOW - 1 },
{ id: 'iso-node', firstHeard: null, first_heard_iso: new Date((NOW - 30) * 1000).toISOString() }
];
}
// Message fixtures exercising every bucketing path: named, preset-tagged,
// and plain primary-channel messages; a named secondary channel; a stale
// message outside the window; an encrypted message; a named message with
// no index; an out-of-range index; and an ISO-only timestamp.
function fixtureMessages() {
return [
{ id: 'recent-default', rx_time: NOW - 5, channel: 0, channel_name: ' MediumFast ' },
{ id: 'primary-preset', rx_time: NOW - 8, channel: 0, modem_preset: ' ShortFast ' },
{ id: 'env-default', rx_time: NOW - 12, channel: 0 },
{ id: 'recent-alt', rx_time: NOW - 10, channel_index: '1', channel_name: ' BerlinMesh ' },
{ id: 'stale', rx_time: NOW - WINDOW - 5, channel: 2 },
{ id: 'encrypted', rx_time: NOW - 20, channel: 3, encrypted: true },
{ id: 'no-index', rx_time: NOW - 15, channel_name: 'Fallback' },
{ id: 'too-high', rx_time: NOW - 25, channel: MAX_CHANNEL_INDEX + 5, channel_name: 'Ignored' },
{ id: 'iso-ts', rxTime: null, rx_iso: new Date((NOW - 40) * 1000).toISOString(), channel: 1 }
];
}
/**
 * Build a chat tab model from the shared fixtures, letting individual
 * options be overridden per test.
 *
 * @param {Object} [overrides] - Options merged over the fixture defaults.
 * @returns {Object} Model produced by `buildChatTabModel`.
 */
function buildModel(overrides = {}) {
  const defaults = {
    nodes: fixtureNodes(),
    messages: fixtureMessages(),
    nowSeconds: NOW,
    windowSeconds: WINDOW,
    primaryChannelFallbackLabel: '#EnvDefault'
  };
  return buildChatTabModel({ ...defaults, ...overrides });
}
// End-to-end model check: log entries are ordered and typed as expected,
// and channel buckets are created per label with stable ids and contents.
test('buildChatTabModel returns sorted nodes and channel buckets', () => {
const model = buildModel();
assert.equal(model.logEntries.length, 3);
assert.deepEqual(model.logEntries.map(entry => entry.type), [
CHAT_LOG_ENTRY_TYPES.NODE_NEW,
CHAT_LOG_ENTRY_TYPES.NODE_NEW,
CHAT_LOG_ENTRY_TYPES.MESSAGE_ENCRYPTED
]);
assert.deepEqual(
model.logEntries.map(entry => entry.type === CHAT_LOG_ENTRY_TYPES.MESSAGE_ENCRYPTED ? entry.message.id : entry.node.id),
['recent-node', 'iso-node', 'encrypted']
);
assert.equal(model.channels.length, 5);
assert.deepEqual(model.channels.map(channel => channel.label), [
'EnvDefault',
'Fallback',
'MediumFast',
'ShortFast',
'BerlinMesh'
]);
const channelByLabel = Object.fromEntries(model.channels.map(channel => [channel.label, channel]));
const envChannel = channelByLabel.EnvDefault;
assert.equal(envChannel.index, 0);
assert.equal(envChannel.id, 'channel-0-envdefault');
assert.deepEqual(envChannel.entries.map(entry => entry.message.id), ['env-default']);
const fallbackChannel = channelByLabel.Fallback;
assert.equal(fallbackChannel.index, 0);
assert.equal(fallbackChannel.id, 'channel-0-fallback');
assert.deepEqual(fallbackChannel.entries.map(entry => entry.message.id), ['no-index']);
const namedPrimaryChannel = channelByLabel.MediumFast;
assert.equal(namedPrimaryChannel.index, 0);
assert.equal(namedPrimaryChannel.id, 'channel-0-mediumfast');
assert.deepEqual(namedPrimaryChannel.entries.map(entry => entry.message.id), ['recent-default']);
const presetChannel = channelByLabel.ShortFast;
assert.equal(presetChannel.index, 0);
assert.equal(presetChannel.id, 'channel-0-shortfast');
assert.deepEqual(presetChannel.entries.map(entry => entry.message.id), ['primary-preset']);
const secondaryChannel = channelByLabel.BerlinMesh;
assert.equal(secondaryChannel.index, 1);
assert.equal(secondaryChannel.id, 'channel-secondary-berlinmesh');
assert.equal(secondaryChannel.entries.length, 2);
assert.deepEqual(secondaryChannel.entries.map(entry => entry.message.id), ['iso-ts', 'recent-alt']);
});
// An empty input still yields the primary (index 0) channel bucket.
test('buildChatTabModel always includes channel zero bucket', () => {
const model = buildChatTabModel({ nodes: [], messages: [], nowSeconds: NOW, windowSeconds: WINDOW });
assert.equal(model.channels.length, 1);
assert.equal(model.channels[0].index, 0);
assert.equal(model.channels[0].entries.length, 0);
});
// With no name, preset, or fallback label, the numeric index becomes the
// label and the id omits the label suffix.
test('buildChatTabModel falls back to numeric label when no metadata provided', () => {
const model = buildChatTabModel({
nodes: [],
messages: [{ id: 'plain', rx_time: NOW - 5, channel: 0 }],
nowSeconds: NOW,
windowSeconds: WINDOW,
primaryChannelFallbackLabel: ''
});
assert.equal(model.channels.length, 1);
assert.equal(model.channels[0].label, '0');
assert.equal(model.channels[0].id, 'channel-0');
});
// Channel indexes accept floats (truncated) and padded numeric strings.
test('normaliseChannelIndex handles numeric and textual input', () => {
assert.equal(normaliseChannelIndex(2.9), 2);
assert.equal(normaliseChannelIndex(' 7 '), 7);
assert.equal(normaliseChannelIndex('bad'), null);
assert.equal(normaliseChannelIndex(null), null);
});
// Channel names are trimmed; numbers are stringified; empties become null.
test('normaliseChannelName trims strings and allows numeric values', () => {
assert.equal(normaliseChannelName(' Berlin '), 'Berlin');
assert.equal(normaliseChannelName(5), '5');
assert.equal(normaliseChannelName(''), null);
assert.equal(normaliseChannelName(undefined), null);
});
// Numeric timestamps win; otherwise the ISO string is parsed to seconds,
// and unparseable input resolves to null.
test('resolveTimestampSeconds prefers numeric but falls back to ISO parsing', () => {
assert.equal(resolveTimestampSeconds(1234, null), 1234);
const iso = '1970-01-01T00:10:00Z';
assert.equal(resolveTimestampSeconds('not-numeric', iso), 600);
assert.equal(resolveTimestampSeconds('bad', 'invalid'), null);
});
// Telemetry, position, and neighbor feeds each contribute a typed log
// entry, in chronological order after the node's new/info entries.
test('buildChatTabModel includes telemetry, position, and neighbor events', () => {
const nodeId = '!node';
const neighborId = '!peer';
const model = buildChatTabModel({
nodes: [{
node_id: nodeId,
first_heard: NOW - 50,
last_heard: NOW - 40,
short_name: 'NODE',
long_name: 'Node Example'
}],
telemetry: [{ node_id: nodeId, rx_time: NOW - 30 }],
positions: [{ node_id: nodeId, rx_time: NOW - 20 }],
neighbors: [{ node_id: nodeId, neighbor_id: neighborId, rx_time: NOW - 10 }],
messages: [],
nowSeconds: NOW,
windowSeconds: WINDOW
});
assert.deepEqual(model.logEntries.map(entry => entry.type), [
CHAT_LOG_ENTRY_TYPES.NODE_NEW,
CHAT_LOG_ENTRY_TYPES.NODE_INFO,
CHAT_LOG_ENTRY_TYPES.TELEMETRY,
CHAT_LOG_ENTRY_TYPES.POSITION,
CHAT_LOG_ENTRY_TYPES.NEIGHBOR
]);
assert.equal(model.logEntries[0].nodeId, nodeId);
const lastEntry = model.logEntries[model.logEntries.length - 1];
assert.equal(lastEntry.neighborId, neighborId);
});
// Encrypted messages from the dedicated log-only feed appear in the log
// entries but must never leak into the channel buckets.
test('buildChatTabModel merges dedicated encrypted log feed without altering channels', () => {
const regularMessages = fixtureMessages().filter(message => !message.encrypted);
const encryptedOnly = [
{ id: 'log-only', encrypted: true, rx_time: NOW - 3, channel: 7 }
];
const model = buildChatTabModel({
nodes: [],
messages: regularMessages,
logOnlyMessages: encryptedOnly,
nowSeconds: NOW,
windowSeconds: WINDOW
});
const encryptedEntries = model.logEntries.filter(entry => entry.type === CHAT_LOG_ENTRY_TYPES.MESSAGE_ENCRYPTED);
assert.equal(encryptedEntries.length, 1);
assert.equal(encryptedEntries[0]?.message?.id, 'log-only');
// Collect every message id bucketed into any channel.
const channelMessageIds = model.channels.reduce((acc, channel) => {
if (!channel || !Array.isArray(channel.entries)) {
return acc;
}
for (const entry of channel.entries) {
if (entry && entry.message && entry.message.id) {
acc.push(entry.message.id);
}
}
return acc;
}, []);
assert.ok(!channelMessageIds.includes('log-only'));
});
// The same encrypted message supplied via both feeds is logged only once.
test('buildChatTabModel de-duplicates encrypted messages across feeds', () => {
const duplicateMessage = { id: 'dup', encrypted: true, rx_time: NOW - 4 };
const model = buildChatTabModel({
nodes: [],
messages: [duplicateMessage],
logOnlyMessages: [duplicateMessage],
nowSeconds: NOW,
windowSeconds: WINDOW
});
const encryptedEntries = model.logEntries.filter(entry => entry.type === CHAT_LOG_ENTRY_TYPES.MESSAGE_ENCRYPTED);
assert.equal(encryptedEntries.length, 1);
assert.equal(encryptedEntries[0]?.message?.id, 'dup');
});
// Plaintext entries in the log-only feed are discarded entirely.
test('buildChatTabModel ignores plaintext log-only entries', () => {
const logOnlyMessages = [
{ id: 'plain', encrypted: false, rx_time: NOW - 5 },
{ id: 'enc', encrypted: true, rx_time: NOW - 4 }
];
const model = buildChatTabModel({
nodes: [],
messages: [],
logOnlyMessages,
nowSeconds: NOW,
windowSeconds: WINDOW
});
const encryptedEntries = model.logEntries.filter(entry => entry.type === CHAT_LOG_ENTRY_TYPES.MESSAGE_ENCRYPTED);
assert.equal(encryptedEntries.length, 1);
assert.equal(encryptedEntries[0]?.message?.id, 'enc');
});
// Secondary channels sharing a (trimmed) label merge into one bucket while
// the primary (index 0) channel with the same label stays separate.
test('buildChatTabModel merges secondary channels with matching labels regardless of index', () => {
const primaryId = 'primary';
const secondaryFirstId = 'secondary-one';
const secondarySecondId = 'secondary-two';
const label = 'MeshTown';
const model = buildChatTabModel({
nodes: [],
messages: [
{ id: secondaryFirstId, rx_time: NOW - 12, channel: 7, channel_name: label },
{ id: primaryId, rx_time: NOW - 10, channel: 0, channel_name: label },
{ id: secondarySecondId, rx_time: NOW - 8, channel: 3, channel_name: ` ${label} ` }
],
nowSeconds: NOW,
windowSeconds: WINDOW
});
const meshChannels = model.channels.filter(channel => channel.label === label);
assert.equal(meshChannels.length, 2);
const primaryChannel = meshChannels.find(channel => channel.index === 0);
assert.ok(primaryChannel);
assert.equal(primaryChannel.entries.length, 1);
assert.equal(primaryChannel.entries[0]?.message?.id, primaryId);
const secondaryChannel = meshChannels.find(channel => channel.index > 0);
assert.ok(secondaryChannel);
assert.equal(secondaryChannel.id, 'channel-secondary-meshtown');
assert.equal(secondaryChannel.index, 3);
assert.deepEqual(secondaryChannel.entries.map(entry => entry.message.id), [secondaryFirstId, secondarySecondId]);
});
// A bucket created from an unnamed message is re-keyed once a later
// message on the same index supplies the channel label.
test('buildChatTabModel rekeys unnamed secondary buckets when a label later arrives', () => {
const unnamedId = 'unnamed';
const namedId = 'named';
const label = 'SideMesh';
const index = 4;
const model = buildChatTabModel({
nodes: [],
messages: [
{ id: unnamedId, rx_time: NOW - 15, channel: index },
{ id: namedId, rx_time: NOW - 10, channel: index, channel_name: label }
],
nowSeconds: NOW,
windowSeconds: WINDOW
});
const secondaryChannels = model.channels.filter(channel => channel.index === index);
assert.equal(secondaryChannels.length, 1);
const [secondaryChannel] = secondaryChannels;
assert.equal(secondaryChannel.id, 'channel-secondary-sidemesh');
assert.equal(secondaryChannel.label, label);
assert.deepEqual(secondaryChannel.entries.map(entry => entry.message.id), [unnamedId, namedId]);
});
// Conversely, later unlabeled messages join the already-named bucket that
// shares their channel index.
test('buildChatTabModel merges unlabeled secondary messages into existing named buckets by index', () => {
const namedId = 'named';
const unlabeledId = 'unlabeled';
const label = 'MeshNorth';
const index = 5;
const model = buildChatTabModel({
nodes: [],
messages: [
{ id: namedId, rx_time: NOW - 12, channel: index, channel_name: label },
{ id: unlabeledId, rx_time: NOW - 8, channel: index }
],
nowSeconds: NOW,
windowSeconds: WINDOW
});
const secondaryChannels = model.channels.filter(channel => channel.index === index);
assert.equal(secondaryChannels.length, 1);
const [secondaryChannel] = secondaryChannels;
assert.equal(secondaryChannel.id, 'channel-secondary-meshnorth');
assert.equal(secondaryChannel.label, label);
assert.deepEqual(secondaryChannel.entries.map(entry => entry.message.id), [namedId, unlabeledId]);
});
@@ -0,0 +1,109 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { CHAT_LOG_ENTRY_TYPES } from '../chat-log-tabs.js';
import {
chatLogEntryMatchesQuery,
chatMessageMatchesQuery,
filterChatModel,
normaliseChatFilterQuery
} from '../chat-search.js';
// Queries are case-folded and trimmed; nullish input yields the empty query.
test('normaliseChatFilterQuery lower-cases and trims user input', () => {
assert.equal(normaliseChatFilterQuery(' MIXED Case '), 'mixed case');
assert.equal(normaliseChatFilterQuery(null), '');
});
// Matching considers the message text plus the sender's short/long names.
test('chatMessageMatchesQuery inspects text and node metadata', () => {
const message = { text: 'Hello Mesh', node: { short_name: 'ALFA', long_name: 'Alpha Node' } };
const helloQuery = normaliseChatFilterQuery('mesh');
assert.equal(chatMessageMatchesQuery(message, helloQuery), true);
const aliasQuery = normaliseChatFilterQuery('alfa');
assert.equal(chatMessageMatchesQuery(message, aliasQuery), true);
const missQuery = normaliseChatFilterQuery('bravo');
assert.equal(chatMessageMatchesQuery(message, missQuery), false);
});
// Position log entries match against their formatted highlight values
// (e.g. the five-decimal latitude string).
test('chatLogEntryMatchesQuery recognises position highlight values', () => {
const entry = {
type: CHAT_LOG_ENTRY_TYPES.POSITION,
ts: 1,
position: { latitude: 51.5, longitude: 0 },
node: { node_id: '!alpha', short_name: 'Alpha' }
};
const query = normaliseChatFilterQuery('51.50000');
assert.equal(chatLogEntryMatchesQuery(entry, query), true);
const missQuery = normaliseChatFilterQuery('bravo');
assert.equal(chatLogEntryMatchesQuery(entry, missQuery), false);
});
// Enriched node context attached to an entry is searchable too.
test('chatLogEntryMatchesQuery uses enriched node context for lookups', () => {
const entry = {
type: CHAT_LOG_ENTRY_TYPES.TELEMETRY,
nodeId: '!alpha',
telemetry: { voltage: 12.1 },
node: { short_name: 'ALFA', long_name: 'Alpha Node' }
};
const query = normaliseChatFilterQuery('alpha node');
assert.equal(chatLogEntryMatchesQuery(entry, query), true);
});
// Neighbor entries expose the neighbor node's names to the filter.
test('chatLogEntryMatchesQuery inspects neighbor node context', () => {
const entry = {
type: CHAT_LOG_ENTRY_TYPES.NEIGHBOR,
neighborId: '!bravo',
neighborNode: { short_name: 'BRAV', long_name: 'Bravo Station' }
};
const query = normaliseChatFilterQuery('bravo station');
assert.equal(chatLogEntryMatchesQuery(entry, query), true);
});
// Filtering narrows both the log entries and each channel's messages.
test('filterChatModel filters both log entries and channel messages', () => {
const model = {
logEntries: [
{ type: CHAT_LOG_ENTRY_TYPES.NODE_INFO, nodeId: '!alpha', node: { short_name: 'Alpha' } },
{ type: CHAT_LOG_ENTRY_TYPES.NODE_INFO, nodeId: '!bravo', node: { short_name: 'Bravo' } }
],
channels: [
{
index: 0,
label: '0',
entries: [
{ ts: 1, message: { text: 'Ping Alpha', node: { short_name: 'Alpha' } } },
{ ts: 2, message: { text: 'Ack Bravo', node: { short_name: 'Bravo' } } }
]
}
]
};
const result = filterChatModel(model, 'bravo');
assert.equal(result.logEntries.length, 1);
assert.equal(result.logEntries[0].nodeId, '!bravo');
assert.equal(result.channels.length, 1);
assert.deepEqual(result.channels[0].entries.map(entry => entry.message.text), ['Ack Bravo']);
});
// A blank query is a no-op that returns the original arrays by reference.
test('filterChatModel returns original references when query is empty', () => {
const model = {
logEntries: [{ type: CHAT_LOG_ENTRY_TYPES.NODE_INFO, nodeId: '!alpha', node: { short_name: 'Alpha' } }],
channels: [{ index: 0, label: '0', entries: [] }]
};
const result = filterChatModel(model, ' ');
assert.strictEqual(result.logEntries, model.logEntries);
assert.strictEqual(result.channels, model.channels);
});
@@ -0,0 +1,194 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { renderChatTabs } from '../chat-tabs.js';
/**
 * Minimal stand-in for a DOM element's `classList`.
 * Falsy class names are silently ignored (the real DOM would throw).
 */
class MockClassList {
  constructor() {
    this._values = new Set();
  }

  /** Add each truthy class name to the set. */
  add(...names) {
    for (const name of names) {
      if (!name) continue;
      this._values.add(name);
    }
  }

  /** Remove each truthy class name from the set. */
  remove(...names) {
    for (const name of names) {
      if (!name) continue;
      this._values.delete(name);
    }
  }

  /** @returns {boolean} Whether the class name is currently present. */
  contains(name) {
    return this._values.has(name);
  }
}
/**
 * Minimal stand-in for a DocumentFragment: collects appended nodes and
 * flags itself via `isFragment` so `replaceChildren` implementations can
 * flatten its children.
 */
class MockFragment {
  constructor() {
    this.children = [];
    this.isFragment = true;
  }

  /** Append a node and return it, mirroring the DOM contract. */
  appendChild(node) {
    const { children } = this;
    children.push(node);
    return node;
  }
}
/**
 * Lightweight stand-in for a DOM element capturing exactly the operations
 * the chat-tabs renderer performs: child management (with fragment
 * flattening), attributes with `data-*` reflection into `dataset`, class
 * toggling, and single-handler event listeners.
 */
class MockElement {
  constructor(tagName) {
    this.tagName = tagName.toUpperCase();
    this.children = [];
    this.attributes = new Map();
    this.dataset = {};
    this.classList = new MockClassList();
    this.listeners = new Map();
    this.hidden = false;
    this.scrollTop = 0;
    // Fixed fake height so "scroll to bottom" behaviour is observable.
    this.scrollHeight = 200;
  }

  /** Append a node and return it, mirroring the DOM contract. */
  appendChild(node) {
    this.children.push(node);
    return node;
  }

  /**
   * Replace all children. Fragment-like nodes (anything flagged with
   * `isFragment` and carrying a `children` array) are flattened; falsy
   * entries are skipped.
   */
  replaceChildren(...nodes) {
    const next = [];
    for (const node of nodes) {
      if (!node) continue;
      if (node.isFragment && Array.isArray(node.children)) {
        next.push(...node.children);
      } else {
        next.push(node);
      }
    }
    this.children = next;
  }

  /**
   * Store an attribute as a string; mirror `id` onto the element and
   * reflect `data-*` names into `dataset` with camelCased keys.
   */
  setAttribute(name, value) {
    const text = String(value);
    this.attributes.set(name, text);
    if (name === 'id') {
      this.id = text;
    }
    if (name.startsWith('data-')) {
      // 'data-' is 5 chars; 'data-active-tab' -> 'activeTab'.
      const key = name.slice(5).replace(/-([a-z])/g, (_, c) => c.toUpperCase());
      this.dataset[key] = text;
    }
  }

  /** @returns {?string} Stored attribute value, or null when unset. */
  getAttribute(name) {
    return this.attributes.has(name) ? this.attributes.get(name) : null;
  }

  /** Register (or overwrite) the single handler for an event type. */
  addEventListener(event, handler) {
    this.listeners.set(event, handler);
  }

  /** Invoke the registered handler for `event`, if any, with a stub event. */
  dispatch(event) {
    const handler = this.listeners.get(event);
    if (handler) {
      handler({});
    }
  }
}
/**
 * Build a document stub exposing only the factories the renderer needs.
 *
 * @returns {{createElement: function(string): MockElement,
 *            createDocumentFragment: function(): MockFragment}} Document stub.
 */
function createMockDocument() {
  const createElement = tag => new MockElement(tag);
  const createDocumentFragment = () => new MockFragment();
  return { createElement, createDocumentFragment };
}
// Full scaffold: a tab list plus a panel wrapper, the default tab visible
// and scrolled to the bottom, and click switching between panels.
test('renderChatTabs creates tab markup and selects default active tab', () => {
  const document = createMockDocument();
  const container = new MockElement('div');
  const tabs = [
    { id: 'log', label: 'Log', content: new MockElement('div') },
    { id: 'channel-0', label: 'Default', content: new MockElement('div') },
    { id: 'channel-1', label: 'Alt', content: new MockElement('div') }
  ];
  const active = renderChatTabs({
    document,
    container,
    tabs,
    defaultActiveTabId: 'channel-0'
  });
  assert.equal(active, 'channel-0');
  assert.equal(container.dataset.activeTab, 'channel-0');
  // Container holds exactly the tab list and the panel wrapper.
  assert.equal(container.children.length, 2);
  const [tabList, panelWrapper] = container.children;
  assert.equal(tabList.children.length, 3);
  assert.equal(panelWrapper.children.length, 3);
  // Active panel (index 1 = channel-0) is shown and scrolled to the latest entry.
  assert.equal(panelWrapper.children[1].hidden, false);
  assert.equal(panelWrapper.children[1].scrollTop, panelWrapper.children[1].scrollHeight);
  assert.equal(panelWrapper.children[0].hidden, true);
  // Clicking the first tab flips the active marker and panel visibility.
  tabList.children[0].dispatch('click');
  assert.equal(container.dataset.activeTab, 'log');
  assert.equal(panelWrapper.children[0].hidden, false);
  assert.equal(panelWrapper.children[1].hidden, true);
});

// A still-valid previously active tab wins over the configured default.
test('renderChatTabs reuses previous active tab when still available', () => {
  const document = createMockDocument();
  const container = new MockElement('div');
  container.dataset.activeTab = 'log';
  const tabs = [
    { id: 'log', label: 'Log', content: new MockElement('div') },
    { id: 'channel-0', label: 'Default', content: new MockElement('div') }
  ];
  const active = renderChatTabs({
    document,
    container,
    tabs,
    previousActiveTabId: 'log',
    defaultActiveTabId: 'channel-0'
  });
  assert.equal(active, 'log');
  const [tabList, panels] = container.children;
  assert.equal(tabList.children[0].getAttribute('aria-selected'), 'true');
  assert.equal(panels.children[0].hidden, false);
});

// An empty tab list empties the container and clears the active-tab marker.
test('renderChatTabs clears container when no tabs exist', () => {
  const document = createMockDocument();
  const container = new MockElement('div');
  container.replaceChildren(new MockElement('span'));
  const active = renderChatTabs({ document, container, tabs: [] });
  assert.equal(active, null);
  assert.equal(container.children.length, 0);
  assert.equal(container.dataset.activeTab, '');
});
@@ -1,4 +1,6 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
@@ -79,6 +81,7 @@ test('mergeConfig coerces numeric values and nested objects', () => {
refreshMs: '45000',
mapCenter: { lat: '10.5', lon: '20.1' },
tileFilters: { dark: 'contrast(2)' },
mapZoom: '12',
chatEnabled: 0,
channel: '#Custom',
frequency: '915MHz',
@@ -91,6 +94,7 @@ test('mergeConfig coerces numeric values and nested objects', () => {
assert.equal(result.refreshMs, 45000);
assert.deepEqual(result.mapCenter, { lat: 10.5, lon: 20.1 });
assert.deepEqual(result.tileFilters, { light: DEFAULT_CONFIG.tileFilters.light, dark: 'contrast(2)' });
assert.equal(result.mapZoom, 12);
assert.equal(result.chatEnabled, false);
assert.equal(result.channel, '#Custom');
assert.equal(result.frequency, '915MHz');
@@ -103,12 +107,19 @@ test('mergeConfig falls back to defaults for invalid numeric values', () => {
const result = mergeConfig({
refreshIntervalSeconds: 'NaN',
refreshMs: 'NaN',
maxDistanceKm: 'oops'
maxDistanceKm: 'oops',
mapZoom: 'not-a-number'
});
assert.equal(result.refreshIntervalSeconds, DEFAULT_CONFIG.refreshIntervalSeconds);
assert.equal(result.refreshMs, DEFAULT_CONFIG.refreshMs);
assert.equal(result.maxDistanceKm, DEFAULT_CONFIG.maxDistanceKm);
assert.equal(result.mapZoom, null);
});
test('mergeConfig treats blank mapZoom as null', () => {
const result = mergeConfig({ mapZoom: '' });
assert.equal(result.mapZoom, null);
});
test('document stub returns null for unrelated selectors', () => {
@@ -1,4 +1,6 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2025 l5yth
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -0,0 +1,174 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { createDomEnvironment } from './dom-environment.js';
import { buildInstanceUrl, initializeInstanceSelector, __test__ } from '../instance-selector.js';
const { resolveInstanceLabel } = __test__;
/**
 * Augment a freshly created <select> element with just enough of the
 * HTMLSelectElement API for the instance-selector tests: a live `options`
 * collection, a `value` accessor tied to `selectedIndex`, `appendChild`,
 * index-based `remove`, and single-handler event dispatch.
 *
 * @param {Object} document Document used to create the element.
 * @returns {Object} The augmented select stub.
 */
function setupSelectElement(document) {
  const select = document.createElement('select');
  const handlers = new Map();
  const optionList = [];

  Object.defineProperties(select, {
    // Live options collection, like the DOM's HTMLOptionsCollection.
    options: {
      get: () => optionList,
    },
    // `value` proxies the currently selected option's value.
    value: {
      get() {
        if (typeof select.selectedIndex !== 'number') {
          return '';
        }
        const option = optionList[select.selectedIndex];
        return option ? option.value : '';
      },
      set(next) {
        const match = optionList.findIndex(option => option.value === next);
        select.selectedIndex = match === -1 ? -1 : match;
      },
    },
  });

  select.selectedIndex = -1;

  // First appended option becomes the selection, mirroring browsers.
  select.appendChild = option => {
    optionList.push(option);
    if (select.selectedIndex === -1) {
      select.selectedIndex = 0;
    }
    return option;
  };

  // Remove by index; clear or clamp the selection when it falls off the end.
  select.remove = index => {
    if (index < 0 || index >= optionList.length) {
      return;
    }
    optionList.splice(index, 1);
    if (optionList.length === 0) {
      select.selectedIndex = -1;
    } else if (select.selectedIndex >= optionList.length) {
      select.selectedIndex = optionList.length - 1;
    }
  };

  select.addEventListener = (event, handler) => {
    handlers.set(event, handler);
  };

  // Accepts either an event-name string or an object with a `type` field.
  select.dispatchEvent = event => {
    const type = typeof event === 'string' ? event : event?.type;
    handlers.get(type)?.(event);
  };

  return select;
}
// The label helper prefers a trimmed name and falls back to the raw domain;
// a null record yields an empty label.
test('resolveInstanceLabel falls back to the domain when the name is missing', () => {
  assert.equal(resolveInstanceLabel({ domain: 'mesh.example' }), 'mesh.example');
  assert.equal(resolveInstanceLabel({ name: ' Mesh Name ' }), 'Mesh Name');
  assert.equal(resolveInstanceLabel(null), '');
});

// Domains are trimmed and given an https:// scheme; blank input yields null.
test('buildInstanceUrl normalises domains into navigable HTTPS URLs', () => {
  assert.equal(buildInstanceUrl('mesh.example'), 'https://mesh.example');
  assert.equal(buildInstanceUrl(' https://mesh.example '), 'https://mesh.example');
  assert.equal(buildInstanceUrl(''), null);
  assert.equal(buildInstanceUrl(null), null);
});

// Fetched instances are listed alphabetically after the default label, and
// the configured instance domain is pre-selected.
test('initializeInstanceSelector populates options alphabetically and selects the configured domain', async () => {
  const env = createDomEnvironment();
  const select = setupSelectElement(env.document);
  const fetchCalls = [];
  const fetchImpl = async url => {
    fetchCalls.push(url);
    return {
      ok: true,
      async json() {
        return [
          { name: 'Zulu Mesh', domain: 'zulu.mesh' },
          { name: 'Alpha Mesh', domain: 'alpha.mesh' },
          { domain: 'beta.mesh' }
        ];
      }
    };
  };
  try {
    await initializeInstanceSelector({
      selectElement: select,
      fetchImpl,
      windowObject: env.window,
      documentObject: env.document,
      instanceDomain: 'beta.mesh',
      defaultLabel: 'Select region ...'
    });
    assert.equal(fetchCalls.length, 1);
    // Default label first, then instances sorted by display label; the
    // unnamed instance is labelled by its domain.
    assert.equal(select.options.length, 4);
    assert.equal(select.options[0].textContent, 'Select region ...');
    assert.equal(select.options[1].textContent, 'Alpha Mesh');
    assert.equal(select.options[2].textContent, 'beta.mesh');
    assert.equal(select.options[3].textContent, 'Zulu Mesh');
    assert.equal(select.options[select.selectedIndex].value, 'beta.mesh');
  } finally {
    env.cleanup();
  }
});

// Choosing an option triggers navigation to the instance's HTTPS URL via
// the injected `navigate` callback.
test('initializeInstanceSelector navigates to the chosen instance domain', async () => {
  const env = createDomEnvironment();
  const select = setupSelectElement(env.document);
  const fetchImpl = async () => ({
    ok: true,
    async json() {
      return [{ domain: 'mesh.example' }];
    }
  });
  let navigatedTo = null;
  const navigate = url => {
    navigatedTo = url;
  };
  try {
    await initializeInstanceSelector({
      selectElement: select,
      fetchImpl,
      windowObject: env.window,
      documentObject: env.document,
      navigate,
      defaultLabel: 'Select region ...'
    });
    assert.equal(select.options.length, 2);
    assert.equal(select.options[1].value, 'mesh.example');
    select.value = 'mesh.example';
    select.dispatchEvent({ type: 'change', target: select });
    assert.equal(navigatedTo, 'https://mesh.example');
  } finally {
    env.cleanup();
  }
});
@@ -1,5 +1,5 @@
/**
* Copyright (C) 2025 l5yth
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -0,0 +1,47 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { resolveAutoFitBoundsConfig, __testUtils } from '../map-auto-fit-settings.js';
const { MINIMUM_AUTO_FIT_RANGE_KM, AUTO_FIT_PADDING_FRACTION } = __testUtils;
// Without a distance limit the stock padding and minimum range apply.
test('resolveAutoFitBoundsConfig returns defaults without a distance limit', () => {
  const config = resolveAutoFitBoundsConfig({ hasDistanceLimit: false, maxDistanceKm: null });
  assert.equal(config.paddingFraction, AUTO_FIT_PADDING_FRACTION);
  assert.equal(config.minimumRangeKm, MINIMUM_AUTO_FIT_RANGE_KM);
});

// With a limit, the minimum range stays between the default floor and the
// limit radius.
test('resolveAutoFitBoundsConfig constrains minimum range by the limit radius', () => {
  const config = resolveAutoFitBoundsConfig({ hasDistanceLimit: true, maxDistanceKm: 2 });
  assert.equal(config.paddingFraction, AUTO_FIT_PADDING_FRACTION);
  assert.ok(config.minimumRangeKm >= MINIMUM_AUTO_FIT_RANGE_KM);
  assert.ok(config.minimumRangeKm <= 2);
});

// A limit below the default floor wins outright.
test('resolveAutoFitBoundsConfig respects small distance limits', () => {
  const config = resolveAutoFitBoundsConfig({ hasDistanceLimit: true, maxDistanceKm: 0.1 });
  assert.equal(config.paddingFraction, AUTO_FIT_PADDING_FRACTION);
  assert.equal(config.minimumRangeKm, 0.1);
});

// A negative limit is treated as invalid and falls back to the defaults.
test('resolveAutoFitBoundsConfig tolerates invalid input', () => {
  const config = resolveAutoFitBoundsConfig({ hasDistanceLimit: true, maxDistanceKm: -5 });
  assert.equal(config.paddingFraction, AUTO_FIT_PADDING_FRACTION);
  assert.equal(config.minimumRangeKm, MINIMUM_AUTO_FIT_RANGE_KM);
});
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2025 l5yth
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2025 l5yth
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -0,0 +1,41 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { MESSAGE_LIMIT, normaliseMessageLimit } from '../message-limit.js';
// Anything non-numeric, non-positive, or non-finite falls back to the cap.
test('normaliseMessageLimit defaults to the message limit for invalid input', () => {
  assert.equal(normaliseMessageLimit(undefined), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit(null), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit(''), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit('abc'), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit(-100), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit(0), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit(Number.POSITIVE_INFINITY), MESSAGE_LIMIT);
});

// Values above the cap are clamped down to it.
test('normaliseMessageLimit clamps numeric input to the upper bound', () => {
  assert.equal(normaliseMessageLimit(MESSAGE_LIMIT + 1), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit(MESSAGE_LIMIT * 2), MESSAGE_LIMIT);
});

// Valid values pass through; numeric strings parse and floats truncate.
test('normaliseMessageLimit accepts positive finite values', () => {
  assert.equal(normaliseMessageLimit(250), 250);
  assert.equal(normaliseMessageLimit('750'), 750);
  assert.equal(normaliseMessageLimit(42.9), 42);
});
@@ -0,0 +1,123 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { createMessageNodeHydrator } from '../message-node-hydrator.js';
/**
* Capture warning invocations produced during a test run.
*/
/**
 * Logger test double that records every warn() invocation so tests can
 * assert how many warnings were produced and with which arguments.
 */
class LoggerStub {
  constructor() {
    this.messages = [];
  }

  /**
   * Append the call's arguments as one entry for later inspection.
   *
   * @param {...*} args Warning arguments.
   * @returns {void}
   */
  warn(...args) {
    this.messages = [...this.messages, args];
  }
}
// Cached nodes are attached directly; fetchNodeById must never be invoked.
test('hydrate attaches cached nodes without performing lookups', async () => {
  const node = { node_id: '!abc', short_name: 'Node' };
  const nodesById = new Map([[node.node_id, node]]);
  const hydrator = createMessageNodeHydrator({
    fetchNodeById: async () => {
      throw new Error('fetch should not be called');
    },
    applyNodeFallback: () => {}
  });
  const messages = [{ node_id: '!abc', text: 'Hello' }];
  const result = await hydrator.hydrate(messages, nodesById);
  assert.equal(result.length, 1);
  assert.strictEqual(result[0].node, node);
  assert.equal(nodesById.size, 1);
});

// A missing node is fetched exactly once, cached, and shared by all
// messages referencing it (whether via from_id or node_id).
test('hydrate fetches missing nodes once and caches the result', async () => {
  let fetchCalls = 0;
  const fetchedNode = { node_id: '!fetch', short_name: 'Fetched' };
  const hydrator = createMessageNodeHydrator({
    fetchNodeById: async id => {
      fetchCalls += 1;
      assert.equal(id, '!fetch');
      return { ...fetchedNode };
    },
    applyNodeFallback: () => {}
  });
  const nodesById = new Map();
  const messages = [{ from_id: '!fetch', text: 'one' }, { node_id: '!fetch', text: 'two' }];
  const result = await hydrator.hydrate(messages, nodesById);
  assert.equal(fetchCalls, 1);
  assert.strictEqual(nodesById.get('!fetch').short_name, 'Fetched');
  assert.strictEqual(result[0].node, nodesById.get('!fetch'));
  assert.strictEqual(result[1].node, nodesById.get('!fetch'));
});

// A null lookup result yields a placeholder built via applyNodeFallback,
// without polluting the cache or logging a warning.
test('hydrate falls back to placeholders when lookups fail', async () => {
  const logger = new LoggerStub();
  let fallbackCalls = 0;
  const hydrator = createMessageNodeHydrator({
    fetchNodeById: async () => null,
    applyNodeFallback: node => {
      fallbackCalls += 1;
      if (!node.short_name) {
        node.short_name = 'Fallback';
      }
    },
    logger
  });
  const nodesById = new Map();
  const messages = [{ from_id: '!missing', text: 'hi' }];
  const result = await hydrator.hydrate(messages, nodesById);
  assert.equal(nodesById.has('!missing'), false);
  assert.equal(fallbackCalls, 1);
  assert.ok(result[0].node);
  assert.equal(result[0].node.short_name, 'Fallback');
  assert.equal(logger.messages.length, 0);
});

// A rejected fetch still produces a usable placeholder node but records a
// warning and leaves the cache untouched.
test('hydrate records warning when fetch rejects', async () => {
  const logger = new LoggerStub();
  const hydrator = createMessageNodeHydrator({
    fetchNodeById: async () => {
      throw new Error('network error');
    },
    applyNodeFallback: () => {},
    logger
  });
  const nodesById = new Map();
  const messages = [{ from_id: '!warn', text: 'warn' }];
  const result = await hydrator.hydrate(messages, nodesById);
  assert.equal(result[0].node.node_id, '!warn');
  assert.ok(logger.messages.length >= 1);
  assert.equal(nodesById.has('!warn'), false);
});
@@ -0,0 +1,104 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import {
buildMessageBody,
buildMessageIndex,
normaliseMessageId,
resolveReplyPrefix
} from '../message-replies.js';
// Numeric identifiers coerce to canonical strings (leading zeros and
// whitespace dropped); non-numeric strings pass through; null stays null.
test('normaliseMessageId coerces numeric identifiers', () => {
  assert.equal(normaliseMessageId(42), '42');
  assert.equal(normaliseMessageId(' 0042 '), '42');
  assert.equal(normaliseMessageId('alpha'), 'alpha');
  assert.equal(normaliseMessageId(null), null);
});

// '001' and packet_id 1 normalise to the same key; the first entry wins.
test('buildMessageIndex normalises identifiers and ignores duplicates', () => {
  const messages = [
    { id: '001', text: 'first' },
    { packet_id: 1, text: 'second' },
    { id: '2', text: 'third' }
  ];
  const index = buildMessageIndex(messages);
  assert.equal(index.size, 2);
  assert.equal(index.get('1'), messages[0]);
  assert.equal(index.get('2'), messages[2]);
});

// The reply prefix renders the parent's short-name badge through the
// injected escape/render hooks; bodies join escaped text with rendered
// (trimmed) emoji.
test('resolveReplyPrefix renders reply badge and buildMessageBody joins emoji', () => {
  const parent = {
    id: 99,
    node: { short_name: 'BEEF', long_name: 'Parent Node', role: 'CLIENT' },
    text: 'parent message'
  };
  const reaction = { id: 100, reply_id: 99, emoji: '🔥' };
  const index = buildMessageIndex([parent, reaction]);
  const prefix = resolveReplyPrefix({
    message: reaction,
    messagesById: index,
    nodesById: new Map(),
    renderShortHtml: (short, role, longName) => `SHORT(${short}|${role}|${longName})`,
    escapeHtml: value => `ESC(${value})`
  });
  assert.equal(
    prefix,
    '<span class="chat-entry-reply">[ESC(in reply to) SHORT(BEEF|CLIENT|Parent Node)]</span>'
  );
  const body = buildMessageBody({
    message: { text: 'Hello', emoji: ' 🔥 ' },
    escapeHtml: value => `ESC(${value})`,
    renderEmojiHtml: value => `EMOJI(${value})`
  });
  assert.equal(body, 'ESC(Hello) EMOJI(🔥)');
});

// Reactions: a text of '1' is treated as a slot marker and suppressed,
// while higher counts render as an escaped ×N multiplier before the emoji.
test('buildMessageBody suppresses reaction slot markers and formats counts', () => {
  const reaction = {
    text: ' 1 ',
    emoji: '👍',
    portnum: 'REACTION_APP',
    reply_id: 123,
  };
  const body = buildMessageBody({
    message: reaction,
    escapeHtml: value => `ESC(${value})`,
    renderEmojiHtml: value => `EMOJI(${value})`
  });
  assert.equal(body, 'EMOJI(👍)');
  const countedReaction = {
    text: '2',
    emoji: '✨',
    reply_id: 123
  };
  const countedBody = buildMessageBody({
    message: countedReaction,
    escapeHtml: value => `ESC(${value})`,
    renderEmojiHtml: value => `EMOJI(${value})`
  });
  assert.equal(countedBody, 'ESC(×2) EMOJI(✨)');
});
@@ -0,0 +1,134 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { createNodeDetailOverlayManager } from '../node-detail-overlay.js';
/**
 * Assemble a self-contained DOM double for the node-detail overlay: the
 * overlay element with its dialog/close/content children, a body whose
 * style supports scroll locking, and a document that routes id lookups and
 * keydown events to them.
 *
 * @returns {{document: Object, overlay: Object, content: Object,
 *            closeButton: Object}} Handles used by the overlay tests.
 */
function createOverlayHarness() {
  const overlayListeners = new Map();
  const documentListeners = new Map();
  const content = { innerHTML: '' };

  const closeButton = {
    listeners: new Map(),
    focusCalled: false,
    addEventListener(event, handler) {
      this.listeners.set(event, handler);
    },
    // Simulate a user click by invoking the registered click handler.
    click() {
      this.listeners.get('click')?.({ preventDefault() {} });
    },
    focus() {
      this.focusCalled = true;
    },
  };

  const dialog = {
    focusCalled: false,
    focus() {
      this.focusCalled = true;
    },
  };

  // Route the overlay's known selectors to the harness children.
  const selectorMap = new Map([
    ['.node-detail-overlay__dialog', dialog],
    ['.node-detail-overlay__close', closeButton],
    ['.node-detail-overlay__content', content],
  ]);

  const overlay = {
    hidden: true,
    style: {},
    addEventListener(event, handler) {
      overlayListeners.set(event, handler);
    },
    // Fire a previously registered overlay listener with a payload.
    trigger(event, payload) {
      overlayListeners.get(event)?.(payload);
    },
    querySelector(selector) {
      return selectorMap.get(selector) ?? null;
    },
  };

  const body = {
    style: {
      overflow: '',
      removeProperty(prop) {
        this[prop] = '';
      },
    },
  };

  const document = {
    body,
    getElementById(id) {
      return id === 'nodeDetailOverlay' ? overlay : null;
    },
    addEventListener(event, handler) {
      documentListeners.set(event, handler);
    },
    removeEventListener(event) {
      documentListeners.delete(event);
    },
    // Deliver a keydown with the given key to the registered handler.
    triggerKeydown(key) {
      documentListeners.get('keydown')?.({ key, preventDefault() {} });
    },
  };

  return { document, overlay, content, closeButton };
}
// Successful open renders the fetched markup, shows the overlay, and moves
// focus to the close button; close hides it and restores focus to the
// original trigger element.
test('createNodeDetailOverlayManager renders fetched markup and restores focus', async () => {
  const { document, overlay, content, closeButton } = createOverlayHarness();
  const focusTarget = {
    focusCalled: false,
    focus() {
      this.focusCalled = true;
    },
  };
  const manager = createNodeDetailOverlayManager({
    document,
    fetchNodeDetail: async reference => `<section class="node-detail">${reference.nodeId}</section>`,
  });
  assert.ok(manager);
  await manager.open({ nodeId: '!alpha' }, { trigger: focusTarget, label: 'Alpha' });
  assert.equal(overlay.hidden, false);
  assert.equal(content.innerHTML.includes('!alpha'), true);
  assert.equal(closeButton.focusCalled, true);
  manager.close();
  assert.equal(overlay.hidden, true);
  assert.equal(focusTarget.focusCalled, true);
});

// A failing fetch renders an error message, routes the error to the
// injected logger, and the Escape key closes the overlay.
test('createNodeDetailOverlayManager surfaces errors and supports escape closing', async () => {
  const { document, overlay, content } = createOverlayHarness();
  const errors = [];
  const manager = createNodeDetailOverlayManager({
    document,
    fetchNodeDetail: async () => {
      throw new Error('boom');
    },
    logger: {
      error(err) {
        errors.push(err);
      },
    },
  });
  assert.ok(manager);
  await manager.open({ nodeId: '!fail' });
  assert.equal(content.innerHTML.includes('Failed to load node details.'), true);
  assert.equal(errors.length, 1);
  document.triggerKeydown?.('Escape');
  assert.equal(overlay.hidden, true);
});
@@ -1,5 +1,5 @@
/*
* Copyright (C) 2025 l5yth
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -26,6 +26,7 @@ const {
extractNumber,
assignString,
assignNumber,
mergeModemMetadata,
mergeNodeFields,
mergeTelemetry,
mergePosition,
@@ -44,20 +45,22 @@ function createResponse(status, body) {
test('refreshNodeInformation merges telemetry metrics when the base node lacks them', async () => {
const calls = [];
const responses = new Map([
['/api/nodes/!test', createResponse(200, {
['/api/nodes/!test?limit=7', createResponse(200, {
node_id: '!test',
short_name: 'TST',
battery_level: null,
last_heard: 1_000,
modem_preset: 'MediumFast',
lora_freq: '868.1',
})],
['/api/telemetry/!test?limit=1', createResponse(200, [{
['/api/telemetry/!test?limit=1000', createResponse(200, [{
node_id: '!test',
battery_level: 73.5,
rx_time: 1_200,
telemetry_time: 1_180,
voltage: 4.1,
}])],
['/api/positions/!test?limit=1', createResponse(200, [{
['/api/positions/!test?limit=7', createResponse(200, [{
node_id: '!test',
latitude: 52.5,
longitude: 13.4,
@@ -87,6 +90,8 @@ test('refreshNodeInformation merges telemetry metrics when the base node lacks t
assert.equal(node.battery, 73.5);
assert.equal(node.voltage, 4.1);
assert.equal(node.role, 'CLIENT');
assert.equal(node.modemPreset, 'MediumFast');
assert.equal(node.loraFreq, 868.1);
assert.equal(node.lastHeard, 1_200);
assert.equal(node.telemetryTime, 1_180);
assert.equal(node.latitude, 52.5);
@@ -108,14 +113,38 @@ test('refreshNodeInformation merges telemetry metrics when the base node lacks t
});
});
test('refreshNodeInformation normalizes telemetry aliases for downstream consumers', async () => {
const responses = new Map([
['/api/nodes/!chan?limit=7', createResponse(404, { error: 'not found' })],
['/api/telemetry/!chan?limit=1000', createResponse(200, [{
node_id: '!chan',
channel: '76.5',
air_util_tx: '12.25',
}])],
['/api/positions/!chan?limit=7', createResponse(404, { error: 'not found' })],
['/api/neighbors/!chan?limit=1000', createResponse(404, { error: 'not found' })],
]);
const fetchImpl = async url => responses.get(url) ?? createResponse(404, { error: 'not found' });
const node = await refreshNodeInformation('!chan', { fetchImpl });
assert.equal(node.nodeId, '!chan');
assert.equal(node.channel_utilization, 76.5);
assert.equal(node.channelUtilization, 76.5);
assert.equal(node.channel, 76.5);
assert.equal(node.air_util_tx, 12.25);
assert.equal(node.airUtil, 12.25);
assert.equal(node.airUtilTx, 12.25);
});
test('refreshNodeInformation preserves fallback metrics when telemetry is unavailable', async () => {
const responses = new Map([
['/api/nodes/42', createResponse(200, {
['/api/nodes/42?limit=7', createResponse(200, {
node_id: '!num',
short_name: 'NUM',
})],
['/api/telemetry/42?limit=1', createResponse(404, { error: 'not found' })],
['/api/positions/42?limit=1', createResponse(404, { error: 'not found' })],
['/api/telemetry/42?limit=1000', createResponse(404, { error: 'not found' })],
['/api/positions/42?limit=7', createResponse(404, { error: 'not found' })],
['/api/neighbors/42?limit=1000', createResponse(404, { error: 'not found' })],
]);
const fetchImpl = async (url, options) => {
@@ -123,7 +152,7 @@ test('refreshNodeInformation preserves fallback metrics when telemetry is unavai
return response ?? createResponse(404, { error: 'not found' });
};
const fallback = { nodeNum: 42, battery: 12.5, role: 'CLIENT' };
const fallback = { nodeNum: 42, battery: 12.5, role: 'CLIENT', modemPreset: 'FallbackPreset', loraFreq: 915 };
const node = await refreshNodeInformation({ nodeNum: 42, fallback }, { fetchImpl });
assert.equal(node.nodeId, '!num');
@@ -131,6 +160,8 @@ test('refreshNodeInformation preserves fallback metrics when telemetry is unavai
assert.equal(node.shortName, 'NUM');
assert.equal(node.battery, 12.5);
assert.equal(node.role, 'CLIENT');
assert.equal(node.modemPreset, 'FallbackPreset');
assert.equal(node.loraFreq, 915);
assert.equal(Array.isArray(node.neighbors) && node.neighbors.length, 0);
});
@@ -140,15 +171,15 @@ test('refreshNodeInformation requires a node identifier', async () => {
test('refreshNodeInformation handles missing node records by falling back to telemetry data', async () => {
const responses = new Map([
['/api/nodes/!missing', createResponse(404, { error: 'not found' })],
['/api/telemetry/!missing?limit=1', createResponse(200, [{
['/api/nodes/!missing?limit=7', createResponse(404, { error: 'not found' })],
['/api/telemetry/!missing?limit=1000', createResponse(200, [{
node_id: '!missing',
node_num: 77,
battery_level: 66,
rx_time: 2_000,
telemetry_time: 1_950,
}])],
['/api/positions/!missing?limit=1', createResponse(200, [{
['/api/positions/!missing?limit=7', createResponse(200, [{
node_id: '!missing',
latitude: 1.23,
longitude: 3.21,
@@ -196,6 +227,21 @@ test('refreshNodeInformation enforces a fetch implementation', async () => {
}
});
// preferExisting controls whether a later merge may overwrite metadata an
// earlier merge already populated; frequencies coerce from strings.
test('mergeModemMetadata respects preference flags', () => {
  const target = {};
  mergeModemMetadata(target, { modem_preset: 'Base', lora_freq: '915.5' });
  assert.equal(target.modemPreset, 'Base');
  assert.equal(target.loraFreq, 915.5);
  // preferExisting: true keeps the previously merged values.
  mergeModemMetadata(target, { modem_preset: 'New', lora_freq: '433' }, { preferExisting: true });
  assert.equal(target.modemPreset, 'Base');
  assert.equal(target.loraFreq, 915.5);
  // preferExisting: false lets the incoming payload overwrite them.
  mergeModemMetadata(target, { modem_preset: 'Updated', lora_freq: '433' }, { preferExisting: false });
  assert.equal(target.modemPreset, 'Updated');
  assert.equal(target.loraFreq, 433);
});
test('helper utilities normalise primitive values', () => {
assert.equal(toTrimmedString(' hello '), 'hello');
assert.equal(toTrimmedString(''), null);
@@ -0,0 +1,65 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { describe, it } from 'node:test';
import assert from 'node:assert/strict';
import { extractModemMetadata, formatLoraFrequencyMHz, formatModemDisplay, __testUtils } from '../node-modem-metadata.js';
describe('node-modem-metadata', () => {
  // snake_case payload keys are trimmed/coerced into camelCase metadata.
  it('extracts modem preset and frequency from mixed payloads', () => {
    const payload = {
      modem_preset: ' MediumFast ',
      lora_freq: '915',
    };
    assert.deepEqual(extractModemMetadata(payload), { modemPreset: 'MediumFast', loraFreq: 915 });
  });

  // camelCase and generic `frequency` aliases are honoured as fallbacks.
  it('falls back across naming conventions when extracting metadata', () => {
    const payload = {
      modemPreset: 'LongSlow',
      frequency: 868,
    };
    assert.deepEqual(extractModemMetadata(payload), { modemPreset: 'LongSlow', loraFreq: 868 });
  });

  // Blank presets and non-numeric frequencies collapse to null.
  it('ignores invalid modem metadata entries', () => {
    assert.deepEqual(extractModemMetadata({ modem_preset: ' ', lora_freq: 'NaN' }), {
      modemPreset: null,
      loraFreq: null,
    });
  });

  // Frequencies get an MHz suffix with at most three decimal places.
  it('formats positive frequencies with MHz suffix', () => {
    assert.equal(formatLoraFrequencyMHz(915), '915MHz');
    assert.equal(formatLoraFrequencyMHz(867.5), '867.5MHz');
    assert.equal(formatLoraFrequencyMHz('433.1234'), '433.123MHz');
    assert.equal(formatLoraFrequencyMHz(null), null);
  });

  // The display string shows whichever parts exist, or null for neither.
  it('combines preset and frequency for overlay display', () => {
    assert.equal(formatModemDisplay('MediumFast', 868), 'MediumFast (868MHz)');
    assert.equal(formatModemDisplay('ShortSlow', null), 'ShortSlow');
    assert.equal(formatModemDisplay(null, 433), '433MHz');
    assert.equal(formatModemDisplay(undefined, undefined), null);
  });

  // toTrimmedString trims non-empty strings and maps empty/null to null.
  it('exposes trimmed string helper for targeted assertions', () => {
    const { toTrimmedString } = __testUtils;
    assert.equal(toTrimmedString(' hello '), 'hello');
    assert.equal(toTrimmedString(''), null);
    assert.equal(toTrimmedString(null), null);
  });
});
@@ -0,0 +1,767 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { initializeNodeDetailPage, fetchNodeDetailHtml, __testUtils } from '../node-page.js';
// Internal helpers re-exported by node-page.js solely for unit testing;
// destructured once here so every test below can reference them directly.
const {
stringOrNull,
numberOrNull,
formatFrequency,
formatBattery,
formatVoltage,
formatUptime,
formatTimestamp,
formatMessageTimestamp,
formatHardwareModel,
formatCoordinate,
formatRelativeSeconds,
formatDurationSeconds,
formatSnr,
padTwo,
normalizeNodeId,
registerRoleCandidate,
lookupRole,
lookupNeighborDetails,
seedNeighborRoleIndex,
buildNeighborRoleIndex,
categoriseNeighbors,
renderNeighborGroups,
renderSingleNodeTable,
renderTelemetryCharts,
renderMessages,
renderTraceroutes,
renderTracePath,
extractTracePath,
normalizeTraceNodeRef,
renderNodeDetailHtml,
parseReferencePayload,
resolveRenderShortHtml,
fetchMessages,
fetchTracesForNode,
} = __testUtils;
// Smoke-tests the scalar formatting helpers used across the node detail page.
test('format helpers normalise values as expected', () => {
  assert.equal(stringOrNull(' foo '), 'foo');
  assert.equal(stringOrNull(''), null);
  assert.equal(numberOrNull('42'), 42);
  assert.equal(numberOrNull('abc'), null);
  assert.equal(formatFrequency(915), '915.000 MHz');
  assert.equal(formatFrequency('2400000'), '2.400 MHz');
  assert.equal(formatFrequency('custom'), 'custom');
  assert.equal(formatBattery(87.135), '87.1%');
  assert.equal(formatVoltage(4.105), '4.11 V');
  assert.equal(formatUptime(3661), '1h 1m 1s');
  assert.match(formatTimestamp(1_700_000_000), /T/);
  assert.equal(padTwo(3), '03');
  assert.equal(normalizeNodeId('!NODE'), '!node');
  // Message timestamps are rendered without seconds: YYYY-MM-DD HH:MM.
  const stamp = formatMessageTimestamp(1_700_000_000);
  assert.match(stamp, /^\d{4}-\d{2}-\d{2} \d{2}:\d{2}$/);
});
// Covers the neighbour-role index: candidate registration, case-insensitive
// lookups by identifier/numeric id, and seeding from raw neighbour records.
test('role lookup helpers normalise identifiers and register candidates', () => {
  const index = { byId: new Map(), byNum: new Map() };
  registerRoleCandidate(index, {
    identifier: '!NODE',
    numericId: 77,
    role: 'ROUTER',
    shortName: 'NODE',
    longName: 'Node Long',
  });
  // Identifiers are lower-cased on registration and lookup.
  assert.equal(index.byId.get('!node'), 'ROUTER');
  assert.equal(index.byNum.get(77), 'ROUTER');
  assert.equal(lookupRole(index, { identifier: '!node' }), 'ROUTER');
  assert.equal(lookupRole(index, { identifier: '!NODE' }), 'ROUTER');
  assert.equal(lookupRole(index, { numericId: 77 }), 'ROUTER');
  assert.equal(lookupRole(index, { identifier: '!missing' }), null);
  const details = lookupNeighborDetails(index, { identifier: '!node', numericId: 77 });
  assert.deepEqual(details, { role: 'ROUTER', shortName: 'NODE', longName: 'Node Long' });
});

test('seedNeighborRoleIndex captures known roles and missing identifiers', () => {
  const index = { byId: new Map(), byNum: new Map() };
  const missing = seedNeighborRoleIndex(index, [
    { neighbor_id: '!ALLY', neighbor_role: 'CLIENT', neighbor_short_name: 'ALLY' },
    { node_id: '!self', node_role: 'ROUTER' },
    { neighbor_id: '!unknown' },
  ]);
  assert.equal(index.byId.get('!ally'), 'CLIENT');
  assert.equal(index.byId.get('!self'), 'ROUTER');
  // Records without a role end up in the "missing" set for later API lookup.
  assert.equal(missing.has('!unknown'), true);
  assert.equal(lookupNeighborDetails(index, { identifier: '!ally' }).shortName, 'ALLY');
});
// Exercises the remaining table formatters plus the message-list renderer,
// including channel/frequency/preset badges and role-aware short names.
test('additional format helpers provide table friendly output', () => {
  assert.equal(formatHardwareModel('UNSET'), '');
  assert.equal(formatHardwareModel('T-Beam'), 'T-Beam');
  assert.equal(formatCoordinate(52.123456), '52.12346');
  assert.equal(formatCoordinate(null), '');
  assert.equal(formatRelativeSeconds(1_000, 1_060), '1m');
  assert.equal(formatRelativeSeconds(1_000, 1_120), '2m');
  assert.equal(formatRelativeSeconds(1_000, 1_000 + 3_700), '1h 1m');
  assert.equal(formatRelativeSeconds(1_000, 1_000 + 90_000).startsWith('1d'), true);
  assert.equal(formatDurationSeconds(59), '59s');
  assert.equal(formatDurationSeconds(61), '1m 1s');
  assert.equal(formatDurationSeconds(3_661), '1h 1m');
  assert.equal(formatDurationSeconds(172_800), '2d');
  assert.equal(formatSnr(12.345), '12.3 dB');
  assert.equal(formatSnr(null), '');

  const shortBadge = (short, role) => `<span class="short-name" data-role="${role}">${short}</span>`;
  const nodeContext = {
    shortName: 'NODE',
    longName: 'Node Long',
    role: 'CLIENT',
    nodeId: '!node',
    nodeNum: 77,
    rawSources: { node: { node_id: '!node', role: 'CLIENT', short_name: 'NODE' } },
  };
  const messagesHtml = renderMessages(
    [
      {
        text: 'hello',
        rx_time: 1_700_000_400,
        region_frequency: 868,
        modem_preset: 'MediumFast',
        channel_name: 'Primary',
        node: { short_name: 'SRCE', role: 'ROUTER', node_id: '!src' },
      },
      { emoji: '😊', rx_time: 1_700_000_401 },
    ],
    shortBadge,
    nodeContext,
  );
  // Every badge/fragment below must appear in the rendered output.
  for (const fragment of [
    'hello',
    '😊',
    '[868]',
    '[MF]',
    '[Primary]',
    'data-role="ROUTER"',
    '&nbsp;&nbsp;&nbsp;',
    '&nbsp;&nbsp;',
    'data-role="CLIENT"',
  ]) {
    assert.ok(messagesHtml.includes(fragment), `missing fragment: ${fragment}`);
  }
  assert.match(messagesHtml, /\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}\]\[868\]/);
  // Message text must not be glued to the badge row with a comma.
  assert.equal(messagesHtml.includes(', hello'), false);
});
// Neighbour records are split by direction (who hears us vs. whom we hear)
// and rendered with role badges and formatted SNR values.
test('categoriseNeighbors splits inbound and outbound records', () => {
  const selfNode = { nodeId: '!self', nodeNum: 42 };
  const records = [
    { node_id: '!self', neighbor_id: '!ally-one' },
    { node_id: '!peer', neighbor_id: '!SELF' },
    { node_num: 42, neighbor_id: '!ally-two' },
    { node_id: '!friend', neighbor_num: 42 },
    null,
  ];
  const { heardBy, weHear } = categoriseNeighbors(selfNode, records);
  assert.equal(heardBy.length, 2);
  assert.equal(weHear.length, 2);
});

test('renderNeighborGroups renders grouped neighbour lists', () => {
  const selfNode = { nodeId: '!self', nodeNum: 77 };
  const records = [
    {
      node_id: '!peer',
      node_short_name: 'PEER',
      neighbor_id: '!self',
      snr: 9.5,
      node: { short_name: 'PEER', role: 'ROUTER' },
    },
    {
      node_id: '!self',
      neighbor_id: '!ally',
      neighbor_short_name: 'ALLY',
      snr: 5.25,
      neighbor: { short_name: 'ALLY', role: 'REPEATER' },
    },
  ];
  const html = renderNeighborGroups(
    selfNode,
    records,
    (short, role) => `<span class="badge" data-role="${role}">${short}</span>`,
  );
  for (const fragment of [
    'Neighbors',
    'Heard by',
    'We hear',
    'PEER',
    'ALLY',
    '9.5 dB',
    '5.3 dB', // 5.25 is rounded to one decimal place
    'data-role="ROUTER"',
    'data-role="REPEATER"',
  ]) {
    assert.ok(html.includes(fragment), `missing fragment: ${fragment}`);
  }
});
// Missing neighbour roles are resolved by fetching node metadata from the API.
test('buildNeighborRoleIndex fetches missing neighbor metadata from the API', async () => {
  const requestedUrls = [];
  const fetchImpl = async url => {
    requestedUrls.push(url);
    return {
      status: 200,
      ok: true,
      json: async () => ({ node_id: '!ally', role: 'ROUTER', node_num: 99, short_name: 'ALLY-API' }),
    };
  };
  const neighbors = [{ neighbor_id: '!ally', neighbor_short_name: 'ALLY' }];
  const index = await buildNeighborRoleIndex({ nodeId: '!self', role: 'CLIENT' }, neighbors, { fetchImpl });
  // The node's own role is seeded, the neighbour's comes from the API payload.
  assert.equal(index.byId.get('!self'), 'CLIENT');
  assert.equal(index.byId.get('!ally'), 'ROUTER');
  assert.equal(index.byNum.get(99), 'ROUTER');
  assert.equal(requestedUrls.some(url => url.startsWith('/api/nodes/')), true);
  const allyDetails = lookupNeighborDetails(index, { identifier: '!ally', numericId: 99 });
  assert.equal(allyDetails.shortName, 'ALLY-API');
});
// The condensed node table must format battery, utilisation, coordinates and
// relative timestamps, and link the long name to the node detail page.
test('renderSingleNodeTable renders a condensed table for the node', () => {
  const node = {
    shortName: 'NODE',
    longName: 'Example Node',
    nodeId: '!abcd',
    role: 'CLIENT',
    hwModel: 'T-Beam',
    battery: 66,
    voltage: 4.12,
    uptime: 3_700,
    channel_utilization: 1.23,
    airUtil: 0.45,
    temperature: 22.5,
    humidity: 55.5,
    pressure: 1_013.2,
    latitude: 52.52,
    longitude: 13.405,
    altitude: 40,
    lastHeard: 9_900,
    positionTime: 9_850,
    rawSources: { node: { node_id: '!abcd', role: 'CLIENT' } },
  };
  const html = renderSingleNodeTable(
    node,
    (short, role) => `<span class="short-name" data-role="${role}">${short}</span>`,
    10_000, // "now" in seconds; lastHeard/positionTime are relative to this
  );
  assert.ok(html.includes('<table'));
  assert.match(html, /<a class="node-long-link" href="\/nodes\/!abcd" data-node-detail-link="true" data-node-id="!abcd">Example Node<\/a>/);
  for (const fragment of ['66.0%', '1.230%', '52.52000', '1m 40s', '2m 30s']) {
    assert.ok(html.includes(fragment), `missing fragment: ${fragment}`);
  }
});
// Telemetry snapshots arrive in both snake_case and camelCase forms; both must
// surface as points in the condensed power/environment scatter charts.
test('renderTelemetryCharts renders condensed scatter charts when telemetry exists', () => {
  const nowMs = Date.UTC(2025, 0, 8, 12, 0, 0);
  const nowSeconds = Math.floor(nowMs / 1000);
  const node = {
    rawSources: {
      telemetry: {
        snapshots: [
          {
            rx_time: nowSeconds - 60,
            device_metrics: {
              battery_level: 80,
              voltage: 4.1,
              channel_utilization: 40,
              air_util_tx: 22,
            },
            environment_metrics: {
              temperature: 19.5,
              relative_humidity: 55,
              barometric_pressure: 995,
              gas_resistance: 1500,
            },
          },
          {
            rx_time: nowSeconds - 3_600,
            deviceMetrics: {
              batteryLevel: 78,
              voltage: 4.05,
              channelUtilization: 35,
              airUtilTx: 20,
            },
            environmentMetrics: {
              temperature: 18.4,
              relativeHumidity: 52,
              barometricPressure: 1000,
              gasResistance: 2000,
            },
          },
        ],
      },
    },
  };
  const html = renderTelemetryCharts(node, { nowMs });
  // Axis labels use the local date of "now"; recompute it the same way.
  const expectedDate = String(new Date(nowMs).getDate()).padStart(2, '0');
  for (const fragment of [
    'node-detail__charts',
    'Power metrics',
    'Environmental telemetry',
    'Battery (0-100%)',
    'Voltage (0-6V)',
    'Channel utilization (%)',
    'Air util TX (%)',
    'Utilization (%)',
    'Gas resistance (10-100k Ω)',
    'Temperature (-20-40°C)',
    expectedDate,
    'node-detail__chart-point',
  ]) {
    assert.ok(html.includes(fragment), `missing fragment: ${fragment}`);
  }
});
// Full-page composition: table + neighbour groups + messages + traceroutes.
test('renderNodeDetailHtml composes the table, neighbors, and messages', () => {
  const html = renderNodeDetailHtml(
    {
      shortName: 'NODE',
      longName: 'Example Node',
      nodeId: '!abcd',
      nodeNum: 77,
      role: 'CLIENT',
      battery: 60,
      voltage: 4.1,
      uptime: 1_000,
      latitude: 52.5,
      longitude: 13.4,
      altitude: 40,
    },
    {
      neighbors: [
        { node_id: '!peer', node_short_name: 'PEER', neighbor_id: '!abcd', snr: 7.5 },
        { node_id: '!abcd', neighbor_id: '!ally', neighbor_short_name: 'ALLY', snr: 5.1 },
      ],
      messages: [{ text: 'Hello', rx_time: 1_700_000_111 }],
      traces: [{ src: '!abcd', hops: ['!beef'], dest: '!ally' }],
      renderShortHtml: (short, role) => `<span class="short-name" data-role="${role}">${short}</span>`,
    },
  );
  for (const fragment of [
    'node-detail__table',
    'Neighbors',
    'Heard by',
    'We hear',
    'Messages',
    'PEER',
    'ALLY',
    'Traceroutes',
    'data-role="CLIENT"',
  ]) {
    assert.ok(html.includes(fragment), `missing fragment: ${fragment}`);
  }
  assert.match(html, /<a class="node-long-link" href="\/nodes\/!abcd" data-node-detail-link="true" data-node-id="!abcd">Example Node<\/a>/);
  assert.match(html, /&rarr;/); // traceroute hops are joined with arrows
  assert.match(html, /\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}\]\[/);
});

test('renderNodeDetailHtml embeds telemetry charts when snapshots are present', () => {
  const nowMs = Date.UTC(2025, 0, 8, 7, 0, 0);
  const node = {
    shortName: 'NODE',
    nodeId: '!abcd',
    role: 'CLIENT',
    rawSources: {
      node: { node_id: '!abcd', role: 'CLIENT', short_name: 'NODE' },
      telemetry: {
        snapshots: [
          {
            rx_time: Math.floor(nowMs / 1000) - 120,
            battery_level: 75,
            voltage: 4.08,
            channel_utilization: 30,
            temperature: 20,
            relative_humidity: 45,
            barometric_pressure: 990,
            gas_resistance: 1800,
          },
        ],
      },
    },
  };
  const html = renderNodeDetailHtml(node, {
    renderShortHtml: short => `<span class="short-name">${short}</span>`,
    chartNowMs: nowMs,
  });
  assert.ok(html.includes('node-detail__charts'));
  assert.ok(html.includes('Power metrics'));
});
// The overlay entry point must fetch messages + traces and render the layout.
test('fetchNodeDetailHtml renders the node layout for overlays', async () => {
  const reference = { nodeId: '!alpha' };
  const requestedUrls = [];
  const fetchImpl = async url => {
    requestedUrls.push(url);
    if (url.startsWith('/api/messages/')) {
      return {
        ok: true,
        status: 200,
        async json() {
          return [{ text: 'Overlay hello', rx_time: 1_700_000_000 }];
        },
      };
    }
    if (url.startsWith('/api/traces/')) {
      return {
        ok: true,
        status: 200,
        async json() {
          return [{ src: '!alpha', dest: '!bravo', hops: [] }];
        },
      };
    }
    return { ok: false, status: 404, async json() { return []; } };
  };
  const refreshImpl = async () => ({
    nodeId: '!alpha',
    nodeNum: 1,
    shortName: 'ALPH',
    longName: 'Example Alpha',
    role: 'CLIENT',
    neighbors: [],
    rawSources: { node: { node_id: '!alpha', role: 'CLIENT', short_name: 'ALPH' } },
  });
  const html = await fetchNodeDetailHtml(reference, {
    refreshImpl,
    fetchImpl,
    renderShortHtml: short => `<span class="short-name">${short}</span>`,
  });
  assert.ok(requestedUrls.some(url => url.includes('/api/messages/!alpha')));
  assert.ok(requestedUrls.some(url => url.includes('/api/traces/!alpha')));
  for (const fragment of ['Example Alpha', 'Overlay hello', 'Traceroutes', 'node-detail__table']) {
    assert.ok(html.includes(fragment), `missing fragment: ${fragment}`);
  }
});

test('fetchNodeDetailHtml requires a node identifier reference', async () => {
  await assert.rejects(
    () => fetchNodeDetailHtml({}, { refreshImpl: async () => ({}) }),
    /identifier/i,
  );
});
// Malformed JSON payloads are swallowed and reported as null rather than thrown.
test('parseReferencePayload returns null for invalid JSON', () => {
  assert.equal(parseReferencePayload('{'), null);
  assert.deepEqual(parseReferencePayload('{"nodeId":"!abc"}'), { nodeId: '!abc' });
});
test('resolveRenderShortHtml prefers global implementation when available', async () => {
const original = globalThis.PotatoMesh;
try {
globalThis.PotatoMesh = { renderShortHtml: () => '<span>ok</span>' };
const fn = await resolveRenderShortHtml();
assert.equal(fn('X'), '<span>ok</span>');
} finally {
globalThis.PotatoMesh = original;
}
});
test('resolveRenderShortHtml falls back when no implementation is exposed', async () => {
const original = globalThis.PotatoMesh;
try {
delete globalThis.PotatoMesh;
const fn = await resolveRenderShortHtml();
assert.equal(typeof fn, 'function');
assert.equal(fn('AB'), '<span class="short-name">AB</span>');
} finally {
globalThis.PotatoMesh = original;
}
});
// fetchMessages must pass cache:'no-store' by default and tolerate 404s.
test('fetchMessages handles HTTP responses and uses defaults', async () => {
  const recorded = [];
  const fetchImpl = async (url, options) => {
    recorded.push({ url, options });
    return {
      status: 200,
      ok: true,
      json: async () => [{ text: 'hi', rx_time: 1 }],
    };
  };
  const messages = await fetchMessages('!node', { fetchImpl });
  assert.equal(messages.length, 1);
  assert.equal(recorded[0].options.cache, 'no-store');
});

test('fetchMessages returns an empty list when the endpoint is missing', async () => {
  const fetchImpl = async () => ({ status: 404, ok: false, json: async () => ({}) });
  assert.deepEqual(await fetchMessages('!node', { fetchImpl }), []);
});
// Traceroute helpers: reference canonicalisation, path extraction, arrowed
// rendering, and the fetch wrapper for /api/traces.
test('normalizeTraceNodeRef canonicalizes references and renderTracePath builds arrowed output', () => {
  // Numeric ids become zero-padded hex identifiers.
  assert.deepEqual(normalizeTraceNodeRef(1234), { identifier: '!000004d2', numericId: 1234 });
  const roleIndex = {
    byId: new Map([['!000004d2', 'CLIENT']]),
    byNum: new Map(),
    detailsById: new Map([['!000004d2', { shortName: 'NODE', role: 'ROUTER' }]]),
    detailsByNum: new Map(),
  };
  const path = extractTracePath({ src: 1234, hops: [0xbeef], dest: '!ally' });
  const html = renderTracePath(path, (short, role) => `<span data-role="${role}">${short}</span>`, {
    roleIndex,
    node: { nodeId: '!000004d2', shortName: 'NODE', role: 'ROUTER' },
  });
  assert.notEqual(html, '');
  assert.match(html, /data-role="ROUTER"/);
  assert.match(html, /&rarr;/);
});

test('renderTraceroutes lists traceroute paths with badges', () => {
  const html = renderTraceroutes(
    [{ src: '!one', hops: ['!two'], dest: '!three' }],
    short => `<span class="short-name">${short}</span>`,
    { roleIndex: null },
  );
  assert.ok(html.includes('Traceroutes'));
  assert.ok(html.includes('short-name'));
});

test('renderTraceroutes skips empty or single-hop paths and renderTracePath uses node metadata', () => {
  // A single-node path has no hops to draw and renders nothing.
  const singleHop = renderTracePath([{ identifier: '!self', numericId: 1 }], short => `<b>${short}</b>`, {
    roleIndex: null,
    node: { nodeId: '!self', shortName: 'SELF', role: 'ROUTER' },
  });
  assert.equal(singleHop, '');
  const html = renderTraceroutes(
    [{ src: '!self', hops: [], dest: '!peer' }],
    (short, role) => `<span data-role="${role}">${short}</span>`,
    {
      roleIndex: {
        detailsById: new Map([['!self', { shortName: 'SELF', role: 'CLIENT' }]]),
        detailsByNum: new Map(),
        byId: new Map([['!peer', 'ROUTER']]),
        byNum: new Map(),
      },
      // Node metadata wins over the role index for the node itself.
      node: { nodeId: '!self', shortName: 'SELF', role: 'ADMIN' },
    },
  );
  assert.ok(html.includes('Traceroutes'));
  assert.match(html, /data-role="ADMIN"/);
});

test('renderTrace helpers normalise references and short-circuit when traces are empty', () => {
  assert.deepEqual(normalizeTraceNodeRef('!abcd'), { identifier: '!abcd', numericId: null });
  assert.equal(extractTracePath(null).length, 0);
  assert.equal(renderTraceroutes([], () => '', { roleIndex: null }), '');
});

test('fetchTracesForNode requests traceroutes for the node', async () => {
  const recorded = [];
  const fetchImpl = async (url, options) => {
    recorded.push({ url, options });
    return {
      status: 200,
      ok: true,
      json: async () => [{ src: '!abc', dest: '!def', hops: [] }],
    };
  };
  const traces = await fetchTracesForNode('!abc', { fetchImpl });
  assert.equal(traces.length, 1);
  assert.ok(recorded[0].url.includes('/api/traces/!abc'));
  assert.equal(recorded[0].options.cache, 'no-store');
});

test('fetchTracesForNode returns empty when identifier is missing', async () => {
  // fetchImpl must never run when there is no identifier.
  const traces = await fetchTracesForNode(null, { fetchImpl: () => { throw new Error('should not run'); } });
  assert.deepEqual(traces, []);
});

test('fetchTracesForNode throws on HTTP error', async () => {
  await assert.rejects(
    () => fetchTracesForNode('!err', {
      fetchImpl: async () => ({ status: 500, ok: false, json: async () => ({}) }),
    }),
    /Failed to load traceroutes/,
  );
});
// End-to-end hydration: the #nodeDetail container is filled from the refresh
// payload, messages endpoint, and per-neighbour node lookups.
test('initializeNodeDetailPage hydrates the container with node data', async () => {
  const container = {
    dataset: {
      nodeReference: JSON.stringify({ nodeId: '!node', fallback: { short_name: 'NODE' } }),
      privateMode: 'false',
    },
    innerHTML: '',
  };
  const documentStub = {
    querySelector: selector => (selector === '#nodeDetail' ? container : null),
  };
  const refreshImpl = async reference => {
    assert.equal(reference.nodeId, '!node');
    return {
      shortName: 'NODE',
      longName: 'Node Long',
      nodeId: '!node',
      role: 'CLIENT',
      modemPreset: 'LongFast',
      loraFreq: 915,
      battery: 66,
      voltage: 4.1,
      uptime: 100,
      latitude: 52.5,
      longitude: 13.4,
      altitude: 42,
      neighbors: [{ node_id: '!node', neighbor_id: '!ally', snr: 5.5 }],
      rawSources: { node: { node_id: '!node', role: 'CLIENT' } },
    };
  };
  const fetchImpl = async url => {
    if (url.startsWith('/api/messages/')) {
      return {
        status: 200,
        ok: true,
        json: async () => [{ text: 'hello', rx_time: 1_700_000_222 }],
      };
    }
    if (url.startsWith('/api/nodes/')) {
      return {
        status: 200,
        ok: true,
        json: async () => ({ node_id: '!ally', role: 'ROUTER', short_name: 'ALLY-API' }),
      };
    }
    return { status: 404, ok: false, json: async () => ({}) };
  };
  const result = await initializeNodeDetailPage({
    document: documentStub,
    refreshImpl,
    fetchImpl,
    renderShortHtml: short => `<span class="short-name">${short}</span>`,
  });
  assert.equal(result, true);
  for (const fragment of ['Node Long', 'node-detail__table', 'Neighbors', 'Messages', 'ALLY-API']) {
    assert.ok(container.innerHTML.includes(fragment), `missing fragment: ${fragment}`);
  }
});
// Legacy filter controls are removed when the element supports remove(), and
// merely hidden otherwise.
test('initializeNodeDetailPage removes legacy filter controls when supported', async () => {
  const container = {
    dataset: {
      nodeReference: JSON.stringify({ nodeId: '!node', fallback: { short_name: 'NODE' } }),
      privateMode: 'false',
    },
    innerHTML: '',
  };
  const filterControl = {
    removed: false,
    remove() {
      this.removed = true;
    },
  };
  const documentStub = {
    querySelector: selector => {
      if (selector === '#nodeDetail') return container;
      if (selector === '.filter-input') return filterControl;
      return null;
    },
  };
  const refreshImpl = async () => ({
    shortName: 'NODE',
    nodeId: '!node',
    role: 'CLIENT',
    neighbors: [],
    rawSources: { node: { node_id: '!node', role: 'CLIENT' } },
  });
  const result = await initializeNodeDetailPage({
    document: documentStub,
    refreshImpl,
    fetchImpl: async () => ({ status: 404, ok: false }),
    renderShortHtml: short => `<span class="short-name">${short}</span>`,
  });
  assert.equal(result, true);
  assert.equal(filterControl.removed, true);
});

test('initializeNodeDetailPage hides legacy filter controls when removal is unavailable', async () => {
  const container = {
    dataset: {
      nodeReference: JSON.stringify({ nodeId: '!node', fallback: { short_name: 'NODE' } }),
      privateMode: 'false',
    },
    innerHTML: '',
  };
  const filterControl = { hidden: false }; // no remove() — fallback path
  const documentStub = {
    querySelector: selector => {
      if (selector === '#nodeDetail') return container;
      if (selector === '.filter-input') return filterControl;
      return null;
    },
  };
  const refreshImpl = async () => ({
    shortName: 'NODE',
    nodeId: '!node',
    role: 'CLIENT',
    neighbors: [],
    rawSources: { node: { node_id: '!node', role: 'CLIENT' } },
  });
  const result = await initializeNodeDetailPage({
    document: documentStub,
    refreshImpl,
    fetchImpl: async () => ({ status: 404, ok: false }),
    renderShortHtml: short => `<span class="short-name">${short}</span>`,
  });
  assert.equal(result, true);
  assert.equal(filterControl.hidden, true);
});
// Failure paths: refresh errors and a missing reference payload both return
// false and surface an error message in the container.
test('initializeNodeDetailPage reports an error when refresh fails', async () => {
  const container = {
    dataset: {
      nodeReference: JSON.stringify({ nodeId: '!missing' }),
      privateMode: 'false',
    },
    innerHTML: '',
  };
  const result = await initializeNodeDetailPage({
    document: { querySelector: () => container },
    refreshImpl: async () => {
      throw new Error('boom');
    },
    renderShortHtml: short => `<span>${short}</span>`,
  });
  assert.equal(result, false);
  assert.ok(container.innerHTML.includes('Failed to load'));
});

test('initializeNodeDetailPage handles missing reference payloads', async () => {
  const container = { dataset: {}, innerHTML: '' };
  const result = await initializeNodeDetailPage({
    document: { querySelector: () => container },
    renderShortHtml: short => `<span>${short}</span>`,
  });
  assert.equal(result, false);
  assert.ok(container.innerHTML.includes('Node reference unavailable'));
});
@@ -0,0 +1,73 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { normalizeNodeSnapshot, normalizeNodeCollection, __testUtils } from '../node-snapshot-normalizer.js';
// Primitive coercion helpers, pulled into scope for the assertions below.
const { normalizeNumber, normalizeString } = __testUtils;

// Each telemetry field must be mirrored across all of its naming aliases.
test('normalizeNodeSnapshot synchronises telemetry aliases', () => {
  const snapshot = normalizeNodeSnapshot({
    node_id: '!test',
    channel: '56.2',
    airUtil: 13.5,
    battery_level: 45.5,
    relativeHumidity: 24.3,
    lastHeard: '1234',
  });
  assert.equal(snapshot.nodeId, '!test');
  assert.equal(snapshot.channel_utilization, 56.2);
  assert.equal(snapshot.channelUtilization, 56.2);
  assert.equal(snapshot.channel, 56.2);
  assert.equal(snapshot.air_util_tx, 13.5);
  assert.equal(snapshot.airUtilTx, 13.5);
  assert.equal(snapshot.airUtil, 13.5);
  assert.equal(snapshot.battery, 45.5);
  assert.equal(snapshot.batteryLevel, 45.5);
  assert.equal(snapshot.relative_humidity, 24.3);
  assert.equal(snapshot.humidity, 24.3);
  assert.equal(snapshot.last_heard, 1234);
});

test('normalizeNodeCollection applies canonical forms to all nodes', () => {
  const nodes = [
    { short_name: ' AAA ', voltage: '3.7' },
    { shortName: 'BBB', uptime_seconds: '3600', airUtilTx: '5.5' },
  ];
  normalizeNodeCollection(nodes); // mutates in place
  assert.equal(nodes[0].shortName, 'AAA');
  assert.equal(nodes[0].short_name, 'AAA');
  assert.equal(nodes[0].voltage, 3.7);
  assert.equal(nodes[1].uptime, 3600);
  assert.equal(nodes[1].air_util_tx, 5.5);
});

test('normaliser helpers coerce primitive values consistently', () => {
  assert.equal(normalizeNumber('42.1'), 42.1);
  assert.equal(normalizeNumber('not-a-number'), null);
  assert.equal(normalizeNumber(Infinity), null); // non-finite values rejected
  assert.equal(normalizeString(' hello '), 'hello');
  assert.equal(normalizeString(''), null);
  assert.equal(normalizeString(null), null);
});
@@ -0,0 +1,119 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { enhanceCoordinateCell, __testUtils } from '../nodes-coordinate-links.js';
const { toFiniteCoordinate } = __testUtils;

// A valid lat/lon pair must yield an anchor wired with dataset values, ARIA
// labelling, and a click handler that suppresses navigation and reports the
// parsed numeric coordinates.
test('enhanceCoordinateCell renders an interactive link for valid coordinates', () => {
  const cell = {
    replacedChildren: null,
    replaceChildren(...children) {
      this.replacedChildren = children;
    },
  };
  const linkStub = {
    dataset: {},
    attributes: new Map(),
    listeners: new Map(),
    href: null,
    setAttribute(name, value) {
      this.attributes.set(name, value);
    },
    addEventListener(name, handler) {
      this.listeners.set(name, handler);
    },
  };
  const documentStub = {
    createElement(tagName) {
      assert.equal(tagName, 'a');
      return linkStub;
    },
  };
  const activations = [];
  const link = enhanceCoordinateCell({
    cell,
    document: documentStub,
    displayText: '51.50000',
    formattedLatitude: '51.50000',
    formattedLongitude: '-0.12000',
    lat: '51.5',
    lon: '-0.12',
    nodeName: 'Alpha',
    onActivate: (lat, lon) => activations.push({ lat, lon }),
  });
  assert.equal(link, linkStub);
  assert.deepEqual(cell.replacedChildren, [linkStub]);
  assert.equal(linkStub.textContent, '51.50000');
  assert.equal(linkStub.dataset.lat, '51.5');
  assert.equal(linkStub.dataset.lon, '-0.12');
  assert.equal(linkStub.className, 'nodes-coordinate-link');
  assert.equal(linkStub.attributes.get('aria-label'), 'Center map on Alpha at 51.50000, -0.12000');
  assert.equal(linkStub.attributes.get('href'), '#');
  const clickHandler = linkStub.listeners.get('click');
  assert.equal(typeof clickHandler, 'function');
  const fakeEvent = {
    prevented: false,
    stopped: false,
    preventDefault() {
      this.prevented = true;
    },
    stopPropagation() {
      this.stopped = true;
    },
  };
  clickHandler(fakeEvent);
  assert.equal(fakeEvent.prevented, true);
  assert.equal(fakeEvent.stopped, true);
  // onActivate receives coordinates as numbers, not the original strings.
  assert.deepEqual(activations, [{ lat: 51.5, lon: -0.12 }]);
});
// Invalid display text or non-numeric coordinates must be rejected without
// touching the cell.
test('enhanceCoordinateCell ignores invalid input data', () => {
  const cell = {
    replaceChildren() {
      assert.fail('replaceChildren should not be called for invalid data');
    },
  };
  assert.equal(
    enhanceCoordinateCell({ cell, document: {}, displayText: '', lat: 0, lon: 0 }),
    null,
  );
  assert.equal(
    enhanceCoordinateCell({ cell, document: {}, displayText: 'value', lat: 'north', lon: 5 }),
    null,
  );
});

test('toFiniteCoordinate returns finite numbers and rejects NaN', () => {
  assert.equal(toFiniteCoordinate('12.34'), 12.34);
  assert.equal(toFiniteCoordinate(56.78), 56.78);
  assert.equal(toFiniteCoordinate('NaN'), null);
  assert.equal(toFiniteCoordinate(undefined), null);
});

Some files were not shown because too many files have changed in this diff Show More