Compare commits

..

22 Commits

Author SHA1 Message Date
l5y c1898037c0 web: add secondary seed node jmrp.io (#568) 2025-12-16 21:38:41 +01:00
l5y efc5f64279 data: implement whitelist for ingestor (#567)
* data: implement whitelist for ingestor

* data: run black

* data: cover missing unit test vectors
2025-12-16 21:11:53 +01:00
l5y 636a203254 web: add ?since= parameter to all apis (#566) 2025-12-16 20:24:31 +01:00
l5y 2e78fa7a3a matrix: fix docker build 2025-12-16 19:26:31 +01:00
l5y e74f985630 matrix: fix docker build (#564) 2025-12-16 18:52:07 +01:00
l5y e4facd7f26 web: fix federation signature validation and create fallback (#563)
* web: fix federation signature validation and create fallback

* web: cover missing unit test vectors
2025-12-16 10:52:59 +01:00
l5y f533362f8a chore: update readme (#561) 2025-12-16 08:54:31 +01:00
l5y 175a8f368f matrix: add docker file for bridge (#556)
* matrix: add docker file for bridge

* matrix: address review comments

* matrix: address review comments

* matrix: address review comments

* matrix: address review comments

* matrix: address review comments
2025-12-16 08:53:01 +01:00
l5y 872bcbd529 matrix: add health checks to startup (#555)
* matrix: add health checks to startup

* matrix: address review comments

* matrix: cover missing unit test vectors

* matrix: cover missing unit test vectors
2025-12-15 22:53:32 +01:00
l5y 8811f71e53 matrix: omit the api part in base url (#554)
* matrix: omit the api part in base url

* matrix: address review comments
2025-12-15 22:04:01 +01:00
l5y fec649a159 app: add utility coverage tests for main.dart (#552)
* Add utility coverage tests for main.dart

* Add channel names to message sorting tests

* Fix MeshMessage sort test construction

* chore: run dart formatter
2025-12-15 11:03:51 +01:00
l5y 9e3f481401 Add unit tests for daemon helpers (#553) 2025-12-15 08:43:13 +01:00
l5y 1a497864a7 chore: bump version to 0.5.8 (#551)
* chore: bump version to 0.5.8

* chore: add missing license headers
2025-12-15 08:29:27 +01:00
l5y 06fb90513f data: track ingestors heartbeat (#549)
* data: track ingestors heartbeat

* data: address review comments

* cover missing unit test vectors

* cover missing unit test vectors
2025-12-14 18:42:17 +01:00
l5y b5eecb1ec1 Harden instance selector navigation URLs (#550)
* Harden instance selector navigation URLs

* Cover malformed instance URL handling
2025-12-14 18:40:41 +01:00
l5y 0e211aebdd data: hide channels that have been flag for ignoring (#548)
* data: hide channels that have been flag for ignoring

* data: address review comments
2025-12-14 16:47:44 +01:00
l5y 96b62d7e14 web: fix limit when counting remote nodes (#547) 2025-12-14 15:05:19 +01:00
l5y baf6ffff0b web: improve instances map and table view (#546)
* web: improve instances map and table view

* web: address review comments

* run rufo
2025-12-14 14:35:55 +01:00
l5y 135de0863c web: fix traces submission with optional fields on udp (#545) 2025-12-14 13:27:07 +01:00
l5y 074a61baac chore: bump version to 0.5.7 (#542)
* chore: bump version to 0.5.7

* Change version to 0.5.7 in AppFrameworkInfo.plist

Updated version numbers to 0.5.7.
2025-12-08 20:39:58 +01:00
l5y 209cc948bf Handle zero telemetry aggregates (#538)
* Handle zero telemetry aggregates

* Fix telemetry aggregation to drop zero readings
2025-12-08 20:31:32 +01:00
l5y cc108f2f49 web: fix telemetry api to return current in amperes (#541)
* web: fix telemetry api to return current in amperes

* web: address review comments
2025-12-08 20:18:10 +01:00
54 changed files with 6179 additions and 235 deletions
+27 -5
View File
@@ -43,7 +43,7 @@ jobs:
strategy:
matrix:
service: [web, ingestor]
service: [web, ingestor, matrix-bridge]
architecture:
- { name: linux-amd64, platform: linux/amd64, label: "Linux x86_64", os: linux, architecture: amd64 }
- { name: linux-arm64, platform: linux/arm64, label: "Linux ARM64", os: linux, architecture: arm64 }
@@ -109,8 +109,8 @@ jobs:
uses: docker/build-push-action@v5
with:
context: .
file: ./${{ matrix.service == 'web' && 'web/Dockerfile' || 'data/Dockerfile' }}
target: production
file: ${{ matrix.service == 'web' && './web/Dockerfile' || matrix.service == 'ingestor' && './data/Dockerfile' || './matrix/Dockerfile' }}
target: ${{ matrix.service == 'matrix-bridge' && 'runtime' || 'production' }}
platforms: ${{ matrix.architecture.platform }}
push: true
tags: |
@@ -119,12 +119,12 @@ jobs:
${{ steps.tagging.outputs.include_latest == 'true' && format('{0}/{1}-{2}-{3}:latest', env.REGISTRY, env.IMAGE_PREFIX, matrix.service, matrix.architecture.name) || '' }}
labels: |
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.description=PotatoMesh ${{ matrix.service == 'web' && 'Web Application' || 'Python Ingestor' }} for ${{ matrix.architecture.label }}
org.opencontainers.image.description=PotatoMesh ${{ matrix.service == 'web' && 'Web Application' || matrix.service == 'ingestor' && 'Python Ingestor' || 'Matrix Bridge' }} for ${{ matrix.architecture.label }}
org.opencontainers.image.licenses=Apache-2.0
org.opencontainers.image.version=${{ steps.version.outputs.version }}
org.opencontainers.image.created=${{ github.event.head_commit.timestamp }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.title=PotatoMesh ${{ matrix.service == 'web' && 'Web' || 'Ingestor' }} (${{ matrix.architecture.label }})
org.opencontainers.image.title=PotatoMesh ${{ matrix.service == 'web' && 'Web' || matrix.service == 'ingestor' && 'Ingestor' || 'Matrix Bridge' }} (${{ matrix.architecture.label }})
org.opencontainers.image.vendor=PotatoMesh
org.opencontainers.image.architecture=${{ matrix.architecture.architecture }}
org.opencontainers.image.os=${{ matrix.architecture.os }}
@@ -208,6 +208,19 @@ jobs:
VERSION=${GITHUB_REF#refs/tags/v}
echo "version=$VERSION" >> $GITHUB_OUTPUT
- name: Determine tagging strategy
id: tagging
run: |
VERSION="${{ steps.version.outputs.version }}"
if echo "$VERSION" | grep -E -- '-(rc|beta|alpha|dev)'; then
INCLUDE_LATEST=false
else
INCLUDE_LATEST=true
fi
echo "include_latest=$INCLUDE_LATEST" >> $GITHUB_OUTPUT
- name: Publish release summary
run: |
echo "## 🚀 PotatoMesh Images Published to GHCR" >> $GITHUB_STEP_SUMMARY
@@ -234,4 +247,13 @@ jobs:
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-armv7:latest\` - Linux ARMv7" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
fi
# Matrix bridge images
echo "### 🧩 Matrix Bridge" >> $GITHUB_STEP_SUMMARY
if [ "${{ steps.tagging.outputs.include_latest }}" = "true" ]; then
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-matrix-bridge-linux-amd64:latest\` - Linux x86_64" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-matrix-bridge-linux-arm64:latest\` - Linux ARM64" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-matrix-bridge-linux-armv7:latest\` - Linux ARMv7" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
fi
+4
View File
@@ -17,11 +17,15 @@ The repository splits runtime and ingestion logic. `web/` holds the Sinatra dash
`data/` hosts the Python Meshtastic ingestor plus migrations and CLI scripts. API fixtures and end-to-end harnesses live in `tests/`. Dockerfiles and compose files support containerized workflows.
`matrix/` contains the Rust Matrix bridge; build with `cargo build --release` or `docker build -f matrix/Dockerfile .`, and keep bridge config under `matrix/Config.toml` when running locally.
## Build, Test, and Development Commands
Run dependency installs inside `web/`: `bundle install` for gems and `npm ci` for JavaScript tooling. Start the app with `cd web && API_TOKEN=dev ./app.sh` for local work or `bundle exec rackup -p 41447` when integrating elsewhere.
Prep ingestion with `python -m venv .venv && pip install -r data/requirements.txt`; `./data/mesh.sh` streams from live radios. `docker-compose -f docker-compose.dev.yml up` brings up the full stack.
Container images publish via `.github/workflows/docker.yml` as `potato-mesh-{service}-linux-$arch` (`web`, `ingestor`, `matrix-bridge`), using the Dockerfiles in `web/`, `data/`, and `matrix/`.
## Coding Style & Naming Conventions
Use two-space indentation for Ruby and keep `# frozen_string_literal: true` at the top of new files. Keep Ruby classes/modules in `CamelCase`, filenames in `snake_case.rb`, and feature specs in `*_spec.rb`.
+62
View File
@@ -1,5 +1,67 @@
# CHANGELOG
## v0.5.7
* Data: track ingestors heartbeat by @l5yth in <https://github.com/l5yth/potato-mesh/pull/549>
* Harden instance selector navigation URLs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/550>
* Data: hide channels that have been flag for ignoring by @l5yth in <https://github.com/l5yth/potato-mesh/pull/548>
* Web: fix limit when counting remote nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/547>
* Web: improve instances map and table view by @l5yth in <https://github.com/l5yth/potato-mesh/pull/546>
* Web: fix traces submission with optional fields on udp by @l5yth in <https://github.com/l5yth/potato-mesh/pull/545>
* Chore: bump version to 0.5.7 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/542>
* Handle zero telemetry aggregates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/538>
* Web: fix telemetry api to return current in amperes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/541>
* Web: fix traces rendering by @l5yth in <https://github.com/l5yth/potato-mesh/pull/535>
* Normalize numeric node roles to canonical labels by @l5yth in <https://github.com/l5yth/potato-mesh/pull/539>
* Use INSTANCE_DOMAIN env for ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/536>
* Web: further refine the federation page by @l5yth in <https://github.com/l5yth/potato-mesh/pull/534>
* Add Federation Map by @apo-mak in <https://github.com/l5yth/potato-mesh/pull/532>
* Add contact link to the instance data by @apo-mak in <https://github.com/l5yth/potato-mesh/pull/533>
* Matrix: create potato-matrix-bridge by @l5yth in <https://github.com/l5yth/potato-mesh/pull/528>
## v0.5.6
* Web: display sats in view by @l5yth in <https://github.com/l5yth/potato-mesh/pull/523>
* Web: display air quality in separate chart by @l5yth in <https://github.com/l5yth/potato-mesh/pull/521>
* Ci: Add macOS and Ubuntu builds to Flutter workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/519>
* Web: add current to charts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/520>
* App: fix notification icon by @l5yth in <https://github.com/l5yth/potato-mesh/pull/518>
* Spec: update test fixtures by @l5yth in <https://github.com/l5yth/potato-mesh/pull/517>
* App: generate proper icons by @l5yth in <https://github.com/l5yth/potato-mesh/pull/516>
* Web: fix favicon by @l5yth in <https://github.com/l5yth/potato-mesh/pull/515>
* Web: add ?since= parameter to api/messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/512>
* App: implement notifications by @l5yth in <https://github.com/l5yth/potato-mesh/pull/511>
* App: add theme selector by @l5yth in <https://github.com/l5yth/potato-mesh/pull/507>
* App: further harden refresh logic and prefer local first by @l5yth in <https://github.com/l5yth/potato-mesh/pull/506>
* Ci: fix app artifacts for tags by @l5yth in <https://github.com/l5yth/potato-mesh/pull/504>
* Ci: build app artifacts for tags by @l5yth in <https://github.com/l5yth/potato-mesh/pull/503>
* App: add persistance by @l5yth in <https://github.com/l5yth/potato-mesh/pull/501>
* App: instance and chat mvp by @l5yth in <https://github.com/l5yth/potato-mesh/pull/498>
* App: add instance selector to settings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/497>
* App: add scaffholding gitignore by @l5yth in <https://github.com/l5yth/potato-mesh/pull/496>
* Handle reaction app packets without reply id by @l5yth in <https://github.com/l5yth/potato-mesh/pull/495>
* Render reaction multiplier counts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/494>
* Add comprehensive tests for Flutter reader by @l5yth in <https://github.com/l5yth/potato-mesh/pull/491>
* Map numeric role ids to canonical Meshtastic roles by @l5yth in <https://github.com/l5yth/potato-mesh/pull/489>
* Update node detail hydration for traces by @l5yth in <https://github.com/l5yth/potato-mesh/pull/490>
* Add mobile Flutter CI workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/488>
* Align OCI labels in docker workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/487>
* Add Meshtastic reader Flutter app by @l5yth in <https://github.com/l5yth/potato-mesh/pull/483>
* Handle pre-release Docker tagging by @l5yth in <https://github.com/l5yth/potato-mesh/pull/486>
* Web: remove range from charts labels by @l5yth in <https://github.com/l5yth/potato-mesh/pull/485>
* Floor override frequencies to MHz integers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/476>
* Prevent message ids from being treated as node identifiers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/475>
* Fix 1 after emojis in reply. by @Alexkurd in <https://github.com/l5yth/potato-mesh/pull/464>
* Add frequency and preset to node table by @l5yth in <https://github.com/l5yth/potato-mesh/pull/472>
* Subscribe to traceroute app pubsub topic by @l5yth in <https://github.com/l5yth/potato-mesh/pull/471>
* Aggregate telemetry over the last 7 days by @l5yth in <https://github.com/l5yth/potato-mesh/pull/470>
* Address missing id field ingestor bug by @l5yth in <https://github.com/l5yth/potato-mesh/pull/469>
* Merge secondary channels by name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/468>
* Rate limit host device telemetry by @l5yth in <https://github.com/l5yth/potato-mesh/pull/467>
* Add traceroutes to frontend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/466>
* Feat: implement traceroute app packet handling across the stack by @l5yth in <https://github.com/l5yth/potato-mesh/pull/463>
* Bump version and update changelog by @l5yth in <https://github.com/l5yth/potato-mesh/pull/462>
## v0.5.5
* Added comprehensive helper unit tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/457>
+2
View File
@@ -53,6 +53,8 @@ Additional environment variables are optional:
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom (disables the auto-fit checkbox when set). |
| `MAX_DISTANCE` | `42` | Maximum relationship distance (km) before edges are hidden. |
| `DEBUG` | `0` | Enables verbose logging across services when set to `1`. |
| `ALLOWED_CHANNELS` | _unset_ | Comma-separated channel names the ingestor accepts; when set, all other channels are skipped before hidden-channel filters are applied. |
| `HIDDEN_CHANNELS` | _unset_ | Comma-separated channel names the ingestor skips when forwarding packets. |
| `FEDERATION` | `1` | Controls whether the instance announces itself and crawls peers (`1`) or stays isolated (`0`). |
| `PRIVATE` | `0` | Restricts public visibility and disables chat/message endpoints when set to `1`. |
| `CONNECTION` | `/dev/ttyACM0` | Serial device, TCP endpoint, or Bluetooth target used by the ingestor to reach the radio. |
+30 -7
View File
@@ -7,13 +7,20 @@
[![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/l5yth/potato-mesh/issues)
[![Matrix Chat](https://img.shields.io/badge/matrix-%23potatomesh:dod.ngo-blue)](https://matrix.to/#/#potatomesh:dod.ngo)
A federated Meshtastic-powered node dashboard for your local community. _No MQTT clutter, just local LoRa aether._
A federated, Meshtastic-powered node dashboard for your local community.
_No MQTT clutter, just local LoRa aether._
* Web app with chat window and map view showing nodes, neighbors, telemetry, and messages.
* API to POST (authenticated) and to GET nodes and messages.
* Shows new node notifications (first seen) in chat.
* Web dashboard with chat window and map view showing nodes, positions, neighbors,
trace routes, telemetry, and messages.
* API to POST (authenticated) and to GET nodes, messages, and telemetry.
* Shows new node notifications (first seen) and telemetry logs in chat.
* Allows searching and filtering for nodes in map and table view.
* Federated: _automatically_ forms a federation with other communities running
Potato Mesh!
* Supplemental Python ingestor to feed the POST APIs of the Web app with data remotely.
* Supports multiple ingestors per instance.
* Matrix bridge that posts Meshtastic messages to a defined matrix channel (no
radio required).
* Mobile app to _read_ messages on your local aether (no radio required).
Live demo for Berlin #MediumFast: [potatomesh.net](https://potatomesh.net)
@@ -58,6 +65,7 @@ RACK_ENV="production" \
APP_ENV="production" \
API_TOKEN="SuperSecureTokenReally" \
INSTANCE_DOMAIN="https://potatomesh.net" \
MAP_CENTER="53.55,13.42" \
exec ruby app.rb -p 41447 -o 0.0.0.0
```
@@ -68,6 +76,7 @@ exec ruby app.rb -p 41447 -o 0.0.0.0
* Provide a strong `API_TOKEN` value to authorize POST requests against the API.
* Configure `INSTANCE_DOMAIN` with the public URL of your deployment so vanity
links and generated metadata resolve correctly.
* Don't forget to set a `MAP_CENTER` to point to your local region.
The web app can be configured with environment variables (defaults shown):
@@ -83,6 +92,8 @@ The web app can be configured with environment variables (defaults shown):
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom applied on first load; disables auto-fit when provided. |
| `MAX_DISTANCE` | `42` | Maximum distance (km) before node relationships are hidden on the map. |
| `DEBUG` | `0` | Set to `1` for verbose logging in the web and ingestor services. |
| `ALLOWED_CHANNELS` | _unset_ | Comma-separated channel names the ingestor accepts; when set, all other channels are skipped before hidden filters. |
| `HIDDEN_CHANNELS` | _unset_ | Comma-separated channel names the ingestor will ignore when forwarding packets. |
| `FEDERATION` | `1` | Set to `1` to announce your instance and crawl peers, or `0` to disable federation. Private mode overrides this. |
| `PRIVATE` | `0` | Set to `1` to hide the chat UI, disable message APIs, and exclude hidden clients from public listings. |
@@ -133,7 +144,9 @@ The web app contains an API:
* GET `/api/messages?limit=100&encrypted=false&since=0` - returns the latest 100 messages newer than the provided unix timestamp (defaults to `since=0` to return full history; disabled when `PRIVATE=1`)
* GET `/api/telemetry?limit=100` - returns the latest 100 telemetry data
* GET `/api/neighbors?limit=100` - returns the latest 100 neighbor tuples
* GET `/api/traces?limit=100` - returns the latest 100 trace-routes caught
* GET `/api/instances` - returns known potato-mesh instances in other locations
* GET `/api/ingestors` - returns active potato-mesh python ingestors that feed data
* GET `/metrics`- metrics for the prometheus endpoint
* GET `/version`- information about the potato-mesh instance
* POST `/api/nodes` - upserts nodes provided as JSON object mapping node ids to node data (requires `Authorization: Bearer <API_TOKEN>`)
@@ -141,6 +154,7 @@ The web app contains an API:
* POST `/api/messages` - appends messages provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`; disabled when `PRIVATE=1`)
* POST `/api/telemetry` - appends telemetry provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/neighbors` - appends neighbor tuples provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/traces` - appends caught trace routes provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
The `API_TOKEN` environment variable must be set to a non-empty value and match the token supplied in the `Authorization` header for `POST` requests.
@@ -189,9 +203,11 @@ node records and parsing new incoming messages. Enable debug output with `DEBUG=
specify the connection target with `CONNECTION` (default `/dev/ttyACM0`) or set it to
an IP address (for example `192.168.1.20:4403`) to use the Meshtastic TCP
interface. `CONNECTION` also accepts Bluetooth device addresses (e.g.,
`ED:4D:9E:95:CF:60`) and the script attempts a BLE connection if available. The
ingestor will still honor the legacy `POTATOMESH_INSTANCE` variable when
`INSTANCE_DOMAIN` is unset to ease upgrades from earlier deployments.
`ED:4D:9E:95:CF:60`) and the script attempts a BLE connection if available. To keep
ingestion limited, set `ALLOWED_CHANNELS` to a comma-separated whitelist (for
example `ALLOWED_CHANNELS="Chat,Ops"`); packets on other channels are discarded.
Use `HIDDEN_CHANNELS` to block specific channels from the web UI even when they
appear in the allowlist.
## Docker
@@ -201,12 +217,19 @@ Docker images are published on Github for each release:
docker pull ghcr.io/l5yth/potato-mesh/web:latest # newest release
docker pull ghcr.io/l5yth/potato-mesh/web:v0.5.5 # pinned historical release
docker pull ghcr.io/l5yth/potato-mesh/ingestor:latest
docker pull ghcr.io/l5yth/potato-mesh/matrix-bridge:latest
```
Feel free to run the [configure.sh](./configure.sh) script to set up your
environment. See the [Docker guide](DOCKER.md) for more details and custom
deployment instructions.
## Matrix Bridge
A Matrix bridge is currently being worked on. It requests messages from a configured
potato-mesh instance and forwards them to a specified Matrix channel; see
[matrix/README.md](./matrix/README.md).
## Mobile App
A mobile _reader_ app is currently being worked on. Stay tuned for releases and updates.
+2 -2
View File
@@ -15,11 +15,11 @@
<key>CFBundlePackageType</key>
<string>FMWK</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<string>0.5.8</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>1.0</string>
<string>0.5.8</string>
<key>MinimumOSVersion</key>
<string>14.0</string>
</dict>
+1 -1
View File
@@ -1,7 +1,7 @@
name: potato_mesh_reader
description: Meshtastic Reader — read-only view for PotatoMesh messages.
publish_to: "none"
version: 0.5.6
version: 0.5.8
environment:
sdk: ">=3.4.0 <4.0.0"
+128
View File
@@ -0,0 +1,128 @@
// Copyright © 2025-26 l5yth & contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import 'package:flutter/material.dart';
import 'package:flutter_test/flutter_test.dart';
import 'package:potato_mesh_reader/main.dart';
// Unit and widget tests for helpers exported by main.dart: the
// BootstrapProgress label, InstanceVersion summary, message sorting,
// and the LoadingScreen widget states.
void main() {
  // Required before any widget tests run in this suite.
  TestWidgetsFlutterBinding.ensureInitialized();

  test('BootstrapProgress renders stage, counts, and detail', () {
    const progress = BootstrapProgress(
      stage: 'Downloading',
      current: 2,
      total: 5,
      detail: 'instances',
    );
    // Full label combines stage, "current/total", and the detail suffix.
    expect(progress.label, 'Downloading 2/5 • instances');
    // With only a stage provided, the label falls back to the stage alone.
    const fallback = BootstrapProgress(stage: 'Starting');
    expect(fallback.label, 'Starting');
  });

  test('InstanceVersion summary prefers populated fields', () {
    const populated = InstanceVersion(
      name: 'BerlinMesh',
      channel: '#MediumFast',
      frequency: '868MHz',
      instanceDomain: 'potatomesh.net',
    );
    // Summary joins the populated fields with middle dots.
    expect(populated.summary, 'BerlinMesh · #MediumFast · 868MHz');
    const minimal = InstanceVersion(
      name: '',
      channel: null,
      frequency: null,
      instanceDomain: null,
    );
    // With nothing populated the summary degrades to a placeholder.
    expect(minimal.summary, 'Unknown');
  });

  test('sortMessagesByRxTime keeps unknown timestamps in place', () {
    // Local factory so each message only varies in the fields under test.
    MeshMessage buildMessage({
      required int id,
      required String text,
      required String rxIso,
      DateTime? rxTime,
    }) {
      return MeshMessage(
        id: id,
        rxTime: rxTime,
        rxIso: rxIso,
        fromId: '!$id',
        nodeId: '!$id',
        toId: '^',
        channelName: '#general',
        channel: 1,
        portnum: 'TEXT',
        text: text,
        rssi: -50,
        snr: 1.0,
        hopLimit: 1,
      );
    }

    final withTime = buildMessage(
      id: 2,
      rxTime: DateTime.utc(2024, 1, 1, 12, 1),
      rxIso: '2024-01-01T12:01:00Z',
      text: 'timed',
    );
    final withoutTime = buildMessage(
      id: 1,
      rxTime: null,
      rxIso: 'unknown',
      text: 'unknown',
    );
    final laterTime = buildMessage(
      id: 3,
      rxTime: DateTime.utc(2024, 1, 1, 12, 5),
      rxIso: '2024-01-01T12:05:00Z',
      text: 'later',
    );

    // Input order: [no-timestamp, later, earlier]. Timestamped messages
    // are sorted chronologically; the untimed one keeps its slot.
    final sorted = sortMessagesByRxTime([withoutTime, laterTime, withTime]);
    expect(sorted.first.id, withoutTime.id,
        reason: 'messages without rxTime should retain position');
    expect(sorted[1].id, withTime.id,
        reason: 'messages with timestamps should be ordered chronologically');
    expect(sorted.last.id, laterTime.id);
  });

  testWidgets('LoadingScreen displays progress label and icon', (tester) async {
    const screen = LoadingScreen(
      progress: BootstrapProgress(stage: 'Fetching'),
    );
    await tester.pumpWidget(const MaterialApp(home: screen));
    // Spinner, the stage label, and the branded semantics label all render.
    expect(find.byType(CircularProgressIndicator), findsOneWidget);
    expect(find.text('Fetching'), findsOneWidget);
    expect(find.bySemanticsLabel('PotatoMesh'), findsOneWidget);
  });

  testWidgets('LoadingScreen surfaces errors', (tester) async {
    const screen = LoadingScreen(
      progress: BootstrapProgress(stage: 'Loading'),
      error: 'boom',
    );
    await tester.pumpWidget(const MaterialApp(home: screen));
    // The error string is embedded in the user-visible failure message.
    expect(find.textContaining('Failed to load: boom'), findsOneWidget);
  });
}
+19
View File
@@ -76,6 +76,8 @@ CHANNEL=$(grep "^CHANNEL=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo
FREQUENCY=$(grep "^FREQUENCY=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "915MHz")
FEDERATION=$(grep "^FEDERATION=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "1")
PRIVATE=$(grep "^PRIVATE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "0")
HIDDEN_CHANNELS=$(grep "^HIDDEN_CHANNELS=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
ALLOWED_CHANNELS=$(grep "^ALLOWED_CHANNELS=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
MAP_CENTER=$(grep "^MAP_CENTER=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "38.761944,-27.090833")
MAP_ZOOM=$(grep "^MAP_ZOOM=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
MAX_DISTANCE=$(grep "^MAX_DISTANCE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "42")
@@ -126,6 +128,11 @@ echo "-------------------"
echo "Private mode hides public mesh messages from unauthenticated visitors."
echo "Set to 1 to hide public feeds or 0 to keep them visible."
read_with_default "Enable private mode (1=yes, 0=no)" "$PRIVATE" PRIVATE
echo "Provide a comma-separated whitelist of channel names to ingest (optional)."
echo "When set, only listed channels are ingested unless explicitly hidden below."
read_with_default "Allowed channels" "$ALLOWED_CHANNELS" ALLOWED_CHANNELS
echo "Provide a comma-separated list of channel names to hide from the web UI (optional)."
read_with_default "Hidden channels" "$HIDDEN_CHANNELS" HIDDEN_CHANNELS
echo ""
echo "🛠 Docker Settings"
@@ -196,6 +203,16 @@ update_env "POTATOMESH_IMAGE_TAG" "$POTATOMESH_IMAGE_TAG"
update_env "FEDERATION" "$FEDERATION"
update_env "PRIVATE" "$PRIVATE"
update_env "CONNECTION" "$CONNECTION"
if [ -n "$ALLOWED_CHANNELS" ]; then
update_env "ALLOWED_CHANNELS" "\"$ALLOWED_CHANNELS\""
else
sed -i.bak '/^ALLOWED_CHANNELS=.*/d' .env
fi
if [ -n "$HIDDEN_CHANNELS" ]; then
update_env "HIDDEN_CHANNELS" "\"$HIDDEN_CHANNELS\""
else
sed -i.bak '/^HIDDEN_CHANNELS=.*/d' .env
fi
if [ -n "$INSTANCE_DOMAIN" ]; then
update_env "INSTANCE_DOMAIN" "$INSTANCE_DOMAIN"
else
@@ -244,6 +261,8 @@ echo " API Token: ${API_TOKEN:0:8}..."
echo " Docker Image Arch: $POTATOMESH_IMAGE_ARCH"
echo " Docker Image Tag: $POTATOMESH_IMAGE_TAG"
echo " Private Mode: ${PRIVATE}"
echo " Allowed Channels: ${ALLOWED_CHANNELS:-'All'}"
echo " Hidden Channels: ${HIDDEN_CHANNELS:-'None'}"
echo " Instance Domain: ${INSTANCE_DOMAIN:-'Auto-detected'}"
if [ "${FEDERATION:-1}" = "0" ]; then
echo " Federation: Disabled"
+4
View File
@@ -50,6 +50,8 @@ USER potatomesh
ENV CONNECTION=/dev/ttyACM0 \
CHANNEL_INDEX=0 \
DEBUG=0 \
ALLOWED_CHANNELS="" \
HIDDEN_CHANNELS="" \
INSTANCE_DOMAIN="" \
API_TOKEN=""
@@ -75,6 +77,8 @@ USER ContainerUser
ENV CONNECTION=/dev/ttyACM0 \
CHANNEL_INDEX=0 \
DEBUG=0 \
ALLOWED_CHANNELS="" \
HIDDEN_CHANNELS="" \
INSTANCE_DOMAIN="" \
API_TOKEN=""
+1 -1
View File
@@ -18,7 +18,7 @@ The ``data.mesh`` module exposes helpers for reading Meshtastic node and
message information before forwarding it to the accompanying web application.
"""
VERSION = "0.5.6"
VERSION = "0.5.8"
"""Semantic version identifier shared with the dashboard and front-end."""
__version__ = VERSION
+26
View File
@@ -0,0 +1,26 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- Use write-ahead logging for better concurrent read/write behavior.
PRAGMA journal_mode=WAL;

-- One row per known ingestor, keyed by its node identifier.
-- NOTE(review): timestamps appear to be integer epoch values written by the
-- ingestor heartbeat — confirm units against the writer before relying on them.
CREATE TABLE IF NOT EXISTS ingestors (
    node_id TEXT PRIMARY KEY,        -- ingestor node identifier
    start_time INTEGER NOT NULL,     -- timestamp recorded at startup
    last_seen_time INTEGER NOT NULL, -- timestamp of the latest heartbeat
    version TEXT,                    -- reported ingestor version string
    lora_freq INTEGER,               -- reported LoRa frequency
    modem_preset TEXT                -- reported modem preset name
);

-- Supports queries filtering/ordering ingestors by recency of heartbeat.
CREATE INDEX IF NOT EXISTS idx_ingestors_last_seen ON ingestors(last_seen_time);
+1
View File
@@ -26,6 +26,7 @@ CREATE TABLE IF NOT EXISTS instances (
longitude REAL,
last_update_time INTEGER,
is_private BOOLEAN NOT NULL DEFAULT 0,
nodes_count INTEGER,
contact_link TEXT,
signature TEXT
);
+38 -2
View File
@@ -21,7 +21,17 @@ import threading as threading # re-exported for compatibility
import sys
import types
from . import channels, config, daemon, handlers, interfaces, queue, serialization
from .. import VERSION as _PACKAGE_VERSION
from . import (
channels,
config,
daemon,
handlers,
ingestors,
interfaces,
queue,
serialization,
)
__all__: list[str] = []
@@ -40,7 +50,15 @@ def _export_constants() -> None:
__all__.extend(["json", "urllib", "glob", "threading", "signal"])
for _module in (channels, daemon, handlers, interfaces, queue, serialization):
for _module in (
channels,
daemon,
handlers,
interfaces,
queue,
serialization,
ingestors,
):
_reexport(_module)
_export_constants()
@@ -52,11 +70,14 @@ _CONFIG_ATTRS = {
"DEBUG",
"INSTANCE",
"API_TOKEN",
"ALLOWED_CHANNELS",
"HIDDEN_CHANNELS",
"LORA_FREQ",
"MODEM_PRESET",
"_RECONNECT_INITIAL_DELAY_SECS",
"_RECONNECT_MAX_DELAY_SECS",
"_CLOSE_TIMEOUT_SECS",
"_INGESTOR_HEARTBEAT_SECS",
"_debug_log",
}
@@ -70,9 +91,16 @@ _HANDLER_ATTRS = set(handlers.__all__)
_DAEMON_ATTRS = set(daemon.__all__)
_SERIALIZATION_ATTRS = set(serialization.__all__)
_INTERFACE_EXPORTS = set(interfaces.__all__)
_INGESTOR_ATTRS = set(ingestors.__all__)
# Re-export the package version for callers that previously referenced
# data.mesh_ingestor.VERSION directly.
VERSION = _PACKAGE_VERSION
__all__.append("VERSION")
__all__.extend(sorted(_CONFIG_ATTRS))
__all__.extend(sorted(_INTERFACE_ATTRS))
__all__.append("VERSION")
class _MeshIngestorModule(types.ModuleType):
@@ -87,6 +115,10 @@ class _MeshIngestorModule(types.ModuleType):
return getattr(interfaces, name)
if name in _INTERFACE_EXPORTS:
return getattr(interfaces, name)
if name in _INGESTOR_ATTRS:
return getattr(ingestors, name)
if name == "VERSION":
return VERSION
raise AttributeError(name)
def __setattr__(self, name: str, value): # type: ignore[override]
@@ -121,6 +153,10 @@ class _MeshIngestorModule(types.ModuleType):
setattr(serialization, name, value)
super().__setattr__(name, getattr(serialization, name, value))
handled = True
if name in _INGESTOR_ATTRS:
setattr(ingestors, name, value)
super().__setattr__(name, getattr(ingestors, name, value))
handled = True
if handled:
return
super().__setattr__(name, value)
+52
View File
@@ -222,6 +222,54 @@ def channel_name(channel_index: int | None) -> str | None:
return _CHANNEL_LOOKUP.get(int(channel_index))
def hidden_channel_names() -> tuple[str, ...]:
"""Return the configured set of hidden channel names."""
return tuple(getattr(config, "HIDDEN_CHANNELS", ()))
def allowed_channel_names() -> tuple[str, ...]:
"""Return the configured set of explicitly allowed channel names."""
return tuple(getattr(config, "ALLOWED_CHANNELS", ()))
def is_allowed_channel(channel_name_value: str | None) -> bool:
"""Return ``True`` when ``channel_name_value`` is permitted by policy."""
allowed = getattr(config, "ALLOWED_CHANNELS", ())
if not allowed:
return True
if channel_name_value is None:
return False
normalized = channel_name_value.strip()
if not normalized:
return False
normalized_casefold = normalized.casefold()
for allowed_name in allowed:
if normalized_casefold == allowed_name.casefold():
return True
return False
def is_hidden_channel(channel_name_value: str | None) -> bool:
"""Return ``True`` when ``channel_name_value`` is configured as hidden."""
if channel_name_value is None:
return False
normalized = channel_name_value.strip()
if not normalized:
return False
normalized_casefold = normalized.casefold()
for hidden in getattr(config, "HIDDEN_CHANNELS", ()):
if normalized_casefold == hidden.casefold():
return True
return False
def _reset_channel_cache() -> None:
"""Clear cached channel data. Intended for use in tests only."""
@@ -234,5 +282,9 @@ __all__ = [
"capture_from_interface",
"channel_mappings",
"channel_name",
"allowed_channel_names",
"hidden_channel_names",
"is_allowed_channel",
"is_hidden_channel",
"_reset_channel_cache",
]
+50
View File
@@ -46,6 +46,9 @@ DEFAULT_ENERGY_ONLINE_DURATION_SECS = 300.0
DEFAULT_ENERGY_SLEEP_SECS = float(6 * 60 * 60)
"""Sleep duration used when energy saving mode is active."""
DEFAULT_INGESTOR_HEARTBEAT_SECS = float(60 * 60)
"""Interval between ingestor heartbeat announcements."""
CONNECTION = os.environ.get("CONNECTION") or os.environ.get("MESH_SERIAL")
"""Optional connection target for the mesh interface.
@@ -63,6 +66,49 @@ CHANNEL_INDEX = int(os.environ.get("CHANNEL_INDEX", str(DEFAULT_CHANNEL_INDEX)))
DEBUG = os.environ.get("DEBUG") == "1"
def _parse_channel_names(raw_value: str | None) -> tuple[str, ...]:
"""Normalise a comma-separated list of channel names.
Parameters:
raw_value: Raw environment string containing channel names separated by
commas. ``None`` and empty segments are ignored.
Returns:
A tuple of unique, non-empty channel names preserving input order while
deduplicating case-insensitively.
"""
if not raw_value:
return ()
normalized_entries: list[str] = []
seen: set[str] = set()
for part in raw_value.split(","):
name = part.strip()
if not name:
continue
key = name.casefold()
if key in seen:
continue
seen.add(key)
normalized_entries.append(name)
return tuple(normalized_entries)
def _parse_hidden_channels(raw_value: str | None) -> tuple[str, ...]:
"""Compatibility wrapper that parses hidden channel names."""
return _parse_channel_names(raw_value)
HIDDEN_CHANNELS = _parse_hidden_channels(os.environ.get("HIDDEN_CHANNELS"))
"""Channel names configured to be ignored by the ingestor."""
ALLOWED_CHANNELS = _parse_channel_names(os.environ.get("ALLOWED_CHANNELS"))
"""Explicitly permitted channel names; when set, other channels are ignored."""
def _resolve_instance_domain() -> str:
"""Resolve the configured instance domain from the environment.
@@ -100,6 +146,7 @@ _CLOSE_TIMEOUT_SECS = DEFAULT_CLOSE_TIMEOUT_SECS
_INACTIVITY_RECONNECT_SECS = DEFAULT_INACTIVITY_RECONNECT_SECS
_ENERGY_ONLINE_DURATION_SECS = DEFAULT_ENERGY_ONLINE_DURATION_SECS
_ENERGY_SLEEP_SECS = DEFAULT_ENERGY_SLEEP_SECS
_INGESTOR_HEARTBEAT_SECS = DEFAULT_INGESTOR_HEARTBEAT_SECS
# Backwards compatibility shim for legacy imports.
PORT = CONNECTION
@@ -144,6 +191,8 @@ __all__ = [
"SNAPSHOT_SECS",
"CHANNEL_INDEX",
"DEBUG",
"HIDDEN_CHANNELS",
"ALLOWED_CHANNELS",
"INSTANCE",
"API_TOKEN",
"ENERGY_SAVING",
@@ -155,6 +204,7 @@ __all__ = [
"_INACTIVITY_RECONNECT_SECS",
"_ENERGY_ONLINE_DURATION_SECS",
"_ENERGY_SLEEP_SECS",
"_INGESTOR_HEARTBEAT_SECS",
"_debug_log",
]
+43 -1
View File
@@ -23,7 +23,7 @@ import time
from pubsub import pub
from . import config, handlers, interfaces
from . import config, handlers, ingestors, interfaces
_RECEIVE_TOPICS = (
"meshtastic.receive",
@@ -169,6 +169,41 @@ def _is_ble_interface(iface_obj) -> bool:
return "ble_interface" in module_name
def _process_ingestor_heartbeat(iface, *, ingestor_announcement_sent: bool) -> bool:
"""Send ingestor liveness heartbeats when a host id is known.
Parameters:
iface: Active mesh interface used to extract a host node id when absent.
ingestor_announcement_sent: Whether an initial heartbeat has already
been sent during the current session.
Returns:
Updated ``ingestor_announcement_sent`` flag reflecting whether an
initial heartbeat was transmitted.
"""
host_id = handlers.host_node_id()
if host_id is None and iface is not None:
extracted = interfaces._extract_host_node_id(iface)
if extracted:
handlers.register_host_node_id(extracted)
host_id = handlers.host_node_id()
if host_id:
ingestors.set_ingestor_node_id(host_id)
heartbeat_sent = ingestors.queue_ingestor_heartbeat(
force=not ingestor_announcement_sent
)
if heartbeat_sent and not ingestor_announcement_sent:
return True
return ingestor_announcement_sent
iface_cls = getattr(iface_obj, "__class__", None)
if iface_cls is None:
return False
module_name = getattr(iface_cls, "__module__", "") or ""
return "ble_interface" in module_name
def _connected_state(candidate) -> bool | None:
"""Return the connection state advertised by ``candidate``.
@@ -233,6 +268,7 @@ def main(existing_interface=None) -> None:
inactivity_reconnect_secs = max(
0.0, getattr(config, "_INACTIVITY_RECONNECT_SECS", 0.0)
)
ingestor_announcement_sent = False
energy_saving_enabled = config.ENERGY_SAVING
energy_online_secs = max(0.0, config._ENERGY_ONLINE_DURATION_SECS)
@@ -288,6 +324,7 @@ def main(existing_interface=None) -> None:
handlers.register_host_node_id(
interfaces._extract_host_node_id(iface)
)
ingestors.set_ingestor_node_id(handlers.host_node_id())
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
initial_snapshot_sent = False
if not announced_target and resolved_target:
@@ -501,6 +538,10 @@ def main(existing_interface=None) -> None:
iface_connected_at = None
continue
ingestor_announcement_sent = _process_ingestor_heartbeat(
iface, ingestor_announcement_sent=ingestor_announcement_sent
)
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
stop.wait(config.SNAPSHOT_SECS)
except KeyboardInterrupt: # pragma: no cover - interactive only
@@ -520,6 +561,7 @@ __all__ = [
"_node_items_snapshot",
"_subscribe_receive_topics",
"_is_ble_interface",
"_process_ingestor_heartbeat",
"_connected_state",
"main",
]
+27 -5
View File
@@ -1414,6 +1414,8 @@ def store_packet_dict(packet: Mapping) -> None:
except Exception:
channel = 0
channel_name_value = channels.channel_name(channel)
pkt_id = _first(packet, "id", "packet_id", "packetId", default=None)
if pkt_id is None:
_record_ignored_packet(packet, reason="missing-packet-id")
@@ -1459,6 +1461,29 @@ def store_packet_dict(packet: Mapping) -> None:
_record_ignored_packet(packet, reason="skipped-direct-message")
return
if not channels.is_allowed_channel(channel_name_value):
_record_ignored_packet(packet, reason="disallowed-channel")
if config.DEBUG:
config._debug_log(
"Ignored packet on disallowed channel",
context="handlers.store_packet_dict",
channel=channel,
channel_name=channel_name_value,
allowed_channels=channels.allowed_channel_names(),
)
return
if channels.is_hidden_channel(channel_name_value):
_record_ignored_packet(packet, reason="hidden-channel")
if config.DEBUG:
config._debug_log(
"Ignored packet on hidden channel",
context="handlers.store_packet_dict",
channel=channel,
channel_name=channel_name_value,
)
return
message_payload = {
"id": int(pkt_id),
"rx_time": rx_time,
@@ -1476,11 +1501,8 @@ def store_packet_dict(packet: Mapping) -> None:
"emoji": emoji,
}
channel_name_value = None
if not encrypted_flag:
channel_name_value = channels.channel_name(channel)
if channel_name_value:
message_payload["channel_name"] = channel_name_value
if not encrypted_flag and channel_name_value:
message_payload["channel_name"] = channel_name_value
_queue_post_json(
"/api/messages",
_apply_radio_metadata(message_payload),
+139
View File
@@ -0,0 +1,139 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for tracking ingestor identity and liveness announcements."""
from __future__ import annotations
import time
from dataclasses import dataclass, field
from typing import Callable
from .. import VERSION as INGESTOR_VERSION
from . import config, queue
from .serialization import _canonical_node_id
HEARTBEAT_INTERVAL_SECS = 60 * 60
"""Default interval between ingestor heartbeat announcements."""
@dataclass
class _IngestorState:
"""Mutable ingestor identity and heartbeat tracking data."""
start_time: int = field(default_factory=lambda: int(time.time()))
last_heartbeat: int | None = None
node_id: str | None = None
STATE = _IngestorState()
"""Shared ingestor identity state."""
# Alias retained for clarity without exporting into the top-level mesh module to
# avoid colliding with the HTTP queue state.
INGESTOR_STATE = STATE
def ingestor_start_time() -> int:
"""Return the unix timestamp representing when the ingestor booted."""
return STATE.start_time
def set_ingestor_node_id(node_id: str | None) -> str | None:
"""Record the canonical host node identifier for the ingestor.
Parameters:
node_id: Raw node identifier reported by the connected device.
Returns:
Canonical node identifier in ``!xxxxxxxx`` form or ``None`` when the
provided value cannot be normalised.
"""
canonical = _canonical_node_id(node_id)
if canonical is None:
return None
if STATE.node_id != canonical:
STATE.node_id = canonical
STATE.last_heartbeat = None
return canonical
def queue_ingestor_heartbeat(
*,
force: bool = False,
send: Callable[[str, dict], None] | None = None,
node_id: str | None = None,
) -> bool:
"""Queue a heartbeat payload advertising ingestor liveness.
Parameters:
force: When ``True``, bypasses the heartbeat interval guard so an
announcement is queued immediately.
send: Optional transport callable used for tests; defaults to the queue
dispatcher.
node_id: Optional node identifier to register before sending. When
omitted the previously recorded identifier is reused.
Returns:
``True`` when a heartbeat payload was queued, ``False`` otherwise.
"""
canonical = _canonical_node_id(node_id) if node_id is not None else None
if canonical:
set_ingestor_node_id(canonical)
canonical = STATE.node_id
if canonical is None:
return False
now = int(time.time())
interval = max(
0, int(getattr(config, "_INGESTOR_HEARTBEAT_SECS", HEARTBEAT_INTERVAL_SECS))
)
last = STATE.last_heartbeat
if not force and last is not None and now - last < interval:
return False
payload = {
"node_id": canonical,
"start_time": STATE.start_time,
"last_seen_time": now,
"version": INGESTOR_VERSION,
}
if getattr(config, "LORA_FREQ", None) is not None:
payload["lora_freq"] = config.LORA_FREQ
if getattr(config, "MODEM_PRESET", None) is not None:
payload["modem_preset"] = config.MODEM_PRESET
queue._queue_post_json(
"/api/ingestors",
payload,
priority=getattr(
queue, "_INGESTOR_POST_PRIORITY", queue._DEFAULT_POST_PRIORITY
),
send=send,
)
STATE.last_heartbeat = now
return True
__all__ = [
"HEARTBEAT_INTERVAL_SECS",
"INGESTOR_STATE",
"ingestor_start_time",
"queue_ingestor_heartbeat",
"set_ingestor_node_id",
]
+2
View File
@@ -74,6 +74,7 @@ def _payload_key_value_pairs(payload: Mapping[str, object]) -> str:
_MESSAGE_POST_PRIORITY = 10
_INGESTOR_POST_PRIORITY = 80
_NEIGHBOR_POST_PRIORITY = 20
_TRACE_POST_PRIORITY = 25
_POSITION_POST_PRIORITY = 30
@@ -259,6 +260,7 @@ __all__ = [
"QueueState",
"_DEFAULT_POST_PRIORITY",
"_MESSAGE_POST_PRIORITY",
"_INGESTOR_POST_PRIORITY",
"_NEIGHBOR_POST_PRIORITY",
"_NODE_POST_PRIORITY",
"_POSITION_POST_PRIORITY",
+37
View File
@@ -49,6 +49,8 @@ x-ingestor-base: &ingestor-base
environment:
CONNECTION: ${CONNECTION:-/dev/ttyACM0}
CHANNEL_INDEX: ${CHANNEL_INDEX:-0}
ALLOWED_CHANNELS: ${ALLOWED_CHANNELS:-""}
HIDDEN_CHANNELS: ${HIDDEN_CHANNELS:-""}
API_TOKEN: ${API_TOKEN}
INSTANCE_DOMAIN: ${INSTANCE_DOMAIN}
POTATOMESH_INSTANCE: ${POTATOMESH_INSTANCE:-http://web:41447}
@@ -75,6 +77,21 @@ x-ingestor-base: &ingestor-base
memory: 128M
cpus: '0.1'
x-matrix-bridge-base: &matrix-bridge-base
image: ghcr.io/l5yth/potato-mesh-matrix-bridge-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:${POTATOMESH_IMAGE_TAG:-latest}
volumes:
- potatomesh_matrix_bridge_state:/app
- ./matrix/Config.toml:/app/Config.toml:ro
restart: unless-stopped
deploy:
resources:
limits:
memory: 128M
cpus: '0.1'
reservations:
memory: 64M
cpus: '0.05'
services:
web:
<<: *web-base
@@ -108,6 +125,24 @@ services:
profiles:
- bridge
matrix-bridge:
<<: *matrix-bridge-base
network_mode: host
depends_on:
- web
extra_hosts:
- "web:127.0.0.1"
matrix-bridge-bridge:
<<: *matrix-bridge-base
container_name: potatomesh-matrix-bridge
networks:
- potatomesh-network
depends_on:
- web-bridge
profiles:
- bridge
volumes:
potatomesh_data:
driver: local
@@ -115,6 +150,8 @@ volumes:
driver: local
potatomesh_logs:
driver: local
potatomesh_matrix_bridge_state:
driver: local
networks:
potatomesh-network:
+1 -1
View File
@@ -1,3 +1,3 @@
target/
Cargo.lock
coverage.lcov
bridge_state.json
+2117
View File
File diff suppressed because it is too large Load Diff
+16 -2
View File
@@ -1,6 +1,20 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[package]
name = "potatomesh-matrix-bridge"
version = "0.5.7"
version = "0.5.8"
edition = "2021"
[dependencies]
@@ -17,4 +31,4 @@ urlencoding = "2"
[dev-dependencies]
tempfile = "3"
mockito = "1"
serial_test = "3"
serial_test = "3"
+7 -6
View File
@@ -1,19 +1,20 @@
[potatomesh]
# Base URL without trailing slash
base_url = "https://potatomesh.net/api"
# Base domain (with or without trailing slash)
base_url = "https://potatomesh.net"
# Poll interval in seconds
poll_interval_secs = 60
[matrix]
# Homeserver base URL (client API) without trailing slash
homeserver = "https://matrix.example.org"
homeserver = "https://matrix.dod.ngo"
# Appservice access token (from your registration.yaml)
as_token = "YOUR_APPSERVICE_AS_TOKEN"
as_token = "INVALID_TOKEN_NOT_WORKING"
# Server name (domain) part of Matrix user IDs
server_name = "example.org"
server_name = "dod.ngo"
# Room ID to send into (must be joined by the appservice / puppets)
room_id = "!yourroomid:example.org"
room_id = "!sXabOBXbVObAlZQEUs:c-base.org" # "#potato-bridge:c-base.org"
[state]
# Where to persist last seen message id (optional but recommended)
state_file = "bridge_state.json"
+42
View File
@@ -0,0 +1,42 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM rust:1.91-bookworm AS builder
WORKDIR /app
COPY matrix/Cargo.toml matrix/Cargo.lock ./
COPY matrix/src ./src
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git \
cargo build --release --locked
FROM debian:bookworm-slim AS runtime
RUN apt-get update \
&& apt-get install -y --no-install-recommends ca-certificates gosu \
&& rm -rf /var/lib/apt/lists/*
RUN useradd --create-home --uid 10001 --shell /usr/sbin/nologin potatomesh
WORKDIR /app
COPY --from=builder /app/target/release/potatomesh-matrix-bridge /usr/local/bin/potatomesh-matrix-bridge
COPY matrix/Config.toml /app/Config.example.toml
COPY matrix/docker-entrypoint.sh /usr/local/bin/docker-entrypoint.sh
RUN chmod +x /usr/local/bin/docker-entrypoint.sh
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
+46 -16
View File
@@ -4,7 +4,7 @@ A small Rust daemon that bridges **PotatoMesh** LoRa messages into a **Matrix**
For each PotatoMesh node, the bridge creates (or uses) a **Matrix puppet user**:
- Matrix localpart: the hex node id (without `!`), e.g. `!67fc83cb``@67fc83cb:example.org`
- Matrix localpart: `potato_` + the hex node id (without `!`), e.g. `!67fc83cb``@potato_67fc83cb:example.org`
- Matrix display name: the nodes `long_name` from the PotatoMesh API
Messages from PotatoMesh are periodically fetched and forwarded to a single Matrix room as those puppet users.
@@ -13,10 +13,10 @@ Messages from PotatoMesh are periodically fetched and forwarded to a single Matr
## Features
- Polls `https://potatomesh.net/api/messages` (or any configured base URL)
- Looks up node metadata via `GET /nodes/{hex}` and caches it
- Polls `https://potatomesh.net/api/messages` (deriving `/api` from the configured base domain)
- Looks up node metadata via `GET /api/nodes/{hex}` and caches it
- One Matrix user per node:
- username: hex node id
- username: `potato_{hex node id}`
- display name: `long_name`
- Forwards `TEXT_MESSAGE_APP` messages into a single Matrix room
- Persists last-seen message ID to avoid duplicates across restarts
@@ -26,12 +26,12 @@ Messages from PotatoMesh are periodically fetched and forwarded to a single Matr
## Architecture Overview
- **PotatoMesh side**
- `GET /messages` returns an array of messages
- `GET /nodes/{hex}` returns node metadata (including `long_name`)
- `GET /api/messages` returns an array of messages
- `GET /api/nodes/{hex}` returns node metadata (including `long_name`)
- **Matrix side**
- Uses the Matrix Client-Server API with an **appservice access token**
- Impersonates puppet users via `user_id=@{hex}:{server_name}&access_token={as_token}`
- Impersonates puppet users via `user_id=@potato_{hex}:{server_name}&access_token={as_token}`
- Sends `m.room.message` events into a configured room
This is **not** a full appservice framework; it just speaks the minimal HTTP needed.
@@ -43,11 +43,11 @@ This is **not** a full appservice framework; it just speaks the minimal HTTP nee
- Rust (stable) and `cargo`
- A Matrix homeserver you control (e.g. Synapse)
- An **application service registration** on your homeserver that:
- Whitelists the puppet user namespace (e.g. `@[0-9a-f]{8}:example.org`)
- Whitelists the puppet user namespace (e.g. `@potato_[0-9a-f]{8}:example.org`)
- Provides an `as_token` the bridge can use
- Network access from the bridge host to:
- `https://potatomesh.net/api` (or your configured PotatoMesh API)
- `https://potatomesh.net/` (bridge appends `/api`)
- Your Matrix homeserver (`https://matrix.example.org`)
---
@@ -60,8 +60,8 @@ Example:
```toml
[potatomesh]
# Base URL without trailing slash
base_url = "https://potatomesh.net/api"
# Base domain (bridge will call {base_url}/api)
base_url = "https://potatomesh.net/"
# Poll interval in seconds
poll_interval_secs = 10
@@ -84,7 +84,7 @@ state_file = "bridge_state.json"
The bridge assumes:
* Messages: `GET {base_url}/messages` → JSON array, for example:
* Messages: `GET {base_url}/api/messages` → JSON array, for example:
```json
[
@@ -108,7 +108,7 @@ The bridge assumes:
]
```
* Nodes: `GET {base_url}/nodes/{hex}` → JSON, for example:
* Nodes: `GET {base_url}/api/nodes/{hex}` → JSON, for example:
```json
{
@@ -122,7 +122,7 @@ The bridge assumes:
}
```
Node hex ID is derived from `node_id` by stripping the leading `!` and using the remainder as the Matrix localpart.
Node hex ID is derived from `node_id` by stripping the leading `!` and using the remainder inside the puppet localpart prefix (`potato_{hex}`).
---
@@ -142,7 +142,7 @@ rate_limited: false
namespaces:
users:
- exclusive: true
regex: "@[0-9a-f]{8}:example.org"
regex: "@potato_[0-9a-f]{8}:example.org"
```
For this bridge, only the `as_token` and `namespaces.users` actually matter. The bridge does not accept inbound events; it only uses the `as_token` to call the homeserver.
@@ -170,6 +170,36 @@ target/release/potatomesh-matrix-bridge
---
## Docker
Build the container from the repo root with the included `matrix/Dockerfile`:
```bash
docker build -f matrix/Dockerfile -t potatomesh-matrix-bridge .
```
Provide your config at `/app/Config.toml` and persist the bridge state file by mounting volumes. Minimal example:
```bash
docker run --rm \
-v bridge_state:/app \
-v "$(pwd)/matrix/Config.toml:/app/Config.toml:ro" \
potatomesh-matrix-bridge
```
If you prefer to isolate the state file from the config, mount it directly instead of the whole `/app` directory:
```bash
docker run --rm \
-v bridge_state:/app \
-v "$(pwd)/matrix/Config.toml:/app/Config.toml:ro" \
potatomesh-matrix-bridge
```
The image ships `Config.example.toml` for reference, but the bridge will exit if `/app/Config.toml` is not provided.
---
## Run
Ensure `Config.toml` is present and valid, then:
@@ -193,7 +223,7 @@ The bridge will:
3. For each new `TEXT_MESSAGE_APP`:
* Fetch node info.
* Ensure puppet is registered (`@{hex}:{server_name}`).
* Ensure puppet is registered (`@potato_{hex}:{server_name}`).
* Set puppet display name to `long_name`.
* Send a formatted text message into `room_id` as that puppet.
* Update and persist `bridge_state.json`.
+33
View File
@@ -0,0 +1,33 @@
#!/bin/sh
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
# Default state file path from Config.toml unless overridden.
STATE_FILE="${STATE_FILE:-/app/bridge_state.json}"
STATE_DIR="$(dirname "$STATE_FILE")"
# Ensure state directory exists and is writable by the non-root user without
# touching the read-only config bind mount.
if [ ! -d "$STATE_DIR" ]; then
mkdir -p "$STATE_DIR"
fi
# Best-effort ownership fix; ignore if the underlying volume is read-only.
chown potatomesh:potatomesh "$STATE_DIR" 2>/dev/null || true
touch "$STATE_FILE" 2>/dev/null || true
chown potatomesh:potatomesh "$STATE_FILE" 2>/dev/null || true
exec gosu potatomesh potatomesh-matrix-bridge "$@"
+18 -4
View File
@@ -1,3 +1,17 @@
// Copyright © 2025-26 l5yth & contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::Deserialize;
use std::{fs, path::Path};
@@ -53,7 +67,7 @@ mod tests {
fn parse_minimal_config_from_toml_str() {
let toml_str = r#"
[potatomesh]
base_url = "https://potatomesh.net/api"
base_url = "https://potatomesh.net/"
poll_interval_secs = 10
[matrix]
@@ -67,7 +81,7 @@ mod tests {
"#;
let cfg: Config = toml::from_str(toml_str).expect("toml should parse");
assert_eq!(cfg.potatomesh.base_url, "https://potatomesh.net/api");
assert_eq!(cfg.potatomesh.base_url, "https://potatomesh.net/");
assert_eq!(cfg.potatomesh.poll_interval_secs, 10);
assert_eq!(cfg.matrix.homeserver, "https://matrix.example.org");
@@ -88,7 +102,7 @@ mod tests {
fn load_from_file_valid_file() {
let toml_str = r#"
[potatomesh]
base_url = "https://potatomesh.net/api"
base_url = "https://potatomesh.net/"
poll_interval_secs = 10
[matrix]
@@ -120,7 +134,7 @@ mod tests {
fn from_default_path_found() {
let toml_str = r#"
[potatomesh]
base_url = "https://potatomesh.net/api"
base_url = "https://potatomesh.net/"
poll_interval_secs = 10
[matrix]
+246 -39
View File
@@ -1,3 +1,17 @@
// Copyright © 2025-26 l5yth & contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod config;
mod matrix;
mod potatomesh;
@@ -10,11 +24,12 @@ use tracing::{error, info};
use crate::config::Config;
use crate::matrix::MatrixAppserviceClient;
use crate::potatomesh::{PotatoClient, PotatoMessage};
use crate::potatomesh::{FetchParams, PotatoClient, PotatoMessage};
#[derive(Debug, serde::Serialize, serde::Deserialize, Default)]
pub struct BridgeState {
last_message_id: Option<u64>,
last_checked_at: Option<u64>,
}
impl BridgeState {
@@ -48,6 +63,92 @@ impl BridgeState {
}
}
fn build_fetch_params(state: &BridgeState) -> FetchParams {
if state.last_message_id.is_none() {
FetchParams {
limit: None,
since: None,
}
} else if let Some(ts) = state.last_checked_at {
FetchParams {
limit: None,
since: Some(ts),
}
} else {
FetchParams {
limit: Some(10),
since: None,
}
}
}
fn update_checkpoint(state: &mut BridgeState, delivered_all: bool, now_secs: u64) -> bool {
if !delivered_all {
return false;
}
if state.last_message_id.is_some() {
state.last_checked_at = Some(now_secs);
true
} else {
false
}
}
async fn poll_once(
potato: &PotatoClient,
matrix: &MatrixAppserviceClient,
state: &mut BridgeState,
state_path: &str,
now_secs: u64,
) {
let params = build_fetch_params(state);
match potato.fetch_messages(params).await {
Ok(mut msgs) => {
// sort by id ascending so we process in order
msgs.sort_by_key(|m| m.id);
let mut delivered_all = true;
for msg in &msgs {
if !state.should_forward(msg) {
continue;
}
// Filter to the ports you care about
if let Some(port) = &msg.portnum {
if port != "TEXT_MESSAGE_APP" {
state.update_with(msg);
continue;
}
}
if let Err(e) = handle_message(potato, matrix, state, msg).await {
error!("Error handling message {}: {:?}", msg.id, e);
delivered_all = false;
continue;
}
// persist after each processed message
if let Err(e) = state.save(state_path) {
error!("Error saving state: {:?}", e);
}
}
// Only advance checkpoint after successful delivery and a known last_message_id.
if update_checkpoint(state, delivered_all, now_secs) {
if let Err(e) = state.save(state_path) {
error!("Error saving state: {:?}", e);
}
}
}
Err(e) => {
error!("Error fetching PotatoMesh messages: {:?}", e);
}
}
}
#[tokio::main]
async fn main() -> Result<()> {
// Logging: RUST_LOG=info,bridge=debug,reqwest=warn ...
@@ -64,7 +165,9 @@ async fn main() -> Result<()> {
let http = reqwest::Client::builder().build()?;
let potato = PotatoClient::new(http.clone(), cfg.potatomesh.clone());
potato.health_check().await?;
let matrix = MatrixAppserviceClient::new(http.clone(), cfg.matrix.clone());
matrix.health_check().await?;
let state_path = &cfg.state.state_file;
let mut state = BridgeState::load(state_path)?;
@@ -73,36 +176,12 @@ async fn main() -> Result<()> {
let poll_interval = Duration::from_secs(cfg.potatomesh.poll_interval_secs);
loop {
match potato.fetch_messages().await {
Ok(mut msgs) => {
// sort by id ascending so we process in order
msgs.sort_by_key(|m| m.id);
let now_secs = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_secs();
for msg in msgs {
if !state.should_forward(&msg) {
continue;
}
// Filter to the ports you care about
if msg.portnum != "TEXT_MESSAGE_APP" {
state.update_with(&msg);
continue;
}
if let Err(e) = handle_message(&potato, &matrix, &mut state, &msg).await {
error!("Error handling message {}: {:?}", msg.id, e);
}
// persist after each processed message
if let Err(e) = state.save(state_path) {
error!("Error saving state: {:?}", e);
}
}
}
Err(e) => {
error!("Error fetching PotatoMesh messages: {:?}", e);
}
}
poll_once(&potato, &matrix, &mut state, state_path, now_secs).await;
sleep(poll_interval).await;
}
@@ -129,13 +208,19 @@ async fn handle_message(
.unwrap_or_else(|| node.long_name.clone());
let body = format!(
"[{short}] {text}\n({from_id} → {to_id}, RSSI {rssi} dB, SNR {snr} dB, {chan}/{preset})",
"[{short}] {text}\n({from_id} → {to_id}, {rssi}, {snr}, {chan}/{preset})",
short = short,
text = msg.text,
from_id = msg.from_id,
to_id = msg.to_id,
rssi = msg.rssi,
snr = msg.snr,
rssi = msg
.rssi
.map(|v| format!("RSSI {v} dB"))
.unwrap_or_else(|| "RSSI n/a".to_string()),
snr = msg
.snr
.map(|v| format!("SNR {v} dB"))
.unwrap_or_else(|| "SNR n/a".to_string()),
chan = msg.channel_name,
preset = msg.modem_preset,
);
@@ -161,14 +246,14 @@ mod tests {
from_id: "!abcd1234".to_string(),
to_id: "^all".to_string(),
channel: 1,
portnum: "TEXT_MESSAGE_APP".to_string(),
portnum: Some("TEXT_MESSAGE_APP".to_string()),
text: "Ping".to_string(),
rssi: -100,
hop_limit: 1,
rssi: Some(-100),
hop_limit: Some(1),
lora_freq: 868,
modem_preset: "MediumFast".to_string(),
channel_name: "TEST".to_string(),
snr: 0.0,
snr: Some(0.0),
reply_id: None,
node_id: "!abcd1234".to_string(),
}
@@ -209,6 +294,7 @@ mod tests {
fn bridge_state_update_is_monotonic() {
let mut state = BridgeState {
last_message_id: Some(50),
last_checked_at: None,
};
let m = sample_msg(40);
@@ -225,11 +311,13 @@ mod tests {
let state = BridgeState {
last_message_id: Some(12345),
last_checked_at: Some(99),
};
state.save(path_str).unwrap();
let loaded_state = BridgeState::load(path_str).unwrap();
assert_eq!(loaded_state.last_message_id, Some(12345));
assert_eq!(loaded_state.last_checked_at, Some(99));
}
#[test]
@@ -240,6 +328,125 @@ mod tests {
let state = BridgeState::load(path_str).unwrap();
assert_eq!(state.last_message_id, None);
assert_eq!(state.last_checked_at, None);
}
#[test]
fn update_checkpoint_requires_last_message_id() {
let mut state = BridgeState {
last_message_id: None,
last_checked_at: Some(10),
};
let saved = update_checkpoint(&mut state, true, 123);
assert!(!saved);
assert_eq!(state.last_checked_at, Some(10));
}
#[test]
fn update_checkpoint_skips_when_not_delivered() {
let mut state = BridgeState {
last_message_id: Some(5),
last_checked_at: Some(10),
};
let saved = update_checkpoint(&mut state, false, 123);
assert!(!saved);
assert_eq!(state.last_checked_at, Some(10));
}
#[test]
fn update_checkpoint_sets_when_safe() {
let mut state = BridgeState {
last_message_id: Some(5),
last_checked_at: None,
};
let saved = update_checkpoint(&mut state, true, 123);
assert!(saved);
assert_eq!(state.last_checked_at, Some(123));
}
#[test]
fn fetch_params_respects_missing_last_message_id() {
let state = BridgeState {
last_message_id: None,
last_checked_at: Some(123),
};
let params = build_fetch_params(&state);
assert_eq!(params.limit, None);
assert_eq!(params.since, None);
}
#[test]
fn fetch_params_uses_since_when_safe() {
let state = BridgeState {
last_message_id: Some(1),
last_checked_at: Some(123),
};
let params = build_fetch_params(&state);
assert_eq!(params.limit, None);
assert_eq!(params.since, Some(123));
}
#[test]
fn fetch_params_defaults_to_small_window() {
let state = BridgeState {
last_message_id: Some(1),
last_checked_at: None,
};
let params = build_fetch_params(&state);
assert_eq!(params.limit, Some(10));
assert_eq!(params.since, None);
}
#[tokio::test]
async fn poll_once_persists_checkpoint_without_messages() {
let tmp_dir = tempfile::tempdir().unwrap();
let state_path = tmp_dir.path().join("state.json");
let state_str = state_path.to_str().unwrap();
let mut server = mockito::Server::new_async().await;
let mock_msgs = server
.mock("GET", "/api/messages")
.match_query(mockito::Matcher::Any)
.with_status(200)
.with_header("content-type", "application/json")
.with_body("[]")
.create();
let http_client = reqwest::Client::new();
let potatomesh_cfg = PotatomeshConfig {
base_url: server.url(),
poll_interval_secs: 1,
};
let matrix_cfg = MatrixConfig {
homeserver: server.url(),
as_token: "AS_TOKEN".to_string(),
server_name: "example.org".to_string(),
room_id: "!roomid:example.org".to_string(),
};
let potato = PotatoClient::new(http_client.clone(), potatomesh_cfg);
let matrix = MatrixAppserviceClient::new(http_client, matrix_cfg);
let mut state = BridgeState {
last_message_id: Some(1),
last_checked_at: None,
};
poll_once(&potato, &matrix, &mut state, state_str, 123).await;
mock_msgs.assert();
// Should have advanced checkpoint and saved it.
assert_eq!(state.last_checked_at, Some(123));
let loaded = BridgeState::load(state_str).unwrap();
assert_eq!(loaded.last_checked_at, Some(123));
assert_eq!(loaded.last_message_id, Some(1));
}
#[tokio::test]
@@ -258,11 +465,11 @@ mod tests {
};
let node_id = "abcd1234";
let user_id = format!("@{}:{}", node_id, matrix_cfg.server_name);
let user_id = format!("@potato_{}:{}", node_id, matrix_cfg.server_name);
let encoded_user = urlencoding::encode(&user_id);
let mock_get_node = server
.mock("GET", "/nodes/abcd1234")
.mock("GET", "/api/nodes/abcd1234")
.with_status(200)
.with_header("content-type", "application/json")
.with_body(r#"{"node_id": "!abcd1234", "long_name": "Test Node", "short_name": "TN"}"#)
+69 -6
View File
@@ -1,3 +1,17 @@
// Copyright © 2025-26 l5yth & contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::Serialize;
use std::sync::{
atomic::{AtomicU64, Ordering},
@@ -27,9 +41,24 @@ impl MatrixAppserviceClient {
}
}
/// Convert a node_id like "!deadbeef" into Matrix localpart "deadbeef".
/// Basic liveness check against the homeserver.
pub async fn health_check(&self) -> anyhow::Result<()> {
let url = format!("{}/_matrix/client/versions", self.cfg.homeserver);
let resp = self.http.get(&url).send().await?;
if resp.status().is_success() {
tracing::info!("Matrix homeserver healthy at {}", self.cfg.homeserver);
Ok(())
} else {
Err(anyhow::anyhow!(
"Matrix homeserver versions check failed with status {}",
resp.status()
))
}
}
/// Convert a node_id like "!deadbeef" into Matrix localpart "potato_deadbeef".
pub fn localpart_from_node_id(node_id: &str) -> String {
node_id.trim_start_matches('!').to_string()
format!("potato_{}", node_id.trim_start_matches('!'))
}
/// Build a full Matrix user_id from localpart.
@@ -175,11 +204,11 @@ mod tests {
fn localpart_strips_bang_correctly() {
assert_eq!(
MatrixAppserviceClient::localpart_from_node_id("!deadbeef"),
"deadbeef"
"potato_deadbeef"
);
assert_eq!(
MatrixAppserviceClient::localpart_from_node_id("cafebabe"),
"cafebabe"
"potato_cafebabe"
);
}
@@ -188,8 +217,42 @@ mod tests {
let http = reqwest::Client::builder().build().unwrap();
let client = MatrixAppserviceClient::new(http, dummy_cfg());
let uid = client.user_id("deadbeef");
assert_eq!(uid, "@deadbeef:example.org");
let uid = client.user_id("potato_deadbeef");
assert_eq!(uid, "@potato_deadbeef:example.org");
}
#[tokio::test]
async fn health_check_success() {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("GET", "/_matrix/client/versions")
.with_status(200)
.create();
let mut cfg = dummy_cfg();
cfg.homeserver = server.url();
let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
let result = client.health_check().await;
mock.assert();
assert!(result.is_ok());
}
#[tokio::test]
async fn health_check_failure() {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("GET", "/_matrix/client/versions")
.with_status(500)
.create();
let mut cfg = dummy_cfg();
cfg.homeserver = server.url();
let client = MatrixAppserviceClient::new(reqwest::Client::new(), cfg);
let result = client.health_check().await;
mock.assert();
assert!(result.is_err());
}
#[test]
+222 -24
View File
@@ -1,3 +1,17 @@
// Copyright © 2025-26 l5yth & contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::Deserialize;
use std::collections::HashMap;
use std::sync::Arc;
@@ -14,19 +28,29 @@ pub struct PotatoMessage {
pub from_id: String,
pub to_id: String,
pub channel: u8,
pub portnum: String,
#[serde(default)]
pub portnum: Option<String>,
pub text: String,
pub rssi: i16,
pub hop_limit: u8,
#[serde(default)]
pub rssi: Option<i16>,
#[serde(default)]
pub hop_limit: Option<u8>,
pub lora_freq: u32,
pub modem_preset: String,
pub channel_name: String,
pub snr: f32,
#[serde(default)]
pub snr: Option<f32>,
#[serde(default)]
pub reply_id: Option<u64>,
pub node_id: String,
}
#[derive(Debug, Default, Clone)]
pub struct FetchParams {
pub limit: Option<u32>,
pub since: Option<u64>,
}
#[allow(dead_code)]
#[derive(Debug, Deserialize, Clone)]
pub struct PotatoNode {
@@ -67,22 +91,55 @@ impl PotatoClient {
}
}
/// Build the API root; accept either a bare domain or one already ending in `/api`.
fn api_base(&self) -> String {
let trimmed = self.cfg.base_url.trim_end_matches('/');
if trimmed.ends_with("/api") {
trimmed.to_string()
} else {
format!("{}/api", trimmed)
}
}
fn messages_url(&self) -> String {
format!("{}/messages", self.cfg.base_url)
format!("{}/messages", self.api_base())
}
fn node_url(&self, hex_id: &str) -> String {
// e.g. https://potatomesh.net/api/nodes/67fc83cb
format!("{}/nodes/{}", self.cfg.base_url, hex_id)
format!("{}/nodes/{}", self.api_base(), hex_id)
}
pub async fn fetch_messages(&self) -> anyhow::Result<Vec<PotatoMessage>> {
let resp = self
.http
.get(self.messages_url())
.send()
.await?
.error_for_status()?;
/// Basic liveness check against the PotatoMesh API.
pub async fn health_check(&self) -> anyhow::Result<()> {
let base = self
.cfg
.base_url
.trim_end_matches('/')
.trim_end_matches("/api");
let url = format!("{}/version", base);
let resp = self.http.get(&url).send().await?;
if resp.status().is_success() {
tracing::info!("PotatoMesh API healthy at {}", self.cfg.base_url);
Ok(())
} else {
Err(anyhow::anyhow!(
"PotatoMesh health check failed with status {}",
resp.status()
))
}
}
pub async fn fetch_messages(&self, params: FetchParams) -> anyhow::Result<Vec<PotatoMessage>> {
let mut req = self.http.get(self.messages_url());
if let Some(limit) = params.limit {
req = req.query(&[("limit", limit)]);
}
if let Some(since) = params.since {
req = req.query(&[("since", since)]);
}
let resp = req.send().await?.error_for_status()?;
let msgs: Vec<PotatoMessage> = resp.json().await?;
Ok(msgs)
@@ -146,9 +203,38 @@ mod tests {
assert_eq!(m.id, 2947676906);
assert_eq!(m.from_id, "!da6556d4");
assert_eq!(m.node_id, "!06871773");
assert_eq!(m.portnum, "TEXT_MESSAGE_APP");
assert_eq!(m.portnum.as_deref(), Some("TEXT_MESSAGE_APP"));
assert_eq!(m.lora_freq, 868);
assert!((m.snr - (-9.0)).abs() < f32::EPSILON);
assert!((m.snr.unwrap() - (-9.0)).abs() < f32::EPSILON);
}
#[test]
fn deserialize_message_with_missing_optional_fields() {
let json = r#"
[
{
"id": 1,
"rx_time": 0,
"rx_iso": "2025-11-27T11:03:56Z",
"from_id": "!abcd1234",
"to_id": "^all",
"channel": 1,
"text": "Ping",
"lora_freq": 868,
"modem_preset": "MediumFast",
"channel_name": "TEST",
"node_id": "!abcd1234"
}
]
"#;
let msgs: Vec<PotatoMessage> = serde_json::from_str(json).expect("valid message json");
assert_eq!(msgs.len(), 1);
let m = &msgs[0];
assert!(m.portnum.is_none());
assert!(m.rssi.is_none());
assert!(m.hop_limit.is_none());
assert!(m.snr.is_none());
}
#[test]
@@ -206,7 +292,29 @@ mod tests {
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
assert_eq!(client.messages_url(), "http://localhost:8080/messages");
assert_eq!(client.messages_url(), "http://localhost:8080/api/messages");
}
#[test]
fn test_messages_url_with_trailing_slash() {
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
base_url: "http://localhost:8080/".to_string(),
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
assert_eq!(client.messages_url(), "http://localhost:8080/api/messages");
}
#[test]
fn test_messages_url_with_existing_api_suffix() {
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
base_url: "http://localhost:8080/api/".to_string(),
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
assert_eq!(client.messages_url(), "http://localhost:8080/api/messages");
}
#[test]
@@ -219,7 +327,7 @@ mod tests {
let client = PotatoClient::new(http_client, config);
assert_eq!(
client.node_url("!1234"),
"http://localhost:8080/nodes/!1234"
"http://localhost:8080/api/nodes/!1234"
);
}
@@ -227,7 +335,8 @@ mod tests {
async fn test_fetch_messages_success() {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("GET", "/messages")
.mock("GET", "/api/messages")
.match_query(mockito::Matcher::Any) // allow optional query params
.with_status(200)
.with_header("content-type", "application/json")
.with_body(
@@ -251,7 +360,7 @@ mod tests {
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
let result = client.fetch_messages().await;
let result = client.fetch_messages(FetchParams::default()).await;
mock.assert();
assert!(result.is_ok());
@@ -261,9 +370,9 @@ mod tests {
}
#[tokio::test]
async fn test_fetch_messages_error() {
async fn test_health_check_success() {
let mut server = mockito::Server::new_async().await;
let mock = server.mock("GET", "/messages").with_status(500).create();
let mock = server.mock("GET", "/version").with_status(200).create();
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
@@ -271,12 +380,97 @@ mod tests {
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
let result = client.fetch_messages().await;
let result = client.health_check().await;
mock.assert();
assert!(result.is_ok());
}
#[tokio::test]
async fn test_health_check_strips_api_suffix() {
let mut server = mockito::Server::new_async().await;
let mock = server.mock("GET", "/version").with_status(200).create();
let http_client = reqwest::Client::new();
let mut base = server.url();
base.push_str("/api");
let config = PotatomeshConfig {
base_url: base,
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
let result = client.health_check().await;
mock.assert();
assert!(result.is_ok());
}
#[tokio::test]
async fn test_health_check_failure() {
let mut server = mockito::Server::new_async().await;
let mock = server.mock("GET", "/version").with_status(500).create();
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
base_url: server.url(),
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
let result = client.health_check().await;
mock.assert();
assert!(result.is_err());
}
#[tokio::test]
async fn test_fetch_messages_error() {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("GET", "/api/messages")
.match_query(mockito::Matcher::Any)
.with_status(500)
.create();
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
base_url: server.url(),
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
let result = client.fetch_messages(FetchParams::default()).await;
mock.assert();
assert!(result.is_err());
}
#[tokio::test]
async fn test_fetch_messages_with_limit_and_since() {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("GET", "/api/messages")
.match_query("limit=10&since=123")
.with_status(200)
.with_header("content-type", "application/json")
.with_body("[]")
.create();
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
base_url: server.url(),
poll_interval_secs: 60,
};
let client = PotatoClient::new(http_client, config);
let params = FetchParams {
limit: Some(10),
since: Some(123),
};
let result = client.fetch_messages(params).await;
mock.assert();
assert!(result.is_ok());
assert!(result.unwrap().is_empty());
}
#[tokio::test]
async fn test_get_node_cache_hit() {
let http_client = reqwest::Client::new();
@@ -313,7 +507,8 @@ mod tests {
async fn test_get_node_cache_miss() {
let mut server = mockito::Server::new_async().await;
let mock = server
.mock("GET", "/nodes/1234")
.mock("GET", "/api/nodes/1234")
.match_query(mockito::Matcher::Any)
.with_status(200)
.with_header("content-type", "application/json")
.with_body(
@@ -348,7 +543,10 @@ mod tests {
#[tokio::test]
async fn test_get_node_error() {
let mut server = mockito::Server::new_async().await;
let mock = server.mock("GET", "/nodes/1234").with_status(500).create();
let mock = server
.mock("GET", "/api/nodes/1234")
.with_status(500)
.create();
let http_client = reqwest::Client::new();
let config = PotatomeshConfig {
+437
View File
@@ -0,0 +1,437 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for :mod:`data.mesh_ingestor.daemon`."""
from __future__ import annotations
import sys
import threading
import types
from pathlib import Path
from typing import Any
import pytest
REPO_ROOT = Path(__file__).resolve().parents[1]
if str(REPO_ROOT) not in sys.path:
sys.path.insert(0, str(REPO_ROOT))
from data.mesh_ingestor import daemon
class FakeEvent:
"""Test double for :class:`threading.Event` that can auto-set itself."""
instances: list["FakeEvent"] = []
def __init__(self, *, auto_set_on_wait: bool = False):
self._is_set = False
self._auto_set_on_wait = auto_set_on_wait
self.wait_calls: list[Any] = []
FakeEvent.instances.append(self)
def set(self) -> None:
"""Mark the event as set."""
self._is_set = True
def is_set(self) -> bool:
"""Return whether the event is currently set."""
return self._is_set
def wait(self, timeout: float | None = None) -> bool:
"""Record waits and optionally auto-set the flag."""
self.wait_calls.append(timeout)
if self._auto_set_on_wait:
self._is_set = True
return self._is_set
class AutoSetEvent(FakeEvent):
"""Event variant that automatically sets on each wait call."""
def __init__(self): # noqa: D401 - short initializer docstring handled by class
super().__init__(auto_set_on_wait=True)
@pytest.fixture(autouse=True)
def reset_fake_events():
"""Ensure :class:`FakeEvent` registry is cleared between tests."""
FakeEvent.instances.clear()
yield
FakeEvent.instances.clear()
def test_event_wait_default_detection(monkeypatch):
"""``_event_wait_allows_default_timeout`` matches defaulted signatures."""
assert daemon._event_wait_allows_default_timeout() is True
class _NoDefaultEvent:
def wait(self, timeout): # type: ignore[override]
return bool(timeout)
monkeypatch.setattr(
daemon, "threading", types.SimpleNamespace(Event=_NoDefaultEvent)
)
assert daemon._event_wait_allows_default_timeout() is False
def test_subscribe_receive_topics(monkeypatch):
"""Subscribing to receive topics returns the exact topic list."""
subscribed: list[str] = []
def _record_subscription(_handler, topic):
subscribed.append(topic)
monkeypatch.setattr(
daemon, "pub", types.SimpleNamespace(subscribe=_record_subscription)
)
assert daemon._subscribe_receive_topics() == list(daemon._RECEIVE_TOPICS)
assert subscribed == list(daemon._RECEIVE_TOPICS)
def test_node_items_snapshot_handles_mutation(monkeypatch):
"""Snapshots tolerate temporary runtime errors while iterating."""
class MutatingMapping(dict):
def __bool__(self):
return True
def items(self): # type: ignore[override]
raise RuntimeError("dictionary changed size during iteration")
monkeypatch.setattr(daemon.time, "sleep", lambda _: None)
assert daemon._node_items_snapshot({"a": 1}) == [("a", 1)]
assert daemon._node_items_snapshot(MutatingMapping(), retries=1) is None
class IteratingMapping:
def __init__(self):
self.calls = 0
self._data = {"x": 10, "y": 20}
def __iter__(self):
self.calls += 1
if self.calls == 1:
raise RuntimeError("dictionary changed size during iteration")
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
mapping = IteratingMapping()
assert daemon._node_items_snapshot(mapping, retries=2) == [("x", 10), ("y", 20)]
def test_close_interface_respects_timeout(monkeypatch):
"""Long-running close calls emit a timeout debug log."""
log_calls = []
monkeypatch.setattr(daemon.config, "_CLOSE_TIMEOUT_SECS", 0.01)
monkeypatch.setattr(
daemon.config, "_debug_log", lambda *args, **kwargs: log_calls.append(kwargs)
)
blocker = threading.Event()
class SlowInterface:
def close(self):
blocker.wait(timeout=0.1)
daemon._close_interface(SlowInterface())
assert any("timeout_seconds" in entry for entry in log_calls)
def test_close_interface_immediate_path(monkeypatch):
"""A zero timeout calls ``close`` inline without threading."""
flags = {"called": False}
monkeypatch.setattr(daemon.config, "_CLOSE_TIMEOUT_SECS", 0)
class ImmediateInterface:
def close(self):
flags["called"] = True
daemon._close_interface(ImmediateInterface())
assert flags["called"] is True
def test_ble_interface_detection():
"""Detect BLE module names reliably."""
class BLE:
__module__ = "meshtastic.ble_interface"
class NonBLE:
__module__ = "meshtastic.serial"
assert daemon._is_ble_interface(BLE()) is True
assert daemon._is_ble_interface(NonBLE()) is False
assert daemon._is_ble_interface(None) is False
def test_process_ingestor_heartbeat_with_extracted_host(monkeypatch):
"""Host id extraction triggers heartbeat announcement flag updates."""
host_ids: list[str | None] = [None]
ingestor_ids: list[str | None] = []
queued: list[bool] = []
monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: host_ids[0])
monkeypatch.setattr(
daemon.interfaces, "_extract_host_node_id", lambda iface: "!abcd"
)
monkeypatch.setattr(
daemon.handlers,
"register_host_node_id",
lambda node: host_ids.__setitem__(0, node),
)
monkeypatch.setattr(daemon.ingestors, "set_ingestor_node_id", ingestor_ids.append)
monkeypatch.setattr(
daemon.ingestors,
"queue_ingestor_heartbeat",
lambda force: queued.append(force) or True,
)
assert (
daemon._process_ingestor_heartbeat(object(), ingestor_announcement_sent=False)
is True
)
assert host_ids[0] == "!abcd"
assert ingestor_ids[-1] == "!abcd"
assert queued[-1] is True
monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: "!abcd")
monkeypatch.setattr(
daemon.ingestors,
"queue_ingestor_heartbeat",
lambda force: queued.append(force) or False,
)
assert (
daemon._process_ingestor_heartbeat(object(), ingestor_announcement_sent=True)
is True
)
assert queued[-1] is False
def test_connected_state_branches(monkeypatch):
"""Connection state resolves across multiple attribute forms."""
event = threading.Event()
event.set()
assert daemon._connected_state(event) is True
class CallableCandidate:
def __call__(self):
return False
assert daemon._connected_state(CallableCandidate()) is False
class BooleanCandidate:
def __bool__(self):
raise RuntimeError("cannot bool")
assert daemon._connected_state(BooleanCandidate()) is None
class HasIsSet:
def is_set(self):
raise RuntimeError("broken")
assert daemon._connected_state(HasIsSet()) is None
def _configure_common_defaults(
monkeypatch, *, energy_saving: bool = False, inactivity: float = 0.0
):
"""Set fast configuration defaults shared by daemon integration tests."""
monkeypatch.setattr(daemon.config, "SNAPSHOT_SECS", 0)
monkeypatch.setattr(daemon.config, "_RECONNECT_INITIAL_DELAY_SECS", 0)
monkeypatch.setattr(daemon.config, "_RECONNECT_MAX_DELAY_SECS", 0)
monkeypatch.setattr(daemon.config, "_CLOSE_TIMEOUT_SECS", 0)
monkeypatch.setattr(daemon.config, "ENERGY_SAVING", energy_saving)
monkeypatch.setattr(
daemon.config, "_ENERGY_ONLINE_DURATION_SECS", 0 if energy_saving else 0.0
)
monkeypatch.setattr(daemon.config, "_ENERGY_SLEEP_SECS", 0.0)
monkeypatch.setattr(daemon.config, "_INGESTOR_HEARTBEAT_SECS", 0)
monkeypatch.setattr(daemon.config, "_INACTIVITY_RECONNECT_SECS", inactivity)
monkeypatch.setattr(daemon.config, "CONNECTION", "serial0")
class DummyInterface:
"""Lightweight mesh interface stand-in used for daemon integration tests."""
def __init__(self, *, nodes=None, is_connected=True, client_present=True):
self.nodes = nodes if nodes is not None else {"!node": {"id": 1}}
self.isConnected = is_connected
self.client = object() if client_present else None
def close(self):
return None
def test_main_happy_path(monkeypatch):
"""The main loop processes snapshots and heartbeats once before stopping."""
_configure_common_defaults(monkeypatch)
monkeypatch.setattr(
daemon,
"threading",
types.SimpleNamespace(
Event=AutoSetEvent,
current_thread=threading.current_thread,
main_thread=threading.main_thread,
),
)
monkeypatch.setattr(
daemon, "pub", types.SimpleNamespace(subscribe=lambda *_args, **_kwargs: None)
)
monkeypatch.setattr(
daemon.interfaces,
"_create_serial_interface",
lambda candidate: (DummyInterface(), candidate),
)
monkeypatch.setattr(daemon.interfaces, "_ensure_radio_metadata", lambda iface: None)
monkeypatch.setattr(
daemon.interfaces, "_ensure_channel_metadata", lambda iface: None
)
monkeypatch.setattr(
daemon.interfaces, "_extract_host_node_id", lambda iface: "!host"
)
host_id = {"value": None}
monkeypatch.setattr(
daemon.handlers,
"register_host_node_id",
lambda node: host_id.__setitem__("value", node),
)
monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: host_id["value"])
monkeypatch.setattr(daemon.handlers, "upsert_node", lambda *_args, **_kwargs: None)
monkeypatch.setattr(daemon.handlers, "last_packet_monotonic", lambda: None)
heartbeats: list[bool] = []
monkeypatch.setattr(
daemon.ingestors, "set_ingestor_node_id", lambda *_args, **_kwargs: None
)
monkeypatch.setattr(
daemon.ingestors,
"queue_ingestor_heartbeat",
lambda force: heartbeats.append(force) or True,
)
daemon.main()
assert heartbeats
assert host_id["value"] == "!host"
assert FakeEvent.instances and FakeEvent.instances[0].is_set() is True
def test_main_energy_saving_disconnect(monkeypatch):
"""Energy saving mode disconnects and sleeps when deadlines expire."""
_configure_common_defaults(monkeypatch, energy_saving=True)
monkeypatch.setattr(
daemon,
"threading",
types.SimpleNamespace(
Event=AutoSetEvent,
current_thread=threading.current_thread,
main_thread=threading.main_thread,
),
)
monkeypatch.setattr(
daemon, "pub", types.SimpleNamespace(subscribe=lambda *_args, **_kwargs: None)
)
monkeypatch.setattr(
daemon.interfaces,
"_create_serial_interface",
lambda candidate: (DummyInterface(), candidate),
)
monkeypatch.setattr(daemon.interfaces, "_ensure_radio_metadata", lambda iface: None)
monkeypatch.setattr(
daemon.interfaces, "_ensure_channel_metadata", lambda iface: None
)
monkeypatch.setattr(
daemon.interfaces, "_extract_host_node_id", lambda iface: "!host"
)
monkeypatch.setattr(
daemon.handlers, "register_host_node_id", lambda *_args, **_kwargs: None
)
monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: "!host")
monkeypatch.setattr(daemon.handlers, "upsert_node", lambda *_args, **_kwargs: None)
monkeypatch.setattr(daemon.handlers, "last_packet_monotonic", lambda: None)
monkeypatch.setattr(
daemon.ingestors, "set_ingestor_node_id", lambda *_args, **_kwargs: None
)
monkeypatch.setattr(
daemon.ingestors, "queue_ingestor_heartbeat", lambda *_args, **_kwargs: True
)
daemon.main()
assert FakeEvent.instances and FakeEvent.instances[0].is_set() is True
def test_main_inactivity_reconnect(monkeypatch):
"""Inactivity triggers reconnect attempts and respects stop events."""
_configure_common_defaults(monkeypatch, inactivity=0.5)
monkeypatch.setattr(
daemon,
"threading",
types.SimpleNamespace(
Event=AutoSetEvent,
current_thread=threading.current_thread,
main_thread=threading.main_thread,
),
)
monkeypatch.setattr(
daemon, "pub", types.SimpleNamespace(subscribe=lambda *_args, **_kwargs: None)
)
interface_cycle = iter(
[DummyInterface(is_connected=False), DummyInterface(is_connected=True)]
)
monkeypatch.setattr(
daemon.interfaces,
"_create_serial_interface",
lambda candidate: (next(interface_cycle), candidate),
)
monkeypatch.setattr(daemon.interfaces, "_ensure_radio_metadata", lambda iface: None)
monkeypatch.setattr(
daemon.interfaces, "_ensure_channel_metadata", lambda iface: None
)
monkeypatch.setattr(
daemon.interfaces, "_extract_host_node_id", lambda iface: "!host"
)
monkeypatch.setattr(
daemon.handlers, "register_host_node_id", lambda *_args, **_kwargs: None
)
monkeypatch.setattr(daemon.handlers, "host_node_id", lambda: "!host")
monkeypatch.setattr(daemon.handlers, "upsert_node", lambda *_args, **_kwargs: None)
monotonic_calls = iter([0.0, 1.0, 2.0, 3.0, 4.0])
monkeypatch.setattr(daemon.time, "monotonic", lambda: next(monotonic_calls))
monkeypatch.setattr(daemon.handlers, "last_packet_monotonic", lambda: 0.0)
monkeypatch.setattr(
daemon.ingestors, "set_ingestor_node_id", lambda *_args, **_kwargs: None
)
monkeypatch.setattr(
daemon.ingestors, "queue_ingestor_heartbeat", lambda *_args, **_kwargs: True
)
daemon.main()
assert any(event.is_set() for event in FakeEvent.instances)
+324
View File
@@ -20,6 +20,7 @@ import re
import sys
import threading
import types
import time
"""End-to-end tests covering the mesh ingestion package."""
@@ -214,6 +215,9 @@ def mesh_module(monkeypatch):
if attr in module.__dict__:
delattr(module, attr)
module.channels._reset_channel_cache()
module.ingestors.STATE.start_time = int(time.time())
module.ingestors.STATE.last_heartbeat = None
module.ingestors.STATE.node_id = None
yield module
@@ -281,6 +285,59 @@ def test_instance_domain_infers_scheme_for_hostnames(mesh_module, monkeypatch):
mesh_module.INSTANCE = mesh_module.config.INSTANCE
def test_parse_channel_names_applies_allowlist(mesh_module):
"""Ensure allowlists reuse the shared channel parser."""
mesh = mesh_module
previous_allowed = mesh.ALLOWED_CHANNELS
try:
parsed = mesh.config._parse_channel_names(" Primary ,Chat ,primary , Ops ")
mesh.ALLOWED_CHANNELS = parsed
assert parsed == ("Primary", "Chat", "Ops")
assert mesh.channels.allowed_channel_names() == ("Primary", "Chat", "Ops")
assert mesh.channels.is_allowed_channel("chat")
assert mesh.channels.is_allowed_channel(" ops ")
assert not mesh.channels.is_allowed_channel("unknown")
assert not mesh.channels.is_allowed_channel(None)
assert mesh.config._parse_channel_names("") == ()
finally:
mesh.ALLOWED_CHANNELS = previous_allowed
def test_allowed_channel_defaults_allow_all(mesh_module):
"""Ensure unset allowlists do not block any channels."""
mesh = mesh_module
previous_allowed = mesh.ALLOWED_CHANNELS
try:
mesh.ALLOWED_CHANNELS = ()
assert mesh.channels.is_allowed_channel("Any")
finally:
mesh.ALLOWED_CHANNELS = previous_allowed
def test_parse_hidden_channels_deduplicates_names(mesh_module):
"""Ensure hidden channel parsing strips blanks and deduplicates."""
mesh = mesh_module
previous_hidden = mesh.HIDDEN_CHANNELS
try:
parsed = mesh.config._parse_hidden_channels(" Chat , ,Secret ,chat")
mesh.HIDDEN_CHANNELS = parsed
assert parsed == ("Chat", "Secret")
assert mesh.channels.hidden_channel_names() == ("Chat", "Secret")
assert mesh.channels.is_hidden_channel(" chat ")
assert not mesh.channels.is_hidden_channel("unknown")
assert mesh.config._parse_hidden_channels("") == ()
finally:
mesh.HIDDEN_CHANNELS = previous_hidden
def test_subscribe_receive_topics_covers_all_handlers(mesh_module, monkeypatch):
mesh = mesh_module
daemon_mod = sys.modules["data.mesh_ingestor.daemon"]
@@ -1932,6 +1989,146 @@ def test_store_packet_dict_appends_channel_name(mesh_module, monkeypatch, capsys
assert "channel_display='Chat'" in log_output
def test_store_packet_dict_skips_hidden_channel(mesh_module, monkeypatch, capsys):
mesh = mesh_module
mesh.channels._reset_channel_cache()
mesh.config.MODEM_PRESET = None
class DummyInterface:
def __init__(self) -> None:
self.localNode = SimpleNamespace(
channels=[
SimpleNamespace(
role=1,
settings=SimpleNamespace(name="Primary"),
),
SimpleNamespace(
role=2,
index=5,
settings=SimpleNamespace(name="Chat"),
),
]
)
def waitForConfig(self):
return None
mesh.channels.capture_from_interface(DummyInterface())
capsys.readouterr()
captured: list[tuple[str, dict, int]] = []
ignored: list[str] = []
monkeypatch.setattr(
mesh,
"_queue_post_json",
lambda path, payload, *, priority: captured.append((path, payload, priority)),
)
monkeypatch.setattr(
mesh.handlers,
"_record_ignored_packet",
lambda packet, *, reason: ignored.append(reason),
)
previous_debug = mesh.config.DEBUG
previous_hidden = mesh.HIDDEN_CHANNELS
previous_allowed = mesh.ALLOWED_CHANNELS
mesh.config.DEBUG = True
mesh.DEBUG = True
mesh.ALLOWED_CHANNELS = ("Chat",)
mesh.HIDDEN_CHANNELS = ("Chat",)
try:
packet = {
"id": "999",
"rxTime": 24_680,
"from": "!sender",
"to": "^all",
"channel": 5,
"decoded": {"text": "hidden msg", "portnum": 1},
}
mesh.store_packet_dict(packet)
assert captured == []
assert ignored == ["hidden-channel"]
assert "Ignored packet on hidden channel" in capsys.readouterr().out
finally:
mesh.HIDDEN_CHANNELS = previous_hidden
mesh.ALLOWED_CHANNELS = previous_allowed
mesh.config.DEBUG = previous_debug
mesh.DEBUG = previous_debug
def test_store_packet_dict_skips_disallowed_channel(mesh_module, monkeypatch, capsys):
    """A decoded packet on a channel outside ALLOWED_CHANNELS is dropped.

    Nothing is queued for upload; the handler records the
    'disallowed-channel' ignore reason and a debug line is printed.
    """
    mesh = mesh_module
    mesh.channels._reset_channel_cache()
    mesh.config.MODEM_PRESET = None

    # Fake radio interface exposing a primary channel plus a secondary
    # channel named "Chat" at index 5.
    class FakeInterface:
        def __init__(self) -> None:
            primary = SimpleNamespace(
                role=1,
                settings=SimpleNamespace(name="Primary"),
            )
            secondary = SimpleNamespace(
                role=2,
                index=5,
                settings=SimpleNamespace(name="Chat"),
            )
            self.localNode = SimpleNamespace(channels=[primary, secondary])

        def waitForConfig(self):
            return None

    mesh.channels.capture_from_interface(FakeInterface())
    capsys.readouterr()  # drop capture noise so the final assertion is clean
    posted: list[tuple[str, dict, int]] = []
    reasons: list[str] = []

    def record_post(path, payload, *, priority):
        posted.append((path, payload, priority))

    def record_ignore(packet, *, reason):
        reasons.append(reason)

    monkeypatch.setattr(mesh, "_queue_post_json", record_post)
    monkeypatch.setattr(mesh.handlers, "_record_ignored_packet", record_ignore)
    # Save mutated module/config state and restore it in the finally block.
    saved_debug = mesh.config.DEBUG
    saved_allowed = mesh.ALLOWED_CHANNELS
    saved_hidden = mesh.HIDDEN_CHANNELS
    mesh.config.DEBUG = True
    mesh.DEBUG = True
    mesh.ALLOWED_CHANNELS = ("Primary",)
    mesh.HIDDEN_CHANNELS = ()
    try:
        mesh.store_packet_dict(
            {
                "id": "1001",
                "rxTime": 25_680,
                "from": "!sender",
                "to": "^all",
                "channel": 5,
                "decoded": {"text": "disallowed msg", "portnum": 1},
            }
        )
        assert posted == []
        assert reasons == ["disallowed-channel"]
        assert "Ignored packet on disallowed channel" in capsys.readouterr().out
    finally:
        mesh.ALLOWED_CHANNELS = saved_allowed
        mesh.HIDDEN_CHANNELS = saved_hidden
        mesh.config.DEBUG = saved_debug
        mesh.DEBUG = saved_debug
def test_store_packet_dict_includes_encrypted_payload(mesh_module, monkeypatch):
mesh = mesh_module
captured = []
@@ -2575,6 +2772,133 @@ def test_queue_post_json_skips_when_active(mesh_module, monkeypatch):
mesh._clear_post_queue()
def test_process_ingestor_heartbeat_updates_flag(mesh_module, monkeypatch):
    """The heartbeat cycle registers the host node and forces one heartbeat.

    With a fresh state and an interface reporting node number 0xCAFEBABE,
    _process_ingestor_heartbeat must call queue_ingestor_heartbeat exactly
    once with force=True, return True, and register the canonical lowercase
    "!cafebabe" host node id.
    """
    mesh = mesh_module
    # Reset global state so earlier tests cannot leak a host/ingestor id.
    mesh.ingestors.STATE.last_heartbeat = None
    mesh.ingestors.STATE.node_id = None
    mesh.handlers.register_host_node_id(None)
    recorded = {"force": None, "count": 0}

    def fake_queue_ingestor_heartbeat(*, force):
        recorded["force"] = force
        recorded["count"] += 1
        return True

    monkeypatch.setattr(
        mesh.ingestors, "queue_ingestor_heartbeat", fake_queue_ingestor_heartbeat
    )

    # Minimal interface exposing only the node number the helper reads.
    class DummyIface:
        def __init__(self):
            self.myNodeNum = 0xCAFEBABE

    updated = mesh._process_ingestor_heartbeat(
        DummyIface(), ingestor_announcement_sent=False
    )
    assert updated is True
    assert recorded["force"] is True
    assert recorded["count"] == 1
    assert mesh.handlers.host_node_id() == "!cafebabe"
def test_process_ingestor_heartbeat_skips_without_host(mesh_module, monkeypatch):
    """Without an interface or known host node the heartbeat cycle is a no-op."""
    mesh = mesh_module
    # Clear any identity left over from earlier tests.
    mesh.handlers.register_host_node_id(None)
    state = mesh.ingestors.STATE
    state.node_id = None
    state.last_heartbeat = None
    monkeypatch.setattr(mesh.ingestors, "queue_ingestor_heartbeat", lambda **_: False)
    result = mesh._process_ingestor_heartbeat(None, ingestor_announcement_sent=False)
    assert result is False
    assert state.node_id is None
    assert state.last_heartbeat is None
def test_ingestor_heartbeat_respects_interval_override(mesh_module, monkeypatch):
    """An unforced heartbeat is throttled while the interval has not elapsed.

    With the interval overridden to 10_000 s, a last heartbeat at t=1_000 and
    the clock frozen at t=2_000, queue_ingestor_heartbeat() must decline and
    leave last_heartbeat untouched.
    """
    mesh = mesh_module
    mesh.ingestors.STATE.start_time = 100
    mesh.ingestors.STATE.last_heartbeat = 1_000
    mesh.ingestors.STATE.node_id = "!abcd0001"
    # Override the heartbeat interval (propagates into config; see the
    # companion attribute-propagation test).
    mesh._INGESTOR_HEARTBEAT_SECS = 10_000
    # Freeze time at 2_000, only 1_000 s after the last heartbeat.
    monkeypatch.setattr(mesh.ingestors.time, "time", lambda: 2_000)
    sent = mesh.ingestors.queue_ingestor_heartbeat()
    assert sent is False
    assert mesh.ingestors.STATE.last_heartbeat == 1_000
def test_setting_ingestor_attr_propagates(mesh_module):
    """Assigning the heartbeat interval on mesh mirrors into mesh.config."""
    setattr(mesh_module, "_INGESTOR_HEARTBEAT_SECS", 123)
    assert getattr(mesh_module.config, "_INGESTOR_HEARTBEAT_SECS") == 123
def test_queue_ingestor_heartbeat_requires_node_id(mesh_module, monkeypatch):
    """No heartbeat is queued while the ingestor node id is unknown."""
    mesh = mesh_module
    posted = []

    def record_post(path, payload, *, priority, send=None):
        posted.append((path, payload, priority))

    monkeypatch.setattr(mesh.queue, "_queue_post_json", record_post)
    state = mesh.ingestors.STATE
    state.node_id = None
    state.last_heartbeat = None
    # Even a forced heartbeat must decline without a node id.
    assert mesh.ingestors.queue_ingestor_heartbeat(force=True) is False
    assert posted == []
def test_queue_ingestor_heartbeat_enqueues_and_throttles(mesh_module, monkeypatch):
    """A forced heartbeat enqueues one payload; an immediate retry throttles.

    Verifies the posted payload shape: canonical lowercase node id, the
    configured start time, version, LoRa frequency and modem preset, posted
    to /api/ingestors at the ingestor priority.
    """
    mesh = mesh_module
    captured = []
    monkeypatch.setattr(
        mesh.queue,
        "_queue_post_json",
        lambda path, payload, *, priority, send=None: captured.append(
            (path, payload, priority)
        ),
    )
    # Seed the ingestor state and the radio config reflected in the payload.
    mesh.ingestors.STATE.start_time = 1_700_000_000
    mesh.ingestors.STATE.last_heartbeat = None
    mesh.ingestors.STATE.node_id = None
    mesh.config.LORA_FREQ = 915
    mesh.config.MODEM_PRESET = "LongFast"
    # Mixed-case input: the payload is expected to use the lowercase form.
    mesh.ingestors.set_ingestor_node_id("!CAFEBABE")
    first = mesh.ingestors.queue_ingestor_heartbeat(force=True)
    second = mesh.ingestors.queue_ingestor_heartbeat()
    assert first is True
    assert second is False  # throttled: interval has not elapsed
    assert len(captured) == 1
    path, payload, priority = captured[0]
    assert path == "/api/ingestors"
    assert payload["node_id"] == "!cafebabe"
    assert payload["start_time"] == 1_700_000_000
    assert payload["last_seen_time"] >= payload["start_time"]
    assert payload["version"] == mesh.VERSION
    assert payload["lora_freq"] == 915
    assert payload["modem_preset"] == "LongFast"
    assert priority == mesh.queue._INGESTOR_POST_PRIORITY
def test_mesh_version_export_matches_package(mesh_module):
    """The collector module re-exports the package-level VERSION string."""
    import data

    assert mesh_module.VERSION == data.VERSION
def test_node_to_dict_handles_proto_fallback(mesh_module, monkeypatch):
mesh = mesh_module
@@ -199,6 +199,66 @@ module PotatoMesh
updated
end
# Insert or update an ingestor heartbeat payload.
#
# Incoming timestamps are clamped into [0, now] and forced monotonic
# (last_seen_time >= start_time) before persistence. On conflict the row
# keeps the larger of the stored and incoming timestamps, and nil incoming
# metadata never overwrites existing values (COALESCE).
#
# @param db [SQLite3::Database] open database handle.
# @param payload [Hash] ingestor payload from the collector.
# @return [Boolean] true when persistence succeeded.
def upsert_ingestor(db, payload)
  return false unless payload.is_a?(Hash)
  # Accept both snake_case and camelCase identifier keys from remote senders.
  parts = canonical_node_parts(payload["node_id"] || payload["id"])
  return false unless parts
  node_id, = parts
  now = Time.now.to_i
  start_time = coerce_integer(payload["start_time"] || payload["startTime"]) || now
  last_seen_time =
    coerce_integer(payload["last_seen_time"] || payload["lastSeenTime"]) || start_time
  # Clamp to sane bounds: non-negative, not in the future, monotonic.
  start_time = 0 if start_time.negative?
  last_seen_time = 0 if last_seen_time.negative?
  start_time = now if start_time > now
  last_seen_time = now if last_seen_time > now
  last_seen_time = start_time if last_seen_time < start_time
  # A version string is mandatory; reject heartbeats that omit it.
  version = string_or_nil(payload["version"] || payload["ingestorVersion"])
  return false unless version
  lora_freq = coerce_integer(payload["lora_freq"])
  modem_preset = string_or_nil(payload["modem_preset"])
  with_busy_retry do
    db.execute <<~SQL, [node_id, start_time, last_seen_time, version, lora_freq, modem_preset]
      INSERT INTO ingestors(node_id, start_time, last_seen_time, version, lora_freq, modem_preset)
      VALUES(?,?,?,?,?,?)
      ON CONFLICT(node_id) DO UPDATE SET
        start_time = CASE
          WHEN excluded.start_time > ingestors.start_time THEN excluded.start_time
          ELSE ingestors.start_time
        END,
        last_seen_time = CASE
          WHEN excluded.last_seen_time > ingestors.last_seen_time THEN excluded.last_seen_time
          ELSE ingestors.last_seen_time
        END,
        version = COALESCE(excluded.version, ingestors.version),
        lora_freq = COALESCE(excluded.lora_freq, ingestors.lora_freq),
        modem_preset = COALESCE(excluded.modem_preset, ingestors.modem_preset)
    SQL
  end
  true
rescue SQLite3::SQLException => e
  # Persistence failures are logged and reported to the caller as false.
  warn_log(
    "Failed to upsert ingestor record",
    context: "data_processing.ingestors",
    node_id: node_id,
    error_class: e.class.name,
    error_message: e.message,
  )
  false
end
def upsert_node(db, node_id, n)
user = n["user"] || {}
met = n["deviceMetrics"] || {}
@@ -1262,7 +1322,7 @@ module PotatoMesh
rx_time = now if rx_time.nil? || rx_time > now
rx_iso = string_or_nil(payload["rx_iso"]) || Time.at(rx_time).utc.iso8601
metrics = normalize_json_object(payload["metrics"])
metrics = normalize_json_object(payload["metrics"]) || {}
src = coerce_integer(payload["src"] || payload["source"] || payload["from"])
dest = coerce_integer(payload["dest"] || payload["destination"] || payload["to"])
rssi = coerce_integer(payload["rssi"]) || coerce_integer(metrics["rssi"])
+26 -3
View File
@@ -81,10 +81,10 @@ module PotatoMesh
return false unless File.exist?(PotatoMesh::Config.db_path)
db = open_database(readonly: true)
required = %w[nodes messages positions telemetry neighbors instances traces trace_hops]
required = %w[nodes messages positions telemetry neighbors instances traces trace_hops ingestors]
tables =
db.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages','positions','telemetry','neighbors','instances','traces','trace_hops')",
"SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages','positions','telemetry','neighbors','instances','traces','trace_hops','ingestors')",
).flatten
(required - tables).empty?
rescue SQLite3::Exception
@@ -99,7 +99,7 @@ module PotatoMesh
def init_db
FileUtils.mkdir_p(File.dirname(PotatoMesh::Config.db_path))
db = open_database
%w[nodes messages positions telemetry neighbors instances traces].each do |schema|
%w[nodes messages positions telemetry neighbors instances traces ingestors].each do |schema|
sql_file = File.expand_path("../../../../data/#{schema}.sql", __dir__)
db.execute_batch(File.read(sql_file))
end
@@ -167,6 +167,11 @@ module PotatoMesh
instance_columns = db.execute("PRAGMA table_info(instances)").map { |row| row[1] }
unless instance_columns.include?("contact_link")
db.execute("ALTER TABLE instances ADD COLUMN contact_link TEXT")
instance_columns << "contact_link"
end
unless instance_columns.include?("nodes_count")
db.execute("ALTER TABLE instances ADD COLUMN nodes_count INTEGER")
end
telemetry_tables =
@@ -192,6 +197,24 @@ module PotatoMesh
traces_schema = File.expand_path("../../../../data/traces.sql", __dir__)
db.execute_batch(File.read(traces_schema))
end
ingestor_tables =
db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='ingestors'").flatten
if ingestor_tables.empty?
ingestors_schema = File.expand_path("../../../../data/ingestors.sql", __dir__)
db.execute_batch(File.read(ingestors_schema))
else
ingestor_columns = db.execute("PRAGMA table_info(ingestors)").map { |row| row[1] }
unless ingestor_columns.include?("version")
db.execute("ALTER TABLE ingestors ADD COLUMN version TEXT")
end
unless ingestor_columns.include?("lora_freq")
db.execute("ALTER TABLE ingestors ADD COLUMN lora_freq INTEGER")
end
unless ingestor_columns.include?("modem_preset")
db.execute("ALTER TABLE ingestors ADD COLUMN modem_preset TEXT")
end
end
rescue SQLite3::SQLException, Errno::ENOENT => e
warn_log(
"Failed to apply schema upgrade",
+49 -3
View File
@@ -61,6 +61,7 @@ module PotatoMesh
def self_instance_attributes
domain = self_instance_domain
last_update = latest_node_update_timestamp || Time.now.to_i
nodes_count = active_node_count_since(Time.now.to_i - PotatoMesh::Config.remote_instance_max_node_age)
{
id: app_constant(:SELF_INSTANCE_ID),
domain: domain,
@@ -74,9 +75,36 @@ module PotatoMesh
last_update_time: last_update,
is_private: private_mode?,
contact_link: sanitized_contact_link,
nodes_count: nodes_count,
}
end
# Count the number of nodes active since the supplied timestamp.
#
# Opens a read-only handle when none is supplied and closes it again in the
# ensure block; a caller-provided handle is left open for reuse.
#
# @param cutoff [Integer] unix timestamp in seconds.
# @param db [SQLite3::Database, nil] optional open handle to reuse.
# @return [Integer, nil] node count or nil when unavailable.
def active_node_count_since(cutoff, db: nil)
  return nil unless cutoff
  handle = db || open_database(readonly: true)
  count =
    with_busy_retry do
      handle.get_first_value("SELECT COUNT(*) FROM nodes WHERE last_heard >= ?", cutoff.to_i)
    end
  # Integer() raises ArgumentError on a non-numeric result, which the
  # rescue below converts to a nil ("unknown") count.
  Integer(count)
rescue SQLite3::Exception, ArgumentError => e
  warn_log(
    "Failed to count active nodes",
    context: "instances.nodes_count",
    error_class: e.class.name,
    error_message: e.message,
  )
  nil
ensure
  # Only close handles this method opened itself.
  handle&.close unless db
end
def sign_instance_attributes(attributes)
payload = canonical_instance_payload(attributes)
Base64.strict_encode64(
@@ -723,6 +751,7 @@ module PotatoMesh
end
processed_entries = 0
recent_cutoff = Time.now.to_i - PotatoMesh::Config.remote_instance_max_node_age
payload.each do |entry|
if per_response_limit && per_response_limit.positive? && processed_entries >= per_response_limit
debug_log(
@@ -777,13 +806,27 @@ module PotatoMesh
attributes[:is_private] = false if attributes[:is_private].nil?
nodes_since_path = "/api/nodes?since=#{recent_cutoff}&limit=1000"
nodes_since_window, nodes_since_metadata = fetch_instance_json(attributes[:domain], nodes_since_path)
if nodes_since_window.is_a?(Array)
attributes[:nodes_count] = nodes_since_window.length
elsif nodes_since_metadata
warn_log(
"Failed to load remote node window",
context: "federation.instances",
domain: attributes[:domain],
reason: Array(nodes_since_metadata).map(&:to_s).join("; "),
)
end
remote_nodes, node_metadata = fetch_instance_json(attributes[:domain], "/api/nodes")
remote_nodes ||= nodes_since_window if nodes_since_window.is_a?(Array)
unless remote_nodes
warn_log(
"Failed to load remote node data",
context: "federation.instances",
domain: attributes[:domain],
reason: Array(node_metadata).map(&:to_s).join("; "),
reason: Array(node_metadata || nodes_since_metadata).map(&:to_s).join("; "),
)
next
end
@@ -1059,8 +1102,8 @@ module PotatoMesh
sql = <<~SQL
INSERT INTO instances (
id, domain, pubkey, name, version, channel, frequency,
latitude, longitude, last_update_time, is_private, contact_link, signature
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
latitude, longitude, last_update_time, is_private, nodes_count, contact_link, signature
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
domain=excluded.domain,
pubkey=excluded.pubkey,
@@ -1072,10 +1115,12 @@ module PotatoMesh
longitude=excluded.longitude,
last_update_time=excluded.last_update_time,
is_private=excluded.is_private,
nodes_count=excluded.nodes_count,
contact_link=excluded.contact_link,
signature=excluded.signature
SQL
nodes_count = coerce_integer(attributes[:nodes_count])
params = [
attributes[:id],
normalized_domain,
@@ -1088,6 +1133,7 @@ module PotatoMesh
attributes[:longitude],
attributes[:last_update_time],
attributes[:is_private] ? 1 : 0,
nodes_count,
attributes[:contact_link],
signature,
]
+2 -1
View File
@@ -143,6 +143,7 @@ module PotatoMesh
"longitude" => coerce_float(row["longitude"]),
"lastUpdateTime" => last_update_time,
"isPrivate" => private_flag,
"nodesCount" => coerce_integer(row["nodes_count"]),
"contactLink" => string_or_nil(row["contact_link"]),
"signature" => signature,
}
@@ -174,7 +175,7 @@ module PotatoMesh
min_last_update_time = now - PotatoMesh::Config.week_seconds
sql = <<~SQL
SELECT id, domain, pubkey, name, version, channel, frequency,
latitude, longitude, last_update_time, is_private, contact_link, signature
latitude, longitude, last_update_time, is_private, nodes_count, contact_link, signature
FROM instances
WHERE domain IS NOT NULL AND TRIM(domain) != ''
AND pubkey IS NOT NULL AND TRIM(pubkey) != ''
+171 -21
View File
@@ -20,6 +20,7 @@ module PotatoMesh
MAX_QUERY_LIMIT = 1000
DEFAULT_TELEMETRY_WINDOW_SECONDS = 86_400
DEFAULT_TELEMETRY_BUCKET_SECONDS = 300
TELEMETRY_ZERO_INVALID_COLUMNS = %w[battery_level voltage].freeze
TELEMETRY_AGGREGATE_COLUMNS =
%w[
battery_level
@@ -48,6 +49,9 @@ module PotatoMesh
soil_moisture
soil_temperature
].freeze
TELEMETRY_AGGREGATE_SCALERS = {
"current" => 0.001,
}.freeze
# Remove nil or empty values from an API response hash to reduce payload size
# while preserving legitimate zero-valued measurements.
@@ -78,6 +82,19 @@ module PotatoMesh
end
end
# Treat zero-valued telemetry measurements that are known to be invalid
# (such as battery level or voltage) as missing data so they are omitted
# from API responses. Values that do not respond to #zero? (including nil)
# pass through unchanged.
#
# @param value [Numeric, nil] telemetry measurement.
# @return [Numeric, nil] nil when the value is zero, otherwise the original value.
def nil_if_zero(value)
  if value.respond_to?(:zero?) && value.zero?
    nil
  else
    value
  end
end
# Normalise a caller-provided limit to a sane, positive integer.
#
# @param limit [Object] value coerced to an integer.
@@ -99,6 +116,17 @@ module PotatoMesh
coerced
end
# Normalise a caller-supplied timestamp for API pagination windows.
#
# Non-integer or negative input collapses to zero before the floor is
# applied, so the result never precedes the rolling window boundary.
#
# @param since [Object] requested lower bound expressed as seconds since the epoch.
# @param floor [Integer] minimum allowable timestamp used to clamp the value.
# @return [Integer] non-negative timestamp greater than or equal to +floor+.
def normalize_since_threshold(since, floor: 0)
  value = coerce_integer(since)
  value = 0 if value.nil? || value.negative?
  value < floor ? floor : value
end
def node_reference_tokens(node_ref)
parts = canonical_node_parts(node_ref)
canonical_id, numeric_id = parts ? parts[0, 2] : [nil, nil]
@@ -181,12 +209,19 @@ module PotatoMesh
["(#{clauses.join(" OR ")})", params]
end
def query_nodes(limit, node_ref: nil)
# Fetch node state optionally scoped by identifier and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to narrow results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted node rows suitable for API responses.
def query_nodes(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
now = Time.now.to_i
min_last_heard = now - PotatoMesh::Config.week_seconds
since_threshold = normalize_since_threshold(since, floor: min_last_heard)
params = []
where_clauses = []
@@ -197,7 +232,7 @@ module PotatoMesh
params.concat(clause.last)
else
where_clauses << "last_heard >= ?"
params << min_last_heard
params << since_threshold
end
if private_mode?
@@ -225,7 +260,7 @@ module PotatoMesh
.map { |value| coerce_integer(value) }
.compact
.max
last_candidate && last_candidate >= min_last_heard
last_candidate && last_candidate >= since_threshold
end
rows.each do |r|
r["role"] ||= "CLIENT"
@@ -245,6 +280,47 @@ module PotatoMesh
db&.close
end
# Fetch ingestor heartbeats with optional freshness filtering.
#
# Rows older than the rolling one-week window are excluded; a caller
# supplied +since+ can only narrow that window, never widen it.
#
# @param limit [Integer] maximum number of ingestors to return.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted ingestor rows suitable for API responses.
def query_ingestors(limit, since: 0)
  limit = coerce_query_limit(limit)
  db = open_database(readonly: true)
  db.results_as_hash = true
  now = Time.now.to_i
  cutoff = now - PotatoMesh::Config.week_seconds
  since_threshold = normalize_since_threshold(since, floor: cutoff)
  sql = <<~SQL
    SELECT node_id, start_time, last_seen_time, version, lora_freq, modem_preset
    FROM ingestors
    WHERE last_seen_time >= ?
    ORDER BY last_seen_time DESC
    LIMIT ?
  SQL
  rows = db.execute(sql, [since_threshold, limit])
  rows.each do |row|
    # results_as_hash also exposes positional integer keys; drop them so
    # only the named columns survive into the API response.
    row.delete_if { |key, _| key.is_a?(Integer) }
    start_time = coerce_integer(row["start_time"])
    last_seen_time = coerce_integer(row["last_seen_time"])
    # Clamp future timestamps to now and keep last_seen >= start.
    start_time = now if start_time && start_time > now
    last_seen_time = now if last_seen_time && last_seen_time > now
    if start_time && last_seen_time && last_seen_time < start_time
      last_seen_time = start_time
    end
    row["start_time"] = start_time
    row["last_seen_time"] = last_seen_time
    # ISO companions are only emitted when the numeric timestamp exists.
    row["start_time_iso"] = Time.at(start_time).utc.iso8601 if start_time
    row["last_seen_iso"] = Time.at(last_seen_time).utc.iso8601 if last_seen_time
  end
  rows.map { |row| compact_api_row(row) }
ensure
  db&.close
end
# Fetch chat messages with optional filtering.
#
# @param limit [Integer] maximum number of rows to return.
@@ -254,8 +330,7 @@ module PotatoMesh
# @return [Array<Hash>] compacted message rows safe for API responses.
def query_messages(limit, node_ref: nil, include_encrypted: false, since: 0)
limit = coerce_query_limit(limit)
since_threshold = coerce_integer(since)
since_threshold = 0 if since_threshold.nil? || since_threshold.negative?
since_threshold = normalize_since_threshold(since, floor: 0)
db = open_database(readonly: true)
db.results_as_hash = true
params = []
@@ -333,7 +408,13 @@ module PotatoMesh
db&.close
end
def query_positions(limit, node_ref: nil)
# Fetch positions optionally scoped by node and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to scope results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted position rows suitable for API responses.
def query_positions(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
@@ -341,8 +422,9 @@ module PotatoMesh
where_clauses = []
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
since_threshold = normalize_since_threshold(since, floor: min_rx_time)
where_clauses << "COALESCE(rx_time, position_time, 0) >= ?"
params << min_rx_time
params << since_threshold
if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
@@ -384,7 +466,13 @@ module PotatoMesh
db&.close
end
def query_neighbors(limit, node_ref: nil)
# Fetch neighbor relationships optionally scoped by node and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to scope results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted neighbor rows suitable for API responses.
def query_neighbors(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
@@ -392,8 +480,9 @@ module PotatoMesh
where_clauses = []
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
since_threshold = normalize_since_threshold(since, floor: min_rx_time)
where_clauses << "COALESCE(rx_time, 0) >= ?"
params << min_rx_time
params << since_threshold
if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["node_id", "neighbor_id"])
@@ -424,7 +513,13 @@ module PotatoMesh
db&.close
end
def query_telemetry(limit, node_ref: nil)
# Fetch telemetry packets optionally scoped by node and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to scope results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted telemetry rows suitable for API responses.
def query_telemetry(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
@@ -432,8 +527,9 @@ module PotatoMesh
where_clauses = []
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
since_threshold = normalize_since_threshold(since, floor: min_rx_time)
where_clauses << "COALESCE(rx_time, telemetry_time, 0) >= ?"
params << min_rx_time
params << since_threshold
if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
@@ -470,8 +566,8 @@ module PotatoMesh
r["rssi"] = coerce_integer(r["rssi"])
r["bitfield"] = coerce_integer(r["bitfield"])
r["snr"] = coerce_float(r["snr"])
r["battery_level"] = coerce_float(r["battery_level"])
r["voltage"] = coerce_float(r["voltage"])
r["battery_level"] = sanitize_zero_invalid_metric("battery_level", coerce_float(r["battery_level"]))
r["voltage"] = sanitize_zero_invalid_metric("voltage", coerce_float(r["voltage"]))
r["channel_utilization"] = coerce_float(r["channel_utilization"])
r["air_util_tx"] = coerce_float(r["air_util_tx"])
r["uptime_seconds"] = coerce_integer(r["uptime_seconds"])
@@ -479,7 +575,8 @@ module PotatoMesh
r["relative_humidity"] = coerce_float(r["relative_humidity"])
r["barometric_pressure"] = coerce_float(r["barometric_pressure"])
r["gas_resistance"] = coerce_float(r["gas_resistance"])
r["current"] = coerce_float(r["current"])
current_ma = coerce_float(r["current"])
r["current"] = current_ma.nil? ? nil : current_ma / 1000.0
r["iaq"] = coerce_integer(r["iaq"])
r["distance"] = coerce_float(r["distance"])
r["lux"] = coerce_float(r["lux"])
@@ -502,7 +599,13 @@ module PotatoMesh
db&.close
end
def query_telemetry_buckets(window_seconds:, bucket_seconds:)
# Aggregate telemetry metrics into time buckets.
#
# @param window_seconds [Integer] duration expressed in seconds to include in the query.
# @param bucket_seconds [Integer] size of each aggregation bucket in seconds.
# @param since [Integer] unix timestamp threshold applied in addition to the requested window.
# @return [Array<Hash>] aggregated telemetry metrics grouped by bucket start time.
def query_telemetry_buckets(window_seconds:, bucket_seconds:, since: 0)
window = coerce_integer(window_seconds) || DEFAULT_TELEMETRY_WINDOW_SECONDS
window = DEFAULT_TELEMETRY_WINDOW_SECONDS if window <= 0
bucket = coerce_integer(bucket_seconds) || DEFAULT_TELEMETRY_BUCKET_SECONDS
@@ -512,6 +615,7 @@ module PotatoMesh
db.results_as_hash = true
now = Time.now.to_i
min_timestamp = now - window
since_threshold = normalize_since_threshold(since, floor: min_timestamp)
bucket_expression = "((COALESCE(rx_time, telemetry_time) / ?) * ?)"
select_clauses = [
"#{bucket_expression} AS bucket_start",
@@ -521,9 +625,10 @@ module PotatoMesh
]
TELEMETRY_AGGREGATE_COLUMNS.each do |column|
select_clauses << "AVG(#{column}) AS #{column}_avg"
select_clauses << "MIN(#{column}) AS #{column}_min"
select_clauses << "MAX(#{column}) AS #{column}_max"
aggregate_source = telemetry_aggregate_source(column)
select_clauses << "AVG(#{aggregate_source}) AS #{column}_avg"
select_clauses << "MIN(#{aggregate_source}) AS #{column}_min"
select_clauses << "MAX(#{aggregate_source}) AS #{column}_max"
end
sql = <<~SQL
@@ -536,7 +641,7 @@ module PotatoMesh
ORDER BY bucket_start ASC
LIMIT ?
SQL
params = [bucket, bucket, min_timestamp, MAX_QUERY_LIMIT]
params = [bucket, bucket, since_threshold, MAX_QUERY_LIMIT]
rows = db.execute(sql, params)
rows.map do |row|
bucket_start = coerce_integer(row["bucket_start"])
@@ -549,8 +654,18 @@ module PotatoMesh
avg = coerce_float(row["#{column}_avg"])
min_value = coerce_float(row["#{column}_min"])
max_value = coerce_float(row["#{column}_max"])
scale = TELEMETRY_AGGREGATE_SCALERS[column]
if scale
avg *= scale unless avg.nil?
min_value *= scale unless min_value.nil?
max_value *= scale unless max_value.nil?
end
metrics = {}
avg = sanitize_zero_invalid_metric(column, avg)
min_value = sanitize_zero_invalid_metric(column, min_value)
max_value = sanitize_zero_invalid_metric(column, max_value)
metrics["avg"] = avg unless avg.nil?
metrics["min"] = min_value unless min_value.nil?
metrics["max"] = max_value unless max_value.nil?
@@ -578,7 +693,41 @@ module PotatoMesh
db&.close
end
def query_traces(limit, node_ref: nil)
# Normalise telemetry metrics that cannot legitimately be zero so API
# consumers do not mistake absent readings for valid measurements. Only
# columns listed in TELEMETRY_ZERO_INVALID_COLUMNS (battery level,
# voltage) receive the zero-as-missing treatment; all other metrics pass
# through untouched.
#
# @param column [String] telemetry metric name.
# @param value [Numeric, nil] raw metric value.
# @return [Numeric, nil] metric value or nil when zero is invalid.
def sanitize_zero_invalid_metric(column, value)
  return value unless TELEMETRY_ZERO_INVALID_COLUMNS.include?(column)

  nil_if_zero(value)
end
# Choose the SQL expression used to aggregate telemetry metrics. Columns
# that cannot legitimately be zero are wrapped in NULLIF so that invalid
# zero readings are ignored by aggregate functions such as AVG, MIN, and
# MAX, keeping database semantics aligned with the API-level
# zero-as-missing handling.
#
# @param column [String] telemetry metric name.
# @return [String] SQL fragment used in aggregate expressions.
def telemetry_aggregate_source(column)
  if TELEMETRY_ZERO_INVALID_COLUMNS.include?(column)
    "NULLIF(#{column}, 0)"
  else
    column
  end
end
# Fetch trace records optionally scoped by node and timestamp.
#
# @param limit [Integer] maximum number of rows to return.
# @param node_ref [String, Integer, nil] optional node reference to scope results.
# @param since [Integer] unix timestamp threshold applied in addition to the rolling window.
# @return [Array<Hash>] compacted trace rows suitable for API responses.
def query_traces(limit, node_ref: nil, since: 0)
limit = coerce_query_limit(limit)
db = open_database(readonly: true)
db.results_as_hash = true
@@ -586,8 +735,9 @@ module PotatoMesh
where_clauses = []
now = Time.now.to_i
min_rx_time = now - PotatoMesh::Config.week_seconds
since_threshold = normalize_since_threshold(since, floor: min_rx_time)
where_clauses << "COALESCE(rx_time, 0) >= ?"
params << min_rx_time
params << since_threshold
if node_ref
tokens = node_reference_tokens(node_ref)
+21 -11
View File
@@ -64,7 +64,7 @@ module PotatoMesh
app.get "/api/nodes" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_nodes(limit).to_json
query_nodes(limit, since: params["since"]).to_json
end
app.get "/api/nodes/:id" do
@@ -72,11 +72,17 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
rows = query_nodes(limit, node_ref: node_ref)
rows = query_nodes(limit, node_ref: node_ref, since: params["since"])
halt 404, { error: "not found" }.to_json if rows.empty?
rows.first.to_json
end
# List recent ingestor heartbeats as JSON; supports ?limit= and ?since=.
app.get "/api/ingestors" do
  content_type :json
  limit = coerce_query_limit(params["limit"])
  query_ingestors(limit, since: params["since"]).to_json
end
app.get "/api/messages" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
@@ -105,7 +111,7 @@ module PotatoMesh
app.get "/api/positions" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_positions(limit).to_json
query_positions(limit, since: params["since"]).to_json
end
app.get "/api/positions/:id" do
@@ -113,13 +119,13 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_positions(limit, node_ref: node_ref).to_json
query_positions(limit, node_ref: node_ref, since: params["since"]).to_json
end
app.get "/api/neighbors" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_neighbors(limit).to_json
query_neighbors(limit, since: params["since"]).to_json
end
app.get "/api/neighbors/:id" do
@@ -127,13 +133,13 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_neighbors(limit, node_ref: node_ref).to_json
query_neighbors(limit, node_ref: node_ref, since: params["since"]).to_json
end
app.get "/api/telemetry" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_telemetry(limit).to_json
query_telemetry(limit, since: params["since"]).to_json
end
app.get "/api/telemetry/aggregated" do
@@ -164,7 +170,11 @@ module PotatoMesh
halt 400, { error: "bucketSeconds too small for requested window" }.to_json
end
query_telemetry_buckets(window_seconds: window_seconds, bucket_seconds: bucket_seconds).to_json
query_telemetry_buckets(
window_seconds: window_seconds,
bucket_seconds: bucket_seconds,
since: params["since"],
).to_json
end
app.get "/api/telemetry/:id" do
@@ -172,13 +182,13 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_telemetry(limit, node_ref: node_ref).to_json
query_telemetry(limit, node_ref: node_ref, since: params["since"]).to_json
end
app.get "/api/traces" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_traces(limit).to_json
query_traces(limit, since: params["since"]).to_json
end
app.get "/api/traces/:id" do
@@ -186,7 +196,7 @@ module PotatoMesh
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_traces(limit, node_ref: node_ref).to_json
query_traces(limit, node_ref: node_ref, since: params["since"]).to_json
end
app.get "/api/instances" do
@@ -65,6 +65,25 @@ module PotatoMesh
db&.close
end
app.post "/api/ingestors" do
require_token!
content_type :json
begin
payload = JSON.parse(read_json_body)
rescue JSON::ParserError
halt 400, { error: "invalid JSON" }.to_json
end
unless payload.is_a?(Hash)
halt 400, { error: "invalid payload" }.to_json
end
db = open_database
stored = upsert_ingestor(db, payload)
halt 400, { error: "invalid payload" }.to_json unless stored
{ status: "ok" }.to_json
ensure
db&.close
end
app.post "/api/instances" do
content_type :json
begin
@@ -113,6 +132,7 @@ module PotatoMesh
raw_private = payload.key?("isPrivate") ? payload["isPrivate"] : payload["is_private"]
is_private = coerce_boolean(raw_private)
signature = string_or_nil(payload["signature"])
contact_link = string_or_nil(payload["contactLink"])
attributes = {
id: id,
@@ -126,6 +146,7 @@ module PotatoMesh
longitude: longitude,
last_update_time: last_update_time,
is_private: is_private,
contact_link: contact_link,
}
if [attributes[:id], attributes[:domain], attributes[:pubkey], signature, attributes[:last_update_time]].any?(&:nil?)
@@ -138,6 +159,10 @@ module PotatoMesh
end
signature_valid = verify_instance_signature(attributes, signature, attributes[:pubkey])
if !signature_valid && contact_link
stripped_attributes = attributes.merge(contact_link: nil)
signature_valid = verify_instance_signature(stripped_attributes, signature, attributes[:pubkey])
end
# Some remote peers sign payloads using a canonicalised lowercase
# domain while still sending a mixed-case domain. Retry signature
# verification with the original casing when the first attempt
@@ -145,6 +170,10 @@ module PotatoMesh
if !signature_valid && raw_domain && normalized_domain && raw_domain.casecmp?(normalized_domain) && raw_domain != normalized_domain
alternate_attributes = attributes.merge(domain: raw_domain)
signature_valid = verify_instance_signature(alternate_attributes, signature, attributes[:pubkey])
if !signature_valid && contact_link
stripped_alternate = alternate_attributes.merge(contact_link: nil)
signature_valid = verify_instance_signature(stripped_alternate, signature, attributes[:pubkey])
end
end
unless signature_valid
+3 -2
View File
@@ -42,6 +42,7 @@ module PotatoMesh
DEFAULT_FEDERATION_WORKER_QUEUE_CAPACITY = 128
DEFAULT_FEDERATION_TASK_TIMEOUT_SECONDS = 120
DEFAULT_INITIAL_FEDERATION_DELAY_SECONDS = 2
DEFAULT_FEDERATION_SEED_DOMAINS = %w[potatomesh.net potatomesh.jmrp.io].freeze
# Retrieve the configured API token used for authenticated requests.
#
@@ -175,7 +176,7 @@ module PotatoMesh
#
# @return [String] semantic version identifier.
def version_fallback
"0.5.6"
"0.5.8"
end
# Default refresh interval for frontend polling routines.
@@ -409,7 +410,7 @@ module PotatoMesh
#
# @return [Array<String>] list of default seed domains.
def federation_seed_domains
["potatomesh.net"].freeze
DEFAULT_FEDERATION_SEED_DOMAINS
end
# Determine how often we broadcast federation announcements.
+2 -2
View File
@@ -1,12 +1,12 @@
{
"name": "potato-mesh",
"version": "0.5.6",
"version": "0.5.8",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "potato-mesh",
"version": "0.5.6",
"version": "0.5.8",
"devDependencies": {
"istanbul-lib-coverage": "^3.2.2",
"istanbul-lib-report": "^3.0.1",
+1 -1
View File
@@ -1,6 +1,6 @@
{
"name": "potato-mesh",
"version": "0.5.6",
"version": "0.5.8",
"type": "module",
"private": true,
"scripts": {
@@ -19,6 +19,7 @@ import assert from 'node:assert/strict';
import { createDomEnvironment } from './dom-environment.js';
import { initializeFederationPage } from '../federation-page.js';
import { roleColors } from '../role-helpers.js';
test('federation map centers on configured coordinates and follows theme filters', async () => {
const env = createDomEnvironment({ includeBody: true, bodyHasDarkClass: true });
@@ -54,6 +55,7 @@ test('federation map centers on configured coordinates and follows theme filters
tilePane.appendChild(tileImage);
const mapSetViewCalls = [];
const mapFitBoundsCalls = [];
const circleMarkerCalls = [];
const tileLayerStub = {
addTo() {
return this;
@@ -94,7 +96,8 @@ test('federation map centers on configured coordinates and follows theme filters
}
};
},
circleMarker() {
circleMarker(latlng, options) {
circleMarkerCalls.push({ latlng, options });
return {
bindPopup() {
return this;
@@ -112,13 +115,15 @@ const fetchImpl = async () => ({
version: '1.0.0',
latitude: 10.12345,
longitude: -20.98765,
lastUpdateTime: Math.floor(Date.now() / 1000) - 90
lastUpdateTime: Math.floor(Date.now() / 1000) - 90,
nodesCount: 12
},
{
domain: 'bravo.mesh',
contactLink: null,
version: '2.0.0',
lastUpdateTime: Math.floor(Date.now() / 1000) - (2 * 86400)
lastUpdateTime: Math.floor(Date.now() / 1000) - (2 * 86400),
nodesCount: 2
}
]
});
@@ -150,14 +155,268 @@ const fetchImpl = async () => ({
assert.match(firstRowHtml, /https:\/\/chat\.alpha/);
assert.match(firstRowHtml, /10\.12345/);
assert.match(firstRowHtml, /-20\.98765/);
assert.match(firstRowHtml, />12</);
assert.match(firstRowHtml, /ago/);
const secondRowHtml = rows[1].innerHTML;
assert.match(secondRowHtml, /bravo\.mesh/);
assert.match(secondRowHtml, /<em>—<\/em>/); // no contact link
assert.match(secondRowHtml, /2\.0\.0/);
assert.match(secondRowHtml, />2</);
assert.match(secondRowHtml, /d ago/);
assert.deepEqual(mapFitBoundsCalls[0][0], [[10.12345, -20.98765]]);
assert.equal(circleMarkerCalls[0].options.fillColor, roleColors.CLIENT_HIDDEN);
} catch (error) {
console.error('federation sorting test error', error);
throw error;
} finally {
cleanup();
}
});
test('federation table sorting, contact rendering, and legend creation', async () => {
const env = createDomEnvironment({ includeBody: true, bodyHasDarkClass: false });
const { document, createElement, registerElement, cleanup } = env;
const mapEl = createElement('div', 'map');
registerElement('map', mapEl);
const statusEl = createElement('div', 'status');
registerElement('status', statusEl);
const tableEl = createElement('table', 'instances');
const tbodyEl = createElement('tbody');
registerElement('instances', tableEl);
tableEl.appendChild(tbodyEl);
const headerNameTh = createElement('th');
const headerName = createElement('span');
headerName.classList.add('sort-header');
headerName.dataset.sortKey = 'name';
headerName.dataset.sortLabel = 'Name';
headerNameTh.appendChild(headerName);
const headerDomainTh = createElement('th');
const headerDomain = createElement('span');
headerDomain.classList.add('sort-header');
headerDomain.dataset.sortKey = 'domain';
headerDomain.dataset.sortLabel = 'Domain';
headerDomainTh.appendChild(headerDomain);
const ths = [headerNameTh, headerDomainTh];
const headers = [headerName, headerDomain];
const headerHandlers = new Map();
headers.forEach(header => {
header.addEventListener = (event, handler) => {
const existing = headerHandlers.get(header) || {};
existing[event] = handler;
headerHandlers.set(header, existing);
};
header.closest = () => ths.find(th => th.childNodes.includes(header));
header.querySelector = selector => {
if (selector === '.sort-indicator') {
const span = createElement('span');
span.classList.add('sort-indicator');
return span;
}
return null;
};
});
tableEl.querySelectorAll = selector => {
if (selector === 'thead .sort-header[data-sort-key]') return headers;
if (selector === 'thead th') return ths;
return [];
};
const configPayload = {
mapCenter: { lat: 0, lon: 0 },
mapZoom: 3,
tileFilters: { light: 'none', dark: 'invert(1)' }
};
const configEl = createElement('div');
configEl.setAttribute('data-app-config', JSON.stringify(configPayload));
document.querySelector = selector => {
if (selector === '[data-app-config]') return configEl;
if (selector === '#instances tbody') return tbodyEl;
return null;
};
const legendContainers = [];
const mapSetViewCalls = [];
const mapFitBoundsCalls = [];
const circleMarkerCalls = [];
const DomUtil = {
create(tag, className, parent) {
const el = {
tagName: tag,
className,
children: [],
style: {},
textContent: '',
setAttribute() {},
appendChild(child) {
this.children.push(child);
return child;
},
};
if (parent && parent.appendChild) parent.appendChild(el);
return el;
}
};
const controlStub = () => {
const ctrl = {
onAdd: null,
container: null,
addTo(map) {
this.container = this.onAdd ? this.onAdd(map) : null;
legendContainers.push(this.container);
return this;
},
getContainer() {
return this.container;
}
};
return ctrl;
};
const markersLayer = {
layers: [],
addLayer(marker) {
this.layers.push(marker);
return marker;
},
addTo() {
return this;
}
};
const mapStub = {
addedControls: [],
setView(...args) {
mapSetViewCalls.push(args);
},
on() {},
fitBounds(...args) {
mapFitBoundsCalls.push(args);
},
addLayer(layer) {
this.addedControls.push(layer);
return layer;
}
};
const leafletStub = {
map() {
return mapStub;
},
tileLayer() {
return {
addTo() {
return this;
},
getContainer() {
return null;
},
on() {}
};
},
layerGroup() {
return markersLayer;
},
circleMarker(latlng, options) {
circleMarkerCalls.push({ latlng, options });
return {
bindPopup() {
return this;
},
addTo() {
return this;
}
};
},
control: controlStub,
DomUtil
};
const now = Math.floor(Date.now() / 1000);
const fetchImpl = async () => ({
ok: true,
json: async () => [
{
domain: 'c.mesh',
name: 'Charlie',
contactLink: 'https://charlie.example\nmatrix:#c:mesh',
version: '3.0.0',
latitude: 1,
longitude: 1,
lastUpdateTime: now - 10,
nodesCount: 0
},
{
domain: 'b.mesh',
contactLink: '',
version: '2.0.0',
latitude: 2,
longitude: 2,
lastUpdateTime: now - 60,
nodesCount: 650
},
{
domain: 'a.mesh',
name: 'Alpha',
contactLink: 'mailto:alpha@mesh',
version: '1.0.0',
latitude: 3,
longitude: 3,
lastUpdateTime: now - 30,
nodesCount: 5
}
]
});
try {
await initializeFederationPage({ config: configPayload, fetchImpl, leaflet: leafletStub });
const rows = tbodyEl.childNodes.map(node => String(node.childNodes[0]));
assert.match(rows[0], /c\.mesh/);
assert.match(rows[0], /0</);
assert.match(rows[0], /https:\/\/charlie\.example/);
assert.match(rows[0], /matrix:#c:mesh/);
assert.match(rows[1], /a\.mesh/);
assert.match(rows[2], /b\.mesh/);
const nameHandlers = headerHandlers.get(headerName);
nameHandlers.click();
const afterNameSort = tbodyEl.childNodes.map(node => String(node.childNodes[0]));
assert.match(afterNameSort[0], /a\.mesh/);
assert.match(afterNameSort[1], /c\.mesh/);
assert.match(afterNameSort[2], /b\.mesh/);
nameHandlers.click();
const descSort = tbodyEl.childNodes.map(node => String(node.childNodes[0]));
assert.match(descSort[0], /c\.mesh/);
assert.match(descSort[1], /a\.mesh/);
assert.match(descSort[2], /b\.mesh/);
assert.equal(headerName.closest().attributes.get('aria-sort'), 'descending');
assert.equal(circleMarkerCalls[0].options.fillColor, roleColors.CLIENT_HIDDEN);
assert.equal(circleMarkerCalls[1].options.fillColor, roleColors.REPEATER);
assert.deepEqual(mapSetViewCalls[0], [[0, 0], 3]);
assert.equal(mapFitBoundsCalls[0][0].length, 3);
assert.equal(legendContainers.length, 1);
const legend = legendContainers[0];
assert.ok(legend.className.includes('legend'));
const legendHeader = legend.children.find(child => child.className === 'legend-header');
const legendTitle = legendHeader && Array.isArray(legendHeader.children)
? legendHeader.children.find(child => child.className === 'legend-title')
: null;
assert.ok(legendTitle);
assert.equal(legendTitle.textContent, 'Active nodes');
} finally {
cleanup();
}
@@ -90,10 +90,29 @@ test('resolveInstanceLabel falls back to the domain when the name is missing', (
test('buildInstanceUrl normalises domains into navigable HTTPS URLs', () => {
assert.equal(buildInstanceUrl('mesh.example'), 'https://mesh.example');
assert.equal(buildInstanceUrl(' https://mesh.example '), 'https://mesh.example');
assert.equal(buildInstanceUrl('https://mesh.example/path?query#fragment'), 'https://mesh.example');
assert.equal(buildInstanceUrl('javascript:alert(1)'), null);
assert.equal(buildInstanceUrl('ftp://mesh.example'), null);
assert.equal(buildInstanceUrl('mesh.example:8080'), 'https://mesh.example:8080');
assert.equal(buildInstanceUrl('mesh.example<script>'), null);
assert.equal(buildInstanceUrl(''), null);
assert.equal(buildInstanceUrl(null), null);
});
test('buildInstanceUrl rejects malformed HTTP URLs safely', () => {
const originalWarn = console.warn;
const warnings = [];
console.warn = message => warnings.push(message);
try {
assert.equal(buildInstanceUrl('http://[::1'), null);
assert.equal(buildInstanceUrl('https://bad host.example'), null);
assert.ok(warnings.length >= 1);
} finally {
console.warn = originalWarn;
}
});
test('initializeInstanceSelector populates options alphabetically and selects the configured domain', async () => {
const env = createDomEnvironment();
const select = setupSelectElement(env.document);
+330 -44
View File
@@ -16,6 +16,7 @@
import { readAppConfig } from './config.js';
import { mergeConfig } from './settings.js';
import { roleColors } from './role-helpers.js';
/**
* Escape HTML special characters to prevent XSS.
@@ -78,6 +79,131 @@ function buildInstanceUrl(domain) {
return `https://${trimmed}`;
}
const NODE_COUNT_COLOR_STOPS = [
{ limit: 100, color: roleColors.CLIENT_HIDDEN },
{ limit: 200, color: roleColors.SENSOR },
{ limit: 300, color: roleColors.TRACKER },
{ limit: 400, color: roleColors.CLIENT_MUTE },
{ limit: 500, color: roleColors.CLIENT },
{ limit: 600, color: roleColors.CLIENT_BASE },
{ limit: 700, color: roleColors.REPEATER },
{ limit: 800, color: roleColors.ROUTER_LATE },
{ limit: 900, color: roleColors.ROUTER }
];
const DEFAULT_INSTANCE_COLOR = roleColors.LOST_AND_FOUND || '#3388ff';
/**
* Determine the marker colour for an instance based on its active node count.
*
* @param {*} count Raw node count value from the API.
* @returns {string} CSS colour string.
*/
function colorForNodeCount(count) {
const numeric = Number(count);
if (!Number.isFinite(numeric) || numeric < 0) return DEFAULT_INSTANCE_COLOR;
const stop = NODE_COUNT_COLOR_STOPS.find(entry => numeric < entry.limit);
return stop && stop.color ? stop.color : DEFAULT_INSTANCE_COLOR;
}
/**
* Render arbitrary contact text while hyperlinking recognised URL-like segments.
*
* @param {*} contact Raw contact value from the API.
* @returns {string} HTML markup safe for insertion.
*/
function renderContactHtml(contact) {
if (typeof contact !== 'string') return '';
const trimmed = contact.trim();
if (!trimmed) return '';
const urlPattern = /(https?:\/\/[^\s]+|mailto:[^\s]+|matrix:[^\s]+)/gi;
const parts = [];
let lastIndex = 0;
let match;
while ((match = urlPattern.exec(trimmed)) !== null) {
const textBefore = trimmed.slice(lastIndex, match.index);
if (textBefore) {
parts.push(escapeHtml(textBefore));
}
const url = match[0];
const safeUrl = escapeHtml(url);
parts.push(`<a href="${safeUrl}" target="_blank" rel="noopener noreferrer">${safeUrl}</a>`);
lastIndex = match.index + url.length;
}
const trailing = trimmed.slice(lastIndex);
if (trailing) {
parts.push(escapeHtml(trailing));
}
const html = parts.join('');
return html.replace(/\r?\n/g, '<br>');
}
/**
* Convert a value into a finite number or null when invalid.
*
* @param {*} value Raw value to convert.
* @returns {number|null} Finite number or null.
*/
function toFiniteNumber(value) {
const num = Number(value);
return Number.isFinite(num) ? num : null;
}
/**
* Compare two string-like values ignoring case.
*
* @param {*} a Left-hand operand.
* @param {*} b Right-hand operand.
* @returns {number} Comparator result.
*/
function compareString(a, b) {
const left = typeof a === 'string' ? a.toLowerCase() : String(a ?? '').toLowerCase();
const right = typeof b === 'string' ? b.toLowerCase() : String(b ?? '').toLowerCase();
return left.localeCompare(right);
}
/**
* Compare two numeric values.
*
* @param {*} a Left-hand operand.
* @param {*} b Right-hand operand.
* @returns {number} Comparator result.
*/
function compareNumber(a, b) {
const left = toFiniteNumber(a);
const right = toFiniteNumber(b);
if (left == null && right == null) return 0;
if (left == null) return 1;
if (right == null) return -1;
if (left === right) return 0;
return left < right ? -1 : 1;
}
/**
* Determine whether a string-like value is present.
*
* @param {*} value Candidate value.
* @returns {boolean} true when present.
*/
function hasStringValue(value) {
if (value == null) return false;
if (typeof value === 'string') return value.trim() !== '';
return String(value).trim() !== '';
}
/**
* Determine whether a numeric value is present.
*
* @param {*} value Candidate value.
* @returns {boolean} true when present.
*/
function hasNumberValue(value) {
return toFiniteNumber(value) != null;
}
const TILE_LAYER_URL = 'https://{s}.tile.openstreetmap.fr/hot/{z}/{x}/{y}.png';
/**
@@ -97,8 +223,12 @@ export async function initializeFederationPage(options = {}) {
const fetchImpl = options.fetchImpl || fetch;
const leaflet = options.leaflet || (typeof window !== 'undefined' ? window.L : null);
const mapContainer = document.getElementById('map');
const tableEl = document.getElementById('instances');
const tableBody = document.querySelector('#instances tbody');
const statusEl = document.getElementById('status');
const sortHeaders = tableEl
? Array.from(tableEl.querySelectorAll('thead .sort-header[data-sort-key]'))
: [];
const hasLeaflet =
typeof leaflet === 'object' &&
@@ -109,6 +239,154 @@ export async function initializeFederationPage(options = {}) {
let map = null;
let markersLayer = null;
let tileLayer = null;
const tableSorters = {
name: { getValue: inst => inst.name ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
domain: { getValue: inst => inst.domain ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
contact: { getValue: inst => inst.contactLink ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
version: { getValue: inst => inst.version ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
channel: { getValue: inst => inst.channel ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
frequency: { getValue: inst => inst.frequency ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
nodesCount: {
getValue: inst => toFiniteNumber(inst.nodesCount ?? inst.nodes_count),
compare: compareNumber,
hasValue: hasNumberValue,
defaultDirection: 'desc'
},
latitude: { getValue: inst => toFiniteNumber(inst.latitude), compare: compareNumber, hasValue: hasNumberValue, defaultDirection: 'asc' },
longitude: { getValue: inst => toFiniteNumber(inst.longitude), compare: compareNumber, hasValue: hasNumberValue, defaultDirection: 'asc' },
lastUpdateTime: {
getValue: inst => toFiniteNumber(inst.lastUpdateTime),
compare: compareNumber,
hasValue: hasNumberValue,
defaultDirection: 'desc'
}
};
let sortState = {
key: 'lastUpdateTime',
direction: tableSorters.lastUpdateTime ? tableSorters.lastUpdateTime.defaultDirection : 'desc'
};
/**
* Sort instances using the active sort configuration.
*
* @param {Array<Object>} data Instance rows.
* @returns {Array<Object>} sorted rows.
*/
const sortInstancesData = data => {
const sorter = tableSorters[sortState.key];
if (!sorter) return Array.isArray(data) ? [...data] : [];
const dir = sortState.direction === 'asc' ? 1 : -1;
return [...(data || [])].sort((a, b) => {
const aVal = sorter.getValue(a);
const bVal = sorter.getValue(b);
const aHas = sorter.hasValue ? sorter.hasValue(aVal) : hasStringValue(aVal);
const bHas = sorter.hasValue ? sorter.hasValue(bVal) : hasStringValue(bVal);
if (aHas && bHas) {
return sorter.compare(aVal, bVal) * dir;
}
if (aHas) return -1;
if (bHas) return 1;
return 0;
});
};
/**
* Update the visual sort indicators for the active column.
*
* @returns {void}
*/
const syncSortIndicators = () => {
if (!tableEl || !sortHeaders.length) return;
tableEl.querySelectorAll('thead th').forEach(th => th.removeAttribute('aria-sort'));
sortHeaders.forEach(header => {
header.removeAttribute('data-sort-active');
const indicator = header.querySelector('.sort-indicator');
if (indicator) indicator.textContent = '';
});
const active = sortHeaders.find(header => header.dataset.sortKey === sortState.key);
if (!active) return;
const indicator = active.querySelector('.sort-indicator');
if (indicator) indicator.textContent = sortState.direction === 'asc' ? '▲' : '▼';
active.setAttribute('data-sort-active', 'true');
const th = active.closest('th');
if (th) {
th.setAttribute('aria-sort', sortState.direction === 'asc' ? 'ascending' : 'descending');
}
};
/**
* Render the instances table body with sorting applied.
*
* @param {Array<Object>} data Instance rows.
* @param {number} nowSec Reference timestamp for relative time rendering.
* @returns {void}
*/
const renderTableRows = (data, nowSec) => {
if (!tableBody) return;
const frag = document.createDocumentFragment();
const sorted = sortInstancesData(data);
for (const instance of sorted) {
const tr = document.createElement('tr');
const url = buildInstanceUrl(instance.domain);
const nameHtml = instance.name ? escapeHtml(instance.name) : '<em>—</em>';
const domainHtml = url
? `<a href="${escapeHtml(url)}" target="_blank" rel="noopener">${escapeHtml(instance.domain || '')}</a>`
: escapeHtml(instance.domain || '');
const contactHtml = renderContactHtml(instance.contactLink);
const nodesCountValue = toFiniteNumber(instance.nodesCount ?? instance.nodes_count);
const nodesCountText = nodesCountValue == null ? '<em>—</em>' : escapeHtml(String(nodesCountValue));
tr.innerHTML = `
<td class="instances-col instances-col--name">${nameHtml}</td>
<td class="instances-col instances-col--domain mono">${domainHtml}</td>
<td class="instances-col instances-col--contact">${contactHtml || '<em>—</em>'}</td>
<td class="instances-col instances-col--version mono">${escapeHtml(instance.version || '')}</td>
<td class="instances-col instances-col--channel">${escapeHtml(instance.channel || '')}</td>
<td class="instances-col instances-col--frequency">${escapeHtml(instance.frequency || '')}</td>
<td class="instances-col instances-col--nodes mono">${nodesCountText}</td>
<td class="instances-col instances-col--latitude mono">${fmtCoords(instance.latitude)}</td>
<td class="instances-col instances-col--longitude mono">${fmtCoords(instance.longitude)}</td>
<td class="instances-col instances-col--last-update mono">${timeAgo(instance.lastUpdateTime, nowSec)}</td>
`;
frag.appendChild(tr);
}
tableBody.replaceChildren(frag);
syncSortIndicators();
};
/**
* Wire up click and keyboard handlers for sortable headers.
*
* @param {Function} rerender Callback to refresh the table.
* @returns {void}
*/
const attachSortHandlers = rerender => {
if (!sortHeaders.length) return;
const applySortKey = key => {
if (!key) return;
if (sortState.key === key) {
sortState = { key, direction: sortState.direction === 'asc' ? 'desc' : 'asc' };
} else {
const defaultDir = tableSorters[key]?.defaultDirection || 'asc';
sortState = { key, direction: defaultDir };
}
rerender();
};
sortHeaders.forEach(header => {
const key = header.dataset.sortKey;
header.addEventListener('click', () => applySortKey(key));
header.addEventListener('keydown', event => {
if (event.key === 'Enter' || event.key === ' ') {
event.preventDefault();
applySortKey(key);
}
});
});
};
/**
* Resolve the active theme based on the DOM state.
@@ -202,6 +480,38 @@ export async function initializeFederationPage(options = {}) {
// Render map markers
if (map && markersLayer && hasLeaflet && Array.isArray(instances)) {
const bounds = [];
const canRenderLegend =
typeof leaflet.control === 'function' && leaflet.DomUtil && typeof leaflet.DomUtil.create === 'function';
if (canRenderLegend) {
const legendStops = NODE_COUNT_COLOR_STOPS.map((stop, index) => {
const lower = index === 0 ? 0 : NODE_COUNT_COLOR_STOPS[index - 1].limit;
const upper = stop.limit - 1;
const label = index === 0 ? `< ${stop.limit} nodes` : `${lower}-${upper} nodes`;
return { color: stop.color || DEFAULT_INSTANCE_COLOR, label };
});
const lastLimit = NODE_COUNT_COLOR_STOPS[NODE_COUNT_COLOR_STOPS.length - 1]?.limit || 900;
legendStops.push({ color: DEFAULT_INSTANCE_COLOR, label: `${lastLimit} nodes` });
const legend = leaflet.control({ position: 'bottomright' });
legend.onAdd = function onAdd() {
const container = leaflet.DomUtil.create('div', 'legend legend--instances');
container.setAttribute('aria-label', 'Active nodes legend');
const header = leaflet.DomUtil.create('div', 'legend-header', container);
const title = leaflet.DomUtil.create('span', 'legend-title', header);
title.textContent = 'Active nodes';
const items = leaflet.DomUtil.create('div', 'legend-items', container);
legendStops.forEach(stop => {
const item = leaflet.DomUtil.create('div', 'legend-item', items);
item.setAttribute('aria-hidden', 'true');
const swatch = leaflet.DomUtil.create('span', 'legend-swatch', item);
swatch.style.background = stop.color;
const label = leaflet.DomUtil.create('span', 'legend-label', item);
label.textContent = stop.label;
});
return container;
};
legend.addTo(map);
}
for (const instance of instances) {
const lat = Number(instance.latitude);
@@ -213,24 +523,28 @@ export async function initializeFederationPage(options = {}) {
const name = instance.name || instance.domain || 'Unknown';
const url = buildInstanceUrl(instance.domain);
const popupContent = url
? `<strong><a href="${escapeHtml(url)}" target="_blank" rel="noopener">${escapeHtml(name)}</a></strong><br>
<span class="mono">${escapeHtml(instance.domain || '')}</span><br>
${instance.channel ? `Channel: ${escapeHtml(instance.channel)}<br>` : ''}
${instance.frequency ? `Frequency: ${escapeHtml(instance.frequency)}<br>` : ''}
${instance.version ? `Version: ${escapeHtml(instance.version)}` : ''}`
: `<strong>${escapeHtml(name)}</strong>`;
const nodeCountValue = toFiniteNumber(instance.nodesCount ?? instance.nodes_count);
const popupLines = [
url
? `<strong><a href="${escapeHtml(url)}" target="_blank" rel="noopener">${escapeHtml(name)}</a></strong>`
: `<strong>${escapeHtml(name)}</strong>`,
`<span class="mono">${escapeHtml(instance.domain || '')}</span>`,
instance.channel ? `Channel: ${escapeHtml(instance.channel)}` : '',
instance.frequency ? `Frequency: ${escapeHtml(instance.frequency)}` : '',
instance.version ? `Version: ${escapeHtml(instance.version)}` : '',
nodeCountValue != null ? `Active nodes (24h): ${escapeHtml(String(nodeCountValue))}` : ''
].filter(Boolean);
const marker = leaflet.circleMarker([lat, lon], {
radius: 8,
fillColor: '#4CAF50',
color: '#2E7D32',
weight: 2,
opacity: 1,
fillOpacity: 0.8
radius: 9,
fillColor: colorForNodeCount(nodeCountValue),
color: '#000',
weight: 1,
opacity: 0.8,
fillOpacity: 0.75
});
marker.bindPopup(popupContent);
marker.bindPopup(popupLines.join('<br>'));
markersLayer.addLayer(marker);
}
@@ -245,35 +559,7 @@ export async function initializeFederationPage(options = {}) {
// Render table
if (tableBody && Array.isArray(instances)) {
const frag = document.createDocumentFragment();
for (const instance of instances) {
const tr = document.createElement('tr');
const url = buildInstanceUrl(instance.domain);
const nameHtml = instance.name
? escapeHtml(instance.name)
: '<em>—</em>';
const domainHtml = url
? `<a href="${escapeHtml(url)}" target="_blank" rel="noopener">${escapeHtml(instance.domain || '')}</a>`
: escapeHtml(instance.domain || '');
const contact = instance.contactLink ? escapeHtml(instance.contactLink) : '';
const contactHtml = contact ? `<span class="mono">${contact}</span>` : '<em>—</em>';
tr.innerHTML = `
<td class="instances-col instances-col--name">${nameHtml}</td>
<td class="instances-col instances-col--domain mono">${domainHtml}</td>
<td class="instances-col instances-col--contact">${contactHtml}</td>
<td class="instances-col instances-col--version mono">${escapeHtml(instance.version || '')}</td>
<td class="instances-col instances-col--channel">${escapeHtml(instance.channel || '')}</td>
<td class="instances-col instances-col--frequency">${escapeHtml(instance.frequency || '')}</td>
<td class="instances-col instances-col--latitude mono">${fmtCoords(instance.latitude)}</td>
<td class="instances-col instances-col--longitude mono">${fmtCoords(instance.longitude)}</td>
<td class="instances-col instances-col--last-update mono">${timeAgo(instance.lastUpdateTime, nowSec)}</td>
`;
frag.appendChild(tr);
}
tableBody.replaceChildren(frag);
attachSortHandlers(() => renderTableRows(instances, nowSec));
renderTableRows(instances, nowSec);
}
}
+32 -8
View File
@@ -34,12 +34,15 @@ function resolveInstanceLabel(entry) {
return domain;
}
/**
* Construct a navigable URL for the provided instance domain.
*
* @param {string} domain Instance domain as returned by the federation catalog.
* @returns {string|null} Navigable absolute URL or ``null`` when the domain is empty.
*/
/**
* Construct a navigable URL for the provided instance domain.
*
* The returned URL is guaranteed to use HTTP(S) and a host-only component to avoid
* interpreting arbitrary DOM-controlled text as executable content.
*
* @param {string} domain Instance domain as returned by the federation catalog.
* @returns {string|null} Navigable absolute URL or ``null`` when the domain is empty or unsafe.
*/
export function buildInstanceUrl(domain) {
if (typeof domain !== 'string') {
return null;
@@ -50,8 +53,29 @@ export function buildInstanceUrl(domain) {
return null;
}
if (/^[a-zA-Z][a-zA-Z\d+.-]*:\/\//.test(trimmed)) {
return trimmed;
const allowedHostPattern = /^[a-zA-Z0-9.-]+(?::\d{1,5})?$/;
if (/^https?:\/\//i.test(trimmed)) {
try {
const parsed = new URL(trimmed);
if (!['http:', 'https:'].includes(parsed.protocol)) {
return null;
}
const sanitizedHost = parsed.host.trim();
if (!allowedHostPattern.test(sanitizedHost)) {
return null;
}
return `${parsed.protocol}//${sanitizedHost}`;
} catch (error) {
console.warn('Rejected invalid instance URL', error);
return null;
}
}
if (!allowedHostPattern.test(trimmed)) {
return null;
}
return `https://${trimmed}`;
+27
View File
@@ -1373,6 +1373,19 @@ button:not(.chat-tab):not(.sort-button):hover {
outline-offset: 2px;
}
.sort-header {
display: inline-flex;
align-items: center;
gap: 4px;
cursor: pointer;
user-select: none;
}
.sort-header:focus-visible {
outline: 2px solid #4a90e2;
outline-offset: 2px;
}
.sort-indicator {
font-size: 0.75em;
opacity: 0.6;
@@ -1850,6 +1863,10 @@ body.dark .sort-button {
color: inherit;
}
body.dark .sort-header {
color: inherit;
}
body.dark .sort-button:hover {
background: none;
}
@@ -2075,6 +2092,12 @@ body.dark #map .leaflet-tile.map-tiles {
min-width: 180px;
}
.instances-col--contact {
min-width: 160px;
white-space: pre-wrap;
word-break: break-word;
}
.instances-col--version {
min-width: 80px;
}
@@ -2084,6 +2107,10 @@ body.dark #map .leaflet-tile.map-tiles {
min-width: 100px;
}
.instances-col--nodes {
min-width: 110px;
}
.instances-col--latitude,
.instances-col--longitude {
min-width: 100px;
+550 -4
View File
@@ -103,6 +103,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
db.execute("DELETE FROM nodes")
db.execute("DELETE FROM positions")
db.execute("DELETE FROM telemetry")
db.execute("DELETE FROM ingestors")
end
ensure_self_instance_record!
end
@@ -1079,7 +1080,8 @@ RSpec.describe "Potato Mesh Sinatra app" do
targets = application_class.federation_target_domains("self.mesh")
expect(targets.first).to eq("potatomesh.net")
seed_domains = PotatoMesh::Config.federation_seed_domains.map(&:downcase)
expect(targets.first(seed_domains.length)).to eq(seed_domains)
expect(targets).to include("remote.mesh")
expect(targets).not_to include("self.mesh")
end
@@ -1089,7 +1091,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
targets = application_class.federation_target_domains("self.mesh")
expect(targets).to eq(["potatomesh.net"])
expect(targets).to eq(PotatoMesh::Config.federation_seed_domains.map(&:downcase))
end
it "ignores remote instances that have not updated within a week" do
@@ -1117,7 +1119,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
targets = application_class.federation_target_domains("self.mesh")
expect(targets).to eq(["potatomesh.net"])
expect(targets).to eq(PotatoMesh::Config.federation_seed_domains.map(&:downcase))
end
end
@@ -1606,6 +1608,161 @@ RSpec.describe "Potato Mesh Sinatra app" do
end
end
it "accepts registrations when contactLink is part of the signed payload" do
contact_link = "https://example.test/contact"
linked_attributes = instance_attributes.merge(contact_link: contact_link)
linked_signature_payload = canonical_instance_payload(linked_attributes)
linked_signature = Base64.strict_encode64(
instance_key.sign(OpenSSL::Digest::SHA256.new, linked_signature_payload),
)
linked_payload = instance_payload.merge(
"contactLink" => contact_link,
"signature" => linked_signature,
)
post "/api/instances", linked_payload.to_json, { "CONTENT_TYPE" => "application/json" }
expect(last_response.status).to eq(201)
expect(JSON.parse(last_response.body)).to eq("status" => "registered")
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row(
"SELECT contact_link, signature FROM instances WHERE id = ?",
[instance_attributes[:id]],
)
expect(row).not_to be_nil
expect(row["contact_link"]).to eq(contact_link)
expect(row["signature"]).to eq(linked_signature)
end
end
it "accepts instance announcement payloads produced by the application including contactLink" do
contact_link = "https://example.test/contact"
announcement_attributes = instance_attributes.merge(contact_link: contact_link)
announcement_signature = Base64.strict_encode64(
instance_key.sign(
OpenSSL::Digest::SHA256.new,
canonical_instance_payload(announcement_attributes),
),
)
announcement_payload = application_class.instance_announcement_payload(
announcement_attributes,
announcement_signature,
)
post "/api/instances", announcement_payload.to_json, { "CONTENT_TYPE" => "application/json" }
expect(last_response.status).to eq(201)
expect(JSON.parse(last_response.body)).to eq("status" => "registered")
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row(
"SELECT contact_link, signature FROM instances WHERE id = ?",
[instance_attributes[:id]],
)
expect(row).not_to be_nil
expect(row["contact_link"]).to eq(contact_link)
expect(row["signature"]).to eq(announcement_signature)
end
end
it "accepts signatures that omit contactLink for backwards compatibility" do
contact_link = "https://legacy.example/contact"
legacy_signature_payload = canonical_instance_payload(instance_attributes)
legacy_signature = Base64.strict_encode64(
instance_key.sign(OpenSSL::Digest::SHA256.new, legacy_signature_payload),
)
legacy_payload = instance_payload.merge(
"contactLink" => contact_link,
"signature" => legacy_signature,
)
post "/api/instances", legacy_payload.to_json, { "CONTENT_TYPE" => "application/json" }
expect(last_response.status).to eq(201)
expect(JSON.parse(last_response.body)).to eq("status" => "registered")
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row(
"SELECT contact_link, signature FROM instances WHERE id = ?",
[instance_attributes[:id]],
)
expect(row).not_to be_nil
expect(row["contact_link"]).to eq(contact_link)
expect(row["signature"]).to eq(legacy_signature)
end
end
it "accepts mixed-case domains when the signature omits contactLink but the payload includes it" do
raw_domain = "Mesh.Example"
normalized_domain = raw_domain.downcase
contact_link = "https://mixed.example/contact"
mixed_attributes = instance_attributes.merge(domain: raw_domain)
mixed_signature_payload = canonical_instance_payload(mixed_attributes)
mixed_signature = Base64.strict_encode64(
instance_key.sign(OpenSSL::Digest::SHA256.new, mixed_signature_payload),
)
mixed_payload = instance_payload.merge(
"domain" => raw_domain,
"contactLink" => contact_link,
"signature" => mixed_signature,
)
mixed_remote_payload = JSON.generate(
{
"publicKey" => pubkey,
"name" => instance_attributes[:name],
"version" => instance_attributes[:version],
"domain" => normalized_domain,
"lastUpdate" => last_update_time,
},
sort_keys: true,
)
mixed_document = well_known_document.merge(
"domain" => normalized_domain,
"signedPayload" => Base64.strict_encode64(mixed_remote_payload),
"signature" => Base64.strict_encode64(
instance_key.sign(OpenSSL::Digest::SHA256.new, mixed_remote_payload),
),
)
allow_any_instance_of(Sinatra::Application).to receive(:fetch_instance_json) do |_instance, host, path|
case path
when "/.well-known/potato-mesh"
[mixed_document, URI("https://#{host}#{path}")]
when "/api/nodes"
[remote_nodes, URI("https://#{host}#{path}")]
else
[nil, []]
end
end
post "/api/instances", mixed_payload.to_json, { "CONTENT_TYPE" => "application/json" }
expect(last_response.status).to eq(201)
expect(JSON.parse(last_response.body)).to eq("status" => "registered")
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row(
"SELECT domain, contact_link, signature FROM instances WHERE id = ?",
[mixed_attributes[:id]],
)
expect(row).not_to be_nil
expect(row["domain"]).to eq(normalized_domain)
expect(row["contact_link"]).to eq(contact_link)
expect(row["signature"]).to eq(mixed_signature)
end
end
it "rejects registrations with invalid domains" do
invalid_payload = instance_payload.merge("domain" => "mesh-instance")
@@ -3467,6 +3624,43 @@ RSpec.describe "Potato Mesh Sinatra app" do
end
end
it "accepts traceroutes without metrics or RSSI fields" do
allow(Time).to receive(:now).and_return(reference_time)
payload = [
{
"id" => 9_003,
"request_id" => 42,
"src" => 0xAAAA0001,
"dest" => 0xAAAA0002,
"rx_time" => reference_time.to_i - 1,
"hops" => [0xAAAA0001, 0xAAAA0003, 0xAAAA0002],
},
]
post "/api/traces", payload.to_json, auth_headers
expect(last_response).to be_ok
expect(JSON.parse(last_response.body)).to eq("status" => "ok")
with_db(readonly: true) do |db|
db.results_as_hash = true
stored = db.get_first_row("SELECT * FROM traces WHERE id = ?", [payload.first["id"]])
expect(stored["rx_time"]).to eq(payload.first["rx_time"])
expect(stored["rx_iso"]).to eq(Time.at(payload.first["rx_time"]).utc.iso8601)
expect(stored["rssi"]).to be_nil
expect(stored["snr"]).to be_nil
expect(stored["elapsed_ms"]).to be_nil
hops = db.execute(
"SELECT hop_index, node_id FROM trace_hops WHERE trace_id = ? ORDER BY hop_index",
[stored["id"]],
)
expect(hops.map { |row| row["node_id"] }).to eq(payload.first["hops"])
end
end
it "returns 400 when the payload is not valid JSON" do
post "/api/traces", "{", auth_headers
@@ -3913,6 +4107,39 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(payload["node_id"]).to eq("!fresh-node")
end
# Verifies that ?since= acts as a lower bound on last_heard for both the
# collection endpoint and single-node lookups (404 when filtered out).
it "filters node results using the since parameter for collections and single lookups" do
  clear_database
  # Freeze the clock so the relative timestamps below are deterministic.
  allow(Time).to receive(:now).and_return(reference_time)
  now = reference_time.to_i
  older_last_heard = now - 120
  recent_last_heard = now - 30
  with_db do |db|
    db.execute(
      "INSERT INTO nodes(node_id, short_name, long_name, hw_model, role, snr, last_heard, first_heard) VALUES(?,?,?,?,?,?,?,?)",
      ["!older-node", "old", "Older", "TBEAM", "CLIENT", 0.0, older_last_heard, older_last_heard],
    )
    db.execute(
      "INSERT INTO nodes(node_id, short_name, long_name, hw_model, role, snr, last_heard, first_heard) VALUES(?,?,?,?,?,?,?,?)",
      ["!recent-node", "new", "Recent", "TBEAM", "CLIENT", 0.0, recent_last_heard, recent_last_heard],
    )
  end
  # Collection: only the node heard at/after the cutoff is returned.
  get "/api/nodes?since=#{recent_last_heard}"
  expect(last_response).to be_ok
  payload = JSON.parse(last_response.body)
  expect(payload.map { |row| row["node_id"] }).to eq(["!recent-node"])
  # Single lookup: a node older than the cutoff behaves as not found.
  get "/api/nodes/!older-node?since=#{recent_last_heard}"
  expect(last_response.status).to eq(404)
  get "/api/nodes/!recent-node?since=#{recent_last_heard}"
  expect(last_response).to be_ok
  detail = JSON.parse(last_response.body)
  expect(detail["node_id"]).to eq("!recent-node")
end
it "omits blank values from node responses" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
@@ -4274,6 +4501,37 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(filtered.map { |row| row["id"] }).to eq([2])
end
it "filters positions using the since parameter for both global and node queries" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
now = reference_time.to_i
older_rx = now - 180
recent_rx = now - 15
with_db do |db|
db.execute(
"INSERT INTO positions(id, node_id, node_num, rx_time, rx_iso, position_time, latitude, longitude) VALUES(?,?,?,?,?,?,?,?)",
[10, "!pos-since", 101, older_rx, Time.at(older_rx).utc.iso8601, older_rx - 5, 52.0, 13.0],
)
db.execute(
"INSERT INTO positions(id, node_id, node_num, rx_time, rx_iso, position_time, latitude, longitude) VALUES(?,?,?,?,?,?,?,?)",
[11, "!pos-since", 101, recent_rx, Time.at(recent_rx).utc.iso8601, recent_rx - 5, 53.0, 14.0],
)
end
get "/api/positions?since=#{recent_rx}"
expect(last_response).to be_ok
payload = JSON.parse(last_response.body)
expect(payload.map { |row| row["id"] }).to eq([11])
get "/api/positions/!pos-since?since=#{recent_rx}"
expect(last_response).to be_ok
filtered = JSON.parse(last_response.body)
expect(filtered.map { |row| row["id"] }).to eq([11])
end
it "omits blank values from position responses" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
@@ -4372,6 +4630,49 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(filtered.first["rx_time"]).to eq(fresh_rx)
end
it "honours the since parameter for neighbor queries" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
now = reference_time.to_i
older_rx = now - 300
recent_rx = now - 30
with_db do |db|
db.execute(
"INSERT INTO nodes(node_id, short_name, long_name, hw_model, role, snr, last_heard, first_heard) VALUES(?,?,?,?,?,?,?,?)",
["!origin-since", "orig", "Origin", "TBEAM", "CLIENT", 0.0, now, now],
)
db.execute(
"INSERT INTO nodes(node_id, short_name, long_name, hw_model, role, snr, last_heard, first_heard) VALUES(?,?,?,?,?,?,?,?)",
["!neighbor-old", "oldn", "Neighbor Old", "TBEAM", "CLIENT", 0.0, now, now],
)
db.execute(
"INSERT INTO nodes(node_id, short_name, long_name, hw_model, role, snr, last_heard, first_heard) VALUES(?,?,?,?,?,?,?,?)",
["!neighbor-new", "newn", "Neighbor New", "TBEAM", "CLIENT", 0.0, now, now],
)
db.execute(
"INSERT INTO neighbors(node_id, neighbor_id, snr, rx_time) VALUES(?,?,?,?)",
["!origin-since", "!neighbor-old", 1.5, older_rx],
)
db.execute(
"INSERT INTO neighbors(node_id, neighbor_id, snr, rx_time) VALUES(?,?,?,?)",
["!origin-since", "!neighbor-new", 7.5, recent_rx],
)
end
get "/api/neighbors?since=#{recent_rx}"
expect(last_response).to be_ok
payload = JSON.parse(last_response.body)
expect(payload.map { |row| row["neighbor_id"] }).to eq(["!neighbor-new"])
get "/api/neighbors/!origin-since?since=#{recent_rx}"
expect(last_response).to be_ok
filtered = JSON.parse(last_response.body)
expect(filtered.map { |row| row["neighbor_id"] }).to eq(["!neighbor-new"])
end
it "omits blank values from neighbor responses" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
@@ -4434,7 +4735,8 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(first_entry["telemetry_time_iso"]).to eq(Time.at(latest["telemetry_time"]).utc.iso8601)
expect(first_entry).not_to have_key("device_metrics")
expect_same_value(first_entry["battery_level"], telemetry_metric(latest, "battery_level"))
expect_same_value(first_entry["current"], telemetry_metric(latest, "current"))
expected_current = telemetry_metric(latest, "current")
expect_same_value(first_entry["current"], expected_current.nil? ? nil : expected_current / 1000.0)
expect_same_value(first_entry["distance"], telemetry_metric(latest, "distance"))
expect_same_value(first_entry["lux"], telemetry_metric(latest, "lux"))
expect_same_value(first_entry["wind_direction"], telemetry_metric(latest, "wind_direction"))
@@ -4501,6 +4803,37 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(filtered.map { |row| row["id"] }).to eq([2])
end
it "filters telemetry rows using the since parameter for both global and node-scoped queries" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
now = reference_time.to_i
older_rx = now - 300
recent_rx = now - 60
with_db do |db|
db.execute(
"INSERT INTO telemetry(id, node_id, node_num, rx_time, rx_iso, telemetry_time, battery_level, voltage) VALUES(?,?,?,?,?,?,?,?)",
[10, "!tele-since", 21, older_rx, Time.at(older_rx).utc.iso8601, older_rx - 5, 20.0, 3.9],
)
db.execute(
"INSERT INTO telemetry(id, node_id, node_num, rx_time, rx_iso, telemetry_time, battery_level, voltage) VALUES(?,?,?,?,?,?,?,?)",
[11, "!tele-since", 21, recent_rx, Time.at(recent_rx).utc.iso8601, recent_rx - 5, 80.0, 4.1],
)
end
get "/api/telemetry?since=#{recent_rx}"
expect(last_response).to be_ok
payload = JSON.parse(last_response.body)
expect(payload.map { |row| row["id"] }).to eq([11])
get "/api/telemetry/!tele-since?since=#{recent_rx}"
expect(last_response).to be_ok
filtered = JSON.parse(last_response.body)
expect(filtered.map { |row| row["id"] }).to eq([11])
end
it "omits blank values from telemetry responses" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
@@ -4555,6 +4888,51 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(filtered.first).not_to have_key("battery_level")
expect(filtered.first).not_to have_key("portnum")
end
it "omits zero-valued battery and voltage metrics from telemetry responses" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
now = reference_time.to_i
with_db do |db|
db.execute(
"INSERT INTO telemetry(id, node_id, rx_time, rx_iso, telemetry_time, battery_level, voltage, uptime_seconds, channel_utilization) VALUES(?,?,?,?,?,?,?,?,?)",
[
88,
"!tele-zero",
now,
Time.at(now).utc.iso8601,
now - 60,
0,
0,
0,
0.5,
],
)
end
get "/api/telemetry"
expect(last_response).to be_ok
rows = JSON.parse(last_response.body)
expect(rows.length).to eq(1)
entry = rows.first
expect(entry["node_id"]).to eq("!tele-zero")
expect(entry["rx_time"]).to eq(now)
expect(entry["telemetry_time"]).to eq(now - 60)
expect(entry).not_to have_key("battery_level")
expect(entry).not_to have_key("voltage")
expect(entry["uptime_seconds"]).to eq(0)
expect(entry["channel_utilization"]).to eq(0.5)
get "/api/telemetry/!tele-zero"
expect(last_response).to be_ok
scoped_rows = JSON.parse(last_response.body)
expect(scoped_rows.length).to eq(1)
expect(scoped_rows.first).not_to have_key("battery_level")
expect(scoped_rows.first).not_to have_key("voltage")
end
end
describe "GET /api/telemetry/aggregated" do
@@ -4576,6 +4954,35 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(a_bucket["aggregates"]).to have_key("battery_level")
expect(a_bucket["aggregates"]["battery_level"]).to include("avg")
expect(a_bucket).not_to have_key("device_metrics")
buckets_by_start = {}
buckets.each do |bucket|
start_time = bucket["bucket_start"]
buckets_by_start[start_time] = bucket if start_time
end
bucket_seconds = 300
current_by_bucket = Hash.new { |hash, key| hash[key] = [] }
telemetry_fixture.each do |entry|
timestamp = entry["rx_time"] || entry["telemetry_time"]
next unless timestamp
bucket_start = (timestamp / bucket_seconds) * bucket_seconds
current_value = telemetry_metric(entry, "current")
next if current_value.nil?
current_by_bucket[bucket_start] << current_value
end
current_by_bucket.each do |bucket_start, values|
bucket = buckets_by_start[bucket_start]
next unless bucket
aggregates = bucket.fetch("aggregates", {})
metrics = aggregates["current"]
expect(metrics).not_to be_nil
expect_same_value(metrics["avg"], values.sum / values.length / 1000.0)
expect_same_value(metrics["min"], values.min / 1000.0)
expect_same_value(metrics["max"], values.max / 1000.0)
end
end
it "applies default window and bucket sizes when parameters are omitted" do
@@ -4590,6 +4997,114 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(buckets.first["bucket_seconds"]).to eq(PotatoMesh::App::Queries::DEFAULT_TELEMETRY_BUCKET_SECONDS)
end
it "filters aggregated telemetry buckets using the since parameter" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
now = reference_time.to_i
older_rx = now - 1800
recent_rx = now - 120
with_db do |db|
db.execute(
"INSERT INTO telemetry(id, node_id, rx_time, rx_iso, telemetry_time, battery_level) VALUES(?,?,?,?,?,?)",
[801, "!agg-since", older_rx, Time.at(older_rx).utc.iso8601, older_rx - 30, 30.0],
)
db.execute(
"INSERT INTO telemetry(id, node_id, rx_time, rx_iso, telemetry_time, battery_level) VALUES(?,?,?,?,?,?)",
[802, "!agg-since", recent_rx, Time.at(recent_rx).utc.iso8601, recent_rx - 30, 80.0],
)
end
get "/api/telemetry/aggregated?windowSeconds=3600&bucketSeconds=300&since=#{recent_rx}"
expect(last_response).to be_ok
buckets = JSON.parse(last_response.body)
expect(buckets.length).to eq(1)
aggregates = buckets.first.fetch("aggregates")
expect(aggregates).to have_key("battery_level")
expect_same_value(aggregates.dig("battery_level", "avg"), 80.0)
end
it "omits zero-valued battery and voltage aggregates" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
now = reference_time.to_i
with_db do |db|
db.execute(
"INSERT INTO telemetry(id, node_id, rx_time, rx_iso, telemetry_time, battery_level, voltage, channel_utilization) VALUES(?,?,?,?,?,?,?,?)",
[
991,
"!tele-agg-zero",
now,
Time.at(now).utc.iso8601,
now - 30,
0,
0,
0.25,
],
)
end
get "/api/telemetry/aggregated?windowSeconds=3600&bucketSeconds=300"
expect(last_response).to be_ok
buckets = JSON.parse(last_response.body)
expect(buckets.length).to eq(1)
aggregates = buckets.first.fetch("aggregates")
expect(aggregates).not_to have_key("battery_level")
expect(aggregates).not_to have_key("voltage")
expect(aggregates.dig("channel_utilization", "avg")).to eq(0.25)
end
it "ignores zero-valued telemetry when aggregating mixed buckets" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
now = reference_time.to_i
with_db do |db|
db.execute(
"INSERT INTO telemetry(id, node_id, rx_time, rx_iso, telemetry_time, battery_level, voltage) VALUES(?,?,?,?,?,?,?)",
[
992,
"!tele-agg-mixed",
now,
Time.at(now).utc.iso8601,
now - 120,
0,
0,
],
)
db.execute(
"INSERT INTO telemetry(id, node_id, rx_time, rx_iso, telemetry_time, battery_level, voltage) VALUES(?,?,?,?,?,?,?)",
[
993,
"!tele-agg-mixed",
now,
Time.at(now).utc.iso8601,
now - 60,
80.0,
3.7,
],
)
end
get "/api/telemetry/aggregated?windowSeconds=3600&bucketSeconds=300"
expect(last_response).to be_ok
buckets = JSON.parse(last_response.body)
expect(buckets.length).to eq(1)
aggregates = buckets.first.fetch("aggregates")
expect(aggregates).to have_key("battery_level")
expect(aggregates.dig("battery_level", "avg")).to eq(80.0)
expect(aggregates.dig("battery_level", "min")).to eq(80.0)
expect(aggregates.dig("battery_level", "max")).to eq(80.0)
expect(aggregates.dig("voltage", "avg")).to eq(3.7)
expect(aggregates.dig("voltage", "min")).to eq(3.7)
expect(aggregates.dig("voltage", "max")).to eq(3.7)
end
it "rejects invalid bucket and window parameters" do
get "/api/telemetry/aggregated?windowSeconds=0&bucketSeconds=300"
expect(last_response.status).to eq(400)
@@ -4677,6 +5192,37 @@ RSpec.describe "Potato Mesh Sinatra app" do
ids = JSON.parse(last_response.body).map { |row| row["id"] }
expect(ids).to eq([50_001])
end
it "filters traces using the since parameter for collection and scoped requests" do
clear_database
allow(Time).to receive(:now).and_return(reference_time)
now = reference_time.to_i
older_rx = now - 300
recent_rx = now - 25
with_db do |db|
db.execute(
"INSERT INTO traces(id, src, dest, rx_time, rx_iso) VALUES(?,?,?,?,?)",
[60_001, 123, 456, older_rx, Time.at(older_rx).utc.iso8601],
)
db.execute(
"INSERT INTO traces(id, src, dest, rx_time, rx_iso) VALUES(?,?,?,?,?)",
[60_002, 123, 456, recent_rx, Time.at(recent_rx).utc.iso8601],
)
end
get "/api/traces?since=#{recent_rx}"
expect(last_response).to be_ok
payload = JSON.parse(last_response.body)
expect(payload.map { |row| row["id"] }).to eq([60_002])
get "/api/traces/123?since=#{recent_rx}"
expect(last_response).to be_ok
scoped = JSON.parse(last_response.body)
expect(scoped.map { |row| row["id"] }).to eq([60_002])
end
end
describe "GET /nodes/:id" do
+61
View File
@@ -321,6 +321,39 @@ RSpec.describe PotatoMesh::App::Federation do
expect(visited).not_to include(attributes_list[1][:domain], attributes_list[2][:domain])
expect(federation_helpers.debug_messages).to include(a_string_including("crawl limit"))
end
# Verifies that the federation crawler asks each remote instance for nodes
# filtered by the configured recency window (?since=…&limit=1000) and that
# the returned node count is written back onto the instance attributes.
it "requests an expanded recent node window when counting remote activity" do
  now = Time.at(1_700_000_000)
  allow(Time).to receive(:now).and_return(now)
  # 900s max node age drives the since= cutoff computed below.
  allow(PotatoMesh::Config).to receive(:remote_instance_max_node_age).and_return(900)
  recent_cutoff = now.to_i - 900
  # Stubbed HTTP responses keyed by [host, path]; the seed serves the
  # instance list, each remote serves nodes for both path variants.
  mapping = { [seed_domain, "/api/instances"] => [payload_entries, :instances] }
  attributes_list.each_with_index do |attributes, index|
    mapping[[attributes[:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"]] = [node_payload, :nodes]
    mapping[[attributes[:domain], "/api/nodes"]] = [node_payload, :nodes]
    mapping[[attributes[:domain], "/api/instances"]] = [[], :instances]
    allow(federation_helpers).to receive(:remote_instance_attributes_from_payload).with(payload_entries[index]).and_return([attributes, "signature-#{index}", nil])
  end
  # Record every fetched [host, path] so the since-window request can be asserted.
  captured_paths = []
  allow(federation_helpers).to receive(:fetch_instance_json) do |host, path|
    captured_paths << [host, path]
    mapping.fetch([host, path]) { [nil, []] }
  end
  allow(federation_helpers).to receive(:verify_instance_signature).and_return(true)
  allow(federation_helpers).to receive(:validate_remote_nodes).and_return([true, nil])
  allow(federation_helpers).to receive(:upsert_instance_record)
  federation_helpers.ingest_known_instances_from!(db, seed_domain)
  expect(captured_paths).to include(
    [attributes_list[0][:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"],
    [attributes_list[1][:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"],
    [attributes_list[2][:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"],
  )
  # Side effect: nodes_count is mutated in place on each attributes hash.
  expect(attributes_list.map { |attrs| attrs[:nodes_count] }).to all(eq(node_payload.length))
end
end
describe ".upsert_instance_record" do
@@ -400,6 +433,34 @@ RSpec.describe PotatoMesh::App::Federation do
expect(row[1]).to eq("sig-3")
end
end
it "stores the nodes_count for new records" do
with_db do |db|
federation_helpers.send(:upsert_instance_record, db, base_attributes.merge(nodes_count: 77), "sig-1")
stored = db.get_first_value("SELECT nodes_count FROM instances WHERE id = ?", base_attributes[:id])
expect(stored).to eq(77)
end
end
it "updates the nodes_count on conflict" do
with_db do |db|
federation_helpers.send(:upsert_instance_record, db, base_attributes.merge(nodes_count: 12), "sig-1")
federation_helpers.send(
:upsert_instance_record,
db,
base_attributes.merge(nodes_count: 99, name: "Renamed Mesh"),
"sig-2",
)
row =
db.get_first_row("SELECT nodes_count, name, signature FROM instances WHERE id = ?", base_attributes[:id])
expect(row[0]).to eq(99)
expect(row[1]).to eq("Renamed Mesh")
expect(row[2]).to eq("sig-2")
end
end
end
describe ".federation_user_agent_header" do
+206
View File
@@ -0,0 +1,206 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
require "spec_helper"
require "json"
require "time"
RSpec.describe "Ingestor endpoints" do
# Rack::Test application under test.
let(:app) { Sinatra::Application }
# Token exported to ENV so the bearer-auth check on POST endpoints passes.
let(:api_token) { "secret-token" }
# Headers for authenticated JSON requests.
let(:auth_headers) do
  {
    "CONTENT_TYPE" => "application/json",
    "HTTP_AUTHORIZATION" => "Bearer #{api_token}",
  }
end
# Install the test token and start every example from an empty table.
before do
  @original_token = ENV["API_TOKEN"]
  ENV["API_TOKEN"] = api_token
  clear_ingestors_table
end
# Restore the previous token (possibly nil) and leave the table clean.
after do
  ENV["API_TOKEN"] = @original_token
  clear_ingestors_table
end
# Empty the ingestors table and compact the database file so each example
# starts from pristine state.
def clear_ingestors_table
  with_db do |db|
    ["DELETE FROM ingestors", "VACUUM"].each { |statement| db.execute(statement) }
  end
end
# Open a database handle configured like the application's (busy timeout,
# foreign keys enforced), yield it to the block, and always close it.
#
# @param readonly [Boolean] open the handle read-only when true
# @yield [db] the configured SQLite handle
def with_db(readonly: false)
  db = PotatoMesh::Application.open_database(readonly: readonly)
  db.busy_timeout = PotatoMesh::Config.db_busy_timeout_ms
  # FK enforcement is per-connection in SQLite, so enable it on every open.
  db.execute("PRAGMA foreign_keys = ON")
  yield db
ensure
  # Safe-navigate: db may be nil if open_database itself raised.
  db&.close
end
# Build a valid ingestor registration payload anchored to the current clock.
# start_time sits 120s in the past and last_seen_time 60s in the past, so
# the two are always 60s apart. +overrides+ lets an example replace (or nil
# out) individual fields.
def ingestor_payload(overrides = {})
  reference = Time.now.to_i
  defaults = {
    node_id: "!abc12345",
    start_time: reference - 120,
    last_seen_time: reference - 60,
    version: "0.5.8",
    lora_freq: 915,
    modem_preset: "LongFast",
  }
  defaults.merge(overrides)
end
describe "POST /api/ingestors" do
  # Missing Authorization header must be rejected outright.
  it "requires a bearer token" do
    post "/api/ingestors", ingestor_payload.to_json, { "CONTENT_TYPE" => "application/json" }
    expect(last_response.status).to eq(403)
  end
  # Re-registering the same node must keep the earliest start_time while
  # allowing last_seen_time to move forward.
  it "upserts ingestor state without regressing start time" do
    payload = ingestor_payload
    post "/api/ingestors", payload.to_json, auth_headers
    expect(last_response.status).to eq(200)
    # Second registration tries to advance last_seen and regress start.
    newer_last_seen = payload[:last_seen_time] + 3_600
    older_start = payload[:start_time] - 500
    post "/api/ingestors",
      payload.merge(last_seen_time: newer_last_seen, start_time: older_start).to_json,
      auth_headers
    expect(last_response.status).to eq(200)
    with_db(readonly: true) do |db|
      row = db.get_first_row(
        "SELECT node_id, start_time, last_seen_time, version, lora_freq, modem_preset FROM ingestors WHERE node_id = ?",
        [payload[:node_id]],
      )
      expect(row[0]).to eq(payload[:node_id])
      # Original (earlier) start_time survives the attempted regression.
      expect(row[1]).to eq(payload[:start_time])
      # last_seen_time advanced, but is capped to the server clock.
      expect(row[2]).to be >= payload[:last_seen_time]
      expect(row[2]).to be <= Time.now.to_i
      expect(row[3]).to eq(payload[:version])
      expect(row[4]).to eq(payload[:lora_freq])
      expect(row[5]).to eq(payload[:modem_preset])
    end
  end
  it "rejects payloads missing required fields" do
    post "/api/ingestors", { node_id: "!abcd0001" }.to_json, auth_headers
    expect(last_response.status).to eq(400)
  end
  it "rejects invalid JSON" do
    post "/api/ingestors", "{", auth_headers
    expect(last_response.status).to eq(400)
  end
  it "rejects payloads missing version" do
    post "/api/ingestors", ingestor_payload(version: nil).to_json, auth_headers
    expect(last_response.status).to eq(400)
  end
  # A JSON array is well-formed but not an object, so it must be rejected.
  it "rejects non-object payloads" do
    post "/api/ingestors", [].to_json, auth_headers
    expect(last_response.status).to eq(400)
  end
end
describe "GET /api/ingestors" do
  # Rows older than the staleness window (here: 9 days) must be filtered
  # out; optional columns and derived ISO timestamps must be present.
  it "returns recent ingestors and omits stale rows" do
    now = Time.now.to_i
    with_db do |db|
      db.execute(
        "INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
        ["!fresh000", now - 100, now - 10, "0.5.8"],
      )
      # 9 days old — beyond the retention window, must not be listed.
      db.execute(
        "INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
        ["!stale000", now - (9 * 24 * 60 * 60), now - (9 * 24 * 60 * 60), "0.5.6"],
      )
      db.execute(
        "INSERT INTO ingestors(node_id, start_time, last_seen_time, version, lora_freq, modem_preset) VALUES(?,?,?,?,?,?)",
        ["!rich000", now - 200, now - 100, "0.5.8", 915, "MediumFast"],
      )
    end
    get "/api/ingestors"
    expect(last_response.status).to eq(200)
    payload = JSON.parse(last_response.body)
    expect(payload).to all(include("node_id", "start_time", "last_seen_time", "version"))
    node_ids = payload.map { |entry| entry["node_id"] }
    expect(node_ids).to include("!fresh000")
    expect(node_ids).not_to include("!stale000")
    # Optional radio columns and ISO renderings appear when populated.
    rich = payload.find { |row| row["node_id"] == "!rich000" }
    expect(rich["lora_freq"]).to eq(915)
    expect(rich["modem_preset"]).to eq("MediumFast")
    expect(rich["start_time_iso"]).to be_a(String)
    expect(rich["last_seen_iso"]).to be_a(String)
  end
  # ?since= must act as a lower bound on last_seen_time.
  it "filters ingestors using the since parameter" do
    frozen_time = Time.at(1_700_000_000)
    allow(Time).to receive(:now).and_return(frozen_time)
    now = frozen_time.to_i
    recent_cutoff = now - 120
    with_db do |db|
      db.execute(
        "INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
        ["!old-ingestor", now - 600, now - 300, "0.5.5"],
      )
      db.execute(
        "INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
        ["!new-ingestor", now - 60, now - 30, "0.5.8"],
      )
    end
    get "/api/ingestors?since=#{recent_cutoff}"
    expect(last_response).to be_ok
    payload = JSON.parse(last_response.body)
    expect(payload.map { |entry| entry["node_id"] }).to eq(["!new-ingestor"])
  end
end
describe "schema migrations" do
  # Runs init_db against a throwaway database file and checks that the
  # ingestors table is created with the expected columns.
  it "creates the ingestors table with frequency and modem columns" do
    tmp_db = File.join(SPEC_TMPDIR, "ingestor-migrate.db")
    FileUtils.rm_f(tmp_db)
    # Point the app at the temp path for the duration of this example.
    original = PotatoMesh::Config.db_path
    allow(PotatoMesh::Config).to receive(:db_path).and_return(tmp_db)
    begin
      PotatoMesh::Application.init_db
      with_db(readonly: true) do |db|
        # PRAGMA table_info rows: [cid, name, type, …] — index 1 is the name.
        columns = db.execute("PRAGMA table_info(ingestors)").map { |row| row[1] }
        expect(columns).to include("lora_freq", "modem_preset", "version")
      end
    ensure
      # Restore the real db_path even if the assertions fail.
      allow(PotatoMesh::Config).to receive(:db_path).and_return(original)
    end
  end
end
end
+44
View File
@@ -38,6 +38,7 @@ RSpec.describe PotatoMesh::App::Instances do
before do
FileUtils.mkdir_p(File.dirname(PotatoMesh::Config.db_path))
application_class.init_db unless application_class.db_schema_present?
application_class.ensure_schema_upgrades
with_db do |db|
db.execute("DELETE FROM instances")
end
@@ -132,5 +133,48 @@ RSpec.describe PotatoMesh::App::Instances do
expect(with_contact["contactLink"]).to eq("https://example.org/contact")
expect(without_contact.key?("contactLink")).to be(false)
end
# Ensures nodesCount is surfaced in the API payload and that a stored
# zero is preserved rather than being dropped as a blank value.
it "includes nodesCount values, preserving zeros" do
  fixed_time = Time.utc(2025, 2, 2, 8, 0, 0)
  allow(Time).to receive(:now).and_return(fixed_time)
  with_db do |db|
    db.execute(
      <<~SQL,
        INSERT INTO instances (id, domain, pubkey, last_update_time, is_private, nodes_count)
        VALUES (?, ?, ?, ?, ?, ?)
      SQL
      [
        "instance-with-nodes",
        "gamma.mesh.test",
        PotatoMesh::Application::INSTANCE_PUBLIC_KEY_PEM,
        fixed_time.to_i,
        0,
        42,
      ],
    )
    # Second row deliberately stores nodes_count = 0.
    db.execute(
      <<~SQL,
        INSERT INTO instances (id, domain, pubkey, last_update_time, is_private, nodes_count)
        VALUES (?, ?, ?, ?, ?, ?)
      SQL
      [
        "instance-with-zero",
        "delta.mesh.test",
        PotatoMesh::Application::INSTANCE_PUBLIC_KEY_PEM,
        fixed_time.to_i,
        0,
        0,
      ],
    )
  end
  payload = application_class.load_instances_for_api
  with_nodes = payload.find { |row| row["domain"] == "gamma.mesh.test" }
  zero_nodes = payload.find { |row| row["domain"] == "delta.mesh.test" }
  expect(with_nodes["nodesCount"]).to eq(42)
  # Zero must round-trip, not be omitted as blank.
  expect(zero_nodes["nodesCount"]).to eq(0)
end
end
end
+10 -9
View File
@@ -17,15 +17,16 @@
<table id="instances">
<thead>
<tr>
<th class="instances-col instances-col--name">Name</th>
<th class="instances-col instances-col--domain">Domain</th>
<th class="instances-col instances-col--contact">Contact</th>
<th class="instances-col instances-col--version">Version</th>
<th class="instances-col instances-col--channel">Channel</th>
<th class="instances-col instances-col--frequency">Frequency</th>
<th class="instances-col instances-col--latitude">Latitude</th>
<th class="instances-col instances-col--longitude">Longitude</th>
<th class="instances-col instances-col--last-update">Last Update</th>
<th class="instances-col instances-col--name" data-sort-key="name"><span class="sort-header" role="button" tabindex="0" data-sort-key="name" data-sort-label="Name">Name <span class="sort-indicator" aria-hidden="true"></span></span></th>
<th class="instances-col instances-col--domain" data-sort-key="domain"><span class="sort-header" role="button" tabindex="0" data-sort-key="domain" data-sort-label="Domain">Domain <span class="sort-indicator" aria-hidden="true"></span></span></th>
<th class="instances-col instances-col--contact" data-sort-key="contact"><span class="sort-header" role="button" tabindex="0" data-sort-key="contact" data-sort-label="Contact">Contact <span class="sort-indicator" aria-hidden="true"></span></span></th>
<th class="instances-col instances-col--version" data-sort-key="version"><span class="sort-header" role="button" tabindex="0" data-sort-key="version" data-sort-label="Version">Version <span class="sort-indicator" aria-hidden="true"></span></span></th>
<th class="instances-col instances-col--channel" data-sort-key="channel"><span class="sort-header" role="button" tabindex="0" data-sort-key="channel" data-sort-label="Channel">Channel <span class="sort-indicator" aria-hidden="true"></span></span></th>
<th class="instances-col instances-col--frequency" data-sort-key="frequency"><span class="sort-header" role="button" tabindex="0" data-sort-key="frequency" data-sort-label="Frequency">Frequency <span class="sort-indicator" aria-hidden="true"></span></span></th>
<th class="instances-col instances-col--nodes" data-sort-key="nodesCount"><span class="sort-header" role="button" tabindex="0" data-sort-key="nodesCount" data-sort-label="Active Nodes (24h)">Active Nodes (24h) <span class="sort-indicator" aria-hidden="true"></span></span></th>
<th class="instances-col instances-col--latitude" data-sort-key="latitude"><span class="sort-header" role="button" tabindex="0" data-sort-key="latitude" data-sort-label="Latitude">Latitude <span class="sort-indicator" aria-hidden="true"></span></span></th>
<th class="instances-col instances-col--longitude" data-sort-key="longitude"><span class="sort-header" role="button" tabindex="0" data-sort-key="longitude" data-sort-label="Longitude">Longitude <span class="sort-indicator" aria-hidden="true"></span></span></th>
<th class="instances-col instances-col--last-update" data-sort-key="lastUpdateTime"><span class="sort-header" role="button" tabindex="0" data-sort-key="lastUpdateTime" data-sort-label="Last Update">Last Update <span class="sort-indicator" aria-hidden="true"></span></span></th>
</tr>
</thead>
<tbody></tbody>