Mirror of <https://github.com/l5yth/potato-mesh.git>, synced 2026-05-14 21:35:50 +02:00

Compare commits: 24 commits, `v0.6.0-rc0..v0.6.2`
| SHA1 |
|---|
| 161f22bf12 |
| e8f3f17b85 |
| a4902548d3 |
| 491678f75b |
| db236d58e2 |
| a6cac6ced5 |
| f866cf8837 |
| 7f5b52db67 |
| 8ac12ee2c8 |
| 580c2fb6ea |
| 13b2ce9067 |
| 5a73e212a3 |
| 07c8e85caa |
| c08b3f2c2d |
| 851b2180dd |
| c175445251 |
| b951dbffeb |
| 10e6c99196 |
| aeb97477f0 |
| 81e588e44c |
| 083de6418f |
| 5b9e6e3d48 |
| 4a6ba38e94 |
| 4d38ddd341 |
+11
-1
@@ -1,3 +1,6 @@
+# Copyright © 2025-26 l5yth & contributors
+# Licensed under the Apache License, Version 2.0 (see LICENSE)
+#
# PotatoMesh Environment Configuration
# Copy this file to .env and customize for your setup

@@ -14,7 +17,7 @@ INSTANCE_DOMAIN="mesh.example.org"
# Generate a secure token: openssl rand -hex 32
API_TOKEN="your-secure-api-token-here"

-# Meshtastic connection target (required for ingestor)
+# Mesh radio connection target (required for ingestor)
# Common serial paths:
# - Linux: /dev/ttyACM0, /dev/ttyUSB0
# - macOS: /dev/cu.usbserial-*
@@ -23,6 +26,10 @@ API_TOKEN="your-secure-api-token-here"
# Bluetooth address (e.g. ED:4D:9E:95:CF:60).
CONNECTION="/dev/ttyACM0"

+# Mesh protocol to use (meshtastic or meshcore)
+# Default: meshtastic
+PROTOCOL="meshtastic"
+
# =============================================================================
# SITE CUSTOMIZATION
# =============================================================================
@@ -68,6 +75,9 @@ PRIVATE=0
# Debug mode (0=off, 1=on)
DEBUG=0

+# Energy saving mode — sleep between ingestion cycles (0=off, 1=on)
+ENERGY_SAVING=0
+
# Default map zoom override
# MAP_ZOOM=15
@@ -1,3 +1,6 @@
+<!-- Copyright © 2025-26 l5yth & contributors -->
+<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
+
# GitHub Actions Workflows

## Workflows
@@ -10,12 +13,3 @@
- **`mobile.yml`** - Flutter mobile tests with coverage reporting
- **`release.yml`** - Tag-triggered Flutter release builds for Android and iOS

-## Usage
-
-```bash
-# Build locally
-docker-compose build
-
-# Deploy
-docker-compose up -d
-```
@@ -23,7 +23,7 @@ on:
jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
-    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
+    runs-on: ubuntu-latest
    permissions:
      security-events: write
      packages: read
@@ -74,6 +74,9 @@ web/.config
node_modules/
web/node_modules/

+# Operator-customised static pages (keep only the shipped default)
+web/pages/*.md
+
# Debug symbols
ignored.txt
ignored-*.txt
+111
-28
@@ -1,39 +1,122 @@
<!-- Copyright © 2025-26 l5yth & contributors -->
<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->

# CHANGELOG

## v0.6.2

This is a service release of the radio mesh app-suite `potato-mesh` v0.6.2, focused on Meshcore-related fixes, federation accuracy, and bridge coverage. The Matrix bridge now understands Meshcore traffic, and several duplication and classification issues in the web app and ingestor have been tightened up.

Demo: <https://potatomesh.net/>

### Features

* Matrix: enable meshcore by @l5yth in <https://github.com/l5yth/potato-mesh/pull/761>
* Web: show colocated nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/753>

### Fixes

* Web: fix emoji pattern render in short names by @l5yth in <https://github.com/l5yth/potato-mesh/pull/760>
* Data: catch packet handler errors by @l5yth in <https://github.com/l5yth/potato-mesh/pull/759>
* Web: fix meshcore message duplication with 120s dupe protection by @l5yth in <https://github.com/l5yth/potato-mesh/pull/758>
* Web: fix node duplication through message synthetization by @l5yth in <https://github.com/l5yth/potato-mesh/pull/757>
* Ingestor: deduplicate meshcore messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/752>
* Fix reaction handling and classification by @l5yth in <https://github.com/l5yth/potato-mesh/pull/750>
* Web: fix federation node counts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/749>

## v0.6.1

This is a service release of the radio mesh app-suite `potato-mesh` v0.6.1, focused on Meshcore polish, federation resilience, and ingestor stability in the wake of the v0.6.0 multi-protocol release.

Demo: <https://potatomesh.net/>

### Features

* Web: per protocol active node counts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/735>
* Web: optimize caching by @l5yth in <https://github.com/l5yth/potato-mesh/pull/744>
* Data: better lora frequency handling for meshtastic by @l5yth in <https://github.com/l5yth/potato-mesh/pull/733>

### Fixes

* Web: fix meshcore node misclassification by @l5yth in <https://github.com/l5yth/potato-mesh/pull/748>
* Web: fix federation resolver issue with multi addresses by @l5yth in <https://github.com/l5yth/potato-mesh/pull/743>
* Web: restore refresh and protocol buttons by @l5yth in <https://github.com/l5yth/potato-mesh/pull/742>
* Ingestor: fix serial connection failures by @l5yth in <https://github.com/l5yth/potato-mesh/pull/736>

### Chores

* Chore: bump version to 0.6.1 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/726>
* Build(deps): bump rand from 0.9.2 to 0.9.4 in /matrix by @dependabot in <https://github.com/l5yth/potato-mesh/pull/741>

## v0.6.0

-**Breaking changes — remove deprecated environment variable aliases:**
+This is a service release of the radio mesh app-suite `potato-mesh` v0.6.0 which introduces new features and overhauls the user interface. The most notable change is multi-protocol support, with a **Meshcore** implementation in the ingestor, web app, and frontend.

-* Ingestor: remove `POTATOMESH_INSTANCE` env var — use `INSTANCE_DOMAIN` by @l5yth
-* Ingestor: remove `PROVIDER` env var — use `PROTOCOL` by @l5yth
-* Ingestor: remove `MESH_SERIAL` env var — use `CONNECTION` by @l5yth
-* Ingestor: remove `PORT` config alias — use `CONNECTION` by @l5yth
-* Docker: give `INSTANCE_DOMAIN` a default of `http://web:41447` in compose by @l5yth
-* Chore: bump version to 0.6.0 across web, matrix bridge, and mobile app by @l5yth

Demo: <https://potatomesh.net/>

### Meshcore

To start ingesting Meshcore data into an upgraded potato-mesh web app, simply set `PROTOCOL="meshcore"` for your ingestor.

### About Pages

The other notable feature is the removal of the "darkmode" and "info" buttons in favor of customizable markdown pages, which allow more flexibility for custom content (info about presets, contact information, etc.); see `/pages/*.md` in the web app ([#723](https://github.com/l5yth/potato-mesh/pull/723)).

### Breaking Variable Changes

The following deprecated environment variables have finally been removed in this release ([#704](https://github.com/l5yth/potato-mesh/pull/704)):

* ~~POTATOMESH_INSTANCE~~ - please use `INSTANCE_DOMAIN`
* ~~MESH_SERIAL~~ and ~~PORT~~ - please use `CONNECTION`

### Features

* Web: add markdown static pages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/723>
* Data: trace analysis multi ingestor support by @l5yth in <https://github.com/l5yth/potato-mesh/pull/721>
* Web: facelift by @l5yth in <https://github.com/l5yth/potato-mesh/pull/716>
* Web: sort channels by activity not index by @l5yth in <https://github.com/l5yth/potato-mesh/pull/711>
* Data: derive meshcore channel probe bound from device max_channels by @l5yth in <https://github.com/l5yth/potato-mesh/pull/701>
* Web: define meshcore modem presets by @l5yth in <https://github.com/l5yth/potato-mesh/pull/696>
* Data: register meshcore channel mappings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/695>
* Data: provide frequency and modem preset for meshcore by @l5yth in <https://github.com/l5yth/potato-mesh/pull/694>
* Web: distinguish meshcore from meshtastic in frontend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/688>
* [Meshcore] fix: get meshcore protocol icon displaying correctly by @benallfree in <https://github.com/l5yth/potato-mesh/pull/681>

### Fixes

* Web: fix federation for multi protocol by @l5yth in <https://github.com/l5yth/potato-mesh/pull/722>
* Data: fix position time updates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/715>
* Data: fix meshcore ingestor self reporting by @l5yth in <https://github.com/l5yth/potato-mesh/pull/713>
* Web: reference meshcore nodes in chat by @l5yth in <https://github.com/l5yth/potato-mesh/pull/709>
* Web: fix node disappearance role reset by @l5yth in <https://github.com/l5yth/potato-mesh/pull/707>
* Web: protect real node names from fallback by @l5yth in <https://github.com/l5yth/potato-mesh/pull/702>
* Web: add proper short names for meshcore companions by @l5yth in <https://github.com/l5yth/potato-mesh/pull/693>
* Fix: address review comments from PRs #676 and #681 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/689>
* [Meshcore] fix: race condition by @benallfree in <https://github.com/l5yth/potato-mesh/pull/676>

### Chores

* Release: v0.6.0 — remove deprecated env var aliases by @l5yth in <https://github.com/l5yth/potato-mesh/pull/704>
* Chore: prepare codebase for breaking release by @l5yth in <https://github.com/l5yth/potato-mesh/pull/718>

## v0.5.12

This is a service release of the app potato-mesh v0.5.12 which improves performance and stability.

Notably, the frontend went through some graphical tweaks to prepare for an upcoming multi-protocol release (meshcore, reticulum, etc.).

* Enh: surface meshcore role types (#680) by @l5yth in <https://github.com/l5yth/potato-mesh/pull/685>
* Chore: refactor codebase before meshcore release by @l5yth in <https://github.com/l5yth/potato-mesh/pull/682>
* [Meshcore] enh: short name should be 1st 4 hex digits of public key by @benallfree in <https://github.com/l5yth/potato-mesh/pull/679>
* Chore: update xcode deps by @benallfree in <https://github.com/l5yth/potato-mesh/pull/674>
* Chore: update mesh.sh to use requirements file by @benallfree in <https://github.com/l5yth/potato-mesh/pull/675>
* Data/meshcore: fix ble and enable tcp by @l5yth in <https://github.com/l5yth/potato-mesh/pull/669>
* Data: handle store_forward and router_heartbeat portnum by @l5yth in <https://github.com/l5yth/potato-mesh/pull/667>
* Feat: implement meshcore provider by @l5yth in <https://github.com/l5yth/potato-mesh/pull/663>
* Ci: update dependabot and codecov settings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/666>
* Web: prepare release by @l5yth in <https://github.com/l5yth/potato-mesh/pull/665>
* App: only query meshtastic provider by @l5yth in <https://github.com/l5yth/potato-mesh/pull/664>
* Data: prepare ingestor for meshcore by @l5yth in <https://github.com/l5yth/potato-mesh/pull/658>
* Web: fix css issues by @l5yth in <https://github.com/l5yth/potato-mesh/pull/659>
* Web: prepare frontend for multi protocol by @l5yth in <https://github.com/l5yth/potato-mesh/pull/657>
* Feat: split device and power-sensor telemetry charts (#643) by @l5yth in <https://github.com/l5yth/potato-mesh/pull/656>
* Web: implement a 'protocol' field across systems by @l5yth in <https://github.com/l5yth/potato-mesh/pull/655>
* Fix upsert clearing node coordinates bug by @l5yth in <https://github.com/l5yth/potato-mesh/pull/654>
* Data: resolve circular dependency of deamon.py by @l5yth in <https://github.com/l5yth/potato-mesh/pull/653>
* Proposal: mesh provider pattern refactor by @benallfree in <https://github.com/l5yth/potato-mesh/pull/651>
* Build(deps): bump rustls-webpki from 0.103.8 to 0.103.10 in /matrix by @dependabot[bot] in <https://github.com/l5yth/potato-mesh/pull/649>
* Build(deps): bump quinn-proto from 0.11.13 to 0.11.14 in /matrix by @dependabot[bot] in <https://github.com/l5yth/potato-mesh/pull/646>

## v0.5.11
@@ -1,6 +1,9 @@
+<!-- Copyright © 2025-26 l5yth & contributors -->
+<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
+
# Repository Guidelines

-Keep code as modular as possible to reduce duplication and improve reusability and readability. If a module grows large, split it into a submodule structure. Prefer composing small, single-purpose units over monolithic files.
+Keep code as modular as possible to reduce duplication and improve reusability and readability — this applies to tests as well as production code. If a module grows large, split it into a submodule structure. Prefer composing small, single-purpose units over monolithic files.

Make sure all tests pass for Python (`pytest`), Ruby (`rspec`), and JavaScript (`npm test`).

@@ -8,7 +11,7 @@ All code must be 100% unit tested — every line, branch, and code path must hav
All code must be 100% documented according to the language's API-doc standard (PDoc for Python, RDoc for Ruby, JSDoc for JavaScript, rustdoc for Rust, dartdoc for Dart). Documentation must be sufficient to generate complete API docs from source. In addition to API-level docs, add inline comments wherever the logic is not immediately self-evident.

-New source files should have Apache v2 license headers using the exact string `Copyright © 2025-26 l5yth & contributors`.
+Every file in the repository must carry an Apache v2 license notice using the exact string `Copyright © 2025-26 l5yth & contributors`. **Source-code files** (`.rb`, `.py`, `.js`, `.rs`, `.dart`, etc.) must include the full Apache v2 license header block. **Non-source files** (docs, configs, YAML, TOML, Dockerfiles, etc.) must include a short 2-line Apache v2 notice (copyright line + license reference).

Run linters for Python (`black`) and Ruby (`rufo`) to ensure consistent code formatting.
@@ -1,3 +1,6 @@
+<!-- Copyright © 2025-26 l5yth & contributors -->
+<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
+
# PotatoMesh Docker Guide

PotatoMesh publishes ready-to-run container images to the GitHub Packages container
@@ -78,6 +81,18 @@ the container. This path stores the instance private key and staged
of container lifecycle events, generated credentials are not replaced on reboot
or re-deploy.

The `potatomesh_pages` volume mounts to `/app/pages` and holds operator-managed
Markdown files that are rendered as static content pages in the web UI. On first
start the default `1-about.md` page is copied from the image into the volume.
You can add, edit, or remove `.md` files in this volume to customise your
instance's navigation. To use a host directory instead of a named volume, replace
the volume entry with a bind mount:

```yaml
volumes:
  - ./my-pages:/app/pages
```

## Start the stack

From the directory containing the Compose file:
+30
-9
@@ -1,3 +1,4 @@
+# syntax=docker/dockerfile:1.6
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -25,6 +26,9 @@ ENV BUNDLE_FORCE_RUBY_PLATFORM=true
# Install build dependencies and SQLite3
RUN apk add --no-cache \
    build-base \
+    python3 \
+    py3-pip \
+    py3-virtualenv \
    sqlite-dev \
    linux-headers \
    pkgconfig
@@ -40,11 +44,16 @@ RUN bundle config set --local force_ruby_platform true && \
    bundle config set --local without 'development test' && \
    bundle install --jobs=4 --retry=3

+# Install Meshtastic decoder dependencies in a dedicated venv
+RUN python3 -m venv /opt/meshtastic-venv && \
+    /opt/meshtastic-venv/bin/pip install --no-cache-dir meshtastic protobuf
+
# Production stage
FROM ruby:3.3-alpine AS production

# Install runtime dependencies
RUN apk add --no-cache \
+    python3 \
    sqlite \
    tzdata \
    curl
@@ -58,18 +67,27 @@ WORKDIR /app

# Copy installed gems from builder stage
COPY --from=builder /usr/local/bundle /usr/local/bundle
+COPY --from=builder /opt/meshtastic-venv /opt/meshtastic-venv

-# Copy application code (exclude Dockerfile from web directory)
-COPY --chown=potatomesh:potatomesh web/app.rb web/app.sh web/Gemfile web/Gemfile.lock* web/spec/ ./
+# Copy application code (excluding the Dockerfile which is not required at runtime)
+COPY --chown=potatomesh:potatomesh web/app.rb ./
+COPY --chown=potatomesh:potatomesh web/app.sh ./
+COPY --chown=potatomesh:potatomesh web/Gemfile ./
+COPY --chown=potatomesh:potatomesh web/Gemfile.lock* ./
+COPY --chown=potatomesh:potatomesh web/lib ./lib
+COPY --chown=potatomesh:potatomesh web/spec ./spec
COPY --chown=potatomesh:potatomesh web/public ./public
-COPY --chown=potatomesh:potatomesh web/views/ ./views/
+COPY --chown=potatomesh:potatomesh web/views ./views
+COPY --chown=potatomesh:potatomesh web/scripts ./scripts

# Copy SQL schema files from data directory
COPY --chown=potatomesh:potatomesh data/*.sql /data/
+COPY --chown=potatomesh:potatomesh data/mesh_ingestor/decode_payload.py /app/data/mesh_ingestor/decode_payload.py

-# Create data directory for SQLite database
-RUN mkdir -p /app/data /app/.local/share/potato-mesh && \
-    chown -R potatomesh:potatomesh /app/data /app/.local
+# Create data and configuration directories with correct ownership
+RUN mkdir -p /app/.local/share/potato-mesh \
+    && mkdir -p /app/.config/potato-mesh/well-known \
+    && chown -R potatomesh:potatomesh /app/.local/share /app/.config

# Switch to non-root user
USER potatomesh
@@ -78,13 +96,16 @@ USER potatomesh
EXPOSE 41447

# Default environment variables (can be overridden by host)
-ENV APP_ENV=production \
-    RACK_ENV=production \
+ENV RACK_ENV=production \
+    APP_ENV=production \
+    MESHTASTIC_PYTHON=/opt/meshtastic-venv/bin/python \
+    XDG_DATA_HOME=/app/.local/share \
+    XDG_CONFIG_HOME=/app/.config \
    SITE_NAME="PotatoMesh Demo" \
    INSTANCE_DOMAIN="potato.example.com" \
    CHANNEL="#LongFast" \
    FREQUENCY="915MHz" \
    MAP_CENTER="38.761944,-27.090833" \
    MAP_ZOOM="" \
    MAX_DISTANCE=42 \
    CONTACT_LINK="#potatomesh:dod.ngo" \
    DEBUG=0
@@ -1,3 +1,6 @@
+<!-- Copyright © 2025-26 l5yth & contributors -->
+<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
+
# Prometheus Monitoring for PotatoMesh

PotatoMesh exposes runtime telemetry through a dedicated Prometheus endpoint so you can
@@ -1,3 +1,6 @@
+<!-- Copyright © 2025-26 l5yth & contributors -->
+<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
+
# 🥔 PotatoMesh

[![CI Status](https://github.com/l5yth/potato-mesh/workflows/Tests/badge.svg)](https://github.com/l5yth/potato-mesh/actions)
@@ -30,7 +33,7 @@ _No MQTT clutter, just local LoRa aether._

Live demo for Berlin: [potatomesh.net](https://potatomesh.net)

-![Screenshot of the potatomesh website](./docs/imgs/dark.png)
+![Screenshot of the potatomesh website](./docs/imgs/potatomesh-v0.6.0.png)

## Web App

@@ -125,6 +128,28 @@ well-known document is staged in

The database can be found in `$XDG_DATA_HOME/potato-mesh`.

### Custom Pages

Instance operators can publish static content pages (contact details, mesh
protocol information, legal notices, etc.) by placing Markdown files in the
`pages/` directory inside `web/`. Each `.md` file automatically becomes a nav
entry and a route under `/pages/<slug>`.

Files are named `<sort-prefix>-<slug>.md` — the numeric prefix controls
navigation order and the slug becomes the URL path and nav label:

| Filename          | Nav Label | URL                |
| ----------------- | --------- | ------------------ |
| `1-about.md`      | About     | `/pages/about`     |
| `5-rules.md`      | Rules     | `/pages/rules`     |
| `9-contact.md`    | Contact   | `/pages/contact`   |
| `20-impressum.md` | Impressum | `/pages/impressum` |

A default `1-about.md` ships with the app. In Docker deployments the directory
is exposed as the `potatomesh_pages` volume (mounted at `/app/pages`) so you can
add or edit pages without rebuilding the image. The pages directory can also be
overridden with the `PAGES_DIR` environment variable.
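A hedged sketch of that naming convention (illustrative Python, not the web app's actual Ruby routing code; the helper name is hypothetical):

```python
import re

def parse_page_filename(filename: str) -> tuple[int, str] | None:
    """Split '<sort-prefix>-<slug>.md' into (nav_order, slug), or None if malformed."""
    m = re.fullmatch(r"(\d+)-([a-z0-9-]+)\.md", filename)
    if m is None:
        return None
    return int(m.group(1)), m.group(2)

# Matches the table above: slug becomes the /pages/<slug> route.
assert parse_page_filename("1-about.md") == (1, "about")
assert parse_page_filename("20-impressum.md") == (20, "impressum")
```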
### Federation

PotatoMesh instances can optionally federate by publishing signed metadata and
@@ -275,9 +300,9 @@ docker pull ghcr.io/l5yth/potato-mesh-matrix-bridge-linux-arm64:latest
docker pull ghcr.io/l5yth/potato-mesh-matrix-bridge-linux-armv7:latest

# version-pinned examples
-docker pull ghcr.io/l5yth/potato-mesh-web-linux-amd64:v0.5.5
-docker pull ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:v0.5.5
-docker pull ghcr.io/l5yth/potato-mesh-matrix-bridge-linux-amd64:v0.5.5
+docker pull ghcr.io/l5yth/potato-mesh-web-linux-amd64:v0.6.2
+docker pull ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:v0.6.2
+docker pull ghcr.io/l5yth/potato-mesh-matrix-bridge-linux-amd64:v0.6.2
```

Note: `latest` is only published for non-prerelease versions. Pre-release tags
+6
-2
@@ -1,6 +1,10 @@
+<!-- Copyright © 2025-26 l5yth & contributors -->
+<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
+
-# Meshtastic Reader
+# PotatoMesh Mobile

-Meshtastic Reader – read-only PotatoMesh chat client for Android and iOS.
+PotatoMesh Mobile — read-only mesh chat client for Android and iOS.
+Supports Meshtastic and MeshCore networks.

## Setup
@@ -15,11 +15,11 @@
  <key>CFBundlePackageType</key>
  <string>FMWK</string>
  <key>CFBundleShortVersionString</key>
-  <string>0.6.0</string>
+  <string>0.6.2</string>
  <key>CFBundleSignature</key>
  <string>????</string>
  <key>CFBundleVersion</key>
-  <string>0.6.0</string>
+  <string>0.6.2</string>
  <key>MinimumOSVersion</key>
  <string>14.0</string>
</dict>
+1
-1
@@ -1,7 +1,7 @@
name: potato_mesh_reader
description: Meshtastic Reader — read-only view for PotatoMesh messages.
publish_to: "none"
-version: 0.6.0
+version: 0.6.2

environment:
  sdk: ">=3.4.0 <4.0.0"
@@ -50,6 +50,7 @@ USER potatomesh
ENV CONNECTION=/dev/ttyACM0 \
    CHANNEL_INDEX=0 \
    DEBUG=0 \
+    PROTOCOL=meshtastic \
    ALLOWED_CHANNELS="" \
    HIDDEN_CHANNELS="" \
    INSTANCE_DOMAIN="" \
@@ -77,6 +78,7 @@ USER ContainerUser
ENV CONNECTION=/dev/ttyACM0 \
    CHANNEL_INDEX=0 \
    DEBUG=0 \
+    PROTOCOL=meshtastic \
    ALLOWED_CHANNELS="" \
    HIDDEN_CHANNELS="" \
    INSTANCE_DOMAIN="" \
+1
-1
@@ -18,7 +18,7 @@ The ``data.mesh`` module exposes helpers for reading Meshtastic node and
message information before forwarding it to the accompanying web application.
"""

-VERSION = "0.6.0"
+VERSION = "0.6.2"
"""Semantic version identifier shared with the dashboard and front-end."""

__version__ = VERSION
@@ -27,6 +27,8 @@ CREATE TABLE IF NOT EXISTS instances (
    last_update_time INTEGER,
    is_private BOOLEAN NOT NULL DEFAULT 0,
    nodes_count INTEGER,
+    meshcore_nodes_count INTEGER,
+    meshtastic_nodes_count INTEGER,
    contact_link TEXT,
    signature TEXT
);
@@ -1,3 +1,6 @@
+<!-- Copyright © 2025-26 l5yth & contributors -->
+<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->
+
## Mesh ingestor contracts (stable interfaces)

This repo’s ingestion pipeline is split into:
@@ -53,6 +56,32 @@ Single message payload:
- RF: `snr` (float|nil), `rssi` (int|nil), `hop_limit` (int|nil)
- Meta: `channel_name` (string; only when not encrypted and known), `ingestor` (canonical host id), `lora_freq`, `modem_preset`

**Cross-ingestor deduplication.** The `id` field is the sole dedup key — the server collapses repeat POSTs on the `messages.id` PRIMARY KEY. Protocols that lack a firmware-assigned packet ID MUST derive a stable, sender-side fingerprint so that the same physical transmission heard by multiple ingestors produces the same `id`. The id MUST fit in 53 bits (`0 <= id <= (1 << 53) - 1`) to round-trip through the JavaScript frontend without precision loss.

For MeshCore the canonical fingerprint is:

```
v1:<sender_identity>:<sender_timestamp>:<discriminator>:<text>
```

hashed with SHA-256 and truncated to 53 bits (first 7 bytes, masked). Components:

- `sender_identity` — for channel messages, the lowercased+stripped sender name parsed from a leading `SenderName:` prefix in the message text (split on the first colon, surrounding whitespace stripped); for direct messages, the sender's `pubkey_prefix` from the MeshCore event payload. Empty string when unavailable — when the channel-message text lacks any `SenderName:` prefix the dedup degrades and two distinct senders sharing timestamp + channel + text collide. In practice MeshCore clients always prefix the name; the residual risk is anonymous/malformed transmissions.
- `sender_timestamp` — Unix seconds from the sender's clock (identical across receivers).
- `discriminator` — `c<N>` for channel messages on channel `N`, `dm` for direct messages.
- `text` — the message text exactly as transmitted.

The `v1:` prefix lets the format evolve (e.g. add a channel-secret hash) without colliding with previously-written ids.
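A minimal sketch of that derivation (illustrative Python, not the shipped ingestor code; the function name is hypothetical):

```python
import hashlib

MASK_53 = (1 << 53) - 1  # ids must round-trip through JavaScript floats

def meshcore_v1_id(sender_identity: str, sender_timestamp: int,
                   discriminator: str, text: str) -> int:
    """Derive the 53-bit v1 dedup id for a MeshCore message."""
    fingerprint = f"v1:{sender_identity}:{sender_timestamp}:{discriminator}:{text}"
    digest = hashlib.sha256(fingerprint.encode("utf-8")).digest()
    # First 7 bytes (56 bits), masked down to 53 bits.
    return int.from_bytes(digest[:7], "big") & MASK_53

# The same transmission heard by two ingestors yields the same id -> one row.
assert meshcore_v1_id("alice", 1714212345, "c0", "hello mesh") <= MASK_53
```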

**Known limitations of the v1 fingerprint:**

- *Format-string ambiguity around `:`.* Components are joined with literal colons and not length-prefixed, so a colon embedded in `sender_identity` or `text` shifts the boundary between fields. In theory two distinct triples (e.g. `sender_identity="a:b"` vs `sender_identity="a"` with a leading `b:` in `text`) can produce the same fingerprint. In practice this is vanishingly rare — MeshCore sender names rarely contain colons and even then both senders would have to land on the same timestamp/channel — but a `v2` revision should switch to a delimiter that cannot appear in any component (e.g. `\x00`) or length-prefix each field.
- *meshcore_py text-decoding inconsistency.* The upstream `meshcore_py` reader strips trailing `\0` bytes on the real-time `CHANNEL_MSG_RECV` path but not on the sync-replay path. If the same physical message is heard once in real-time and once via sync-replay, the byte sequences differ → different fingerprints → duplicate row. Out of scope for the ingestor; track upstream.
- *Sender-side clock reset.* MeshCore nodes without an RTC start `sender_timestamp` from `0` after reboot. Two messages from the same sender containing the same text within one second of power-on collapse into a single row. Acceptable trade-off given the alternative (no dedup at all).
- *Relay-rewritten `sender_timestamp` (#756).* MeshCore has been observed delivering the same physical packet twice with a rewritten `sender_timestamp` (≈10 s later, same `from_id`/`channel`/`text`), which flips the v1 fingerprint and bypasses the `messages.id` PK collapse. To cover this, the web app runs an additional content-level dedup on insert: for `protocol = "meshcore"` with non-empty `text` and a known `from_id`, a second row matching `(from_id, to_id, channel, text)` within ±30 s of `rx_time` is dropped (window lives in `MESHCORE_CONTENT_DEDUP_WINDOW_SECONDS`); a sketch of this check follows this list. The window is ~3× the observed relay delta; legitimate rapid re-sends of identical short text (e.g. `hi`, `ack`, `ok`, `test`) from the same sender on the same channel **within 30 s** will be silently collapsed into one row. Ingestors MUST still produce deterministic v1 ids — this content-level layer is additive, not a replacement. Pre-existing duplicates are cleared once by a `PRAGMA user_version`-gated one-shot backfill on startup.
- *Concurrent-insert race (#756).* The content-dedup SELECT and the downstream INSERT are not currently wrapped in a shared transaction, so two concurrent Puma threads carrying the same content with different ids can both pass the pre-check and both insert. Duplicates produced this way are narrow (single-node multi-threaded ingest) and are not cleaned up on subsequent boots because the backfill is one-shot. If the race is ever observed in production, tighten `insert_message` to wrap the meshcore pre-check + id-PK path in `db.transaction(:immediate)`.
- *Upstream `meshcore` reader crash on truncated advertisements (#754).* `meshcore-py` 2.3.6 (latest at the time of writing) raises `IndexError` from `MessageReader.handle_rx` at `reader.py:365` when a `DEVICE_INFO`/advertisement frame declares `fw_ver >= 10` but omits the trailing `path_hash_mode` byte. Because the frame is parsed inside a detached `asyncio.create_task(...)`, the exception surfaces as `Task exception was never retrieved` on stderr and the event for that frame is lost. The ingestor installs a runtime patch (`data/mesh_ingestor/protocols/_meshcore_patches.py`) that wraps `handle_rx`, logs one line with the first 32 bytes of the offending frame under `context=meshcore.reader.patch`, and lets the task exit cleanly; a loop-level handler (`context=asyncio.unhandled`) catches anything the targeted patch misses. Both shims are additive and will be removed once upstream ships a defensive length check.
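The ±30 s content-level check referenced above, as a hedged Python/SQLite sketch (the web app's real implementation is Ruby; the table and column names follow the payload spec in this document):

```python
import sqlite3

MESHCORE_CONTENT_DEDUP_WINDOW_SECONDS = 30  # ~3x the observed relay delta

def is_content_duplicate(db: sqlite3.Connection, msg: dict) -> bool:
    """Return True when an equivalent meshcore row already exists nearby in time."""
    if msg.get("protocol") != "meshcore" or not msg.get("text") or msg.get("from_id") is None:
        return False
    row = db.execute(
        "SELECT 1 FROM messages"
        " WHERE from_id = ? AND to_id IS ? AND channel IS ? AND text = ?"
        "   AND ABS(rx_time - ?) <= ?"
        " LIMIT 1",
        (msg["from_id"], msg.get("to_id"), msg.get("channel"), msg["text"],
         msg["rx_time"], MESHCORE_CONTENT_DEDUP_WINDOW_SECONDS),
    ).fetchone()
    return row is not None
```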

#### `POST /api/positions`

Single position payload:
@@ -70,6 +70,7 @@ _CONFIG_ATTRS = {
    "CHANNEL_INDEX",
    "DEBUG",
    "INSTANCE",
+    "INSTANCES",
    "API_TOKEN",
    "ALLOWED_CHANNELS",
    "HIDDEN_CHANNELS",
@@ -16,6 +16,7 @@

from __future__ import annotations

+import math
import os
from datetime import datetime, timezone
from typing import Any
@@ -81,6 +82,37 @@ Accepted values are ``meshtastic`` (default) and ``meshcore``.
"""


def _parse_lora_freq_env(raw: str | None) -> float | int | None:
    """Parse the ``FREQUENCY`` environment variable into a numeric LoRa frequency.

    Returns an :class:`int` for whole-number strings (e.g. ``"868"``), a
    :class:`float` for decimal strings (e.g. ``"869.525"``), or ``None`` when
    *raw* is empty, absent, non-numeric, or non-finite (e.g. ``"inf"``).

    Non-numeric labels such as ``"EU_868"`` intentionally return ``None`` so
    that :data:`LORA_FREQ` is left unset and :func:`~interfaces._ensure_radio_metadata`
    can still populate it from the detected radio configuration.

    Parameters:
        raw: Raw value of the ``FREQUENCY`` environment variable.

    Returns:
        Numeric frequency value, or ``None``.
    """
    if not raw:
        return None
    stripped = raw.strip()
    if not stripped:
        return None
    try:
        as_float = float(stripped)
    except ValueError:
        return None
    if not math.isfinite(as_float):
        return None
    return int(as_float) if as_float == int(as_float) else as_float


def _parse_channel_names(raw_value: str | None) -> tuple[str, ...]:
    """Normalise a comma-separated list of channel names.

@@ -129,6 +161,11 @@ def _resolve_instance_domain() -> str:

    Reads the :envvar:`INSTANCE_DOMAIN` variable. When the value does not
    contain a scheme, ``https://`` is prepended automatically.

    .. note::

       Kept for backward compatibility with existing tests and callers.
       New code should use :func:`_resolve_instance_domains` instead.
    """

    configured_instance = os.environ.get("INSTANCE_DOMAIN", "").rstrip("/")
@@ -139,13 +176,91 @@ def _resolve_instance_domain() -> str:
    return configured_instance


-INSTANCE = _resolve_instance_domain()
-API_TOKEN = os.environ.get("API_TOKEN", "")
def _normalise_domain(raw: str) -> str:
    """Strip whitespace and trailing slashes, prepend ``https://`` when needed.

    Parameters:
        raw: Single domain string to normalise.

    Returns:
        A URL string with a scheme prefix.
    """

    domain = raw.strip().rstrip("/")
    if domain and "://" not in domain:
        return f"https://{domain}"
    return domain


def _resolve_instance_domains() -> tuple[tuple[str, str], ...]:
    """Parse :envvar:`INSTANCE_DOMAIN` and :envvar:`API_TOKEN` into paired tuples.

    When ``INSTANCE_DOMAIN`` contains comma-separated values, each entry is
    treated as an independent target. ``API_TOKEN`` is either broadcast to
    every target (single value) or positionally paired (comma-separated with
    a matching count).

    Returns:
        A tuple of ``(instance_url, api_token)`` pairs, deduplicated by URL.

    Raises:
        ValueError: When the number of comma-separated tokens exceeds the
            number of domains.
    """

    raw_domain = os.environ.get("INSTANCE_DOMAIN", "")
    raw_token = os.environ.get("API_TOKEN", "")

    domains: list[str] = []
    seen: set[str] = set()
    for part in raw_domain.split(","):
        normalised = _normalise_domain(part)
        if not normalised:
            continue
        key = normalised.casefold()
        if key in seen:
            continue
        seen.add(key)
        domains.append(normalised)

    if not domains:
        return ()

    tokens = [t.strip() for t in raw_token.split(",")]
    # A single token (including empty string) is broadcast to all domains.
    if len(tokens) == 1:
        token = tokens[0]
        return tuple((d, token) for d in domains)

    if len(tokens) != len(domains):
        raise ValueError(
            f"API_TOKEN has {len(tokens)} comma-separated values but "
            f"INSTANCE_DOMAIN has {len(domains)}; counts must match or "
            f"API_TOKEN must be a single value"
        )

    return tuple(zip(domains, tokens))
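A quick usage sketch for the pairing rules above (illustrative values; assumes the module containing `_resolve_instance_domains` is already imported):

```python
import os

# Hypothetical environment: two targets with positionally paired tokens.
os.environ["INSTANCE_DOMAIN"] = "mesh.example.org, https://second.example.net/"
os.environ["API_TOKEN"] = "tok-a, tok-b"
print(_resolve_instance_domains())
# (('https://mesh.example.org', 'tok-a'), ('https://second.example.net', 'tok-b'))

# A single token is broadcast to every domain instead.
os.environ["API_TOKEN"] = "shared-token"
print(_resolve_instance_domains())
# (('https://mesh.example.org', 'shared-token'),
#  ('https://second.example.net', 'shared-token'))
```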


INSTANCES: tuple[tuple[str, str], ...] = _resolve_instance_domains()
"""Paired ``(instance_url, api_token)`` tuples derived from the environment."""

INSTANCE = INSTANCES[0][0] if INSTANCES else _resolve_instance_domain()
"""First configured instance URL, kept for backward compatibility."""

API_TOKEN = INSTANCES[0][1] if INSTANCES else os.environ.get("API_TOKEN", "")
"""API token for the first configured instance, kept for backward compatibility."""

ENERGY_SAVING = os.environ.get("ENERGY_SAVING") == "1"
"""When ``True``, enables the ingestor's energy saving mode."""

-LORA_FREQ: float | int | str | None = None
-"""Frequency of the local node's configured LoRa region in MHz or raw region label."""
+LORA_FREQ: float | int | str | None = _parse_lora_freq_env(os.environ.get("FREQUENCY"))
+"""Frequency of the local node's configured LoRa region in MHz or raw region label.
+
+Pre-seeded from the ``FREQUENCY`` environment variable when set to a finite
+numeric value, allowing operators to override auto-detected values.
+Non-numeric or non-finite values are ignored so that auto-detection from the
+radio interface can still fill this in.
+"""

MODEM_PRESET: str | None = None
"""CamelCase modem preset name reported by the local node."""
@@ -202,6 +317,7 @@ __all__ = [
    "HIDDEN_CHANNELS",
    "ALLOWED_CHANNELS",
    "INSTANCE",
+    "INSTANCES",
    "API_TOKEN",
    "ENERGY_SAVING",
    "LORA_FREQ",
@@ -24,7 +24,7 @@ import time

from pubsub import pub

-from . import config, handlers, ingestors, interfaces
+from . import config, handlers, ingestors, interfaces, queue
from .mesh_protocol import MeshProtocol
from .utils import _retry_dict_snapshot

@@ -488,22 +488,32 @@ def _check_inactivity_reconnect(state: _DaemonState) -> bool:
    ):
        return False

-    if (
-        state.last_inactivity_reconnect is not None
-        and now - state.last_inactivity_reconnect < state.inactivity_reconnect_secs
-    ):
-        return False
+    if state.last_inactivity_reconnect is not None:
+        # For explicit disconnects use the shorter max-reconnect-delay window
+        # so the daemon reconnects promptly without thrashing. For inactivity-
+        # only triggers retain the full inactivity window as the throttle.
+        throttle_secs = (
+            config._RECONNECT_MAX_DELAY_SECS
+            if believed_disconnected
+            else state.inactivity_reconnect_secs
+        )
+        if now - state.last_inactivity_reconnect < throttle_secs:
+            return False

    reason = (
        "disconnected"
        if believed_disconnected
        else f"no data for {inactivity_elapsed:.0f}s"
    )
+    # Uses the module-level global STATE — acceptable because there is only
+    # one queue in production, and in tests this is purely informational.
+    queue_depth = len(queue.STATE.queue)
    config._debug_log(
        "Mesh interface inactivity detected",
        context="daemon.interface",
        severity="warn",
        reason=reason,
+        queue_depth=queue_depth,
    )
    state.last_inactivity_reconnect = now
    _close_interface(state.iface)
@@ -631,6 +641,17 @@ def main(*, provider: MeshProtocol | None = None) -> None:
        topics=subscribed,
    )

+    if not config.INSTANCES and not config.INSTANCE:
+        config._debug_log(
+            "No INSTANCE_DOMAIN configured — cannot forward data; exiting",
+            context="daemon.main",
+            severity="error",
+            always=True,
+        )
+        return
+
+    queue._start_queue_drainer(queue.STATE)
+
    state = _DaemonState(
        provider=provider,
        stop=threading.Event(),
@@ -666,11 +687,12 @@ def main(*, provider: MeshProtocol | None = None) -> None:
    signal.signal(signal.SIGINT, handle_sigint)
    signal.signal(signal.SIGTERM, handle_sigterm)

+    instance_label = ", ".join(inst for inst, _ in config.INSTANCES)
    config._debug_log(
        "Mesh daemon starting",
        context="daemon.main",
        severity="info",
-        target=config.INSTANCE or "(no INSTANCE_DOMAIN configured)",
+        target=instance_label,
        port=config.CONNECTION or "auto",
        channel=config.CHANNEL_INDEX,
    )
@@ -40,7 +40,10 @@ from ._state import (
    register_host_node_id,
)
from .generic import (
+    _coerce_emoji_codepoint,
    _is_encrypted_flag,
+    _is_likely_reaction,
+    _is_reaction_placeholder_text,
    _portnum_candidates,
    on_receive,
    store_packet_dict,
@@ -79,7 +82,10 @@ __all__ = [
    "_VALID_TELEMETRY_TYPES",
    "_apply_radio_metadata",
    "_apply_radio_metadata_to_nodes",
+    "_coerce_emoji_codepoint",
    "_is_encrypted_flag",
+    "_is_likely_reaction",
+    "_is_reaction_placeholder_text",
    "_mark_packet_seen",
    "_normalize_trace_hops",
    "_portnum_candidates",
@@ -82,6 +82,141 @@ def _portnum_candidates(name: str) -> set[int]:
    return candidates


def _coerce_emoji_codepoint(raw: object) -> str | None:
    """Normalise an emoji candidate, converting numeric codepoints to characters.

    Meshtastic firmware may transmit reaction emoji as a Unicode codepoint
    integer (e.g. ``128077`` for 👍) rather than as the character itself.
    Values above 127 are treated as codepoints and converted via :func:`chr`;
    small values (≤ 127) are preserved as strings so that slot markers such as
    ``"1"`` pass through unchanged.

    When a numeric value claims to be a codepoint but lies outside the valid
    Unicode range (``> 0x10FFFF``), ``None`` is returned rather than the
    decimal string form — storing a multi-digit integer as the emoji would
    leak garbage into the rendered chat (numeric strings of length > 1 are
    not valid slot markers either).

    Parameters:
        raw: Raw emoji value from a decoded packet field.

    Returns:
        Normalised emoji string, or ``None`` when *raw* is empty or invalid.
    """

    if raw is None:
        return None

    # Numeric value (int / float) -------------------------------------------
    if isinstance(raw, (int, float)):
        n = int(raw)
        if n > 127:
            try:
                return chr(n)
            except (ValueError, OverflowError):
                # Value claimed to be a codepoint but is out of Unicode range;
                # do NOT preserve the decimal form (would render as garbage).
                return None
        text = str(raw).strip()
        return text or None

    # String (possibly a digit-encoded codepoint) ---------------------------
    try:
        text = str(raw).strip()
    except Exception:
        return None
    if not text:
        return None
    if text.isdigit():
        n = int(text)
        if n > 127:
            try:
                return chr(n)
            except (ValueError, OverflowError):
                # See comment above — multi-digit numeric strings outside the
                # Unicode range are not valid emoji nor slot markers.
                return None
    return text


#: Maximum Unicode codepoint length for text that may still qualify as a
#: reaction placeholder. A bare emoji (single grapheme) is at most 2
#: codepoints — for example a base character plus a single variation
#: selector (U+FE0F). Multi-codepoint ZWJ families (👨‍👩‍👧, 🏳️‍🌈) are
#: NOT accepted as placeholder text intentionally: matching them would
#: also let through short CJK messages like ``"你好世界吗"`` (5 codepoints,
#: no ASCII letters), causing real prose to be misclassified as a reaction.
#: This constant must stay aligned with the JS frontend's
#: ``isReactionPlaceholderText`` (``message-replies.js``); changing one
#: side without the other re-introduces ingest/render disagreement.
_REACTION_PLACEHOLDER_MAX_CODEPOINTS = 2


def _is_reaction_placeholder_text(text: str | None) -> bool:
    """Return ``True`` when *text* looks like a reaction slot or count marker.

    Reaction packets carry either no text at all, a small numeric count (e.g.
    ``"1"``, ``"3"``), or occasionally a bare emoji character. Anything that
    looks like substantive prose should cause the packet to be classified as a
    regular text message instead of a reaction.

    Parameters:
        text: Message text to inspect (may be ``None``).

    Returns:
        ``True`` when *text* is absent, blank, a digit string, or a short
        non-ASCII-letter sequence (bare emoji).
    """

    if not text:
        return True
    stripped = text.strip()
    if not stripped:
        return True
    if stripped.isdigit():
        return True
    # Bare emoji heuristic — see _REACTION_PLACEHOLDER_MAX_CODEPOINTS.
    if len(stripped) <= _REACTION_PLACEHOLDER_MAX_CODEPOINTS and not any(
        c.isascii() and c.isalpha() for c in stripped
    ):
        return True
    return False


def _is_likely_reaction(
    portnum: str | None,
    portnum_int: int | None,
    reply_id: int | None,
    emoji: str | None,
    text: str | None,
) -> bool:
    """Determine whether a packet should be classified as a reaction.

    A packet is a reaction when it carries the ``REACTION_APP`` portnum
    explicitly, **or** when it has both a ``reply_id`` and an ``emoji`` and its
    text content is absent or a mere placeholder (digit slot / bare emoji).

    Parameters:
        portnum: String portnum label from the packet.
        portnum_int: Integer portnum, if available.
        reply_id: Reply-to message identifier.
        emoji: Normalised emoji string (after codepoint coercion).
        text: Message text extracted from the packet.

    Returns:
        ``True`` when the packet should be treated as a reaction.
    """

    if portnum == "REACTION_APP":
        return True
    reaction_port_candidates = _portnum_candidates("REACTION_APP")
    if portnum_int is not None and portnum_int in reaction_port_candidates:
        return True
    if reply_id is not None and emoji is not None:
        return _is_reaction_placeholder_text(text)
    return False


def _is_encrypted_flag(value: object) -> bool:
    """Return ``True`` when ``value`` represents an encrypted payload.

@@ -244,16 +379,7 @@ def store_packet_dict(packet: Mapping) -> None:
        "emoji",
        default=None,
    )
-    emoji = None
-    if emoji_raw is not None:
-        try:
-            emoji_text = str(emoji_raw)
-        except Exception:
-            emoji_text = None
-        else:
-            emoji_text = emoji_text.strip()
-        if emoji_text:
-            emoji = emoji_text
+    emoji = _coerce_emoji_codepoint(emoji_raw)

    routing_section = decoded.get("routing") if isinstance(decoded, Mapping) else None
    routing_port_candidates = _portnum_candidates("ROUTING_APP")
@@ -292,8 +418,8 @@ def store_packet_dict(packet: Mapping) -> None:
        allowed_port_ints.add(portnum_int)
        allowed_port_values.add(str(portnum_int))

-    is_reaction_packet = portnum == "REACTION_APP" or (
-        reply_id is not None and emoji is not None
+    is_reaction_packet = _is_likely_reaction(
+        portnum, portnum_int, reply_id, emoji, text
    )
    if is_reaction_packet and portnum_int is not None:
        allowed_port_ints.add(portnum_int)
@@ -511,16 +511,96 @@ def _resolve_lora_message(local_config: Any) -> Any | None:
    return None


# Maps Meshtastic region enum name to (base_freq_MHz, channel_spacing_MHz).
# Values are derived from the Meshtastic firmware RegionInfo tables.
# Used by _computed_channel_frequency to derive the actual radio frequency
# from the region and channel index.
_REGION_CHANNEL_PARAMS: dict[str, tuple[float, float]] = {
    "US": (902.0, 0.25),  # 902–928 MHz; e.g. ch 52 ≈ 915 MHz at 250 kHz spacing
    "EU_433": (433.175, 0.2),
    "EU_868": (869.525, 0.5),  # actual primary ≈ 869.525 MHz, not 868
    "CN": (470.0, 0.2),
    "JP": (920.875, 0.5),
    "ANZ": (916.0, 0.5),
    "KR": (921.9, 0.5),
    "TW": (923.0, 0.5),
    "RU": (868.9, 0.5),
    "IN": (865.0, 0.5),
    "NZ_865": (864.0, 0.5),
    "TH": (920.0, 0.5),
    "LORA_24": (2400.0, 0.5),
    "UA_433": (433.175, 0.2),
    "UA_868": (868.0, 0.5),
    "MY_433": (433.0, 0.2),
    "MY_919": (919.0, 0.5),
    "SG_923": (923.0, 0.5),
    "PH_433": (433.0, 0.2),
    "PH_868": (868.0, 0.5),
    "PH_915": (915.0, 0.5),
    "ANZ_433": (433.0, 0.2),
    "KZ_433": (433.0, 0.2),
    "KZ_863": (863.125, 0.5),
    "NP_865": (865.0, 0.5),
    "BR_902": (902.0, 0.25),
    # IL (Israel) is absent from meshtastic Python lib 2.7.8 protobufs; the
    # enum value is unresolvable at runtime. Operators on IL firmware should
    # set the FREQUENCY environment variable to override.
}


def _computed_channel_frequency(
    enum_name: str | None,
    channel_num: int | None,
) -> int | None:
    """Compute the floor MHz frequency for a known region and channel index.

    Looks up *enum_name* in :data:`_REGION_CHANNEL_PARAMS` and returns
    ``floor(base_freq + channel_num * spacing)``. Returns ``None`` when the
    region is not in the table. A missing or negative *channel_num* is
    treated as 0 so the base frequency is always usable.

    Args:
        enum_name: Region enum name as returned by
            :func:`_enum_name_from_field`, e.g. ``"EU_868"`` or ``"US"``.
        channel_num: Zero-based channel index from the device LoRa config.

    Returns:
        Floored MHz as :class:`int`, or ``None`` if the region is unknown.
    """
    if enum_name is None:
        return None
    params = _REGION_CHANNEL_PARAMS.get(enum_name)
    if params is None:
        return None
    base, spacing = params
    idx = channel_num if (isinstance(channel_num, int) and channel_num >= 0) else 0
    return math.floor(base + idx * spacing)
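For example, the US table entry reproduces the frequency noted in its comment (a quick check against the helper above):

```python
# 902.0 MHz base + 52 * 0.25 MHz spacing = 915.0 MHz, floored to 915
assert _computed_channel_frequency("US", 52) == 915
# A missing channel index falls back to the base frequency: floor(869.525)
assert _computed_channel_frequency("EU_868", None) == 869
# Unknown regions defer to the digit-parsing fallback steps below
assert _computed_channel_frequency("UNKNOWN_REGION", 3) is None
```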


def _region_frequency(lora_message: Any) -> int | float | str | None:
    """Derive the LoRa region frequency in MHz or the region label from ``lora_message``.

    Numeric override values are floored to the nearest MHz to align with the
    integer frequencies expected elsewhere in the ingestion pipeline.
    Frequency sources are tried in priority order:

    1. ``override_frequency > 0`` — explicit radio override, floored to MHz.
    2. :data:`_REGION_CHANNEL_PARAMS` lookup + ``channel_num`` — actual
       band-plan frequency derived from the device's region and channel index,
       floored to MHz.
    3. Largest digit token ≥ 100 parsed from the region enum name string.
    4. Largest digit token < 100 from the enum name (reversed scan).
    5. Full enum name string, raw integer ≥ 100, or raw string as a label.

    Args:
        lora_message: A LoRa config protobuf message or compatible object.

    Returns:
        An integer MHz frequency, a fallback string label, or ``None``.
    """

    if lora_message is None:
        return None

    # Step 1 — explicit radio override
    override_frequency = getattr(lora_message, "override_frequency", None)
    if override_frequency is not None:
        if isinstance(override_frequency, (int, float)):
@@ -533,6 +613,15 @@ def _region_frequency(lora_message: Any) -> int | float | str | None:
    if region_value is None:
        return None
    enum_name = _enum_name_from_field(lora_message, "region", region_value)

+    # Step 2 — lookup table + channel offset (actual band-plan frequency)
+    if enum_name:
+        channel_num = getattr(lora_message, "channel_num", None)
+        computed = _computed_channel_frequency(enum_name, channel_num)
+        if computed is not None:
+            return computed

    # Steps 3–5 — parse digits from enum name (fallback for unknown regions)
    if enum_name:
        digits = re.findall(r"\d+", enum_name)
        for token in digits:
@@ -0,0 +1,161 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Runtime patches applied to the upstream ``meshcore`` library.

This module exists solely to paper over bugs in the third-party
``meshcore-py`` package while we wait for upstream fixes. Each patch is
narrow, idempotent, and preserves the original method on the target class so
that it can be reverted cleanly once a fix ships upstream.

Current patches:

* :func:`_wrap_handle_rx` — guards :meth:`meshcore.reader.MessageReader.handle_rx`
  against unhandled exceptions raised while decoding a single radio frame.
  Upstream 2.3.6 (latest at the time of writing) raises ``IndexError`` at
  ``reader.py:365`` when parsing a truncated ``DEVICE_INFO`` advertisement
  (``path_hash_mode = dbuf.read(1)[0]`` with an already-exhausted buffer).
  Because the frame is parsed inside a detached
  ``asyncio.create_task(...)`` the resulting exception surfaces as a noisy
  ``Task exception was never retrieved`` stderr dump and the decoded event
  for that frame is lost. See GitHub issue #754.

Apply the patches by calling :func:`apply` as early as possible after the
``meshcore`` package is imported. Re-invoking :func:`apply` is a no-op.
"""

from __future__ import annotations

from typing import Any

from .. import config

# Sentinel attribute set on a patched method so repeated imports/tests do
# not wrap the same function more than once. The name intentionally
# includes the project slug so we can grep for it while diagnosing.
_PATCH_MARKER = "_potato_mesh_patched"

# Cap on hex bytes dumped into the log per failure. Keeps the log line
# under a few hundred characters even for maximum-sized frames.
_PACKET_LOG_MAX_BYTES = 32


def apply() -> bool:
    """Install every known-needed patch on the upstream ``meshcore`` library.

    Safe to call multiple times; each patch is individually idempotent.

    Implicit contract with upstream: every patch here rebinds a method on
    the target *class*. This only affects call sites that perform an
    attribute lookup at call time (``reader.handle_rx(data)``) — not call
    sites that captured an unbound reference before :func:`apply` ran
    (``_rx = reader.handle_rx``). As of ``meshcore-py`` 2.3.6 the library
    always uses attribute-lookup-at-call, so this is fine; if a future
    release flips that, the patch silently no-ops and the original bug
    resurfaces. Spot-check after every upstream bump.

    Returns:
        ``True`` when at least one patch was installed during this call,
        ``False`` when every patch had already been applied (or when the
        ``meshcore`` library is not importable in this environment, e.g. a
        meshtastic-only test runner).
    """
    try:
        import meshcore.reader as _reader  # type: ignore[import-not-found]
    except ImportError:
        # Meshtastic-only runtimes never load this module's caller, but
        # imports from tests may still land here. Nothing to patch.
        return False

    return _wrap_handle_rx(_reader.MessageReader)


def _wrap_handle_rx(reader_cls: Any) -> bool:
    """Wrap ``reader_cls.handle_rx`` with an exception-swallowing shim.

    Parameters:
        reader_cls: The ``MessageReader`` class to patch in place.

    Returns:
        ``True`` when the wrap was installed on this call; ``False`` when
        the method had already been wrapped.
    """
    original = getattr(reader_cls, "handle_rx", None)
    if original is None:
        return False
    if getattr(original, _PATCH_MARKER, False):
        return False

    async def safe_handle_rx(self, data, *args, **kwargs):  # type: ignore[no-untyped-def]
        """Run the original ``handle_rx`` and convert hard failures to logs.

        A single malformed frame would otherwise kill the
        ``asyncio.create_task(reader.handle_rx(data))`` task spawned by the
        upstream connection layer, surfacing as ``Task exception was never
        retrieved`` in stderr and losing the event silently. We log once
        with the first few bytes of the offending frame for forensics and
        then return ``None`` so the task exits cleanly.
        """
        try:
            return await original(self, data, *args, **kwargs)
        except Exception as exc:  # noqa: BLE001 — deliberately broad: a
            # single malformed frame must not kill the reader. Narrower
            # excepts would hide future upstream failure modes (e.g.
            # ``struct.error``) the same way the current IndexError was
            # hidden before we added this shim.
            config._debug_log(
                "Suppressed meshcore reader exception on malformed frame",
                context="meshcore.reader.patch",
                severity="warning",
                always=True,
                error_class=type(exc).__name__,
                error_message=str(exc),
                packet_len=_safe_len(data),
                packet_hex=_hex_preview(data, _PACKET_LOG_MAX_BYTES),
            )
            return None

    setattr(safe_handle_rx, _PATCH_MARKER, True)
    # Preserve the pre-patch method under a stable name so operators and
    # future maintainers can revert the patch with one line.
    reader_cls._orig_handle_rx = original
    reader_cls.handle_rx = safe_handle_rx
    return True


def _safe_len(data: Any) -> int | None:
    """Return ``len(data)`` or ``None`` when the object is not sized."""
    try:
        return len(data)
    except TypeError:
        return None


def _hex_preview(data: Any, limit: int) -> str:
    """Return the first *limit* bytes of ``data`` as a lowercase hex string.

    Accepts anything that is a :class:`bytes`-like or supports ``bytes(data)``.
    On conversion failure returns an empty string — the log caller still gets
    the error class and message.
    """
    try:
        if not isinstance(data, (bytes, bytearray, memoryview)):
            data = bytes(data)
    except Exception:  # noqa: BLE001 — pure diagnostic path, never raise.
        return ""
    prefix = bytes(data[:limit])
    return prefix.hex()


__all__ = ["apply"]
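Outside the diff, the marker-based idempotency pattern is worth seeing on its own. Here is a minimal self-contained sketch with hypothetical names (`Reader`, `patch`), mirroring the apply-once contract above:

```python
# Minimal sketch of the idempotent class-patch pattern (hypothetical names).
_MARKER = "_already_patched"

class Reader:
    def handle(self, data: bytes) -> int:
        return data[0]  # raises IndexError on an empty frame, upstream-style

def patch(cls: type) -> bool:
    original = cls.handle
    if getattr(original, _MARKER, False):
        return False  # a second patch() call is a no-op
    def safe_handle(self, data: bytes):
        try:
            return original(self, data)
        except Exception:
            return None  # swallowed; the real module logs here
    setattr(safe_handle, _MARKER, True)
    cls._orig_handle = original  # kept so the patch can be reverted
    cls.handle = safe_handle
    return True

assert patch(Reader) is True
assert patch(Reader) is False          # idempotent on re-apply
assert Reader().handle(b"") is None    # malformed frame no longer raises
```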
@@ -35,8 +35,8 @@ Connection type is detected automatically from the target string:
Node identities are derived from the first four bytes (eight hex characters)
of each contact's 32-byte public key, formatted as ``!xxxxxxxx`` to match
the canonical node-ID schema used across the system. Ingested
``user.shortName`` is the first four hex digits of that key (two bytes),
not the advertised name.
``user.shortName`` is the first two bytes (four hex characters) of the
node ID, not the advertised name.
"""

from __future__ import annotations
@@ -70,6 +70,21 @@ from meshcore import (
    TCPConnection,
)

from . import _meshcore_patches

# Apply upstream-library patches before any ``MeshCore`` instance is built,
# otherwise the first malformed advertisement dies inside a detached asyncio
# task before our handler can observe it. See
# :mod:`data.mesh_ingestor.protocols._meshcore_patches` for the specific
# upstream bugs covered.
#
# This mutates the upstream class at import time. The blast radius is
# narrow because ``protocols/__init__.py`` exposes this module only through
# a lazy ``__getattr__`` and the daemon resolves it only when
# ``PROTOCOL=meshcore`` is active. Any future diagnostic CLI that imports
# this module will inherit the shim.
_meshcore_patches.apply()

from .. import config, ingestors as _ingestors, queue as _queue
from ..connection import default_serial_targets, parse_ble_target, parse_tcp_target
from ..serialization import _iso, _node_num_from_id
@@ -123,27 +138,58 @@ _MESHCORE_ADV_TYPE_ROLE: dict[int, str] = {
# ---------------------------------------------------------------------------


def _derive_message_id(sender_ts: int, discriminator: str, text: str) -> int:
    """Derive a stable 32-bit message ID from available MeshCore fields.
_MESHCORE_ID_BITS = 53
"""Width of the synthetic MeshCore message ID, in bits.

    MeshCore does not assign firmware-side packet IDs. This function
    produces a deterministic 32-bit integer so that re-delivered messages
    resolve to the same database row via the UPSERT ON CONFLICT path, while
    messages that differ in timestamp, channel/peer, or text content produce
    distinct IDs.
53 bits keeps the value within :js:data:`Number.MAX_SAFE_INTEGER`
(``2**53 - 1``) so the JSON ID round-trips through the JavaScript frontend
without precision loss, while giving roughly :math:`2^{26.5}` (~95 million)
distinct messages of birthday-collision headroom.
"""

_MESHCORE_ID_MASK = (1 << _MESHCORE_ID_BITS) - 1
"""Bitmask applied to the SHA-256 prefix to clamp the id to 53 bits."""


def _derive_message_id(
    sender_identity: str,
    sender_ts: int,
    discriminator: str,
    text: str,
) -> int:
    """Derive a stable 53-bit message ID from sender-side MeshCore fields.

    MeshCore does not assign firmware-side packet IDs. This function produces
    a deterministic 53-bit integer fingerprint of a physical transmission so
    that the same packet heard by multiple ingestors collapses to a single
    ``messages`` row via the ``messages.id`` PRIMARY KEY upsert path. Every
    component of the fingerprint is sender-side, ensuring two receivers with
    different clocks or roster state still compute the same value.

    Parameters:
        sender_ts: Unix timestamp from the sender's clock.
        discriminator: Channel index (``"c<N>"`` for channel messages) or
            pubkey prefix (for direct messages) to separate messages with
            the same timestamp.
        text: Message text.
        sender_identity: Stable sender identifier shared across receivers.
            For channel messages this is the lowercased+stripped sender name
            parsed from the message text via :func:`_parse_sender_name`; for
            direct messages it is the sender's MeshCore ``pubkey_prefix``.
            Must be a string (use ``""`` when unavailable).
        sender_ts: Unix timestamp from the sender's clock (identical across
            receivers regardless of receiver-side clock skew).
        discriminator: Namespace tag separating message classes that could
            otherwise collide. ``"c<N>"`` is reserved for channel messages
            on channel ``N``; ``"dm"`` is reserved for direct messages.
        text: Message text exactly as transmitted by the sender.

    Returns:
        A non-negative 32-bit integer suitable for the ``id`` column.
        A non-negative 53-bit integer suitable for the ``id`` column. The
        value is bounded by ``0 <= id <= (1 << 53) - 1`` so it survives the
        JSON → JavaScript number round-trip without precision loss.
    """
    data = f"{sender_ts}:{discriminator}:{text}".encode("utf-8", errors="replace")
    return int.from_bytes(hashlib.sha256(data).digest()[:4], "big")
    # The ``v1:`` prefix lets us evolve the fingerprint format (e.g. add a
    # channel-secret hash) by bumping to ``v2:`` without colliding with
    # existing ids written under the v1 scheme.
    fingerprint = f"v1:{sender_identity}:{sender_ts}:{discriminator}:{text}"
    digest = hashlib.sha256(fingerprint.encode("utf-8", errors="replace")).digest()
    return int.from_bytes(digest[:7], "big") & _MESHCORE_ID_MASK
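The 53-bit clamp is easy to sanity-check in isolation. A quick standalone verification (constants mirrored locally for the example):

```python
import hashlib

ID_BITS = 53
ID_MASK = (1 << ID_BITS) - 1
JS_MAX_SAFE_INT = 2**53 - 1

def derive_id(sender_identity: str, sender_ts: int, disc: str, text: str) -> int:
    fp = f"v1:{sender_identity}:{sender_ts}:{disc}:{text}"
    digest = hashlib.sha256(fp.encode("utf-8", errors="replace")).digest()
    return int.from_bytes(digest[:7], "big") & ID_MASK  # 56-bit prefix, clamped

a = derive_id("alice", 1764241436, "c0", "hello")
b = derive_id("alice", 1764241436, "c0", "hello")  # second receiver, same frame
assert a == b                      # deterministic across ingestors
assert 0 <= a <= JS_MAX_SAFE_INT   # survives the JSON -> JavaScript round-trip
```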


def _meshcore_node_id(public_key_hex: str | None) -> str | None:
@@ -165,23 +211,28 @@ def _meshcore_node_id(public_key_hex: str | None) -> str | None:
    return "!" + public_key_hex[:8].lower()


def _meshcore_short_name(public_key_hex: str | None) -> str:
    """Return the first four hex digits of a MeshCore public key as short name.
def _meshcore_short_name(node_id: str | None) -> str:
    """Derive a four-character short name from a canonical node ID.

    Meshtastic-style ``shortName`` fields are four characters wide; MeshCore
    ingest uses the leading two bytes of the 32-byte public key in lowercase
    hex so the label is stable and unique per key prefix.
    Uses the first two bytes (four hex characters) of the ``!xxxxxxxx`` node
    ID. This keeps the short name consistent with the node ID itself — if the
    node ID is later replaced when the real public key is heard, the short name
    will update alongside it.

    Parameters:
        public_key_hex: Full public key as a hex string from the MeshCore API.
        node_id: Canonical ``!xxxxxxxx`` node ID string (as returned by
            :func:`_meshcore_node_id`).

    Returns:
        Four lowercase hex characters (e.g. ``"aabb"``), or an empty string
        when the key is missing or shorter than four hex characters.
        Four lowercase hex characters (e.g. ``"cafe"``), or an empty string
        when the node ID is missing or too short.
    """
    if not public_key_hex or len(public_key_hex) < 4:
    if not node_id:
        return ""
    return public_key_hex[:4].lower()
    raw = node_id.lstrip("!")
    if len(raw) < 4:
        return ""
    return raw[:4].lower()

def _meshcore_adv_type_to_role(adv_type: object) -> str | None:
@@ -324,6 +375,7 @@ def _contact_to_node_dict(contact: dict) -> dict:
        Node dict compatible with the ``POST /api/nodes`` payload format.
    """
    pub_key = contact.get("public_key", "")
    node_id = _meshcore_node_id(pub_key)
    name = (contact.get("adv_name") or "").strip()
    role = _meshcore_adv_type_to_role(contact.get("type"))
    node: dict = {
@@ -331,7 +383,7 @@ def _contact_to_node_dict(contact: dict) -> dict:
        "protocol": "meshcore",
        "user": {
            "longName": name,
            "shortName": _meshcore_short_name(pub_key),
            "shortName": _meshcore_short_name(node_id),
            "publicKey": pub_key,
            **({"role": role} if role is not None else {}),
        },
@@ -377,13 +429,14 @@ def _self_info_to_node_dict(self_info: dict) -> dict:
    """
    name = (self_info.get("name") or "").strip()
    pub_key = self_info.get("public_key", "")
    node_id = _meshcore_node_id(pub_key)
    role = _meshcore_adv_type_to_role(self_info.get("adv_type"))
    node: dict = {
        "lastHeard": int(time.time()),
        "protocol": "meshcore",
        "user": {
            "longName": name,
            "shortName": _meshcore_short_name(pub_key),
            "shortName": _meshcore_short_name(node_id),
            "publicKey": pub_key,
            **({"role": role} if role is not None else {}),
        },
@@ -897,8 +950,18 @@ def _make_event_handlers(iface: _MeshcoreInterface, target: str | None) -> dict:
            )
            iface._synthetic_node_ids.add(mention_id)

        # The dedup fingerprint uses the parsed sender name (lowercased and
        # stripped) rather than ``from_id``: each ingestor independently
        # resolves Alice to either her real ``!aabbccdd`` (when she is in its
        # contact roster) or to a synthetic id derived from her name; the
        # parsed name lives in the message text itself, so it is identical
        # across all receivers regardless of roster state.
        sender_identity = (sender_name or "").strip().lower()

        packet = {
            "id": _derive_message_id(sender_ts, f"c{channel_idx}", text),
            "id": _derive_message_id(
                sender_identity, sender_ts, f"c{channel_idx}", text
            ),
            "rxTime": rx_time,
            "rx_time": rx_time,
            "from_id": from_id,
@@ -934,8 +997,12 @@ def _make_event_handlers(iface: _MeshcoreInterface, target: str | None) -> dict:
        pubkey_prefix = payload.get("pubkey_prefix", "")
        from_id = iface.lookup_node_id(pubkey_prefix)

        # ``pubkey_prefix`` is already a sender-side stable identifier (the
        # first six bytes of the sender's public key); ``"dm"`` namespaces
        # direct messages so they cannot collide with channel messages that
        # happen to share the other components.
        packet = {
            "id": _derive_message_id(sender_ts, pubkey_prefix or "", text),
            "id": _derive_message_id(pubkey_prefix or "", sender_ts, "dm", text),
            "rxTime": rx_time,
            "rx_time": rx_time,
            "from_id": from_id,
@@ -1008,6 +1075,46 @@ def _make_connection(target: str, baudrate: int) -> object:
    return SerialConnection(target, baudrate)


def _log_unhandled_loop_exception(
    loop: asyncio.AbstractEventLoop, context: dict
) -> None:
    """Route asyncio's "unhandled task exception" warnings through our logger.

    The upstream ``meshcore`` library spawns detached
    ``asyncio.create_task`` tasks for every inbound radio frame. When one
    of those tasks raises and nobody awaits the future, asyncio's default
    handler writes ``Task exception was never retrieved`` to stderr. That
    bypasses our structured log pipeline and clutters container logs.
    This handler preserves the same information under
    ``context=asyncio.unhandled`` so operators grep for one place.

    Parameters:
        loop: Event loop that surfaced the exception (unused but required
            by the asyncio handler signature).
        context: Asyncio exception-context dictionary. Fields we care
            about: ``message`` (human summary) and ``exception`` (the raw
            exception object, when available).
    """
    del loop
    exception = context.get("exception")
    task = context.get("task")
    task_name = None
    if task is not None:
        # Prefer the friendly ``get_name()``; fall back to ``repr`` for any
        # future Task-like object that does not implement it.
        get_name = getattr(task, "get_name", None)
        task_name = get_name() if callable(get_name) else repr(task)
    config._debug_log(
        context.get("message") or "Unhandled asyncio task exception",
        context="asyncio.unhandled",
        severity="error",
        always=True,
        error_class=type(exception).__name__ if exception else None,
        error_message=str(exception) if exception else None,
        task=task_name,
    )


async def _run_meshcore(
    iface: _MeshcoreInterface,
    target: str,
@@ -1202,6 +1309,12 @@ class MeshcoreProvider:
        def _run_loop() -> None:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            # Second line of defence around issue #754: if a detached task
            # inside the upstream ``meshcore`` library ever raises an
            # exception we do not anticipate in ``_meshcore_patches``, funnel
            # it through our logger instead of the default handler (which
            # only writes ``Task exception was never retrieved`` to stderr).
            loop.set_exception_handler(_log_unhandled_loop_exception)
            iface._loop = loop
            try:
                loop.run_until_complete(
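For readers unfamiliar with asyncio's exception-handler hook, here is a tiny standalone demonstration of the wiring used above. The print call stands in for `config._debug_log`, and the explicit gc nudge makes the normally lazy "unretrieved exception" surfacing deterministic in CPython; both are illustrative assumptions, not part of the commit:

```python
import asyncio
import gc

def log_unhandled(loop: asyncio.AbstractEventLoop, context: dict) -> None:
    # Stand-in for the structured logger; receives the same context dict.
    exc = context.get("exception")
    print("captured:", type(exc).__name__ if exc else None,
          "-", context.get("message"))

async def main() -> None:
    asyncio.get_running_loop().set_exception_handler(log_unhandled)

    async def boom() -> None:
        raise IndexError("truncated frame")

    t = asyncio.create_task(boom())  # detached: nothing ever awaits it
    await asyncio.sleep(0)           # let the task run and fail
    del t                            # drop the last strong reference...
    gc.collect()                     # ...so the unretrieved exception surfaces

asyncio.run(main())
```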
+338
-26
@@ -83,43 +83,51 @@ _POSITION_POST_PRIORITY = 60
_TELEMETRY_POST_PRIORITY = 70
_DEFAULT_POST_PRIORITY = 90

_MAX_SEND_RETRIES = 3
"""Maximum number of times a failed POST item is re-queued before being dropped."""


@dataclass
class QueueState:
    """Mutable state for the HTTP POST priority queue."""

    lock: threading.Lock = field(default_factory=threading.Lock)
    queue: list[tuple[int, int, str, dict]] = field(default_factory=list)
    # Heap tuple: (priority, counter, path, payload, retries).
    queue: list[tuple[int, int, str, dict, int]] = field(default_factory=list)
    counter: Iterable[int] = field(default_factory=itertools.count)
    active: bool = False
    # Background drain thread. When the drainer is alive, _queue_post_json
    # signals drain_event instead of blocking the caller with HTTP calls.
    drain_event: threading.Event = field(default_factory=threading.Event)
    drainer: threading.Thread | None = None
    # Set to request the drainer thread to exit its loop cleanly.
    shutdown: threading.Event = field(default_factory=threading.Event)


STATE = QueueState()


def _post_json(
def _send_single(
    instance: str,
    api_token: str,
    path: str,
    payload: dict,
    *,
    instance: str | None = None,
    api_token: str | None = None,
) -> None:
    """Send a JSON payload to the configured web API.
) -> bool:
    """Transmit a single JSON payload to one instance.

    Parameters:
        path: API path relative to the configured instance root.
        instance: Base URL of the target instance.
        api_token: Bearer token for this instance (may be empty).
        path: API path relative to the instance root.
        payload: JSON-serialisable body to transmit.
        instance: Optional override for :data:`config.INSTANCE`.
        api_token: Optional override for :data:`config.API_TOKEN`.

    Returns:
        ``True`` when the request succeeded, ``False`` on failure.
    """
    if instance is None:
        instance = config.INSTANCE
    if api_token is None:
        api_token = config.API_TOKEN

    if not instance:
        return
        return True

    url = f"{instance}{path}"
    data = json.dumps(payload).encode("utf-8")

@@ -144,15 +152,80 @@ def _post_json(
    try:
        with urllib.request.urlopen(req, timeout=10) as resp:
            resp.read()
    except Exception as exc:  # pragma: no cover - exercised in production
        return True
    except Exception as exc:
        config._debug_log(
            "POST request failed",
            context="queue.post_json",
            severity="warn",
            always=True,
            url=url,
            error_class=exc.__class__.__name__,
            error_message=str(exc),
        )
        return False


def _post_json(
    path: str,
    payload: dict,
    *,
    instance: str | None = None,
    api_token: str | None = None,
) -> bool:
    """Send a JSON payload to one or more configured web API instances.

    When ``instance`` is provided explicitly the payload is sent to that
    single target. Otherwise every ``(url, token)`` pair in
    :data:`config.INSTANCES` receives the payload independently so that
    one failure does not block delivery to the remaining targets.

    Parameters:
        path: API path relative to the instance root.
        payload: JSON-serialisable body to transmit.
        instance: Optional single-instance override.
        api_token: Optional token override (only used with ``instance``).

    Returns:
        ``True`` when at least one instance received the payload
        successfully, ``False`` when all targets failed. A missing
        configuration is not a transient failure and returns ``True``
        (retrying would not help).
    """
    if instance is not None:
        if not instance:
            return True
        return _send_single(instance, api_token or "", path, payload)

    targets: tuple[tuple[str, str], ...] = config.INSTANCES
    if not targets:
        # Backward-compatible fallback for callers that only set
        # config.INSTANCE / config.API_TOKEN directly.
        inst = config.INSTANCE
        if not inst:
            try:
                config._debug_log(
                    "No target instances configured; discarding payload",
                    context="queue.post_json",
                    severity="error",
                    always=True,
                    path=path,
                )
            except Exception:
                pass
            return False
        return _send_single(inst, api_token or config.API_TOKEN, path, payload)

    any_ok = False
    any_attempted = False
    for inst, token in targets:
        if not inst:
            continue
        any_attempted = True
        if _send_single(inst, token, path, payload):
            any_ok = True
    return any_ok or not any_attempted
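The return contract of the fan-out is subtle enough to deserve a toy model. This condensed sketch (stub send callables, local names, not the module's code) shows why an empty or all-blank target list does not count as a failure:

```python
from typing import Callable

def fan_out(targets: list[tuple[str, str]],
            send: Callable[[str, str], bool]) -> bool:
    # True if any target succeeded; also True when nothing was attempted,
    # because there is nothing a retry could fix.
    any_ok = False
    any_attempted = False
    for inst, token in targets:
        if not inst:
            continue
        any_attempted = True
        if send(inst, token):
            any_ok = True
    return any_ok or not any_attempted

ok = lambda inst, tok: True
fail = lambda inst, tok: False
assert fan_out([("https://a.tld", "t"), ("https://b.tld", "t")], fail) is False
assert fan_out([("https://a.tld", "t"), ("", "t")], ok) is True
assert fan_out([("", "")], fail) is True  # nothing attempted: not a failure
```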


def _enqueue_post_json(
@@ -161,6 +234,7 @@ def _enqueue_post_json(
    priority: int,
    *,
    state: QueueState = STATE,
    retries: int = 0,
) -> None:
    """Store a POST request in the priority queue.

@@ -169,15 +243,17 @@ def _enqueue_post_json(
        payload: JSON-serialisable body.
        priority: Lower values execute first.
        state: Shared queue state, injectable for testing.
        retries: Number of prior failed send attempts for this item.
    """
    with state.lock:
        counter = next(state.counter)
        # Heap tuple: (priority, counter, path, payload). Lower priority
        # values are dequeued first (min-heap semantics). The monotonically
        # increasing counter breaks ties so equal-priority items are processed
        # in FIFO order without comparing the non-orderable payload dict.
        heapq.heappush(state.queue, (priority, counter, path, payload))
        # Heap tuple: (priority, counter, path, payload, retries). Lower
        # priority values are dequeued first (min-heap semantics). The
        # monotonically increasing counter breaks ties so equal-priority
        # items are processed in FIFO order without comparing the
        # non-orderable payload dict.
        heapq.heappush(state.queue, (priority, counter, path, payload, retries))


def _drain_post_queue(
@@ -185,6 +261,12 @@ def _drain_post_queue(
) -> None:
    """Process queued POST requests in priority order.

    When the *send* callable returns ``False`` (transient failure) the item
    is re-queued up to :data:`_MAX_SEND_RETRIES` times. Items exceeding
    the limit are dropped with a warning. Custom *send* callables that
    return ``None`` (the typical test/heartbeat pattern) are never retried
    — the ``result is False`` identity check ensures backward compatibility.

    Parameters:
        state: Queue container holding pending items.
        send: Optional callable used to transmit requests.
@@ -199,13 +281,184 @@ def _drain_post_queue(
            if not state.queue:
                state.active = False
                return
            _priority, _idx, path, payload = heapq.heappop(state.queue)
            send(path, payload)
            item = heapq.heappop(state.queue)

            # Support both 5-tuple (current) and 4-tuple (legacy/test) items.
            if len(item) >= 5:
                priority, _idx, path, payload, retries = item[:5]
            else:
                priority, _idx, path, payload = item[:4]
                retries = 0

            result = send(path, payload)

            # Only retry when the send callable explicitly signals failure
            # (returns False). Custom send callables (tests, heartbeat)
            # return None and must NOT be treated as failures.
            if result is False:
                if retries < _MAX_SEND_RETRIES:
                    _enqueue_post_json(
                        path, payload, priority, state=state, retries=retries + 1
                    )
                else:
                    try:
                        config._debug_log(
                            "Dropping item after max retries",
                            context="queue.drain",
                            severity="warn",
                            always=True,
                            path=path,
                            retries=retries,
                        )
                    except Exception:
                        pass
    finally:
        with state.lock:
            state.active = False
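The retry contract is compact enough to model standalone. In this sketch (local names, stubbed transport), a send that keeps returning False is retried up to the limit and then dropped, matching the docstring above:

```python
import heapq
import itertools

MAX_RETRIES = 3
queue: list[tuple[int, int, str, int]] = []  # (priority, counter, path, retries)
counter = itertools.count()
attempts: list[str] = []

def failing_send(path: str) -> bool:
    attempts.append(path)
    return False  # transient failure on every attempt

heapq.heappush(queue, (10, next(counter), "/api/messages", 0))
while queue:
    priority, _idx, path, retries = heapq.heappop(queue)
    if failing_send(path) is False:
        if retries < MAX_RETRIES:
            heapq.heappush(queue, (priority, next(counter), path, retries + 1))
        # else: dropped, with a warning in the real module

assert len(attempts) == 1 + MAX_RETRIES  # initial try plus three retries
```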


_QUEUE_DEPTH_WARNING_THRESHOLD = 100
"""Log a warning when the queue grows past this many items."""


def _queue_drainer_loop(state: QueueState = STATE) -> None:
    """Body of the background queue-drain daemon thread.

    Blocks on :attr:`QueueState.drain_event`, clears it, then empties the
    queue by calling :func:`_drain_post_queue`. The thread is created as a
    daemon so it terminates automatically when the process exits.

    The loop exits cleanly when :attr:`QueueState.shutdown` is set, allowing
    tests (and graceful-shutdown paths) to join the thread instead of leaking
    daemon threads that accumulate across a test run.

    The loop is deliberately hardened so that **no** :class:`Exception` can
    kill the thread. The ``_debug_log`` calls inside the error handler are
    themselves wrapped in ``try/except`` to prevent cascading failures
    (e.g. ``BrokenPipeError`` from ``print()`` to a closed stdout).

    .. note::
        There is a benign race between ``drain_event.clear()`` and the end
        of :func:`_drain_post_queue`: a signal arriving in that window is
        consumed by ``clear()`` but the item is still drained because the
        drain loop empties the queue completely. However, an item enqueued
        *after* the drain loop finds the queue empty and *before*
        ``wait()`` re-blocks will sit until the next ``drain_event.set()``
        call (i.e. the next enqueue). This is acceptable for a best-effort
        ingestor — maximum extra latency equals the inter-packet interval.

    Parameters:
        state: Queue state instance to drain.
    """
    try:
        config._debug_log(
            "Queue drainer thread started",
            context="queue.drainer",
            severity="info",
            always=True,
        )
    except Exception:
        pass

    while not state.shutdown.is_set():
        state.drain_event.wait(timeout=1.0)
        if state.shutdown.is_set():
            break
        state.drain_event.clear()

        depth = len(state.queue)
        if depth > _QUEUE_DEPTH_WARNING_THRESHOLD:
            try:
                config._debug_log(
                    "Queue depth warning",
                    context="queue.drainer",
                    severity="warn",
                    always=True,
                    depth=depth,
                )
            except Exception:
                pass

        try:
            _drain_post_queue(state)
        except Exception as exc:
            try:
                config._debug_log(
                    "Queue drainer error",
                    context="queue.drainer",
                    severity="error",
                    always=True,
                    error_class=exc.__class__.__name__,
                    error_message=str(exc),
                )
            except Exception:
                pass

    try:
        config._debug_log(
            "Queue drainer thread exiting",
            context="queue.drainer",
            severity="info",
            always=True,
        )
    except Exception:
        pass


def _start_queue_drainer(state: QueueState = STATE) -> None:
    """Idempotently start the background queue-drain thread.

    Calling this function when a drainer thread is already alive is a
    no-op. The thread is created as a daemon so it does not prevent
    process exit. The check-and-start is performed under :attr:`state.lock`
    to avoid starting duplicate threads under concurrent callers.

    If items are already in the queue when the drainer is started,
    :attr:`QueueState.drain_event` is signalled immediately so they are not
    stranded waiting for the next packet to arrive.

    Parameters:
        state: Queue state whose :func:`_queue_drainer_loop` to start.
    """
    with state.lock:
        if state.drainer is not None and state.drainer.is_alive():
            return
        # Reset in case the prior thread was stopped or crashed while
        # shutdown was already set.
        state.shutdown.clear()
        t = threading.Thread(
            target=_queue_drainer_loop,
            args=(state,),
            name="queue-drainer",
            daemon=True,
        )
        t.start()
        state.drainer = t
        if state.queue:
            state.drain_event.set()


def _stop_queue_drainer(state: QueueState = STATE, timeout: float = 5.0) -> None:
    """Signal the drainer thread to exit and wait for it to finish.

    Sets :attr:`QueueState.shutdown` and :attr:`QueueState.drain_event` so
    the loop wakes up, observes the shutdown flag, and terminates. After
    joining (up to *timeout* seconds) the drainer reference is cleared.

    Safe to call when no drainer is running (no-op).

    Parameters:
        state: Queue state whose drainer to stop.
        timeout: Maximum seconds to wait for the thread to finish.
    """
    if state.drainer is None or not state.drainer.is_alive():
        return
    state.shutdown.set()
    state.drain_event.set()
    state.drainer.join(timeout=timeout)
    state.drainer = None
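The start/signal/stop handshake generalises to any event-driven worker thread. A minimal sketch of the same lifecycle, with the drain call replaced by a list append (names local to this example):

```python
import threading

work = threading.Event()
shutdown = threading.Event()
drained: list[str] = []

def worker() -> None:
    while not shutdown.is_set():
        work.wait(timeout=1.0)       # wake on signal, or once a second
        if shutdown.is_set():
            break
        work.clear()
        drained.append("drained")    # stand-in for _drain_post_queue()

t = threading.Thread(target=worker, name="queue-drainer", daemon=True)
t.start()
work.set()                           # enqueue side: signal, never block
shutdown.set(); work.set()           # stop: flag first, then wake the wait
t.join(timeout=5.0)
assert not t.is_alive()
```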


def _queue_post_json(
    path: str,
    payload: dict,
@@ -214,14 +467,32 @@ def _queue_post_json(
    state: QueueState = STATE,
    send: Callable[[str, dict], None] | None = None,
) -> None:
    """Queue a POST request and start processing if idle.
    """Queue a POST request and wake the drain thread (or drain inline).

    When a background drainer thread is running (started via
    :func:`_start_queue_drainer`), this function enqueues the item and
    signals :attr:`QueueState.drain_event` without blocking — the drain
    happens on the dedicated thread. This keeps the caller's thread (which
    may be the Meshtastic asyncio I/O thread) free to process serial events.

    When no background drainer is alive the call falls back to a
    synchronous inline drain. This path is used by tests (which pass a
    ``send`` override via :func:`_fresh_state`) and for any standalone use
    without calling :func:`_start_queue_drainer`.

    .. note::
        The background drainer is used **only** when no custom ``send``
        override is provided (i.e. the production ``_post_json`` path).
        Any caller that supplies a custom ``send`` (tests, heartbeat
        helpers) always gets the synchronous inline drain so its transport
        is honoured correctly.

    Parameters:
        path: API path for the request.
        payload: JSON payload to send.
        priority: Scheduling priority where lower values run first.
        state: Queue container used to store pending requests.
        send: Optional transport override, primarily for tests.
        send: Optional transport override (synchronous fallback only).
    """
    if send is None:
@@ -241,6 +512,42 @@ def _queue_post_json(
        )

    _enqueue_post_json(path, payload, priority, state=state)

    # Use the background drainer only when it is alive AND no custom send
    # override is in play. A custom send (used by tests and callers such as
    # ingestors.queue_ingestor_heartbeat) must be honoured synchronously
    # because the background drainer always calls _drain_post_queue without
    # a send override.
    #
    # The ``is`` check is intentional: _post_json is a module-level function
    # so identity comparison reliably detects the "no override" default that
    # was assigned at the top of this function.
    if send is _post_json:
        if state.drainer is not None and state.drainer.is_alive():
            state.drain_event.set()
            return

        # The drainer was previously started but has died (e.g. unhandled
        # exception). Restart it so the caller stays non-blocking and the
        # MeshCore asyncio event loop is not stalled by inline HTTP calls.
        if state.drainer is not None:
            try:
                config._debug_log(
                    "Restarting dead queue drainer thread",
                    context="queue.queue_post_json",
                    severity="warn",
                    always=True,
                )
            except Exception:
                pass
            _start_queue_drainer(state)
            # If the restart succeeded, delegate to the background thread.
            if state.drainer is not None and state.drainer.is_alive():
                state.drain_event.set()
                return

    # Synchronous fallback: no drainer was ever started, the restart
    # failed, or a custom send override is in play.
    with state.lock:
        if state.active:
            return
@@ -266,15 +573,20 @@ __all__ = [
    "_CHANNEL_POST_PRIORITY",
    "_DEFAULT_POST_PRIORITY",
    "_INGESTOR_POST_PRIORITY",
    "_MAX_SEND_RETRIES",
    "_MESSAGE_POST_PRIORITY",
    "_NEIGHBOR_POST_PRIORITY",
    "_NODE_POST_PRIORITY",
    "_POSITION_POST_PRIORITY",
    "_QUEUE_DEPTH_WARNING_THRESHOLD",
    "_TRACE_POST_PRIORITY",
    "_TELEMETRY_POST_PRIORITY",
    "_clear_post_queue",
    "_drain_post_queue",
    "_enqueue_post_json",
    "_post_json",
    "_queue_drainer_loop",
    "_queue_post_json",
    "_start_queue_drainer",
    "_stop_queue_drainer",
]
@@ -49,3 +49,21 @@ services:
    environment:
      DEBUG: 0
    restart: always

  matrix-bridge:
    build:
      context: .
      dockerfile: matrix/Dockerfile
      target: runtime
    environment:
      DEBUG: 0
    restart: always

  matrix-bridge-bridge:
    build:
      context: .
      dockerfile: matrix/Dockerfile
      target: runtime
    environment:
      DEBUG: 0
    restart: always

@@ -34,6 +34,7 @@ x-web-base: &web-base
      - potatomesh_data:/app/.local/share/potato-mesh
      - potatomesh_config:/app/.config/potato-mesh
      - potatomesh_logs:/app/logs
      - potatomesh_pages:/app/pages
    restart: unless-stopped
    deploy:
      resources:
@@ -54,6 +55,8 @@ x-ingestor-base: &ingestor-base
    API_TOKEN: ${API_TOKEN}
    INSTANCE_DOMAIN: ${INSTANCE_DOMAIN:-http://web:41447}
    DEBUG: ${DEBUG:-0}
    PROTOCOL: ${PROTOCOL:-meshtastic}
    ENERGY_SAVING: ${ENERGY_SAVING:-0}
    FEDERATION: ${FEDERATION:-1}
    PRIVATE: ${PRIVATE:-0}
  volumes:
@@ -158,6 +161,8 @@ volumes:
    driver: local
  potatomesh_logs:
    driver: local
  potatomesh_pages:
    driver: local
  potatomesh_matrix_bridge_state:
    driver: local

Generated
+3
-3
@@ -969,7 +969,7 @@ checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"

[[package]]
name = "potatomesh-matrix-bridge"
version = "0.6.0"
version = "0.6.2"
dependencies = [
 "anyhow",
 "axum",
@@ -1087,9 +1087,9 @@ checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"

[[package]]
name = "rand"
version = "0.9.2"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1"
checksum = "44c5af06bb1b7d3216d91932aed5265164bf384dc89cd6ba05cf59a35f5f76ea"
dependencies = [
 "rand_chacha",
 "rand_core",

+1
-1
@@ -14,7 +14,7 @@

[package]
name = "potatomesh-matrix-bridge"
version = "0.6.0"
version = "0.6.2"
edition = "2021"

[dependencies]

+4
-1
@@ -1,3 +1,6 @@
<!-- Copyright © 2025-26 l5yth & contributors -->
<!-- Licensed under the Apache License, Version 2.0 (see LICENSE) -->

# potatomesh-matrix-bridge

A small Rust daemon that bridges **PotatoMesh** LoRa messages into a **Matrix** room.
@@ -90,7 +93,7 @@ room_id = "!yourroomid:example.org"
[state]
# Where to persist last seen message id
state_file = "bridge_state.json"
````
```

The `hs_token` is used to validate inbound appservice transactions. Keep it identical in `Config.toml` and your Matrix appservice registration file.

+61
-6
@@ -257,8 +257,9 @@ async fn handle_message(

    // Format the bridged message
    let preset_short = modem_preset_short(&msg.modem_preset);
    let tag = protocol_tag(msg.protocol.as_deref());
    let prefix = format!(
        "[{freq}][{preset_short}][{channel}]",
        "{tag}[{freq}][{preset_short}][{channel}]",
        freq = msg.lora_freq,
        preset_short = preset_short,
        channel = msg.channel_name,
@@ -275,6 +276,20 @@ async fn handle_message(
    Ok(())
}

/// Short tag prepended to the message prefix so readers can tell the source
/// mesh protocol apart at a glance. `"[MT]"` identifies Meshtastic (also the
/// default when the protocol field is missing, since the full stack treats a
/// missing protocol as Meshtastic) and `"[MC]"` identifies MeshCore. Any other
/// value renders as `"[??]"` so unknown protocols surface visibly instead of
/// being silently relabeled as Meshtastic.
fn protocol_tag(protocol: Option<&str>) -> &'static str {
    match protocol {
        Some("meshcore") => "[MC]",
        Some("meshtastic") | None => "[MT]",
        Some(_) => "[??]",
    }
}

/// Build a compact modem preset label like "LF" for "LongFast".
fn modem_preset_short(preset: &str) -> String {
    let letters: String = preset
@@ -349,6 +364,7 @@ mod tests {
            snr: Some(0.0),
            reply_id: None,
            node_id: "!abcd1234".to_string(),
            protocol: Some("meshtastic".to_string()),
        }
    }

@@ -380,6 +396,17 @@ mod tests {
        assert_eq!(formatted, "<code>[868][LF]</code> Hello <&>");
    }

    #[test]
    fn protocol_tag_returns_expected_label() {
        assert_eq!(protocol_tag(Some("meshcore")), "[MC]");
        assert_eq!(protocol_tag(Some("meshtastic")), "[MT]");
        // Missing protocol keeps the Meshtastic default for legacy payloads.
        assert_eq!(protocol_tag(None), "[MT]");
        // Unknown protocols surface as "[??]" rather than silently claiming Meshtastic.
        assert_eq!(protocol_tag(Some("reticulum")), "[??]");
        assert_eq!(protocol_tag(Some("")), "[??]");
    }

    #[test]
    fn escape_html_escapes_quotes() {
        assert_eq!(escape_html("a\"b'c"), "a&quot;b&#39;c");
@@ -728,8 +755,10 @@ mod tests {
        assert_eq!(loaded.last_rx_time_ids, vec![1]);
    }

    #[tokio::test]
    async fn test_handle_message() {
    /// Drive `handle_message` end-to-end against a mocked Matrix homeserver
    /// and PotatoMesh API, asserting that the bridged message body carries
    /// the expected protocol tag. Shared by the per-protocol test cases below.
    async fn assert_handle_message_emits_tag(protocol: Option<&str>, expected_tag: &str) {
        let mut server = mockito::Server::new_async().await;

        let potatomesh_cfg = PotatomeshConfig {
@@ -793,6 +822,9 @@ mod tests {
            .txn_counter
            .load(std::sync::atomic::Ordering::SeqCst);

        let expected_body = format!("`{expected_tag}[868][MF][TEST]` Ping");
        let expected_formatted = format!("<code>{expected_tag}[868][MF][TEST]</code> Ping");

        let mock_send = server
            .mock(
                "PUT",
@@ -806,16 +838,19 @@ mod tests {
            .match_header("authorization", "Bearer AS_TOKEN")
            .match_body(mockito::Matcher::PartialJson(serde_json::json!({
                "msgtype": "m.text",
                "body": "`[868][MF][TEST]` Ping",
                "body": expected_body,
                "format": "org.matrix.custom.html",
                "formatted_body": "<code>[868][MF][TEST]</code> Ping",
                "formatted_body": expected_formatted,
            })))
            .with_status(200)
            .create();

        let potato_client = PotatoClient::new(http_client.clone(), potatomesh_cfg);
        let mut state = BridgeState::default();
        let msg = sample_msg(100);
        let msg = PotatoMessage {
            protocol: protocol.map(str::to_string),
            ..sample_msg(100)
        };

        let result = handle_message(&potato_client, &matrix_client, &mut state, &msg).await;

@@ -828,4 +863,24 @@ mod tests {

        assert_eq!(state.last_message_id, Some(100));
    }

    #[tokio::test]
    async fn handle_message_tags_meshtastic_in_body() {
        assert_handle_message_emits_tag(Some("meshtastic"), "[MT]").await;
    }

    #[tokio::test]
    async fn handle_message_defaults_missing_protocol_to_meshtastic_tag() {
        assert_handle_message_emits_tag(None, "[MT]").await;
    }

    #[tokio::test]
    async fn handle_message_tags_meshcore_in_body() {
        assert_handle_message_emits_tag(Some("meshcore"), "[MC]").await;
    }

    #[tokio::test]
    async fn handle_message_tags_unknown_protocol_as_placeholder() {
        assert_handle_message_emits_tag(Some("reticulum"), "[??]").await;
    }
}

+33
-18
@@ -19,11 +19,6 @@ use tokio::sync::RwLock;

use crate::config::PotatomeshConfig;

/// Protocol identifier sent as a query parameter to restrict API results to
/// Meshtastic data only. Other protocols (e.g. MeshCore) are excluded until
/// the clients are updated to support them.
const PROTOCOL_FILTER: &str = "meshtastic";

#[allow(dead_code)]
#[derive(Debug, Deserialize, Clone)]
pub struct PotatoMessage {
@@ -48,6 +43,10 @@ pub struct PotatoMessage {
    #[serde(default)]
    pub reply_id: Option<u64>,
    pub node_id: String,
    /// Mesh backend that produced this message, e.g. "meshtastic" or
    /// "meshcore". Optional because historical payloads predate the field.
    #[serde(default)]
    pub protocol: Option<String>,
}

#[derive(Debug, Default, Clone)]
@@ -136,10 +135,7 @@ impl PotatoClient {
    }

    pub async fn fetch_messages(&self, params: FetchParams) -> anyhow::Result<Vec<PotatoMessage>> {
        let mut req = self
            .http
            .get(self.messages_url())
            .query(&[("protocol", PROTOCOL_FILTER)]);
        let mut req = self.http.get(self.messages_url());
        if let Some(limit) = params.limit {
            req = req.query(&[("limit", limit)]);
        }
@@ -243,6 +239,34 @@ mod tests {
        assert!(m.rssi.is_none());
        assert!(m.hop_limit.is_none());
        assert!(m.snr.is_none());
        assert!(m.protocol.is_none());
    }

    #[test]
    fn deserialize_message_with_meshcore_protocol() {
        let json = r#"
        [
            {
                "id": 42,
                "rx_time": 1764241436,
                "rx_iso": "2025-11-27T11:03:56Z",
                "from_id": "!da6556d4",
                "to_id": "^all",
                "channel": 0,
                "portnum": "TEXT_MESSAGE_APP",
                "text": "Hi from meshcore",
                "lora_freq": 868,
                "modem_preset": "MediumFast",
                "channel_name": "General",
                "node_id": "!da6556d4",
                "protocol": "meshcore"
            }
        ]
        "#;

        let msgs: Vec<PotatoMessage> = serde_json::from_str(json).expect("valid message json");
        assert_eq!(msgs.len(), 1);
        assert_eq!(msgs[0].protocol.as_deref(), Some("meshcore"));
    }

    #[test]
@@ -344,10 +368,6 @@ mod tests {
        let mut server = mockito::Server::new_async().await;
        let mock = server
            .mock("GET", "/api/messages")
            .match_query(mockito::Matcher::UrlEncoded(
                "protocol".into(),
                "meshtastic".into(),
            ))
            .with_status(200)
            .with_header("content-type", "application/json")
            .with_body(
@@ -438,10 +458,6 @@ mod tests {
        let mut server = mockito::Server::new_async().await;
        let mock = server
            .mock("GET", "/api/messages")
            .match_query(mockito::Matcher::UrlEncoded(
                "protocol".into(),
                PROTOCOL_FILTER.into(),
            ))
            .with_status(500)
            .create();

@@ -463,7 +479,6 @@ mod tests {
        let mock = server
            .mock("GET", "/api/messages")
            .match_query(mockito::Matcher::AllOf(vec![
                mockito::Matcher::UrlEncoded("protocol".into(), PROTOCOL_FILTER.into()),
                mockito::Matcher::UrlEncoded("limit".into(), "10".into()),
                mockito::Matcher::UrlEncoded("since".into(), "123".into()),
            ]))
Binary file not shown (new image, 1.5 MiB).
@@ -96,6 +96,84 @@ class TestParseHiddenChannels:
# ---------------------------------------------------------------------------


class TestResolveInstanceDomains:
    """Tests for :func:`config._resolve_instance_domains`."""

    def test_single_domain(self, monkeypatch):
        """Single domain produces one-element tuple."""
        monkeypatch.setenv("INSTANCE_DOMAIN", "foo.tld")
        monkeypatch.setenv("API_TOKEN", "secret")
        result = config._resolve_instance_domains()
        assert result == (("https://foo.tld", "secret"),)

    def test_multi_domain_broadcast_token(self, monkeypatch):
        """Multiple domains with a single token broadcast the token."""
        monkeypatch.setenv("INSTANCE_DOMAIN", "foo.tld, bar.tld")
        monkeypatch.setenv("API_TOKEN", "shared")
        result = config._resolve_instance_domains()
        assert result == (
            ("https://foo.tld", "shared"),
            ("https://bar.tld", "shared"),
        )

    def test_multi_domain_per_instance_tokens(self, monkeypatch):
        """Comma-separated tokens are positionally paired with domains."""
        monkeypatch.setenv("INSTANCE_DOMAIN", "a.tld,b.tld")
        monkeypatch.setenv("API_TOKEN", "tok1,tok2")
        result = config._resolve_instance_domains()
        assert result == (("https://a.tld", "tok1"), ("https://b.tld", "tok2"))

    def test_token_count_mismatch_raises(self, monkeypatch):
        """Mismatched counts raise ValueError at parse time."""
        monkeypatch.setenv("INSTANCE_DOMAIN", "a.tld,b.tld")
        monkeypatch.setenv("API_TOKEN", "t1,t2,t3")
        with pytest.raises(ValueError, match="counts must match"):
            config._resolve_instance_domains()

    def test_deduplicates_domains(self, monkeypatch):
        """Duplicate domains are collapsed to a single entry."""
        monkeypatch.setenv("INSTANCE_DOMAIN", "foo.tld, foo.tld")
        monkeypatch.setenv("API_TOKEN", "tok")
        result = config._resolve_instance_domains()
        assert result == (("https://foo.tld", "tok"),)

    def test_preserves_explicit_scheme(self, monkeypatch):
        """Domains with explicit schemes keep them; others get https://."""
        monkeypatch.setenv("INSTANCE_DOMAIN", "http://local:41447,bar.tld")
        monkeypatch.setenv("API_TOKEN", "tok")
        result = config._resolve_instance_domains()
        assert result == (
            ("http://local:41447", "tok"),
            ("https://bar.tld", "tok"),
        )

    def test_empty_domain(self, monkeypatch):
        """Empty INSTANCE_DOMAIN returns an empty tuple."""
        monkeypatch.setenv("INSTANCE_DOMAIN", "")
        monkeypatch.setenv("API_TOKEN", "tok")
        result = config._resolve_instance_domains()
        assert result == ()

    def test_strips_trailing_slashes(self, monkeypatch):
        """Trailing slashes are stripped from domains."""
        monkeypatch.setenv("INSTANCE_DOMAIN", "foo.tld/")
        monkeypatch.setenv("API_TOKEN", "tok")
        result = config._resolve_instance_domains()
        assert result == (("https://foo.tld", "tok"),)

    def test_empty_token_broadcast(self, monkeypatch):
        """Empty API_TOKEN broadcasts empty string to all instances."""
        monkeypatch.setenv("INSTANCE_DOMAIN", "a.tld,b.tld")
        monkeypatch.setenv("API_TOKEN", "")
        result = config._resolve_instance_domains()
        assert result == (("https://a.tld", ""), ("https://b.tld", ""))


# ---------------------------------------------------------------------------
# _resolve_instance_domain (legacy, kept for backward compatibility)
# ---------------------------------------------------------------------------


class TestResolveInstanceDomain:
    """Tests for :func:`config._resolve_instance_domain`."""

@@ -209,3 +287,85 @@ class TestProtocolValidation:
        # Restore to valid value so subsequent tests work
        monkeypatch.setenv("PROTOCOL", "meshtastic")
        importlib.reload(config)


# ---------------------------------------------------------------------------
# _parse_lora_freq_env
# ---------------------------------------------------------------------------


class TestParseLoraFreqEnv:
    """Tests for :func:`config._parse_lora_freq_env`."""

    def test_none_returns_none(self):
        """None input returns None."""
        assert config._parse_lora_freq_env(None) is None

    def test_empty_string_returns_none(self):
        """Empty string returns None."""
        assert config._parse_lora_freq_env("") is None

    def test_whitespace_only_returns_none(self):
        """Whitespace-only string returns None."""
        assert config._parse_lora_freq_env(" ") is None

    def test_integer_string_returns_int(self):
        """Whole-number string returns int."""
        result = config._parse_lora_freq_env("868")
        assert result == 868
        assert isinstance(result, int)

    def test_float_integer_value_returns_int(self):
        """String like '915.0' (whole float) returns int 915."""
        result = config._parse_lora_freq_env("915.0")
        assert result == 915
        assert isinstance(result, int)

    def test_decimal_string_returns_float(self):
        """Decimal string returns float."""
        result = config._parse_lora_freq_env("869.525")
        assert result == pytest.approx(869.525)
        assert isinstance(result, float)

    def test_non_numeric_label_returns_none(self):
        """Non-numeric string returns None so auto-detection is not blocked."""
        assert config._parse_lora_freq_env("EU_868") is None

    def test_unit_suffixed_string_returns_none(self):
        """String like '915MHz' returns None (not numeric)."""
        assert config._parse_lora_freq_env("915MHz") is None

    def test_inf_returns_none(self):
        """'inf' is non-finite and returns None."""
        assert config._parse_lora_freq_env("inf") is None

    def test_large_exponent_returns_none(self):
        """'1e309' overflows to inf and returns None."""
        assert config._parse_lora_freq_env("1e309") is None

    def test_nan_returns_none(self):
        """'nan' is non-finite and returns None."""
        assert config._parse_lora_freq_env("nan") is None

    def test_whitespace_stripped(self):
        """Leading/trailing whitespace is ignored."""
        assert config._parse_lora_freq_env(" 919 ") == 919

    def test_frequency_env_preseeds_lora_freq(self, monkeypatch):
        """FREQUENCY env var pre-seeds LORA_FREQ at module load."""
        import importlib

        monkeypatch.setenv("FREQUENCY", "915")
        importlib.reload(config)
        assert config.LORA_FREQ == 915
        # Restore
        monkeypatch.delenv("FREQUENCY")
        importlib.reload(config)

    def test_no_frequency_env_leaves_lora_freq_none(self, monkeypatch):
        """Absent FREQUENCY env var leaves LORA_FREQ as None."""
        import importlib

        monkeypatch.delenv("FREQUENCY", raising=False)
        importlib.reload(config)
        assert config.LORA_FREQ is None
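Taken together, the tests above pin down a small parsing contract for multi-instance federation targets. Here it is condensed into a standalone sketch; this is an illustration of the expected behaviour, not the module's actual implementation:

```python
def resolve_instance_domains(domain_env: str, token_env: str) -> tuple[tuple[str, str], ...]:
    """Sketch of the pairing rules the tests above pin down (illustrative)."""
    domains = [d.strip().rstrip("/") for d in domain_env.split(",") if d.strip()]
    tokens = [t.strip() for t in token_env.split(",")] if "," in token_env else None
    if tokens is not None and len(tokens) != len(domains):
        raise ValueError("domain and token counts must match")
    out: list[tuple[str, str]] = []
    seen: set[str] = set()
    for i, d in enumerate(domains):
        url = d if d.startswith(("http://", "https://")) else f"https://{d}"
        if url in seen:
            continue  # duplicate domains collapse to one entry
        seen.add(url)
        out.append((url, tokens[i] if tokens is not None else token_env))
    return tuple(out)

assert resolve_instance_domains("foo.tld, bar.tld", "shared") == (
    ("https://foo.tld", "shared"),
    ("https://bar.tld", "shared"),
)
assert resolve_instance_domains("foo.tld/", "tok") == (("https://foo.tld", "tok"),)
assert resolve_instance_domains("", "tok") == ()
```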
||||
@@ -261,6 +261,8 @@ def _configure_common_defaults(
|
||||
):
|
||||
"""Set fast configuration defaults shared by daemon integration tests."""
|
||||
|
||||
monkeypatch.setattr(daemon.config, "INSTANCES", (("http://test", ""),))
|
||||
monkeypatch.setattr(daemon.config, "INSTANCE", "http://test")
|
||||
monkeypatch.setattr(daemon.config, "SNAPSHOT_SECS", 0)
|
||||
monkeypatch.setattr(daemon.config, "_RECONNECT_INITIAL_DELAY_SECS", 0)
|
||||
monkeypatch.setattr(daemon.config, "_RECONNECT_MAX_DELAY_SECS", 0)
|
||||
@@ -1089,6 +1091,133 @@ def test_check_inactivity_reconnect_elapsed_triggers(monkeypatch):
    assert result is True


def test_inactivity_reconnect_bypasses_throttle_when_explicitly_disconnected(
    monkeypatch,
):
    """Explicit disconnect reconnects even when last_inactivity_reconnect is recent.

    When isConnected reports False the daemon must not wait the full
    inactivity window before reconnecting. It uses the shorter
    _RECONNECT_MAX_DELAY_SECS window instead.
    """
    state = _make_state(inactivity_reconnect_secs=3600.0)
    state.iface = DummyInterface(is_connected=False)
    state.iface_connected_at = 0.0
    # 61 seconds since last reconnect attempt — outside the 60 s anti-thrash window.
    state.last_inactivity_reconnect = 3589.0

    monkeypatch.setattr(daemon.time, "monotonic", lambda: 3650.0)
    monkeypatch.setattr(daemon.handlers, "last_packet_monotonic", lambda: None)
    monkeypatch.setattr(daemon.config, "_RECONNECT_MAX_DELAY_SECS", 60.0)
    monkeypatch.setattr(daemon.config, "_debug_log", lambda *_a, **_k: None)
    monkeypatch.setattr(daemon, "_close_interface", lambda iface: None)

    result = daemon._check_inactivity_reconnect(state)
    assert (
        result is True
    ), "Expected reconnect to fire when explicitly disconnected and 61s have elapsed"


def test_inactivity_reconnect_still_throttles_inactivity(monkeypatch):
    """The full inactivity window still throttles reconnects that are not explicit disconnects."""
    state = _make_state(inactivity_reconnect_secs=3600.0)
    # isConnected=True → inactivity-only trigger (no explicit disconnect signal)
    state.iface = DummyInterface(is_connected=True)
    state.iface_connected_at = 0.0
    # now=3700, last_inactivity_reconnect=3691 → 9 s elapsed, well within 3600 s window.
    state.last_inactivity_reconnect = 3691.0

    monkeypatch.setattr(daemon.time, "monotonic", lambda: 3700.0)
    # No recent packet → inactivity_elapsed = 3700 s > inactivity_reconnect_secs (3600 s)
    monkeypatch.setattr(daemon.handlers, "last_packet_monotonic", lambda: None)
    monkeypatch.setattr(daemon.config, "_RECONNECT_MAX_DELAY_SECS", 60.0)

    # Even though enough inactive time has passed, last_inactivity_reconnect is
    # only 9 s ago (< 3600 s throttle window) → reconnect is suppressed.
    result = daemon._check_inactivity_reconnect(state)
    assert (
        result is False
    ), "Expected throttle to suppress reconnect when last attempt was 9 s ago"
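# --- Illustrative sketch (not part of the diff) ------------------------------
# The two-window rule the two tests above encode, in sketch form. The names
# (isConnected, last_inactivity_reconnect, _RECONNECT_MAX_DELAY_SECS) follow
# the test doubles; the real daemon interleaves this check with queue and
# interface management, so treat this only as a reading of the contract.
def _should_reconnect_sketch(now, state, inactivity_secs, max_delay_secs):
    elapsed = now - (state.last_inactivity_reconnect or 0.0)
    if not state.iface.isConnected():
        # Explicit disconnects wait out only the short anti-thrash window.
        return elapsed > max_delay_secs
    # Inactivity-only triggers are throttled by the full inactivity window.
    return elapsed > inactivity_secs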
def test_inactivity_reconnect_logs_queue_depth(monkeypatch):
    """The inactivity reconnect debug log includes the current queue depth."""
    state = _make_state(inactivity_reconnect_secs=30.0)
    state.iface = DummyInterface(is_connected=True)
    state.iface_connected_at = 0.0
    state.last_inactivity_reconnect = None

    monkeypatch.setattr(daemon.time, "monotonic", lambda: 100.0)
    monkeypatch.setattr(daemon.handlers, "last_packet_monotonic", lambda: None)
    monkeypatch.setattr(daemon, "_close_interface", lambda iface: None)

    # Seed the global queue with two dummy items so queue_depth is non-zero.
    from data.mesh_ingestor.queue import STATE, _enqueue_post_json

    _enqueue_post_json("/api/a", {}, 10, state=STATE)
    _enqueue_post_json("/api/b", {}, 20, state=STATE)

    log_kwargs: list[dict] = []
    monkeypatch.setattr(
        daemon.config,
        "_debug_log",
        lambda msg, **kw: log_kwargs.append(kw),
    )

    try:
        result = daemon._check_inactivity_reconnect(state)
        assert result is True
        assert any(
            kw.get("queue_depth") == 2 for kw in log_kwargs
        ), f"Expected queue_depth=2 in log kwargs, got {log_kwargs}"
    finally:
        # Clean up global state so other tests are not affected.
        STATE.queue.clear()


def test_main_exits_early_when_no_instances(monkeypatch):
    """main() returns immediately when no INSTANCE_DOMAIN is configured.

    The queue drainer must NOT be started on the early-exit path.
    """
    monkeypatch.setattr(daemon.config, "INSTANCES", ())
    monkeypatch.setattr(daemon.config, "INSTANCE", "")
    log_msgs: list[str] = []
    monkeypatch.setattr(
        daemon.config,
        "_debug_log",
        lambda msg, **kw: log_msgs.append(msg),
    )
    drainer_calls: list[object] = []
    monkeypatch.setattr(
        daemon.queue,
        "_start_queue_drainer",
        lambda state=None: drainer_calls.append(state),
    )

    provider = _make_minimal_fake_provider("meshtastic")
    daemon.main(provider=provider)

    assert any("no instance_domain" in m.lower() for m in log_msgs)
    assert drainer_calls == [], "Drainer must not start when no instances configured"


def test_main_starts_queue_drainer(monkeypatch):
    """main() calls queue._start_queue_drainer after subscribing."""
    drainer_calls: list[object] = []
    monkeypatch.setattr(
        daemon.queue,
        "_start_queue_drainer",
        lambda state=None: drainer_calls.append(state),
    )

    _patch_daemon_for_fast_exit(monkeypatch)
    provider = _make_minimal_fake_provider("meshtastic")
    daemon.main(provider=provider)

    assert len(drainer_calls) == 1


# ---------------------------------------------------------------------------
# _try_send_self_node
# ---------------------------------------------------------------------------
@@ -30,6 +30,7 @@ if str(REPO_ROOT) not in sys.path:
import data.mesh_ingestor.config as config
import data.mesh_ingestor.handlers as handlers
import data.mesh_ingestor.handlers._state as _state_mod
import data.mesh_ingestor.handlers.generic as generic_mod
import data.mesh_ingestor.handlers.ignored as ignored_mod
import data.mesh_ingestor.handlers.telemetry as telemetry_mod
@@ -891,3 +892,197 @@ class TestStoreRouterHeartbeatPacket:
        finally:
            q._queue_post_json = original
        assert sent == []


# ---------------------------------------------------------------------------
# _coerce_emoji_codepoint
# ---------------------------------------------------------------------------


class TestCoerceEmojiCodepoint:
    """Tests for :func:`_coerce_emoji_codepoint`."""

    def test_none_returns_none(self):
        """``None`` input yields ``None``."""
        assert generic_mod._coerce_emoji_codepoint(None) is None

    def test_int_codepoint_above_127(self):
        """Integer codepoint above 127 is converted to the character."""
        assert generic_mod._coerce_emoji_codepoint(128077) == "\U0001f44d"

    def test_string_codepoint_above_127(self):
        """Digit string representing a codepoint above 127 is converted."""
        assert generic_mod._coerce_emoji_codepoint("128077") == "\U0001f44d"

    def test_small_int_preserved_as_string(self):
        """Small integer (≤ 127) is kept as its string representation."""
        assert generic_mod._coerce_emoji_codepoint(1) == "1"

    def test_small_string_digit_preserved(self):
        """Digit string ≤ 127 is kept as-is (slot marker)."""
        assert generic_mod._coerce_emoji_codepoint("1") == "1"

    def test_emoji_string_passthrough(self):
        """An already-resolved emoji character passes through."""
        assert generic_mod._coerce_emoji_codepoint("\U0001f44d") == "\U0001f44d"

    def test_whitespace_only_returns_none(self):
        """Whitespace-only string yields ``None``."""
        assert generic_mod._coerce_emoji_codepoint(" ") is None

    def test_empty_string_returns_none(self):
        """Empty string yields ``None``."""
        assert generic_mod._coerce_emoji_codepoint("") is None

    def test_float_codepoint_above_127(self):
        """Float codepoint above 127 is truncated and converted."""
        assert generic_mod._coerce_emoji_codepoint(128077.0) == "\U0001f44d"

    def test_invalid_codepoint_returns_none(self):
        """Out-of-range numeric codepoint returns ``None`` rather than the
        decimal form (which would render as garbage in the chat log)."""
        assert generic_mod._coerce_emoji_codepoint(0x7FFFFFFF) is None

    def test_invalid_string_codepoint_returns_none(self):
        """Out-of-range numeric string also returns ``None``."""
        assert generic_mod._coerce_emoji_codepoint(str(0x7FFFFFFF)) is None
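# --- Illustrative sketch (not part of the diff) ------------------------------
# The coercion contract the class above pins down, as a self-contained sketch;
# the real helper lives in handlers/generic.py and may differ in detail.
def _coerce_emoji_codepoint_sketch(value):
    if value is None:
        return None
    try:
        text = str(value).strip()
    except Exception:
        return None  # pathological __str__ (covered further below)
    if not text:
        return None  # empty or whitespace-only
    try:
        codepoint = int(float(text))
    except ValueError:
        return text  # already a resolved emoji character
    if codepoint <= 127:
        return str(codepoint)  # slot marker such as "1"
    try:
        return chr(codepoint)  # 128077 -> "\U0001f44d"
    except (ValueError, OverflowError):
        return None  # out-of-range values such as 0x7FFFFFFF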
# ---------------------------------------------------------------------------
# _is_reaction_placeholder_text
# ---------------------------------------------------------------------------


class TestIsReactionPlaceholderText:
    """Tests for :func:`_is_reaction_placeholder_text`."""

    def test_none_is_placeholder(self):
        """``None`` is a placeholder."""
        assert generic_mod._is_reaction_placeholder_text(None) is True

    def test_empty_is_placeholder(self):
        """Empty string is a placeholder."""
        assert generic_mod._is_reaction_placeholder_text("") is True

    def test_whitespace_is_placeholder(self):
        """Whitespace-only string is a placeholder."""
        assert generic_mod._is_reaction_placeholder_text(" ") is True

    def test_digit_slot_marker(self):
        """Digit strings like '1' and '3' are placeholders."""
        assert generic_mod._is_reaction_placeholder_text("1") is True
        assert generic_mod._is_reaction_placeholder_text("3") is True

    def test_bare_emoji_is_placeholder(self):
        """A single emoji character is a placeholder."""
        assert generic_mod._is_reaction_placeholder_text("\U0001f44d") is True

    def test_substantial_text_is_not_placeholder(self):
        """Prose text is not a placeholder."""
        assert generic_mod._is_reaction_placeholder_text("Hello world") is False

    def test_text_with_emoji_is_not_placeholder(self):
        """Text containing both words and emoji is not a placeholder."""
        assert (
            generic_mod._is_reaction_placeholder_text("Great job! \U0001f44d") is False
        )

    def test_short_ascii_word_is_not_placeholder(self):
        """A short ASCII word is not a placeholder."""
        assert generic_mod._is_reaction_placeholder_text("hi") is False
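# --- Illustrative sketch (not part of the diff) ------------------------------
# One way to satisfy the placeholder contract above; the real predicate may
# handle multi-codepoint emoji sequences differently.
def _is_reaction_placeholder_text_sketch(text):
    if text is None:
        return True
    stripped = text.strip()
    if not stripped:
        return True  # empty / whitespace-only
    if stripped.isdigit():
        return True  # tally markers such as "1" or "3"
    # A single non-ASCII character (a bare emoji) is a placeholder; any
    # ASCII word, however short ("hi"), is real text.
    return len(stripped) == 1 and ord(stripped) > 127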
# ---------------------------------------------------------------------------
# _is_likely_reaction
# ---------------------------------------------------------------------------


class TestIsLikelyReaction:
    """Tests for :func:`_is_likely_reaction`."""

    def test_reaction_app_portnum_string(self):
        """Explicit REACTION_APP portnum is always a reaction."""
        assert (
            generic_mod._is_likely_reaction(
                "REACTION_APP", None, 123, "\U0001f44d", None
            )
            is True
        )

    def test_reply_id_emoji_no_text(self):
        """reply_id + emoji + no text is a reaction."""
        assert (
            generic_mod._is_likely_reaction(
                "TEXT_MESSAGE_APP", 1, 123, "\U0001f44d", None
            )
            is True
        )

    def test_reply_id_emoji_digit_text(self):
        """reply_id + emoji + digit count text is a reaction."""
        assert (
            generic_mod._is_likely_reaction(
                "TEXT_MESSAGE_APP", 1, 123, "\U0001f44d", "3"
            )
            is True
        )

    def test_reply_id_emoji_substantial_text_not_reaction(self):
        """reply_id + emoji + substantial text is NOT a reaction."""
        assert (
            generic_mod._is_likely_reaction(
                "TEXT_MESSAGE_APP", 1, 123, "\U0001f44d", "Great job!"
            )
            is False
        )

    def test_no_emoji_not_reaction(self):
        """Missing emoji means not a reaction (even with reply_id)."""
        assert (
            generic_mod._is_likely_reaction("TEXT_MESSAGE_APP", 1, 123, None, None)
            is False
        )

    def test_no_reply_id_not_reaction(self):
        """Missing reply_id means not a reaction (non-REACTION_APP portnum)."""
        assert (
            generic_mod._is_likely_reaction(
                "TEXT_MESSAGE_APP", 1, None, "\U0001f44d", None
            )
            is False
        )

    def test_portnum_int_matches_reaction_candidate(self, monkeypatch):
        """An unknown portnum string is still classified as a reaction when
        ``portnum_int`` matches one of the firmware-resolved REACTION_APP
        candidates. Different Meshtastic firmware versions assign different
        integer values to the REACTION_APP enum, so the integer fallback is
        the authoritative path."""
        monkeypatch.setattr(generic_mod, "_portnum_candidates", lambda name: {77})
        assert (
            generic_mod._is_likely_reaction("UNKNOWN_PORT", 77, None, None, None)
            is True
        )
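# --- Illustrative sketch (not part of the diff) ------------------------------
# The decision order the tests above imply. The signature matches the calls in
# the suite (portnum, portnum_int, reply_id, emoji, text); the body is an
# assumption, and it leans on the placeholder sketch shown earlier.
def _is_likely_reaction_sketch(portnum, portnum_int, reply_id, emoji, text):
    if portnum == "REACTION_APP":
        return True
    if portnum_int is not None and portnum_int in _portnum_candidates("REACTION_APP"):
        return True  # firmware-specific integer fallback
    if reply_id is None or not emoji:
        return False
    # reply_id + emoji only counts as a reaction when the body text is a
    # placeholder (empty, digit tally, or the bare emoji itself).
    return _is_reaction_placeholder_text_sketch(text)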
# ---------------------------------------------------------------------------
# _coerce_emoji_codepoint — string conversion failure path
# ---------------------------------------------------------------------------


class TestCoerceEmojiStringFailure:
    """Cover the ``except Exception`` branch in :func:`_coerce_emoji_codepoint`.

    The string-conversion ``try`` is defensive against pathological values
    (objects whose ``__str__`` raises). We exercise it directly so the
    fallback ``return None`` line is covered.
    """

    def test_object_str_raises(self):
        """A value whose ``__str__`` raises yields ``None``."""

        class Boom:
            def __str__(self):  # noqa: D401 - pytest-style helper
                raise RuntimeError("boom")

        assert generic_mod._coerce_emoji_codepoint(Boom()) is None
@@ -227,7 +227,7 @@ def test_region_frequency_and_resolution_helpers():
    assert freq == "915MHz"

    freq = interfaces._region_frequency(LoraMessage(2))
    assert freq == "US"
    assert freq == 902  # "US" is in the region lookup table → base 902 MHz

    class StringRegionMessage:
        def __init__(self, region):
@@ -267,6 +267,72 @@ class TestEnumNameFromField:
        assert ifaces._enum_name_from_field(msg, "region", 3) == "US_915"


# ---------------------------------------------------------------------------
# _computed_channel_frequency
# ---------------------------------------------------------------------------


class TestComputedChannelFrequency:
    """Tests for :func:`interfaces._computed_channel_frequency`."""

    def test_none_enum_name_returns_none(self):
        """None enum_name returns None."""
        assert ifaces._computed_channel_frequency(None, 0) is None

    def test_unknown_region_returns_none(self):
        """Enum name not in lookup table returns None."""
        assert ifaces._computed_channel_frequency("UNKNOWN_REGION", 0) is None

    def test_us_channel_0_base_frequency(self):
        """US region, channel 0, returns floor(902.0 + 0*0.25) = 902."""
        assert ifaces._computed_channel_frequency("US", 0) == 902

    def test_us_channel_52_mid_band(self):
        """US region, channel 52, returns floor(902.0 + 52*0.25) = 915."""
        assert ifaces._computed_channel_frequency("US", 52) == 915

    def test_eu_868_channel_0_returns_869(self):
        """EU_868 region, channel 0, returns floor(869.525) = 869, not 868."""
        assert ifaces._computed_channel_frequency("EU_868", 0) == 869

    def test_eu_868_channel_1_returns_870(self):
        """EU_868 region, channel 1, returns floor(869.525 + 0.5) = 870."""
        assert ifaces._computed_channel_frequency("EU_868", 1) == 870

    def test_my_919_channel_0(self):
        """MY_919 region, channel 0, returns floor(919.0) = 919."""
        assert ifaces._computed_channel_frequency("MY_919", 0) == 919

    def test_lora_24_channel_0(self):
        """LORA_24 region, channel 0, returns floor(2400.0) = 2400."""
        assert ifaces._computed_channel_frequency("LORA_24", 0) == 2400

    def test_none_channel_num_defaults_to_zero(self):
        """None channel_num is treated as 0, returning the base frequency."""
        assert ifaces._computed_channel_frequency("ANZ", None) == 916

    def test_negative_channel_num_clamped_to_zero(self):
        """Negative channel_num is clamped to 0, returning the base frequency."""
        assert ifaces._computed_channel_frequency("ANZ", -1) == 916

    def test_result_is_int(self):
        """Return type is int (math.floor result), not float."""
        result = ifaces._computed_channel_frequency("EU_868", 0)
        assert isinstance(result, int)

    def test_nz_865_channel_0(self):
        """NZ_865 region, channel 0, returns floor(864.0) = 864."""
        assert ifaces._computed_channel_frequency("NZ_865", 0) == 864

    def test_br_902_channel_4_spacing_0_25(self):
        """BR_902 region, channel 4, returns floor(902.0 + 4*0.25) = 903."""
        assert ifaces._computed_channel_frequency("BR_902", 4) == 903

    def test_kz_863_channel_0(self):
        """KZ_863 region, channel 0, returns floor(863.125) = 863."""
        assert ifaces._computed_channel_frequency("KZ_863", 0) == 863
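# --- Illustrative sketch (not part of the diff) ------------------------------
# The arithmetic these tests encode: floor(base_mhz + channel * spacing_mhz)
# from a per-region table. The table entries below are inferred from the
# expected values above and are only a subset of the real table.
import math

_REGION_TABLE_SKETCH = {
    "US": (902.0, 0.25),
    "EU_868": (869.525, 0.5),
    "MY_919": (919.0, 0.25),
    "ANZ": (916.0, 0.25),
    "LORA_24": (2400.0, 0.25),
}


def _computed_channel_frequency_sketch(enum_name, channel_num):
    entry = _REGION_TABLE_SKETCH.get(enum_name) if enum_name else None
    if entry is None:
        return None  # unknown region or missing enum name
    base, spacing = entry
    channel = max(0, channel_num or 0)  # None and negatives clamp to 0
    return math.floor(base + channel * spacing)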
# ---------------------------------------------------------------------------
# _region_frequency
# ---------------------------------------------------------------------------
@@ -323,6 +389,65 @@ class TestRegionFrequency:
        msg = SimpleNamespace(DESCRIPTOR=None, override_frequency=None, region="EU433")
        assert ifaces._region_frequency(msg) == "EU433"

    def test_us_enum_lookup_table_used(self):
        """US region with channel_num=0 returns 902 from lookup table, not None."""
        enum_val = SimpleNamespace(name="US")
        enum_type = SimpleNamespace(values_by_number={1: enum_val})
        field_desc = SimpleNamespace(enum_type=enum_type)
        desc = SimpleNamespace(fields_by_name={"region": field_desc})
        msg = SimpleNamespace(
            DESCRIPTOR=desc, override_frequency=None, region=1, channel_num=0
        )
        assert ifaces._region_frequency(msg) == 902

    def test_eu_868_returns_869_not_868(self):
        """EU_868 region returns 869 from lookup table, not 868 parsed from name."""
        enum_val = SimpleNamespace(name="EU_868")
        enum_type = SimpleNamespace(values_by_number={3: enum_val})
        field_desc = SimpleNamespace(enum_type=enum_type)
        desc = SimpleNamespace(fields_by_name={"region": field_desc})
        msg = SimpleNamespace(
            DESCRIPTOR=desc, override_frequency=None, region=3, channel_num=0
        )
        assert ifaces._region_frequency(msg) == 869

    def test_unrecognised_int_falls_through(self):
        """Raw int region with no DESCRIPTOR and value < 100 returns None."""
        msg = SimpleNamespace(DESCRIPTOR=None, override_frequency=None, region=99)
        assert ifaces._region_frequency(msg) is None

    def test_missing_channel_num_attr_uses_base(self):
        """Region in lookup table with no channel_num attribute returns base freq."""
        enum_val = SimpleNamespace(name="MY_919")
        enum_type = SimpleNamespace(values_by_number={17: enum_val})
        field_desc = SimpleNamespace(enum_type=enum_type)
        desc = SimpleNamespace(fields_by_name={"region": field_desc})
        # deliberately no channel_num attribute
        msg = SimpleNamespace(DESCRIPTOR=desc, override_frequency=None, region=17)
        assert ifaces._region_frequency(msg) == 919

    def test_override_takes_priority_over_lookup_table(self):
        """override_frequency takes priority over the lookup table."""
        enum_val = SimpleNamespace(name="EU_868")
        enum_type = SimpleNamespace(values_by_number={3: enum_val})
        field_desc = SimpleNamespace(enum_type=enum_type)
        desc = SimpleNamespace(fields_by_name={"region": field_desc})
        msg = SimpleNamespace(
            DESCRIPTOR=desc, override_frequency=867.3, region=3, channel_num=0
        )
        assert ifaces._region_frequency(msg) == 867

    def test_unknown_enum_name_falls_to_digit_parse(self):
        """Enum name not in lookup table falls through to digit parsing."""
        enum_val = SimpleNamespace(name="FUTURE_999")
        enum_type = SimpleNamespace(values_by_number={99: enum_val})
        field_desc = SimpleNamespace(enum_type=enum_type)
        desc = SimpleNamespace(fields_by_name={"region": field_desc})
        msg = SimpleNamespace(
            DESCRIPTOR=desc, override_frequency=None, region=99, channel_num=0
        )
        assert ifaces._region_frequency(msg) == 999


# ---------------------------------------------------------------------------
# _camelcase_enum_name
+84
-3
@@ -233,7 +233,9 @@ def test_instance_domain_prefers_primary_env(mesh_module, monkeypatch):
    monkeypatch.setenv("INSTANCE_DOMAIN", "https://new.example")

    try:
        refreshed_instances = mesh_module.config._resolve_instance_domains()
        refreshed_instance = mesh_module.config._resolve_instance_domain()
        mesh_module.config.INSTANCES = refreshed_instances
        mesh_module.config.INSTANCE = refreshed_instance
        mesh_module.INSTANCE = refreshed_instance

@@ -241,6 +243,7 @@ def test_instance_domain_prefers_primary_env(mesh_module, monkeypatch):
        assert mesh_module.INSTANCE == "https://new.example"
    finally:
        monkeypatch.delenv("INSTANCE_DOMAIN", raising=False)
        mesh_module.config.INSTANCES = mesh_module.config._resolve_instance_domains()
        mesh_module.config.INSTANCE = mesh_module.config._resolve_instance_domain()
        mesh_module.INSTANCE = mesh_module.config.INSTANCE

@@ -251,7 +254,9 @@ def test_instance_domain_infers_scheme_for_hostnames(mesh_module, monkeypatch):
    monkeypatch.setenv("INSTANCE_DOMAIN", "mesh.example.org")

    try:
        refreshed_instances = mesh_module.config._resolve_instance_domains()
        refreshed_instance = mesh_module.config._resolve_instance_domain()
        mesh_module.config.INSTANCES = refreshed_instances
        mesh_module.config.INSTANCE = refreshed_instance
        mesh_module.INSTANCE = refreshed_instance

@@ -259,6 +264,7 @@ def test_instance_domain_infers_scheme_for_hostnames(mesh_module, monkeypatch):
        assert mesh_module.INSTANCE == "https://mesh.example.org"
    finally:
        monkeypatch.delenv("INSTANCE_DOMAIN", raising=False)
        mesh_module.config.INSTANCES = mesh_module.config._resolve_instance_domains()
        mesh_module.config.INSTANCE = mesh_module.config._resolve_instance_domain()
        mesh_module.INSTANCE = mesh_module.config.INSTANCE

@@ -583,10 +589,10 @@ def test_ensure_radio_metadata_extracts_config(mesh_module, capsys):
    first_log = capsys.readouterr().out

    assert iface.wait_calls == 1
    assert mesh.config.LORA_FREQ == 868
    assert mesh.config.LORA_FREQ == 869
    assert mesh.config.MODEM_PRESET == "MediumFast"
    assert "Captured LoRa radio metadata" in first_log
    assert "lora_freq=868" in first_log
    assert "lora_freq=869" in first_log
    assert "modem_preset='MediumFast'" in first_log

    secondary_lora = make_lora(7, "US_915", 2, "LONG_FAST", preset_field="preset")

@@ -596,7 +602,7 @@ def test_ensure_radio_metadata_extracts_config(mesh_module, capsys):
    second_log = capsys.readouterr().out

    assert second_iface.wait_calls == 1
    assert mesh.config.LORA_FREQ == 868
    assert mesh.config.LORA_FREQ == 869
    assert mesh.config.MODEM_PRESET == "MediumFast"
    assert second_log == ""

@@ -848,6 +854,73 @@ def test_store_packet_dict_posts_reaction_message(mesh_module, monkeypatch):
    assert priority == mesh._MESSAGE_POST_PRIORITY


def test_store_packet_dict_text_with_reply_and_emoji_is_not_reaction(
    mesh_module, monkeypatch
):
    """Regression test for #699: a TEXT_MESSAGE_APP packet that carries both
    a ``reply_id`` and an ``emoji`` AND substantial body text must be ingested
    as a regular text message — not silently reclassified as a reaction.

    This pins the end-to-end ingest contract: the helper's classification
    (``_is_likely_reaction``), the captured POST payload, and the preserved
    text/emoji/reply_id fields must all agree that this is text, not a
    reaction.
    """

    mesh = mesh_module
    captured = []
    monkeypatch.setattr(
        mesh,
        "_queue_post_json",
        lambda path, payload, *, priority: captured.append((path, payload, priority)),
    )

    packet = {
        "id": 4242,
        "rxTime": 1_700_200_000,
        "fromId": "!sender",
        "toId": "^all",
        "channel": 1,  # non-primary channel: bypass DM filter regardless
        "decoded": {
            "portnum": "TEXT_MESSAGE_APP",
            "text": "Great job! \U0001f44d",
            "data": {
                "reply_id": "7029",
                "emoji": "\U0001f44d",
            },
        },
    }

    mesh.store_packet_dict(packet)

    # 1. The packet was posted (not dropped) ---------------------------------
    assert captured, "Expected POST for text message with reply_id+emoji"
    path, payload, _ = captured[0]
    assert path == "/api/messages"

    # 2. Substantial text is preserved verbatim ------------------------------
    assert payload["text"] == "Great job! \U0001f44d"
    assert payload["emoji"] == "\U0001f44d"
    assert payload["reply_id"] == 7029
    assert payload["portnum"] == "TEXT_MESSAGE_APP"

    # 3. The classification helper agrees this is NOT a reaction -------------
    # (Pinning helper + ingest pipeline together prevents future drift where
    # one layer changes its mind without the other.)
    from data.mesh_ingestor.handlers.generic import _is_likely_reaction

    assert (
        _is_likely_reaction(
            "TEXT_MESSAGE_APP",
            1,
            7029,
            "\U0001f44d",
            "Great job! \U0001f44d",
        )
        is False
    )


def test_store_packet_dict_posts_position(mesh_module, monkeypatch):
    mesh = mesh_module
    captured = []

@@ -1615,6 +1688,8 @@ def test_main_retries_interface_creation(mesh_module, monkeypatch):
            raise RuntimeError("boom")
        return iface, port

    monkeypatch.setattr(mesh, "INSTANCES", (("http://test", ""),))
    monkeypatch.setattr(mesh, "INSTANCE", "http://test")
    monkeypatch.setattr(mesh, "CONNECTION", "/dev/ttyTEST")
    monkeypatch.setattr(mesh, "_create_serial_interface", fake_create)
    monkeypatch.setattr(mesh.threading, "Event", DummyEvent)

@@ -1687,6 +1762,8 @@ def test_main_reconnects_when_connection_event_clears(mesh_module, monkeypatch):
            self._flag = True
            return True

    monkeypatch.setattr(mesh, "INSTANCES", (("http://test", ""),))
    monkeypatch.setattr(mesh, "INSTANCE", "http://test")
    monkeypatch.setattr(mesh, "CONNECTION", "/dev/ttyTEST")
    monkeypatch.setattr(mesh, "_create_serial_interface", fake_create)
    monkeypatch.setattr(mesh.threading, "Event", DummyStopEvent)

@@ -1751,6 +1828,8 @@ def test_main_recreates_interface_after_snapshot_error(mesh_module, monkeypatch)
    def record_upsert(node_id, node):
        upsert_calls.append(node_id)

    monkeypatch.setattr(mesh, "INSTANCES", (("http://test", ""),))
    monkeypatch.setattr(mesh, "INSTANCE", "http://test")
    monkeypatch.setattr(mesh, "CONNECTION", "/dev/ttyTEST")
    monkeypatch.setattr(mesh, "_create_serial_interface", fake_create)
    monkeypatch.setattr(mesh, "upsert_node", record_upsert)

@@ -1773,6 +1852,8 @@ def test_main_exits_when_defaults_unavailable(mesh_module, monkeypatch):
    def fail_default():
        raise mesh.NoAvailableMeshInterface("no interface available")

    monkeypatch.setattr(mesh, "INSTANCES", (("http://test", ""),))
    monkeypatch.setattr(mesh, "INSTANCE", "http://test")
    monkeypatch.setattr(mesh, "CONNECTION", None)
    monkeypatch.setattr(mesh, "_create_default_interface", fail_default)
    monkeypatch.setattr(mesh.signal, "signal", lambda *_, **__: None)
@@ -0,0 +1,328 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the runtime patch installed against the upstream ``meshcore``
library to suppress ``MessageReader.handle_rx`` crashes on malformed frames.

Covers GitHub issue #754.
"""

from __future__ import annotations

import asyncio
import sys
from pathlib import Path

REPO_ROOT = Path(__file__).resolve().parents[1]
if str(REPO_ROOT) not in sys.path:
    sys.path.insert(0, str(REPO_ROOT))

from data.mesh_ingestor.protocols import (  # noqa: E402 - path setup
    _meshcore_patches,
)


class _FakeReader:
    """Stand-in for ``meshcore.reader.MessageReader`` that lets us control
    what ``handle_rx`` raises without dragging in the real library's
    framing state machine."""

    def __init__(self, raise_exc: BaseException | None = None, return_value=None):
        self._raise_exc = raise_exc
        self._return_value = return_value
        self.received: list[bytes] = []

    async def handle_rx(self, data):
        self.received.append(bytes(data))
        if self._raise_exc is not None:
            raise self._raise_exc
        return self._return_value


def _install_patch_on(cls) -> None:
    """Run the real wrap helper against an arbitrary class so the tests do
    not mutate the installed ``meshcore.reader.MessageReader``."""
    _meshcore_patches._wrap_handle_rx(cls)


def _run(coro):
    return asyncio.run(coro)


def test_apply_is_idempotent():
    """Applying ``_wrap_handle_rx`` twice must not double-wrap the target method."""

    class Target:
        async def handle_rx(self, data):
            return "ok"

    first_wrap = _meshcore_patches._wrap_handle_rx(Target)
    second_wrap = _meshcore_patches._wrap_handle_rx(Target)

    assert first_wrap is True
    assert second_wrap is False
    # Marker is present on the wrapper so future imports short-circuit.
    assert getattr(Target.handle_rx, _meshcore_patches._PATCH_MARKER, False) is True
    # Original is preserved for revert.
    assert hasattr(Target, "_orig_handle_rx")
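# --- Illustrative sketch (not part of the diff) ------------------------------
# A wrapper shape consistent with the assertions in this file (_PATCH_MARKER,
# _orig_handle_rx, the "malformed frame" warning with context/severity/packet
# fields); the shipped _meshcore_patches module is the authority.
import functools

from data.mesh_ingestor import config as _config_sketch


def _wrap_handle_rx_sketch(cls):
    original = getattr(cls, "handle_rx", None)
    if original is None:
        return False  # wrong class or upstream rename: report the no-op
    if getattr(original, _meshcore_patches._PATCH_MARKER, False):
        return False  # already wrapped; never double-wrap

    @functools.wraps(original)
    async def patched(self, data):
        try:
            return await original(self, data)
        except Exception as exc:
            _config_sketch._debug_log(
                "meshcore handle_rx suppressed malformed frame",
                context="meshcore.reader.patch",
                severity="warning",
                error_class=type(exc).__name__,
                error_message=str(exc),
                packet_len=_meshcore_patches._safe_len(data),
                packet_hex=_meshcore_patches._hex_preview(
                    data, _meshcore_patches._PACKET_LOG_MAX_BYTES
                ),
            )
            return None

    setattr(patched, _meshcore_patches._PATCH_MARKER, True)
    cls._orig_handle_rx = original
    cls.handle_rx = patched
    return True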
def test_apply_returns_false_when_already_patched(monkeypatch):
    """Once ``_wrap_handle_rx`` has been applied, ``apply()`` at module
    level observes the sentinel and short-circuits rather than rewrapping."""

    class Target:
        async def handle_rx(self, data):
            return None

    _meshcore_patches._wrap_handle_rx(Target)

    # Replace ``meshcore.reader.MessageReader`` with our pre-patched Target
    # so ``apply()`` cannot accidentally wrap a real class in the test env.
    import meshcore.reader as reader_module

    original_cls = reader_module.MessageReader
    monkeypatch.setattr(reader_module, "MessageReader", Target)
    try:
        assert _meshcore_patches.apply() is False
    finally:
        monkeypatch.setattr(reader_module, "MessageReader", original_cls)


def test_index_error_swallowed_and_logged(monkeypatch):
    """The exact failure mode reported in #754: ``IndexError`` on a malformed
    frame must not propagate and must emit one structured warning."""

    class Target(_FakeReader):
        pass

    _install_patch_on(Target)
    instance = Target(raise_exc=IndexError("index out of range"))

    # Force the debug logger to always emit so we can capture the log line
    # regardless of the ``DEBUG`` env flag during test runs.
    from data.mesh_ingestor import config

    emitted: list[tuple[str, dict]] = []

    def _capture_log(message, **kwargs):
        emitted.append((message, kwargs))

    monkeypatch.setattr(config, "_debug_log", _capture_log)

    # Should return None rather than raise.
    result = _run(instance.handle_rx(b"\x01\x02\x03\x04"))
    assert result is None

    assert emitted, "patched handle_rx should have logged the suppressed error"
    message, kwargs = emitted[-1]
    assert "malformed frame" in message
    assert kwargs["context"] == "meshcore.reader.patch"
    assert kwargs["severity"] == "warning"
    assert kwargs["error_class"] == "IndexError"
    assert kwargs["error_message"] == "index out of range"
    assert kwargs["packet_len"] == 4
    assert kwargs["packet_hex"] == "01020304"


def test_unrelated_return_value_preserved():
    """When the original ``handle_rx`` returns normally, the wrapper must
    forward the exact return value and not swallow it."""

    class Target(_FakeReader):
        pass

    _install_patch_on(Target)
    sentinel = object()
    instance = Target(return_value=sentinel)

    result = _run(instance.handle_rx(b"\x00"))
    assert result is sentinel
    assert instance.received == [b"\x00"]


def test_packet_dump_truncated_to_max(monkeypatch):
    """Large frames must be truncated in the hex dump so a noisy device
    cannot flood the log."""

    class Target(_FakeReader):
        pass

    _install_patch_on(Target)
    instance = Target(raise_exc=ValueError("boom"))

    from data.mesh_ingestor import config

    emitted: list[dict] = []

    def _capture_log(message, **kwargs):
        emitted.append(kwargs)

    monkeypatch.setattr(config, "_debug_log", _capture_log)

    payload = bytes(range(256)) * 2  # 512 bytes
    result = _run(instance.handle_rx(payload))
    assert result is None

    kwargs = emitted[-1]
    # Hex length is exactly 2 * cap bytes.
    expected_len = 2 * _meshcore_patches._PACKET_LOG_MAX_BYTES
    assert len(kwargs["packet_hex"]) == expected_len
    # And matches the first N real bytes of the payload.
    assert (
        kwargs["packet_hex"] == payload[: _meshcore_patches._PACKET_LOG_MAX_BYTES].hex()
    )
    assert kwargs["packet_len"] == 512


def test_hex_preview_handles_non_bytes():
    """Defensive: ``_hex_preview`` accepts bytearray / memoryview and any
    object convertible via ``bytes(...)`` without raising."""

    assert (
        _meshcore_patches._hex_preview(bytearray(b"\xde\xad\xbe\xef"), 4) == "deadbeef"
    )
    assert _meshcore_patches._hex_preview(memoryview(b"\x01\x02"), 8) == "0102"
    assert _meshcore_patches._hex_preview("not-bytes", 4) == ""


def test_safe_len_handles_unsized():
    assert _meshcore_patches._safe_len(b"\x01\x02") == 2
    assert _meshcore_patches._safe_len(12345) is None


def test_apply_skips_gracefully_when_meshcore_missing(monkeypatch):
    """If ``meshcore`` is not importable, ``apply()`` must return ``False``
    instead of raising. Simulated by injecting an ImportError into
    ``meshcore.reader``'s import machinery."""

    # Block the import by clearing both the submodule and the parent, so
    # that ``import meshcore.reader`` inside ``apply()`` triggers a fresh
    # resolution that fails.
    monkeypatch.setitem(sys.modules, "meshcore.reader", None)
    assert _meshcore_patches.apply() is False


def test_run_loop_exception_handler_routes_to_debug_log(monkeypatch):
    """The loop-level safety net installed in ``_run_loop`` must forward
    asyncio's unhandled-exception contexts through ``config._debug_log``."""

    from data.mesh_ingestor import config
    from data.mesh_ingestor.protocols import meshcore

    emitted: list[tuple[str, dict]] = []

    def _capture_log(message, **kwargs):
        emitted.append((message, kwargs))

    monkeypatch.setattr(config, "_debug_log", _capture_log)

    loop = asyncio.new_event_loop()
    try:
        meshcore._log_unhandled_loop_exception(
            loop,
            {"message": "synthetic task failure", "exception": RuntimeError("boom")},
        )
    finally:
        loop.close()

    assert emitted, "loop handler should forward to the structured logger"
    message, kwargs = emitted[-1]
    assert message == "synthetic task failure"
    assert kwargs["context"] == "asyncio.unhandled"
    assert kwargs["severity"] == "error"
    assert kwargs["error_class"] == "RuntimeError"
    assert kwargs["error_message"] == "boom"


def test_wrap_returns_false_when_class_has_no_handle_rx():
    """If a future upstream release renames ``handle_rx`` or we point the
    patch at the wrong class, ``_wrap_handle_rx`` must report the no-op
    rather than silently install nothing on a random attribute."""

    class Bare:
        pass

    assert _meshcore_patches._wrap_handle_rx(Bare) is False
    assert not hasattr(Bare, "_orig_handle_rx")


def test_loop_handler_defaults_when_context_minimal(monkeypatch):
    """Covers the fallback branches of ``_log_unhandled_loop_exception`` —
    missing ``message`` (defaults to a fixed string) and missing
    ``exception`` (``error_class``/``error_message`` come through as ``None``).
    Both are real asyncio code paths: task-cancellation and unhandled-future
    warnings arrive with one or the other key unset."""

    from data.mesh_ingestor import config
    from data.mesh_ingestor.protocols import meshcore

    emitted: list[tuple[str, dict]] = []

    def _capture_log(message, **kwargs):
        emitted.append((message, kwargs))

    monkeypatch.setattr(config, "_debug_log", _capture_log)

    loop = asyncio.new_event_loop()
    try:
        # Empty context exercises both fallbacks at once.
        meshcore._log_unhandled_loop_exception(loop, {})
    finally:
        loop.close()

    assert emitted, "loop handler should still emit something for a bare context"
    message, kwargs = emitted[-1]
    assert message == "Unhandled asyncio task exception"
    assert kwargs["context"] == "asyncio.unhandled"
    assert kwargs["severity"] == "error"
    assert kwargs["error_class"] is None
    assert kwargs["error_message"] is None


def test_loop_handler_logs_task_name_when_present(monkeypatch):
    """Asyncio includes the failing ``task`` object in its context dict when
    the exception comes from ``create_task(...)``. The handler extracts the
    task's name so operators can correlate log lines with the frame that
    blew up when several readers share a loop."""

    from data.mesh_ingestor import config
    from data.mesh_ingestor.protocols import meshcore

    emitted: list[dict] = []

    def _capture_log(message, **kwargs):
        emitted.append(kwargs)

    monkeypatch.setattr(config, "_debug_log", _capture_log)

    async def _dummy():
        return None

    loop = asyncio.new_event_loop()
    try:
        task = loop.create_task(_dummy(), name="meshcore-reader-42")
        # Let the task finish so we don't leak a pending future.
        loop.run_until_complete(task)
        meshcore._log_unhandled_loop_exception(
            loop,
            {
                "message": "synthetic",
                "exception": ValueError("bad frame"),
                "task": task,
            },
        )
    finally:
        loop.close()

    assert emitted[-1]["task"] == "meshcore-reader-42"
+171
-25
@@ -125,6 +125,8 @@ def test_daemon_main_uses_provider_connect(monkeypatch):
        ),
    )

    monkeypatch.setattr(daemon.config, "INSTANCES", (("http://test", ""),))
    monkeypatch.setattr(daemon.config, "INSTANCE", "http://test")
    monkeypatch.setattr(
        daemon.handlers, "register_host_node_id", lambda *_a, **_k: None
    )

@@ -661,21 +663,22 @@ def test_meshcore_node_id_none_on_empty():
    assert _meshcore_node_id(None) is None  # type: ignore[arg-type]


def test_meshcore_short_name_first_four_hex_digits():
    """_meshcore_short_name returns the first four hex chars, lowercased."""
    assert _meshcore_short_name("AABBccdd" + "00" * 28) == "aabb"
def test_meshcore_short_name_first_two_bytes_of_node_id():
    """_meshcore_short_name returns the first four hex chars of the node ID."""
    assert _meshcore_short_name("!aabbccdd") == "aabb"
    assert _meshcore_short_name("!AABBccdd") == "aabb"


def test_meshcore_short_name_empty_when_too_short():
    """_meshcore_short_name returns '' when the key has fewer than four hex digits."""
    """_meshcore_short_name returns '' when the node ID is missing or too short."""
    assert _meshcore_short_name("") == ""
    assert _meshcore_short_name("abc") == ""
    assert _meshcore_short_name("!ab") == ""
    assert _meshcore_short_name(None) == ""  # type: ignore[arg-type]


def test_meshcore_short_name_exactly_four_chars():
    """_meshcore_short_name with exactly four hex chars returns those four chars."""
    assert _meshcore_short_name("abcd") == "abcd"
def test_meshcore_short_name_without_bang_prefix():
    """_meshcore_short_name handles node IDs without the leading '!' prefix."""
    assert _meshcore_short_name("cafef00d") == "cafe"
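# --- Illustrative sketch (not part of the diff) ------------------------------
# The revised short-name rule the replacement tests encode: the first two
# bytes (four hex chars) of the node id, lowercased, tolerating an optional
# '!' prefix. A sketch only; the shipped helper is the authority.
def _meshcore_short_name_sketch(node_id):
    if not node_id:
        return ""
    hex_part = node_id[1:] if node_id.startswith("!") else node_id
    return hex_part[:4].lower() if len(hex_part) >= 4 else ""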
# ---------------------------------------------------------------------------
@@ -1199,46 +1202,108 @@ def test_interface_close_is_idempotent():

def test_derive_message_id_is_deterministic():
    """Same inputs must always produce the same ID."""
    assert _derive_message_id(1_000_000, "c0", "hello") == _derive_message_id(
        1_000_000, "c0", "hello"
    assert _derive_message_id("alice", 1_000_000, "c0", "hello") == _derive_message_id(
        "alice", 1_000_000, "c0", "hello"
    )


def test_derive_message_id_differs_by_channel():
    """Messages on different channels with the same timestamp must not collide."""
    assert _derive_message_id(1_000_000, "c0", "hello") != _derive_message_id(
        1_000_000, "c1", "hello"
    assert _derive_message_id("alice", 1_000_000, "c0", "hello") != _derive_message_id(
        "alice", 1_000_000, "c1", "hello"
    )


def test_derive_message_id_differs_by_text():
    """Messages with different text must produce different IDs."""
    assert _derive_message_id(1_000_000, "c0", "hello") != _derive_message_id(
        1_000_000, "c0", "world"
    assert _derive_message_id("alice", 1_000_000, "c0", "hello") != _derive_message_id(
        "alice", 1_000_000, "c0", "world"
    )


def test_derive_message_id_differs_by_timestamp():
    """Messages at different timestamps must produce different IDs."""
    assert _derive_message_id(1_000_000, "c0", "hi") != _derive_message_id(
        1_000_001, "c0", "hi"
    assert _derive_message_id("alice", 1_000_000, "c0", "hi") != _derive_message_id(
        "alice", 1_000_001, "c0", "hi"
    )


def test_derive_message_id_is_32bit():
    """Result must fit in a 32-bit unsigned integer."""
    result = _derive_message_id(1_758_000_000, "aabbccddee11", "some text")
    assert 0 <= result <= 0xFFFFFFFF
def test_derive_message_id_is_53bit():
    """Result must fit in JS ``Number.MAX_SAFE_INTEGER`` (2**53 - 1).

    Federation passes the id through JSON, where Number values exceeding
    53 bits lose precision in the JavaScript frontend. Clamping to 53 bits
    preserves the value across the round-trip while leaving ample collision
    headroom (~95M messages at the 50% birthday bound).
    """
    result = _derive_message_id("alice", 1_758_000_000, "c0", "some text")
    assert 0 <= result <= (1 << 53) - 1


def test_derive_message_id_distinguishes_long_messages_differing_after_128_chars():
    """Messages that share the first 128 characters must still get different IDs."""
    prefix = "A" * 128
    id_a = _derive_message_id(1_000_000, "c0", prefix + "AAAAAA")
    id_b = _derive_message_id(1_000_000, "c0", prefix + "BBBBBB")
    id_a = _derive_message_id("alice", 1_000_000, "c0", prefix + "AAAAAA")
    id_b = _derive_message_id("alice", 1_000_000, "c0", prefix + "BBBBBB")
    assert id_a != id_b


def test_derive_message_id_includes_sender_identity():
    """Two senders posting the same text on the same channel/second must NOT collide.

    Regression test for issue #751: prior to the fix the channel-message
    fingerprint omitted the sender entirely, so Alice and Bob both posting
    "ack" at the same instant collapsed into a single row.
    """
    alice_id = _derive_message_id("alice", 1_000_000, "c0", "ack")
    bob_id = _derive_message_id("bob", 1_000_000, "c0", "ack")
    assert alice_id != bob_id


def test_derive_message_id_channel_vs_dm_disjoint():
    """Channel and direct messages must occupy disjoint id namespaces.

    Without a discriminator that distinguishes the two classes, a channel
    message and a DM that happen to share the other components could collide.
    """
    channel_id = _derive_message_id("alice", 1_000_000, "c0", "hi")
    dm_id = _derive_message_id("alice", 1_000_000, "dm", "hi")
    assert channel_id != dm_id


def test_derive_message_id_identical_across_receivers():
    """Two ingestors with different roster state must derive the same id.

    The whole point of the fingerprint is that every input is sender-side, so
    two physically separate receivers compute the same id and the messages
    collapse on the ``messages.id`` PRIMARY KEY upsert.
    """
    args = ("alice", 1_758_000_000, "c0", "hello mesh")
    assert _derive_message_id(*args) == _derive_message_id(*args)


def test_derive_message_id_handles_invalid_utf8():
    """Inputs with lone surrogates must not raise; ``errors='replace'`` cleans them."""
    bad_text = "before \ud800 after"  # lone surrogate is invalid UTF-8
    result = _derive_message_id("alice", 1_000_000, "c0", bad_text)
    assert 0 <= result <= (1 << 53) - 1


def test_derive_message_id_anonymous_channel_msgs_still_distinguished_by_other_fields():
    """Anonymous channel msgs (sender_identity="") still differ when text/ts differ.

    The empty sender-identity path is documented as a degraded mode in
    CONTRACTS.md (anonymous transmissions cannot be distinguished from each
    other when timestamp + channel + text also match). This test pins down
    the *non-degraded* behaviour: as long as any of the remaining components
    differ, the ids must remain distinct.
    """
    base = _derive_message_id("", 1_000_000, "c0", "hi")
    assert base != _derive_message_id("", 1_000_001, "c0", "hi")  # ts differs
    assert base != _derive_message_id("", 1_000_000, "c1", "hi")  # channel differs
    assert base != _derive_message_id("", 1_000_000, "c0", "hello")  # text differs
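# --- Illustrative sketch (not part of the diff) ------------------------------
# One fingerprint construction consistent with every property asserted above:
# hash the sender-side fields and clamp to 53 bits so the id survives a
# JSON/JavaScript round-trip. The exact framing and hash function are
# assumptions; the tests only pin the observable properties.
import hashlib


def _derive_message_id_sketch(sender_identity, timestamp, channel, text):
    blob = "\x00".join(
        [sender_identity or "", str(timestamp), channel or "", text or ""]
    ).encode("utf-8", errors="replace")  # lone surrogates are cleaned here
    digest = hashlib.sha256(blob).digest()
    return int.from_bytes(digest[:8], "big") & ((1 << 53) - 1)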
# ---------------------------------------------------------------------------
# _make_event_handlers — async callbacks
# ---------------------------------------------------------------------------
@@ -1331,8 +1396,9 @@ def test_on_channel_msg_queues_packet(monkeypatch):
    assert pkt["from_id"] is None
    assert pkt["snr"] == 5
    assert pkt["rssi"] == -80
    # ID must be the hash-derived value, not the raw timestamp
    assert pkt["id"] == _derive_message_id(1_758_000_000, "c2", "hello mesh")
    # ID must be the hash-derived value, not the raw timestamp. The text has no
    # "Name:" prefix so the sender-identity component is the empty string.
    assert pkt["id"] == _derive_message_id("", 1_758_000_000, "c2", "hello mesh")


def test_on_channel_msg_resolves_from_id_via_sender_name(monkeypatch):

@@ -1532,10 +1598,90 @@ def test_on_contact_msg_queues_packet_with_from_id(monkeypatch):
    assert pkt["from_id"] == "!aabbccdd"
    assert pkt["to_id"] == "!deadbeef"
    assert pkt["id"] == _derive_message_id(
        1_758_000_001, "aabbccddee11", "direct message"
        "aabbccddee11", 1_758_000_001, "dm", "direct message"
    )


def test_on_channel_msg_id_identical_across_ingestors_with_different_rosters(
    monkeypatch,
):
    """Two ingestors that hear the same channel message must emit the same id.

    Regression test for issue #751. Ingestor A has Alice in its contact roster
    (so ``from_id`` resolves to ``!aabbccdd``); ingestor B does not (so a
    synthetic ``from_id`` is created). The dedup id MUST still match because
    it is derived from the parsed sender name in the text, not from the
    per-ingestor ``from_id`` resolution.
    """
    import asyncio

    pub_key = "aabbccdd" + "00" * 28
    payload = {
        "sender_timestamp": 1_758_000_999,
        "text": "Alice: dedup me",
        "channel_idx": 0,
    }

    captured_a, _, _, hmap_a = _setup_channel_msg_handlers(
        monkeypatch,
        contacts=[{"public_key": pub_key, "adv_name": "Alice"}],
    )
    asyncio.run(hmap_a["CHANNEL_MSG_RECV"](_FakeEvt(payload)))

    captured_b, _, _, hmap_b = _setup_channel_msg_handlers(monkeypatch)
    asyncio.run(hmap_b["CHANNEL_MSG_RECV"](_FakeEvt(payload)))

    assert len(captured_a) == 1
    assert len(captured_b) == 1
    # Different ingestors → different from_id resolution, but the dedup id is
    # identical because it comes from the parsed sender name and the
    # sender-side timestamp/text.
    assert captured_a[0]["from_id"] != captured_b[0]["from_id"]
    assert captured_a[0]["id"] == captured_b[0]["id"]


def test_on_contact_msg_id_identical_across_ingestors_with_different_rosters(
    monkeypatch,
):
    """Two ingestors that hear the same DM must emit the same id.

    Direct messages already carry the sender's ``pubkey_prefix`` in the event
    payload, so the dedup id is identical regardless of contact-roster state.
    """
    import asyncio
    import data.mesh_ingestor as _mesh_pkg
    import data.mesh_ingestor.protocols.meshcore as _mod

    pub_key = "aabbccddee11" + "00" * 26
    payload = {
        "sender_timestamp": 1_758_000_998,
        "text": "private hello",
        "pubkey_prefix": "aabbccddee11",
    }

    def _run(with_contact: bool):
        captured: list = []
        stub = _make_stub_handlers_module()
        stub.store_packet_dict = lambda pkt: captured.append(pkt)
        monkeypatch.setattr(_mod.config, "_debug_log", lambda *_a, **_k: None)
        monkeypatch.setattr(_mesh_pkg, "handlers", stub)

        iface = _MeshcoreInterface(target=None)
        iface.host_node_id = "!deadbeef"
        if with_contact:
            iface._update_contact({"public_key": pub_key, "adv_name": "Alice"})
        hmap = _make_event_handlers(iface, "/dev/ttyUSB0")
        asyncio.run(hmap["CONTACT_MSG_RECV"](_FakeEvt(payload)))
        return captured

    captured_a = _run(with_contact=True)
    captured_b = _run(with_contact=False)

    assert len(captured_a) == 1
    assert len(captured_b) == 1
    assert captured_a[0]["id"] == captured_b[0]["id"]


def test_on_channel_msg_skips_empty_text(monkeypatch):
    """on_channel_msg must not queue a packet when text is absent."""
    import asyncio
+839
-37
@@ -17,6 +17,7 @@ from __future__ import annotations

import sys
import threading
import time
import urllib.error
import urllib.request
from pathlib import Path

@@ -29,13 +30,20 @@ if str(REPO_ROOT) not in sys.path:
    sys.path.insert(0, str(REPO_ROOT))

import data.mesh_ingestor.config as config
import data.mesh_ingestor.queue as _queue_mod
from data.mesh_ingestor.queue import (
    QueueState,
    _clear_post_queue,
    _drain_post_queue,
    _enqueue_post_json,
    _MAX_SEND_RETRIES,
    _post_json,
    _QUEUE_DEPTH_WARNING_THRESHOLD,
    _queue_drainer_loop,
    _queue_post_json,
    _send_single,
    _start_queue_drainer,
    _stop_queue_drainer,
    _CHANNEL_POST_PRIORITY,
    _DEFAULT_POST_PRIORITY,
    _INGESTOR_POST_PRIORITY,

@@ -53,6 +61,19 @@ def _fresh_state() -> QueueState:
    return QueueState()


class _FakeResp:
    """Minimal context-manager response stub for ``urlopen`` patches."""

    def read(self):
        return b""

    def __enter__(self):
        return self

    def __exit__(self, *a):
        pass


# ---------------------------------------------------------------------------
# Priority constant ordering
# ---------------------------------------------------------------------------
@@ -85,33 +106,24 @@ class TestPostJson:
    """Tests for :func:`queue._post_json`."""

    def test_skips_when_no_instance(self, monkeypatch):
        """Does nothing when INSTANCE is empty."""
        """Does nothing when INSTANCES is empty."""
        monkeypatch.setattr(config, "INSTANCES", ())
        monkeypatch.setattr(config, "INSTANCE", "")
        sent = []
        with patch("urllib.request.urlopen") as mock_open:
            _post_json("/api/test", {"key": "val"})
            mock_open.assert_not_called()

    def test_sends_json_post(self, monkeypatch):
        """Sends a POST request with JSON body and correct headers."""
        monkeypatch.setattr(config, "INSTANCES", (("http://localhost", "tok"),))
        monkeypatch.setattr(config, "INSTANCE", "http://localhost")
        monkeypatch.setattr(config, "API_TOKEN", "tok")

        captured_req = []

        class FakeResp:
            def read(self):
                return b""

            def __enter__(self):
                return self

            def __exit__(self, *a):
                pass

        def fake_urlopen(req, timeout=None):
            captured_req.append(req)
            return FakeResp()
            return _FakeResp()

        with patch("urllib.request.urlopen", fake_urlopen):
            _post_json("/api/nodes", {"a": 1})

@@ -124,6 +136,7 @@ class TestPostJson:

    def test_handles_network_error_gracefully(self, monkeypatch, capsys):
        """Network errors are caught and logged, not raised."""
        monkeypatch.setattr(config, "INSTANCES", (("http://localhost", ""),))
        monkeypatch.setattr(config, "INSTANCE", "http://localhost")
        monkeypatch.setattr(config, "API_TOKEN", "")
        monkeypatch.setattr(config, "DEBUG", True)

@@ -140,19 +153,9 @@ class TestPostJson:

        captured_req = []

        class FakeResp:
            def read(self):
                return b""

            def __enter__(self):
                return self

            def __exit__(self, *a):
                pass

        def fake_urlopen(req, timeout=None):
            captured_req.append(req)
            return FakeResp()
            return _FakeResp()

        with patch("urllib.request.urlopen", fake_urlopen):
            _post_json("/api/test", {}, instance="http://override")

@@ -161,24 +164,15 @@ class TestPostJson:

    def test_no_auth_header_when_token_empty(self, monkeypatch):
        """No Authorization header is added when API_TOKEN is empty."""
        monkeypatch.setattr(config, "INSTANCES", (("http://localhost", ""),))
        monkeypatch.setattr(config, "INSTANCE", "http://localhost")
        monkeypatch.setattr(config, "API_TOKEN", "")

        captured_req = []

        class FakeResp:
            def read(self):
                return b""

            def __enter__(self):
                return self

            def __exit__(self, *a):
                pass

        def fake_urlopen(req, timeout=None):
            captured_req.append(req)
            return FakeResp()
            return _FakeResp()

        with patch("urllib.request.urlopen", fake_urlopen):
            _post_json("/api/test", {})

@@ -199,10 +193,11 @@ class TestEnqueuePostJson:
        state = _fresh_state()
        _enqueue_post_json("/api/test", {"k": 1}, 50, state=state)
        assert len(state.queue) == 1
        priority, _counter, path, payload = state.queue[0]
        priority, _counter, path, payload, retries = state.queue[0]
        assert priority == 50
        assert path == "/api/test"
        assert payload == {"k": 1}
        assert retries == 0

    def test_heap_ordering(self):
        """Lower priority values are dequeued first (min-heap)."""
@@ -211,7 +206,7 @@ class TestEnqueuePostJson:
        state = _fresh_state()
        _enqueue_post_json("/api/low", {}, 90, state=state)
        _enqueue_post_json("/api/high", {}, 10, state=state)
        _priority, _counter, path, _payload = heapq.heappop(state.queue)
        _priority, _counter, path, _payload, _retries = heapq.heappop(state.queue)
        assert path == "/api/high"
def test_counter_increments(self):
|
||||
@@ -394,3 +389,810 @@ class TestClearPostQueue:
|
||||
state = _fresh_state()
|
||||
_clear_post_queue(state=state)
|
||||
assert state.queue == []
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Multi-instance fan-out
# ---------------------------------------------------------------------------


class TestMultiInstanceFanOut:
    """Tests for multi-instance POST fan-out in :func:`queue._post_json`."""

    def test_fans_out_to_all_instances(self, monkeypatch):
        """Each configured instance receives the payload."""
        monkeypatch.setattr(
            config,
            "INSTANCES",
            (("http://alpha", "t1"), ("http://beta", "t2")),
        )

        captured = []

        def fake_urlopen(req, timeout=None):
            captured.append(req)
            return _FakeResp()

        with patch("urllib.request.urlopen", fake_urlopen):
            _post_json("/api/nodes", {"a": 1})

        assert len(captured) == 2
        urls = {r.get_full_url() for r in captured}
        assert urls == {"http://alpha/api/nodes", "http://beta/api/nodes"}
        tokens = {r.get_header("Authorization") for r in captured}
        assert tokens == {"Bearer t1", "Bearer t2"}

    def test_failure_isolation(self, monkeypatch):
        """A failure on one instance does not prevent delivery to the next."""
        monkeypatch.setattr(
            config,
            "INSTANCES",
            (("http://broken", "t1"), ("http://ok", "t2")),
        )
        monkeypatch.setattr(config, "DEBUG", False)

        captured = []

        def fake_urlopen(req, timeout=None):
            if "broken" in req.get_full_url():
                raise OSError("connection refused")
            captured.append(req)
            return _FakeResp()

        with patch("urllib.request.urlopen", fake_urlopen):
            _post_json("/api/test", {"x": 1})

        assert len(captured) == 1
        assert "http://ok" in captured[0].get_full_url()

    def test_explicit_instance_skips_fanout(self, monkeypatch):
        """Passing instance= explicitly bypasses the INSTANCES fan-out."""
        monkeypatch.setattr(
            config,
            "INSTANCES",
            (("http://a", "t1"), ("http://b", "t2")),
        )

        captured = []

        def fake_urlopen(req, timeout=None):
            captured.append(req)
            return _FakeResp()

        with patch("urllib.request.urlopen", fake_urlopen):
            _post_json("/api/test", {}, instance="http://override")

        assert len(captured) == 1
        assert "http://override" in captured[0].get_full_url()

    def test_empty_instances_noop(self, monkeypatch):
        """No requests are made when INSTANCES is empty."""
        monkeypatch.setattr(config, "INSTANCES", ())
        monkeypatch.setattr(config, "INSTANCE", "")

        with patch("urllib.request.urlopen") as mock_open:
            _post_json("/api/test", {})
            mock_open.assert_not_called()

    def test_backward_compat_fallback(self, monkeypatch):
        """Falls back to config.INSTANCE when INSTANCES is empty."""
        monkeypatch.setattr(config, "INSTANCES", ())
        monkeypatch.setattr(config, "INSTANCE", "http://legacy")
        monkeypatch.setattr(config, "API_TOKEN", "tok")

        captured = []

        def fake_urlopen(req, timeout=None):
            captured.append(req)
            return _FakeResp()

        with patch("urllib.request.urlopen", fake_urlopen):
            _post_json("/api/test", {"v": 1})

        assert len(captured) == 1
        assert "http://legacy" in captured[0].get_full_url()
        assert captured[0].get_header("Authorization") == "Bearer tok"

# ---------------------------------------------------------------------------
# HTTP failure always-logging
# ---------------------------------------------------------------------------


def test_http_failure_always_logged(monkeypatch):
    """POST failures are logged with always=True regardless of DEBUG mode.

    Operators must be able to see HTTP errors without enabling DEBUG so they
    can tell whether the ingestor is silently dropping data.
    """
    monkeypatch.setattr(config, "INSTANCES", (("http://localhost", ""),))
    monkeypatch.setattr(config, "INSTANCE", "http://localhost")
    monkeypatch.setattr(config, "DEBUG", False)

    log_calls: list[dict] = []
    original_debug_log = config._debug_log

    def capture_debug_log(msg, **kwargs):
        log_calls.append(kwargs)
        original_debug_log(msg, **kwargs)

    monkeypatch.setattr(config, "_debug_log", capture_debug_log)

    def raise_error(req, timeout=None):
        raise OSError("connection refused")

    with patch("urllib.request.urlopen", raise_error):
        _send_single("http://localhost", "", "/api/test", {"x": 1})

    assert any(
        c.get("always") is True for c in log_calls
    ), "Expected at least one _debug_log call with always=True on HTTP failure"

# ---------------------------------------------------------------------------
# Background drain thread
# ---------------------------------------------------------------------------


class TestQueueDrainer:
    """Tests for :func:`_start_queue_drainer` and :func:`_queue_drainer_loop`."""

    def test_start_queue_drainer_starts_thread(self):
        """_start_queue_drainer creates and starts a daemon thread."""
        state = _fresh_state()
        assert state.drainer is None
        _start_queue_drainer(state)
        assert state.drainer is not None
        assert state.drainer.is_alive()
        _stop_queue_drainer(state)

    def test_start_queue_drainer_idempotent(self):
        """Calling _start_queue_drainer twice does not create a second thread."""
        state = _fresh_state()
        _start_queue_drainer(state)
        first_thread = state.drainer
        _start_queue_drainer(state)
        assert state.drainer is first_thread
        _stop_queue_drainer(state)

    def test_queue_drainer_loop_drains_items(self):
        """_queue_drainer_loop drains enqueued items when signalled."""
        state = _fresh_state()
        drained: list[str] = []

        original_post_json = _queue_mod._post_json
        _queue_mod._post_json = lambda path, payload: drained.append(path)
        try:
            _start_queue_drainer(state)
            _enqueue_post_json("/api/drainer-test", {}, 10, state=state)
            state.drain_event.set()
            deadline = time.monotonic() + 2.0
            while "/api/drainer-test" not in drained and time.monotonic() < deadline:
                time.sleep(0.01)
            assert "/api/drainer-test" in drained
        finally:
            _queue_mod._post_json = original_post_json
            _stop_queue_drainer(state)

    def test_queue_post_json_signals_drain_event_with_drainer(self):
        """When a drainer is alive, _queue_post_json signals drain_event instead of blocking."""
        state = _fresh_state()
        drained: list[str] = []

        original_post_json = _queue_mod._post_json
        _queue_mod._post_json = lambda path, payload: drained.append(path)
        try:
            _start_queue_drainer(state)
            # With a live drainer, the call should return immediately
            # (signal only) and the drainer processes the item in the background.
            _queue_post_json("/api/bg-test", {"k": 1}, priority=10, state=state)
            deadline = time.monotonic() + 2.0
            while "/api/bg-test" not in drained and time.monotonic() < deadline:
                time.sleep(0.01)
            assert "/api/bg-test" in drained
        finally:
            _queue_mod._post_json = original_post_json
            _stop_queue_drainer(state)

    def test_queue_post_json_falls_back_to_sync_drain_without_drainer(self):
        """When no drainer is running, _queue_post_json drains synchronously."""
        state = _fresh_state()
        # state.drainer is None → synchronous path
        sent: list[str] = []
        _queue_post_json(
            "/api/sync",
            {"v": 1},
            priority=10,
            state=state,
            send=lambda p, d: sent.append(p),
        )
        assert "/api/sync" in sent

    def test_enqueue_during_drain_is_processed(self):
        """Items enqueued while the drainer is mid-drain are still drained.

        Simulates the race where a new item arrives while
        ``_drain_post_queue`` is actively processing. The new item must
        be picked up within the same drain cycle or on the next signal.
        """
        state = _fresh_state()
        drained: list[str] = []
        gate = threading.Event()

        original_post_json = _queue_mod._post_json

        def slow_send(path, payload):
            """Drain the first item slowly, allowing a second enqueue."""
            drained.append(path)
            if path == "/api/first":
                gate.set()

        _queue_mod._post_json = slow_send
        try:
            _start_queue_drainer(state)
            _enqueue_post_json("/api/first", {}, 10, state=state)
            state.drain_event.set()
            # Wait until the drainer has started processing /api/first.
            gate.wait(timeout=2.0)
            # Enqueue a second item while the drainer is active.
            _enqueue_post_json("/api/second", {}, 10, state=state)
            state.drain_event.set()
            deadline = time.monotonic() + 2.0
            while "/api/second" not in drained and time.monotonic() < deadline:
                time.sleep(0.01)
            assert "/api/second" in drained
        finally:
            _queue_mod._post_json = original_post_json
            _stop_queue_drainer(state)

    def test_stop_queue_drainer(self):
        """_stop_queue_drainer signals the thread to exit and joins it."""
        state = _fresh_state()
        _start_queue_drainer(state)
        assert state.drainer is not None
        assert state.drainer.is_alive()
        _stop_queue_drainer(state)
        assert state.drainer is None
        assert state.shutdown.is_set()

    def test_stop_queue_drainer_noop_when_not_running(self):
        """_stop_queue_drainer is safe to call with no drainer."""
        state = _fresh_state()
        _stop_queue_drainer(state)
        assert state.drainer is None

# ---------------------------------------------------------------------------
# Drainer resilience
# ---------------------------------------------------------------------------


class TestDrainerResilience:
    """Tests verifying the drainer thread cannot be killed by exceptions."""

    def test_drainer_survives_drain_exception(self, monkeypatch):
        """The drainer loop keeps running after _drain_post_queue raises."""
        state = _fresh_state()
        drained: list[str] = []
        call_count = [0]

        original_drain = _queue_mod._drain_post_queue

        def flaky_drain(s, send=None):
            call_count[0] += 1
            if call_count[0] == 1:
                raise RuntimeError("transient drain error")
            original_drain(s, send=send)

        original_post_json = _queue_mod._post_json
        _queue_mod._post_json = lambda path, payload: drained.append(path)
        monkeypatch.setattr(_queue_mod, "_drain_post_queue", flaky_drain)
        try:
            _start_queue_drainer(state)
            # First signal triggers the RuntimeError; drainer should survive.
            _enqueue_post_json("/api/first", {}, 10, state=state)
            state.drain_event.set()
            time.sleep(0.2)
            assert state.drainer.is_alive(), "Drainer died after drain exception"
            # Second signal should succeed normally.
            _enqueue_post_json("/api/second", {}, 10, state=state)
            state.drain_event.set()
            deadline = time.monotonic() + 2.0
            while "/api/second" not in drained and time.monotonic() < deadline:
                time.sleep(0.01)
            assert "/api/second" in drained
        finally:
            _queue_mod._post_json = original_post_json
            _stop_queue_drainer(state)

    def test_drainer_survives_debug_log_exception(self, monkeypatch):
        """The drainer survives even when _debug_log raises inside the error handler."""
        state = _fresh_state()
        drained: list[str] = []
        call_count = [0]

        original_drain = _queue_mod._drain_post_queue

        def flaky_drain(s, send=None):
            call_count[0] += 1
            if call_count[0] == 1:
                raise RuntimeError("drain error")
            original_drain(s, send=send)

        def broken_log(*args, **kwargs):
            raise BrokenPipeError("stdout closed")

        original_post_json = _queue_mod._post_json
        _queue_mod._post_json = lambda path, payload: drained.append(path)
        monkeypatch.setattr(_queue_mod, "_drain_post_queue", flaky_drain)
        monkeypatch.setattr(config, "_debug_log", broken_log)
        try:
            _start_queue_drainer(state)
            _enqueue_post_json("/api/first", {}, 10, state=state)
            state.drain_event.set()
            time.sleep(0.2)
            assert state.drainer.is_alive(), "Drainer died after log exception"
            # Restore log so the second drain can proceed.
            monkeypatch.undo()
            _queue_mod._post_json = lambda path, payload: drained.append(path)
            monkeypatch.setattr(_queue_mod, "_drain_post_queue", original_drain)
            _enqueue_post_json("/api/second", {}, 10, state=state)
            state.drain_event.set()
            deadline = time.monotonic() + 2.0
            while "/api/second" not in drained and time.monotonic() < deadline:
                time.sleep(0.01)
            assert "/api/second" in drained
        finally:
            _queue_mod._post_json = original_post_json
            _stop_queue_drainer(state)

    def test_drainer_logs_startup(self, monkeypatch):
        """The drainer logs a startup message."""
        state = _fresh_state()
        log_msgs: list[str] = []
        monkeypatch.setattr(
            config, "_debug_log", lambda msg, **kw: log_msgs.append(msg)
        )
        _start_queue_drainer(state)
        time.sleep(0.1)
        _stop_queue_drainer(state)
        assert any("started" in m.lower() for m in log_msgs)

    def test_drainer_logs_exit(self, monkeypatch):
        """The drainer logs an exit message on clean shutdown."""
        state = _fresh_state()
        log_msgs: list[str] = []
        monkeypatch.setattr(
            config, "_debug_log", lambda msg, **kw: log_msgs.append(msg)
        )
        _start_queue_drainer(state)
        time.sleep(0.1)
        _stop_queue_drainer(state)
        assert any("exiting" in m.lower() for m in log_msgs)

    def test_drainer_logs_depth_warning(self, monkeypatch):
        """A warning is emitted when queue depth exceeds the threshold."""
        state = _fresh_state()
        log_kwargs: list[dict] = []
        monkeypatch.setattr(
            config,
            "_debug_log",
            lambda msg, **kw: log_kwargs.append({"msg": msg, **kw}),
        )

        original_post_json = _queue_mod._post_json
        _queue_mod._post_json = lambda path, payload: None
        try:
            for i in range(_QUEUE_DEPTH_WARNING_THRESHOLD + 1):
                _enqueue_post_json(f"/api/{i}", {}, 10, state=state)
            _start_queue_drainer(state)
            state.drain_event.set()
            deadline = time.monotonic() + 2.0
            while (
                not any("depth" in e.get("msg", "").lower() for e in log_kwargs)
                and time.monotonic() < deadline
            ):
                time.sleep(0.01)
            assert any("depth" in e.get("msg", "").lower() for e in log_kwargs)
        finally:
            _queue_mod._post_json = original_post_json
            _stop_queue_drainer(state)

# ---------------------------------------------------------------------------
# Retry logic
# ---------------------------------------------------------------------------


class TestRetryLogic:
    """Tests for send failure retry in :func:`_drain_post_queue`."""

    def test_send_single_returns_true_on_success(self, monkeypatch):
        """_send_single returns True when the HTTP call succeeds."""
        with patch("urllib.request.urlopen", lambda req, timeout=None: _FakeResp()):
            assert _send_single("http://localhost", "", "/api/ok", {}) is True

    def test_send_single_returns_false_on_failure(self, monkeypatch):
        """_send_single returns False when the HTTP call fails."""
        monkeypatch.setattr(config, "_debug_log", lambda *a, **kw: None)

        def raise_error(req, timeout=None):
            raise OSError("fail")

        with patch("urllib.request.urlopen", raise_error):
            assert _send_single("http://localhost", "", "/api/fail", {}) is False

    def test_post_json_returns_true_on_success(self, monkeypatch):
        """_post_json returns True when the instance succeeds."""
        monkeypatch.setattr(config, "INSTANCES", (("http://ok", ""),))
        with patch("urllib.request.urlopen", lambda req, timeout=None: _FakeResp()):
            assert _post_json("/api/ok", {}) is True

    def test_post_json_returns_false_when_all_fail(self, monkeypatch):
        """_post_json returns False when all instances fail."""
        monkeypatch.setattr(config, "INSTANCES", (("http://a", ""), ("http://b", "")))
        monkeypatch.setattr(config, "_debug_log", lambda *a, **kw: None)

        def raise_error(req, timeout=None):
            raise OSError("fail")

        with patch("urllib.request.urlopen", raise_error):
            assert _post_json("/api/fail", {}) is False

    def test_post_json_returns_true_when_at_least_one_succeeds(self, monkeypatch):
        """_post_json returns True when at least one instance succeeds."""
        monkeypatch.setattr(
            config, "INSTANCES", (("http://broken", ""), ("http://ok", ""))
        )
        monkeypatch.setattr(config, "_debug_log", lambda *a, **kw: None)

        def selective_urlopen(req, timeout=None):
            if "broken" in req.get_full_url():
                raise OSError("fail")
            return _FakeResp()

        with patch("urllib.request.urlopen", selective_urlopen):
            assert _post_json("/api/mixed", {}) is True

    def test_drain_retries_on_send_failure(self):
        """Items are re-queued and retried when send returns False."""
        state = _fresh_state()
        attempts: list[str] = []
        call_count = [0]

        def flaky_send(path, payload):
            call_count[0] += 1
            attempts.append(path)
            # Fail on first attempt, succeed on retry.
            return call_count[0] > 1

        _enqueue_post_json("/api/retry", {"v": 1}, 10, state=state)
        _drain_post_queue(state, send=flaky_send)
        assert attempts.count("/api/retry") == 2

    def test_drain_drops_after_max_retries(self, monkeypatch):
        """Items are dropped with a warning after exceeding max retries."""
        state = _fresh_state()
        attempts: list[str] = []
        log_kwargs: list[dict] = []
        monkeypatch.setattr(
            config,
            "_debug_log",
            lambda msg, **kw: log_kwargs.append({"msg": msg, **kw}),
        )

        def always_fail(path, payload):
            attempts.append(path)
            return False

        _enqueue_post_json("/api/doomed", {}, 10, state=state)
        _drain_post_queue(state, send=always_fail)
        # Initial attempt + _MAX_SEND_RETRIES retries.
        assert attempts.count("/api/doomed") == _MAX_SEND_RETRIES + 1
        assert any("dropping" in e.get("msg", "").lower() for e in log_kwargs)

    def test_drain_no_retry_for_none_return(self):
        """Custom send callables returning None are NOT retried.

        This preserves backward compatibility with test lambdas that do not
        return a boolean.
        """
        state = _fresh_state()
        attempts: list[str] = []

        def custom_send(path, payload):
            attempts.append(path)
            return None

        _enqueue_post_json("/api/once", {}, 10, state=state)
        _drain_post_queue(state, send=custom_send)
        assert attempts.count("/api/once") == 1

    def test_enqueue_with_retries_parameter(self):
        """_enqueue_post_json stores the retry count in the 5th tuple position."""
        state = _fresh_state()
        _enqueue_post_json("/api/r", {}, 10, state=state, retries=2)
        assert len(state.queue) == 1
        assert state.queue[0][4] == 2

    def test_drain_handles_legacy_4_tuple(self):
        """_drain_post_queue handles 4-tuple items without crashing."""
        import heapq

        state = _fresh_state()
        sent: list[str] = []
        # Push a legacy 4-tuple directly.
        with state.lock:
            heapq.heappush(state.queue, (10, 0, "/api/legacy", {"v": 1}))
        _drain_post_queue(state, send=lambda p, d: sent.append(p))
        assert "/api/legacy" in sent

# ---------------------------------------------------------------------------
# Drainer auto-restart
# ---------------------------------------------------------------------------


class TestDrainerAutoRestart:
    """Tests for automatic drainer thread recovery in :func:`_queue_post_json`."""

    def test_queue_post_json_restarts_dead_drainer(self, monkeypatch):
        """A dead drainer is automatically restarted by _queue_post_json."""
        state = _fresh_state()
        drained: list[str] = []

        original_post_json = _queue_mod._post_json
        _queue_mod._post_json = lambda path, payload: drained.append(path)
        monkeypatch.setattr(config, "_debug_log", lambda *a, **kw: None)
        try:
            # Start and then kill the drainer.
            _start_queue_drainer(state)
            _stop_queue_drainer(state)
            # _stop_queue_drainer sets drainer=None, so simulate a crash
            # where the Thread object is still present but dead.
            state.drainer = threading.Thread(target=lambda: None, daemon=True)
            state.drainer.start()
            state.drainer.join()  # Dead thread, is_alive()=False

            _queue_post_json("/api/revived", {"v": 1}, priority=10, state=state)
            deadline = time.monotonic() + 2.0
            while "/api/revived" not in drained and time.monotonic() < deadline:
                time.sleep(0.01)
            assert "/api/revived" in drained
            assert state.drainer is not None
            assert state.drainer.is_alive()
        finally:
            _queue_mod._post_json = original_post_json
            _stop_queue_drainer(state)

    def test_queue_post_json_no_restart_when_never_started(self):
        """No drainer is started when state.drainer is None (daemon's job)."""
        state = _fresh_state()
        assert state.drainer is None
        sent: list[str] = []
        _queue_post_json(
            "/api/no-restart",
            {},
            priority=10,
            state=state,
            send=lambda p, d: sent.append(p),
        )
        assert "/api/no-restart" in sent
        assert state.drainer is None

    def test_start_queue_drainer_resets_shutdown(self):
        """_start_queue_drainer clears the shutdown event before starting."""
        state = _fresh_state()
        _start_queue_drainer(state)
        _stop_queue_drainer(state)
        assert state.shutdown.is_set()
        # Re-start should clear shutdown and start a live thread.
        _start_queue_drainer(state)
        assert not state.shutdown.is_set()
        assert state.drainer is not None
        assert state.drainer.is_alive()
        _stop_queue_drainer(state)

# ---------------------------------------------------------------------------
# No-instances warning
# ---------------------------------------------------------------------------


class TestNoInstancesWarning:
    """Tests for the warning log when no target instances are configured."""

    def test_post_json_errors_when_no_instances(self, monkeypatch):
        """An error is logged when INSTANCES and INSTANCE are both empty."""
        monkeypatch.setattr(config, "INSTANCES", ())
        monkeypatch.setattr(config, "INSTANCE", "")
        log_kwargs: list[dict] = []
        monkeypatch.setattr(
            config,
            "_debug_log",
            lambda msg, **kw: log_kwargs.append({"msg": msg, **kw}),
        )

        result = _post_json("/api/nowhere", {"v": 1})

        assert result is False
        assert any(
            kw.get("always") is True and kw.get("severity") == "error"
            for kw in log_kwargs
        )

    def test_post_json_survives_log_exception_on_no_instances(self, monkeypatch):
        """_post_json still returns False when logging itself raises."""
        monkeypatch.setattr(config, "INSTANCES", ())
        monkeypatch.setattr(config, "INSTANCE", "")
        monkeypatch.setattr(
            config,
            "_debug_log",
            lambda *a, **kw: (_ for _ in ()).throw(OSError("log broken")),
        )
        assert _post_json("/api/nowhere", {}) is False

# ---------------------------------------------------------------------------
# Defensive exception guard coverage
# ---------------------------------------------------------------------------


class TestDefensiveExceptionGuards:
    """Cover the ``except Exception: pass`` guards wrapping ``_debug_log`` calls.

    These guards ensure that a broken logging backend (e.g. ``BrokenPipeError``
    from ``print()`` to a closed stdout) never crashes the drainer thread or
    drops data.
    """

    def test_drain_drop_log_exception(self, monkeypatch):
        """Max-retries drop path survives a broken _debug_log."""
        state = _fresh_state()
        monkeypatch.setattr(
            config,
            "_debug_log",
            lambda *a, **kw: (_ for _ in ()).throw(BrokenPipeError("broken")),
        )

        attempts: list[str] = []

        def always_fail(path, payload):
            attempts.append(path)
            return False

        _enqueue_post_json("/api/fail", {}, 10, state=state)
        # Should not raise even though _debug_log throws on the drop message.
        _drain_post_queue(state, send=always_fail)
        assert attempts.count("/api/fail") == _MAX_SEND_RETRIES + 1

    def test_drainer_startup_log_exception(self, monkeypatch):
        """Drainer thread starts even when the startup log raises."""
        state = _fresh_state()
        monkeypatch.setattr(
            config,
            "_debug_log",
            lambda *a, **kw: (_ for _ in ()).throw(BrokenPipeError("broken")),
        )
        _start_queue_drainer(state)
        time.sleep(0.15)
        assert state.drainer is not None
        assert state.drainer.is_alive()
        # Restore log so stop can log cleanly.
        monkeypatch.undo()
        _stop_queue_drainer(state)

    def test_drainer_exit_log_exception(self, monkeypatch):
        """Drainer thread exits cleanly even when the exit log raises."""
        state = _fresh_state()
        _start_queue_drainer(state)
        time.sleep(0.05)
        # Break _debug_log AFTER startup so only the exit log raises.
        monkeypatch.setattr(
            config,
            "_debug_log",
            lambda *a, **kw: (_ for _ in ()).throw(BrokenPipeError("broken")),
        )
        _stop_queue_drainer(state)
        assert state.drainer is None

    def test_drainer_depth_warning_log_exception(self, monkeypatch):
        """Drainer survives a broken _debug_log during depth warning."""
        state = _fresh_state()
        drained: list[str] = []

        original_post_json = _queue_mod._post_json
        _queue_mod._post_json = lambda path, payload: drained.append(path)
        try:
            _start_queue_drainer(state)
            time.sleep(0.05)
            # Break _debug_log so the depth warning raises.
            monkeypatch.setattr(
                config,
                "_debug_log",
                lambda *a, **kw: (_ for _ in ()).throw(BrokenPipeError("broken")),
            )
            for i in range(_QUEUE_DEPTH_WARNING_THRESHOLD + 1):
                _enqueue_post_json(f"/api/{i}", {}, 10, state=state)
            state.drain_event.set()
            deadline = time.monotonic() + 2.0
            while (
                len(drained) < _QUEUE_DEPTH_WARNING_THRESHOLD + 1
                and time.monotonic() < deadline
            ):
                time.sleep(0.01)
            assert len(drained) == _QUEUE_DEPTH_WARNING_THRESHOLD + 1
        finally:
            _queue_mod._post_json = original_post_json
            monkeypatch.undo()
            _stop_queue_drainer(state)

    def test_drainer_error_handler_log_exception(self, monkeypatch):
        """Drainer survives when both drain and error-log raise."""
        state = _fresh_state()
        call_count = [0]
        original_drain = _queue_mod._drain_post_queue

        def flaky_drain(s, send=None):
            call_count[0] += 1
            if call_count[0] == 1:
                raise RuntimeError("drain boom")
            original_drain(s, send=send)

        drained: list[str] = []
        original_post_json = _queue_mod._post_json
        _queue_mod._post_json = lambda path, payload: drained.append(path)
        monkeypatch.setattr(_queue_mod, "_drain_post_queue", flaky_drain)
        # _debug_log raises on the error handler's inner logging call.
        monkeypatch.setattr(
            config,
            "_debug_log",
            lambda *a, **kw: (_ for _ in ()).throw(BrokenPipeError("broken")),
        )
        try:
            _start_queue_drainer(state)
            _enqueue_post_json("/api/first", {}, 10, state=state)
            state.drain_event.set()
            time.sleep(0.3)
            assert state.drainer.is_alive()
            # Restore to process an item normally.
            monkeypatch.undo()
            _queue_mod._post_json = lambda path, payload: drained.append(path)
            monkeypatch.setattr(_queue_mod, "_drain_post_queue", original_drain)
            _enqueue_post_json("/api/second", {}, 10, state=state)
            state.drain_event.set()
            deadline = time.monotonic() + 2.0
            while "/api/second" not in drained and time.monotonic() < deadline:
                time.sleep(0.01)
            assert "/api/second" in drained
        finally:
            _queue_mod._post_json = original_post_json
            _stop_queue_drainer(state)

    def test_restart_warning_log_exception(self, monkeypatch):
        """Drainer restart proceeds even when the restart warning log raises."""
        state = _fresh_state()
        drained: list[str] = []
        original_post_json = _queue_mod._post_json
        _queue_mod._post_json = lambda path, payload: drained.append(path)
        monkeypatch.setattr(
            config,
            "_debug_log",
            lambda *a, **kw: (_ for _ in ()).throw(BrokenPipeError("broken")),
        )
        try:
            # Simulate a crashed drainer (dead Thread, not None).
            state.drainer = threading.Thread(target=lambda: None, daemon=True)
            state.drainer.start()
            state.drainer.join()
            assert not state.drainer.is_alive()

            _queue_post_json("/api/restarted", {"v": 1}, priority=10, state=state)
            deadline = time.monotonic() + 2.0
            while "/api/restarted" not in drained and time.monotonic() < deadline:
                time.sleep(0.01)
            assert "/api/restarted" in drained
        finally:
            _queue_mod._post_json = original_post_json
            monkeypatch.undo()
            _stop_queue_drainer(state)

+3
-1
@@ -76,6 +76,7 @@ COPY --chown=potatomesh:potatomesh web/spec ./spec
COPY --chown=potatomesh:potatomesh web/public ./public
COPY --chown=potatomesh:potatomesh web/views ./views
COPY --chown=potatomesh:potatomesh web/scripts ./scripts
COPY --chown=potatomesh:potatomesh web/pages ./pages

# Copy SQL schema files from data directory
COPY --chown=potatomesh:potatomesh data/*.sql /data/
@@ -84,7 +85,8 @@ COPY --chown=potatomesh:potatomesh data/mesh_ingestor/decode_payload.py /app/dat
# Create data and configuration directories with correct ownership
RUN mkdir -p /app/.local/share/potato-mesh \
    && mkdir -p /app/.config/potato-mesh/well-known \
    && chown -R potatomesh:potatomesh /app/.local/share /app/.config
    && mkdir -p /app/pages \
    && chown -R potatomesh:potatomesh /app/.local/share /app/.config /app/pages

# Switch to non-root user
USER potatomesh

@@ -20,6 +20,8 @@ gem "sqlite3", "~> 1.7"
gem "rackup", "~> 2.2"
gem "puma", "~> 7.0"
gem "prometheus-client"
gem "kramdown", "~> 2.4"
gem "kramdown-parser-gfm", "~> 1.1"

group :test do
  gem "rspec", "~> 3.12"
@@ -29,3 +31,5 @@ group :test do
  gem "simplecov_json_formatter", "~> 0.1", require: false
  gem "rspec_junit_formatter", "~> 0.6", require: false
end

gem "sanitize", "7.0.0"

@@ -57,6 +57,8 @@ require_relative "application/meshtastic/cipher"
require_relative "application/meshtastic/payload_decoder"
require_relative "application/data_processing"
require_relative "application/filesystem"
require_relative "application/api_cache"
require_relative "application/pages"
require_relative "application/instances"
require_relative "application/routes/api"
require_relative "application/routes/ingest"
@@ -74,6 +76,7 @@ module PotatoMesh
    extend App::Queries
    extend App::DataProcessing
    extend App::Filesystem
    extend App::Pages

    helpers App::Helpers
    include App::Database
@@ -85,6 +88,7 @@ module PotatoMesh
    include App::Queries
    include App::DataProcessing
    include App::Filesystem
    include App::Pages

    register App::Routes::Api
    register App::Routes::Ingest
@@ -210,6 +214,7 @@ SELF_INSTANCE_ID = PotatoMesh::Application::SELF_INSTANCE_ID unless defined?(SEL
  PotatoMesh::App::Prometheus,
  PotatoMesh::App::Queries,
  PotatoMesh::App::DataProcessing,
  PotatoMesh::App::Pages,
].each do |mod|
  Object.include(mod) unless Object < mod
end

@@ -0,0 +1,163 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# frozen_string_literal: true

require "digest"

module PotatoMesh
  module App
    # Thread-safe in-memory cache for serialised API responses.
    #
    # Each entry is stored with a monotonic expiration time and a pre-computed
    # ETag so the route handler can skip recomputing the digest on cache hits.
    #
    # The cache is bounded to {MAX_ENTRIES} to prevent unbounded memory growth
    # from attacker-controlled query parameters. When the limit is reached the
    # oldest entry by insertion order is evicted (LRU-ish via Ruby hash ordering).
    #
    # Invalidation can target a specific prefix (e.g. +"api:nodes:"+) so that an
    # ingest POST to +/api/messages+ does not flush the neighbors cache.
    # A single-flight guard coalesces concurrent misses for the same key so only
    # one thread computes the value while others wait for the result.
    module ApiCache
      # Hard cap on the number of cached entries to prevent memory exhaustion.
      # With the whitelisted protocol values and known limit set, the realistic
      # key space is ~30 entries. 64 provides generous headroom.
      MAX_ENTRIES = 64

      @store = {}
      @inflight = {}
      @mutex = Mutex.new

      class << self
        # Retrieve a cached value or compute and store it.
        #
        # When multiple threads request the same cold key concurrently only one
        # executes the block; the others wait for the result (single-flight).
        #
        # The returned hash contains both +:value+ (the JSON string) and +:etag+
        # (pre-computed weak ETag) so callers can set the header without
        # re-hashing the body.
        #
        # @param key [String] cache key incorporating all relevant query
        #   parameters (limit, protocol, etc.).
        # @param ttl_seconds [Numeric] time-to-live for the cached entry.
        # @yield Computes the value to cache when the entry is missing or
        #   expired. The block should return the serialised JSON string.
        # @return [Hash{Symbol => String}] +:value+ and +:etag+ of the response.
        def fetch(key, ttl_seconds:)
          now = monotonic_now

          @mutex.synchronize do
            entry = @store[key]
            if entry && now < entry[:expires_at]
              return { value: entry[:value], etag: entry[:etag] }
            end

            # Single-flight: if another thread is already computing this key,
            # wait for it to finish and use its result. The loop guards
            # against spurious wakeups from ConditionVariable#wait.
            while @inflight.key?(key)
              cv = @inflight[key]
              cv.wait(@mutex)
              entry = @store[key]
              if entry && monotonic_now < entry[:expires_at]
                return { value: entry[:value], etag: entry[:etag] }
              end
            end

            # Mark this key as in-flight so concurrent requests wait.
            @inflight[key] = ConditionVariable.new
          end

          value = yield
          etag = Digest::MD5.hexdigest(value)

          @mutex.synchronize do
            evict_oldest_if_full
            @store[key] = { value: value, etag: etag, expires_at: monotonic_now + ttl_seconds }
            cv = @inflight.delete(key)
            cv&.broadcast
          end

          { value: value, etag: etag }
        rescue => e
          # On error, unblock any waiters and re-raise.
          @mutex.synchronize do
            cv = @inflight.delete(key)
            cv&.broadcast
          end
          raise e
        end

        # Remove entries whose keys start with any of the given prefixes.
        #
        # Targeted invalidation so that e.g. a messages POST does not flush the
        # neighbors or telemetry caches.
        #
        # @param prefixes [Array<String>] key prefixes to match.
        # @return [void]
        def invalidate_prefix(*prefixes)
          @mutex.synchronize do
            @store.reject! do |key, _|
              prefixes.any? { |p| key.start_with?(p) }
            end
          end
        end

        # Remove all entries from the cache.
        #
        # @return [void]
        def invalidate_all
          @mutex.synchronize { @store.clear }
        end

        # Remove specific entries by exact key.
        #
        # @param keys [Array<String>] cache keys to evict.
        # @return [void]
        def invalidate(*keys)
          @mutex.synchronize do
            keys.each { |k| @store.delete(k) }
          end
        end

        # Return the number of entries currently held in the cache.
        #
        # @return [Integer] entry count.
        def size
          @mutex.synchronize { @store.size }
        end

        private

        # Use the monotonic clock so TTL calculations are immune to wall-clock
        # adjustments (NTP jumps, DST transitions, etc.).
        def monotonic_now
          Process.clock_gettime(Process::CLOCK_MONOTONIC)
        end

        # Evict the oldest entry when the store is at capacity. Ruby hashes
        # preserve insertion order, so +first+ is the oldest key.
        def evict_oldest_if_full
          while @store.size >= MAX_ENTRIES
            oldest_key = @store.each_key.first
            @store.delete(oldest_key)
          end
        end
      end
    end
  end
end
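The cache above is consumed from the route handlers. The following is a minimal usage sketch in a Sinatra-style handler; the route path, cache-key layout, TTL value, and the `build_nodes_json` helper are illustrative assumptions, not part of this diff.

```ruby
# Hypothetical consumer of ApiCache — names below are assumptions.
get "/api/nodes" do
  key = "api:nodes:#{params[:limit]}:#{params[:protocol]}"
  cached = PotatoMesh::App::ApiCache.fetch(key, ttl_seconds: 5) do
    build_nodes_json(params) # expensive query + serialisation runs once per TTL
  end
  etag = %(W/"#{cached[:etag]}")
  # Serve 304 when the client already holds the current body.
  halt 304 if request.env["HTTP_IF_NONE_MATCH"] == etag
  headers "ETag" => etag
  content_type :json
  cached[:value]
end
```

After an ingest write, a targeted `ApiCache.invalidate_prefix("api:nodes:")` would drop only the node responses while leaving other cached endpoints warm.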
@@ -20,6 +20,20 @@ module PotatoMesh
    # Allowed values for the +telemetry_type+ discriminator column.
    VALID_TELEMETRY_TYPES = %w[device environment power air_quality].freeze

    # Half-window (seconds) for the meshcore content-level message dedup
    # in +insert_message+ and the matching one-shot backfill. Set to
    # roughly 3× the observed relay-retransmit delta (~10 s) so genuine
    # clock skew across co-operating ingestors still collapses, while
    # rapid legitimate re-sends ("ack", "ok", "test") ≥30 s apart remain
    # distinct rows. See issue #756 and ``CONTRACTS.md`` for rationale.
    #
    # IMPORTANT: widening this value only takes effect at runtime — the
    # one-shot backfill in +PotatoMesh::App::Database+ is frozen at
    # +MESHCORE_CONTENT_DEDUP_BACKFILL_VERSION+. To re-sweep pre-existing
    # rows that newly fall within an expanded window, bump the backfill
    # version so the migration re-runs on the next deploy.
    MESHCORE_CONTENT_DEDUP_WINDOW_SECONDS = 30

    # Coerce a Ruby boolean into a SQLite integer (1/0) while passing through
    # any other value unchanged. Used when writing boolean node fields.
    #
@@ -171,16 +185,20 @@ module PotatoMesh
      return if existing

      long_name = "#{protocol_display_label(protocol)} #{short_id}"
      default_role = case protocol
                     when "meshcore" then "COMPANION"
                     else "CLIENT_HIDDEN"
                     end
      heard_time = coerce_integer(heard_time)
      inserted = false

      with_busy_retry do
        db.execute(
          <<~SQL,
            INSERT OR IGNORE INTO nodes(node_id,num,short_name,long_name,role,last_heard,first_heard)
            VALUES (?,?,?,?,?,?,?)
            INSERT OR IGNORE INTO nodes(node_id,num,short_name,long_name,role,last_heard,first_heard,protocol)
            VALUES (?,?,?,?,?,?,?,?)
          SQL
          [node_id, node_num, short_id, long_name, "CLIENT_HIDDEN", heard_time, heard_time],
          [node_id, node_num, short_id, long_name, default_role, heard_time, heard_time, protocol],
        )
        inserted = db.changes.positive?
      end
@@ -463,12 +481,17 @@ module PotatoMesh
            AND NOT (COALESCE(nodes.synthetic,0) = 0 AND excluded.synthetic = 1)
        SQL

        # When a real (non-synthetic) node is upserted with a known long
        # name, migrate any synthetic placeholder rows that share that name.
        # This fires when the MeshCore device finally receives the sender's
        # contact advertisement, resolving the placeholder to a real node ID.
        if synthetic == 0 && long_name && !long_name.empty?
          merge_synthetic_nodes(db, node_id, long_name)
        # Reconcile synthetic placeholder rows with their real counterparts
        # whenever a MeshCore node is upserted. Both directions must fire —
        # the arrival order of chat messages vs contact advertisements is
        # not guaranteed and may differ across co-operating ingestors that
        # share this database. See issue #755.
        if protocol == "meshcore" && long_name && !long_name.empty?
          if synthetic == 0
            merge_synthetic_nodes(db, node_id, long_name)
          else
            merge_into_real_node(db, node_id, long_name)
          end
        end
      end
    end
@@ -490,6 +513,17 @@ module PotatoMesh
    # @param long_name [String] long name to match against synthetic rows.
    # @return [void]
    def merge_synthetic_nodes(db, real_node_id, long_name)
      # long_name is user-editable and not unique across pubkeys — two real
      # meshcore devices can legitimately share the same display name. When
      # that happens we cannot tell which real node a given chat-derived
      # synthetic was acting as placeholder for, so any merge would risk
      # mis-attributing messages. Bail out and leave the synthetic intact.
      other_real = db.execute(
        "SELECT 1 FROM nodes WHERE long_name = ? AND synthetic = 0 AND protocol = 'meshcore' AND node_id != ? LIMIT 1",
        [long_name, real_node_id],
      ).first
      return if other_real

      synthetic_ids = db.execute(
        "SELECT node_id FROM nodes WHERE long_name = ? AND synthetic = 1 AND protocol = 'meshcore' AND node_id != ?",
        [long_name, real_node_id],
@@ -507,6 +541,50 @@ module PotatoMesh
      end
    end

    # Reverse of +merge_synthetic_nodes+: when a synthetic placeholder is
    # upserted for a MeshCore sender whose real contact advertisement has
    # already been stored (e.g. by a co-operating ingestor that saw the
    # advertisement first), migrate any messages from the synthetic id to the
    # real id and drop the synthetic row.
    #
    # Fixes duplication bug #755 where a chat-derived synthetic node and a
    # pubkey-derived real node coexisted because the forward merge only fired
    # on real-node upserts and never back-filled late-arriving synthetics.
    #
    # @param db [SQLite3::Database] open database connection.
    # @param synthetic_node_id [String] canonical node ID of the synthetic placeholder being upserted.
    # @param long_name [String] long name to match against existing real rows.
    # @return [void]
    def merge_into_real_node(db, synthetic_node_id, long_name)
      # Index by [0] rather than the hash key so this works whether the db
      # handle was opened with results_as_hash = true or not.
      real_rows = db.execute(
        "SELECT node_id FROM nodes WHERE long_name = ? AND synthetic = 0 AND protocol = 'meshcore' AND node_id != ? LIMIT 2",
        [long_name, synthetic_node_id],
      )
      # Ambiguous name: two distinct real meshcore devices share this
      # long_name. The synthetic placeholder could legitimately represent
      # either, so we cannot pick one without risking mis-attribution. Leave
      # the synthetic in place; an operator can resolve the duplicate
      # manually.
      return if real_rows.length > 1

      row = real_rows.first
      return unless row

      real_node_id = row[0]
      return unless real_node_id

      db.execute(
        "UPDATE messages SET from_id = ? WHERE from_id = ?",
        [real_node_id, synthetic_node_id],
      )
      db.execute(
        "DELETE FROM nodes WHERE node_id = ? AND synthetic = 1",
        [synthetic_node_id],
      )
    end
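Distilled, the upsert hook now picks a merge direction from the row being written, so arrival order no longer matters. A self-contained sketch of that branch (the method names mirror the ones defined above; the node names are invented):

```ruby
# Pure sketch of the direction choice made at upsert time.
def merge_direction(protocol, synthetic, long_name)
  return :none unless protocol == "meshcore" && long_name && !long_name.empty?

  # A real node folds matching synthetics into itself; a synthetic
  # placeholder folds itself into an already-stored real node.
  synthetic.zero? ? :merge_synthetic_nodes : :merge_into_real_node
end

puts merge_direction("meshcore", 0, "Kitchen Node")  # => :merge_synthetic_nodes
puts merge_direction("meshcore", 1, "Kitchen Node")  # => :merge_into_real_node
puts merge_direction("meshtastic", 0, "Other Node")  # => :none
```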
    def require_token!
      token = ENV["API_TOKEN"]
      provided = request.env["HTTP_AUTHORIZATION"].to_s.sub(/^Bearer\s+/i, "")
@@ -1850,6 +1928,59 @@ module PotatoMesh
      ]

      with_busy_retry do
        # Meshcore-only content-level dedup (issue #756). The deterministic
        # message id (``_derive_message_id`` in the Python ingestor) hashes
        # ``sender_timestamp`` among other fields, but the MeshCore library
        # has been observed delivering the same physical packet twice with
        # a rewritten ``sender_timestamp`` (relay/retransmit behaviour).
        # The PK path below cannot catch that — two copies compute two
        # different ids — so we add a narrow content+window pre-check here.
        #
        # Ruby integer ``0`` is truthy, so the ``channel_index`` guard
        # passes for the broadcast channel intentionally; we only skip when
        # the channel is absent/nil. ``from_id`` + non-empty ``text`` keep
        # encrypted or anonymous traffic on the id-PK path.
        #
        # Known race: the SELECT and the downstream INSERT do not share a
        # transaction, so two Puma threads carrying the same content with
        # different ids can both pass the pre-check and both insert. The
        # deploy-time backfill sweeps the survivors; wrapping the pair in
        # ``db.transaction(:immediate)`` is a future tightening if the race
        # is ever observed in production.
        if protocol == "meshcore" && from_id && channel_index && text && !text.to_s.empty?
          # ``channel = ?`` matches the ``channel_index`` bind cleanly
          # because the guard above rejects nil; ``to_id`` may legitimately
          # be nil (rare meshcore fallback), so it keeps ``IS ?`` for a
          # NULL-safe compare.
          duplicate_id = db.get_first_value(
            <<~SQL,
              SELECT id FROM messages
              WHERE protocol = 'meshcore'
                AND from_id = ?
                AND to_id IS ?
                AND channel = ?
                AND text = ?
                AND rx_time BETWEEN ? AND ?
                AND id != ?
              LIMIT 1
            SQL
            [from_id, to_id, channel_index, text,
             rx_time - MESHCORE_CONTENT_DEDUP_WINDOW_SECONDS,
             rx_time + MESHCORE_CONTENT_DEDUP_WINDOW_SECONDS, msg_id],
          )
          if duplicate_id
            debug_log(
              "Skipped meshcore message duplicate",
              context: "data_processing.insert_message",
              new_id: msg_id,
              existing_id: duplicate_id,
              from_id: from_id,
              channel: channel_index,
            )
            return
          end
        end

        existing = db.get_first_row(
          "SELECT from_id, to_id, text, encrypted, lora_freq, modem_preset, channel_name, reply_id, emoji, portnum, ingestor, protocol FROM messages WHERE id = ?",
          [msg_id],

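To make the ±30 s half-window concrete, here is a self-contained sketch of the timestamp arithmetic the `BETWEEN` bounds above encode (the timestamps are made up):

```ruby
# Mirrors rx_time BETWEEN (t - WINDOW) AND (t + WINDOW) from the pre-check.
WINDOW = 30 # seconds, i.e. MESHCORE_CONTENT_DEDUP_WINDOW_SECONDS

def within_dedup_window?(existing_rx_time, new_rx_time, window: WINDOW)
  (new_rx_time - existing_rx_time).abs <= window
end

puts within_dedup_window?(1_700_000_000, 1_700_000_010) # => true:  ~10 s relay retransmit collapses
puts within_dedup_window?(1_700_000_000, 1_700_000_035) # => false: a deliberate re-send stays distinct
```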
@@ -17,6 +17,12 @@
module PotatoMesh
  module App
    module Database
      # Schema-version marker that gates the one-shot #756 meshcore message
      # content-dedup backfill. Stored in SQLite's ``PRAGMA user_version``;
      # bump this constant when a new one-shot migration is appended and
      # check the previous value below to decide whether to skip.
      MESHCORE_CONTENT_DEDUP_BACKFILL_VERSION = 1

      # Column definitions required for environment telemetry support. Each
      # entry pairs the column name with the SQL type used when backfilling
      # legacy databases that pre-date the extended telemetry schema.
@@ -147,6 +153,64 @@ module PotatoMesh
          db.execute("CREATE INDEX IF NOT EXISTS idx_nodes_long_name ON nodes(long_name)")
        end
      end

      # Backfill #747: ensure_unknown_node previously omitted the protocol
      # column and hardcoded role=CLIENT_HIDDEN, causing meshcore placeholder
      # nodes to be stored as meshtastic/CLIENT_HIDDEN. Fix both in one pass.
      if node_columns.include?("protocol")
        db.execute("UPDATE nodes SET protocol = 'meshcore' WHERE long_name LIKE 'Meshcore %' AND protocol = 'meshtastic'")
        db.execute("UPDATE nodes SET role = 'COMPANION' WHERE protocol = 'meshcore' AND role = 'CLIENT_HIDDEN'")
      end

      # Backfill #755: reconcile meshcore synthetic placeholder rows that
      # share a long_name with a real (pubkey-derived) meshcore node.
      # Earlier releases only merged synthetics at real-node upsert time;
      # if a synthetic arrived after the real was already stored (common
      # with co-operating ingestors that share this DB), the duplicate
      # persisted. Migrate messages to the real id, then drop the stray
      # synthetic rows. Idempotent — the EXISTS guards make repeated runs
      # a no-op.
      if node_columns.include?("protocol") && node_columns.include?("synthetic")
        # Only collapse synthetics whose long_name resolves to *exactly*
        # one real meshcore node. When two real devices share a
        # long_name, the placeholder is ambiguous — merging would risk
        # mis-attributing historical chat messages to the wrong radio.
        # Wrapped in a single transaction so that a crash between the
        # UPDATE and DELETE cannot leave messages redirected without the
        # corresponding synthetic row cleared.
        db.transaction do
          db.execute(<<~SQL)
            UPDATE messages
            SET from_id = (
              SELECT real.node_id FROM nodes real
              JOIN nodes synth ON synth.long_name = real.long_name
              WHERE synth.node_id = messages.from_id
                AND synth.synthetic = 1 AND synth.protocol = 'meshcore'
                AND real.synthetic = 0 AND real.protocol = 'meshcore'
              LIMIT 1
            )
            WHERE from_id IN (
              SELECT synth.node_id FROM nodes synth
              WHERE synth.synthetic = 1 AND synth.protocol = 'meshcore'
                AND (
                  SELECT COUNT(*) FROM nodes real
                  WHERE real.long_name = synth.long_name
                    AND real.synthetic = 0 AND real.protocol = 'meshcore'
                ) = 1
            )
          SQL
          db.execute(<<~SQL)
            DELETE FROM nodes
            WHERE synthetic = 1 AND protocol = 'meshcore'
              AND (
                SELECT COUNT(*) FROM nodes real
                WHERE real.long_name = nodes.long_name
                  AND real.synthetic = 0 AND real.protocol = 'meshcore'
                  AND real.node_id != nodes.node_id
              ) = 1
          SQL
        end
      end
    end

    message_table_exists = db.get_first_value(
@@ -193,6 +257,64 @@ module PotatoMesh
      unless reply_index_exists
        db.execute("CREATE INDEX IF NOT EXISTS idx_messages_reply_id ON messages(reply_id)")
      end

      # #756 — partial index backing the meshcore content-dedup lookup in
      # insert_message. Scoped to meshcore so the index stays small even
      # on meshtastic-heavy deployments. ``CREATE … IF NOT EXISTS`` is
      # cheap enough to run on every boot; the one-shot backfill below
      # is gated separately via ``PRAGMA user_version`` so it does not
      # repeat after the first successful pass.
      meshcore_dedup_columns = %w[from_id to_id channel text rx_time protocol]
      if meshcore_dedup_columns.all? { |column| message_columns.include?(column) }
        db.execute(<<~SQL)
          CREATE INDEX IF NOT EXISTS idx_messages_meshcore_content
          ON messages(from_id, channel, rx_time)
          WHERE protocol = 'meshcore'
        SQL

        # #756 backfill — collapse pre-existing meshcore duplicate groups.
        # Keep the earliest (min rx_time, min id) copy in each
        # (from_id, to_id, channel, text) cluster where any two rows are
        # within #{PotatoMesh::App::DataProcessing::MESHCORE_CONTENT_DEDUP_WINDOW_SECONDS} s
        # of each other. Window matches the runtime guard so runtime and
        # backfill behave identically.
        #
        # Gated via ``PRAGMA user_version`` so this expensive self-join
        # runs exactly once after deploy. Post-fix the runtime guard
        # prevents new duplicates from accumulating, so re-running on
        # every boot would scan ``messages`` for no reason.
        current_version = db.get_first_value("PRAGMA user_version").to_i
        if current_version < MESHCORE_CONTENT_DEDUP_BACKFILL_VERSION
          window = PotatoMesh::App::DataProcessing::MESHCORE_CONTENT_DEDUP_WINDOW_SECONDS
          db.transaction do
            # Window bound via ``?`` to match the rest of the codebase's
            # parameter-binding style; the value is a Ruby integer constant
            # so SQL-injection was never at risk here — the switch is
            # purely for consistency. ``PRAGMA user_version`` cannot
            # accept bind params, so it keeps literal interpolation of
            # an internal constant.
            db.execute(<<~SQL, [window])
              DELETE FROM messages
              WHERE protocol = 'meshcore'
                AND text IS NOT NULL AND text != ''
                AND from_id IS NOT NULL
                AND EXISTS (
                  SELECT 1 FROM messages AS earlier
                  WHERE earlier.protocol = 'meshcore'
                    AND earlier.from_id = messages.from_id
                    AND earlier.to_id IS messages.to_id
                    AND earlier.channel IS messages.channel
                    AND earlier.text = messages.text
                    AND messages.rx_time - earlier.rx_time >= 0
                    AND messages.rx_time - earlier.rx_time <= ?
                    AND (earlier.rx_time < messages.rx_time
                         OR earlier.id < messages.id)
                )
            SQL
            db.execute("PRAGMA user_version = #{MESHCORE_CONTENT_DEDUP_BACKFILL_VERSION}")
          end
        end
      end
    end

    tables = db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='instances'").flatten
@@ -209,6 +331,17 @@ module PotatoMesh

    unless instance_columns.include?("nodes_count")
      db.execute("ALTER TABLE instances ADD COLUMN nodes_count INTEGER")
      instance_columns << "nodes_count"
    end

    unless instance_columns.include?("meshcore_nodes_count")
      db.execute("ALTER TABLE instances ADD COLUMN meshcore_nodes_count INTEGER")
      instance_columns << "meshcore_nodes_count"
    end

    unless instance_columns.include?("meshtastic_nodes_count")
      db.execute("ALTER TABLE instances ADD COLUMN meshtastic_nodes_count INTEGER")
      instance_columns << "meshtastic_nodes_count"
    end

    telemetry_tables =

@@ -63,7 +63,11 @@ module PotatoMesh
|
||||
def self_instance_attributes
|
||||
domain = self_instance_domain
|
||||
last_update = latest_node_update_timestamp || Time.now.to_i
|
||||
nodes_count = active_node_count_since(Time.now.to_i - PotatoMesh::Config.remote_instance_max_node_age)
|
||||
cutoff = Time.now.to_i - PotatoMesh::Config.remote_instance_max_node_age
|
||||
db = open_database(readonly: true)
|
||||
nodes_count = active_node_count_since(cutoff, db: db)
|
||||
mc_count = active_node_count_since_for_protocol(cutoff, "meshcore", db: db)
|
||||
mt_count = active_node_count_since_for_protocol(cutoff, "meshtastic", db: db)
|
||||
{
|
||||
id: app_constant(:SELF_INSTANCE_ID),
|
||||
domain: domain,
|
||||
@@ -78,7 +82,11 @@ module PotatoMesh
|
||||
is_private: private_mode?,
|
||||
contact_link: sanitized_contact_link,
|
||||
nodes_count: nodes_count,
|
||||
meshcore_nodes_count: mc_count,
|
||||
meshtastic_nodes_count: mt_count,
|
||||
}
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
# Count the number of nodes active since the supplied timestamp.
|
||||
@@ -107,6 +115,39 @@ module PotatoMesh
|
||||
handle&.close unless db
|
||||
end
|
||||
|
||||
# Count the number of nodes for a specific protocol active since the
|
||||
# supplied timestamp.
|
||||
#
|
||||
# @param cutoff [Integer] unix timestamp in seconds.
|
||||
# @param protocol [String] protocol name (e.g. "meshcore", "meshtastic").
|
||||
# @param db [SQLite3::Database, nil] optional open handle to reuse.
|
||||
# @return [Integer, nil] node count or nil when unavailable.
|
||||
def active_node_count_since_for_protocol(cutoff, protocol, db: nil)
|
||||
return nil unless cutoff && protocol
|
||||
|
||||
handle = db || open_database(readonly: true)
|
||||
count =
|
||||
with_busy_retry do
|
||||
handle.get_first_value(
|
||||
"SELECT COUNT(*) FROM nodes WHERE last_heard >= ? AND protocol = ?",
|
||||
cutoff.to_i,
|
||||
protocol,
|
||||
)
|
||||
end
|
||||
Integer(count)
|
||||
rescue SQLite3::Exception, ArgumentError => e
|
||||
warn_log(
|
||||
"Failed to count active nodes for protocol",
|
||||
context: "instances.protocol_nodes_count",
|
||||
protocol: protocol,
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
nil
|
||||
ensure
|
||||
handle&.close unless db
|
||||
end
|
||||
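A hedged usage sketch of the counters above, sharing one read-only handle the way `self_instance_attributes` now does (the 24 h window is illustrative):

```ruby
cutoff = Time.now.to_i - 86_400 # illustrative 24 h window
db = open_database(readonly: true)
begin
  total = active_node_count_since(cutoff, db: db)
  mc = active_node_count_since_for_protocol(cutoff, "meshcore", db: db)
  mt = active_node_count_since_for_protocol(cutoff, "meshtastic", db: db)
  # mc + mt can be below total when rows lack a protocol value; either
  # per-protocol count may also be nil after a logged query failure.
ensure
  db&.close
end
```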

def sign_instance_attributes(attributes)
payload = canonical_instance_payload(attributes)
Base64.strict_encode64(
@@ -128,6 +169,9 @@ module PotatoMesh
"lastUpdateTime" => attributes[:last_update_time],
"isPrivate" => attributes[:is_private],
"contactLink" => attributes[:contact_link],
"nodesCount" => attributes[:nodes_count],
"meshcoreNodesCount" => attributes[:meshcore_nodes_count],
"meshtasticNodesCount" => attributes[:meshtastic_nodes_count],
"signature" => signature,
}
payload.reject { |_, value| value.nil? }
@@ -297,9 +341,12 @@ module PotatoMesh
def shutdown_federation_background_work!(timeout: nil)
request_federation_shutdown!
timeout_value = timeout || PotatoMesh::Config.federation_shutdown_timeout_seconds
# Drain the worker pool first so federation threads blocked in
# wait_for_federation_tasks unblock promptly instead of waiting
# for each task's individual timeout to expire.
shutdown_federation_worker_pool!
stop_federation_thread!(:initial_federation_thread, timeout: timeout_value)
stop_federation_thread!(:federation_thread, timeout: timeout_value)
shutdown_federation_worker_pool!
clear_federation_crawl_state!
end

@@ -377,6 +424,13 @@ module PotatoMesh
db&.close
end

# Announce the local instance record to a remote federation peer,
# cycling through resolved IP addresses when transport-level failures
# occur.
#
# @param domain [String] remote peer hostname.
# @param payload_json [String] JSON-encoded announcement body.
# @return [Boolean] true when the announcement was accepted.
def announce_instance_to_domain(domain, payload_json)
return false unless domain && !domain.empty?
return false if federation_shutdown_requested?
@@ -387,14 +441,7 @@ module PotatoMesh
break false if federation_shutdown_requested?

begin
http = build_remote_http_client(uri)
response = Timeout.timeout(PotatoMesh::Config.remote_instance_request_timeout) do
http.start do |connection|
request = build_federation_http_request(Net::HTTP::Post, uri)
request.body = payload_json
connection.request(request)
end
end
response = perform_announce_request(uri, payload_json)
if response.is_a?(Net::HTTPSuccess)
debug_log(
"Published federation announcement",
@@ -448,6 +495,55 @@ module PotatoMesh
published
end

# Execute a POST announcement request against the supplied URI, cycling
# through resolved IP addresses on connection-level failures.
#
# @param uri [URI::Generic] target endpoint.
# @param payload_json [String] JSON-encoded announcement body.
# @return [Net::HTTPResponse] the HTTP response from the first reachable address.
# @raise [StandardError] when all addresses fail or a non-retryable error occurs.
def perform_announce_request(uri, payload_json)
remote_addresses = sort_addresses_for_connection(resolve_remote_ip_addresses(uri))
addresses = remote_addresses.empty? ? [nil] : remote_addresses

last_error = nil
addresses.each do |address|
break if federation_shutdown_requested?

begin
return perform_single_announce_request(uri, payload_json, ip_address: address&.to_s)
rescue StandardError => e
if connection_refused_or_unreachable?(e)
last_error = e
else
raise
end
end
end

raise(last_error || StandardError.new("all resolved addresses failed"))
end

# Execute a single POST announcement request, optionally pinning the
# connection to a specific IP address.
#
# @param uri [URI::Generic] target endpoint.
# @param payload_json [String] JSON-encoded announcement body.
# @param ip_address [String, nil] resolved IP address to pin the
# connection to, or +nil+ to let {build_remote_http_client} resolve.
# @return [Net::HTTPResponse] the HTTP response.
# @raise [StandardError] when the request fails.
def perform_single_announce_request(uri, payload_json, ip_address: nil)
http = build_remote_http_client(uri, ip_address: ip_address)
Timeout.timeout(PotatoMesh::Config.remote_instance_request_timeout) do
http.start do |connection|
request = build_federation_http_request(Net::HTTP::Post, uri)
request.body = payload_json
connection.request(request)
end
end
end

# Determine whether an HTTPS announcement failure should fall back to HTTP.
#
# @param error [StandardError] failure raised while attempting HTTPS.
@@ -463,6 +559,34 @@ module PotatoMesh
false
end

# Determine whether an error indicates a transport-level connection
# failure that may succeed on an alternative resolved address.
#
# Connection refusals, host/network unreachable errors, and TCP open
# timeouts signal that the selected IP address cannot be reached but
# do not rule out alternative addresses for the same hostname.
#
# @param error [StandardError] failure raised during the connection attempt.
# @return [Boolean] true when a retry with a different address is warranted.
def connection_refused_or_unreachable?(error)
retryable_classes = [
Errno::ECONNREFUSED,
Errno::EHOSTUNREACH,
Errno::ENETUNREACH,
Errno::ECONNRESET,
Errno::ETIMEDOUT,
Net::OpenTimeout,
]
current = error
while current
return true if retryable_classes.any? { |klass| current.is_a?(klass) }

current = current.respond_to?(:cause) ? current.cause : nil
end

false
end
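Because the predicate walks the exception's `cause` chain, a refusal that was re-raised under a wrapper class is still classified as retryable. An illustration (run in the app's own context):

```ruby
wrapped =
  begin
    begin
      raise Errno::ECONNREFUSED
    rescue Errno::ECONNREFUSED
      raise RuntimeError, "announce failed" # original error becomes #cause
    end
  rescue RuntimeError => e
    e
  end

connection_refused_or_unreachable?(wrapped)            # => true (matched via cause)
connection_refused_or_unreachable?(OpenSSL::SSL::SSLError.new) # => false, raised immediately
```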

def announce_instance_to_all_domains
return unless federation_enabled?
return if federation_shutdown_requested?
@@ -661,10 +785,57 @@ module PotatoMesh
[]
end

# Execute a GET request against the supplied federation URI, cycling
# through resolved IP addresses when a transport-level connection
# failure occurs.
#
# DNS resolution is performed once and the resulting addresses are
# sorted with IPv4 first via {sort_addresses_for_connection}. Each
# address is attempted sequentially; when a connection-level error
# (refused, unreachable, timeout) is raised the next address is tried.
# Non-connection errors (SSL failures, HTTP-level errors) are raised
# immediately without trying further addresses.
#
# @param uri [URI::Generic] target endpoint to request.
# @return [String] raw HTTP response body on success.
# @raise [InstanceFetchError] when all addresses are exhausted or a
# non-retryable error occurs.
def perform_instance_http_request(uri)
raise InstanceFetchError, "federation shutdown requested" if federation_shutdown_requested?

http = build_remote_http_client(uri)
remote_addresses = sort_addresses_for_connection(resolve_remote_ip_addresses(uri))
addresses = remote_addresses.empty? ? [nil] : remote_addresses

last_error = nil
addresses.each do |address|
break if federation_shutdown_requested?

begin
return perform_single_http_request(uri, ip_address: address&.to_s)
rescue InstanceFetchError => e
if connection_refused_or_unreachable?(e)
last_error = e
else
raise
end
end
end

raise last_error || InstanceFetchError.new("all resolved addresses failed")
rescue ArgumentError => e
raise_instance_fetch_error(e)
end

# Execute a single HTTP GET request against the supplied URI, optionally
# pinning the connection to a specific IP address.
#
# @param uri [URI::Generic] target endpoint.
# @param ip_address [String, nil] resolved IP address to pin the
# connection to, or +nil+ to let {build_remote_http_client} resolve.
# @return [String] raw HTTP response body.
# @raise [InstanceFetchError] when the request fails.
def perform_single_http_request(uri, ip_address: nil)
http = build_remote_http_client(uri, ip_address: ip_address)
Timeout.timeout(PotatoMesh::Config.remote_instance_request_timeout) do
http.start do |connection|
request = build_federation_http_request(Net::HTTP::Get, uri)
@@ -1094,6 +1265,14 @@ module PotatoMesh
)
attributes[:nodes_count] = stats_count if stats_count

# Extract per-protocol 24h counts (informational, not signed).
if stats_payload.is_a?(Hash)
mc_day = stats_payload.dig("meshcore", "day")
mt_day = stats_payload.dig("meshtastic", "day")
attributes[:meshcore_nodes_count] = coerce_integer(mc_day) if mc_day
attributes[:meshtastic_nodes_count] = coerce_integer(mt_day) if mt_day
end

nodes_since_path = "/api/nodes?since=#{recent_cutoff}&limit=1000"
nodes_since_window, nodes_since_metadata = fetch_instance_json(attributes[:domain], nodes_since_path)
if stats_count.nil? && attributes[:nodes_count].nil? && nodes_since_window.is_a?(Array)
@@ -1194,15 +1373,41 @@ module PotatoMesh
unrestricted_addresses
end

# Sort resolved addresses so that IPv4 precedes IPv6.
#
# Federation peers with dual-stack DNS may publish addresses where one
# family is unreachable. Placing IPv4 entries first mirrors the
# preference used by {discover_local_ip_address} and improves the
# likelihood that the first connection attempt succeeds.
#
# @param addresses [Array<IPAddr>] resolved IP address list.
# @return [Array<IPAddr>] addresses sorted with IPv4 entries before IPv6.
def sort_addresses_for_connection(addresses)
return addresses if addresses.nil? || addresses.length <= 1

v4, v6 = addresses.partition { |ip| !ip.ipv6? }
v4 + v6
end
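Behaviour of the sort, illustrated (`partition` preserves relative order within each family):

```ruby
require "ipaddr"

addresses = [IPAddr.new("2001:db8::1"), IPAddr.new("192.0.2.10")]
sort_addresses_for_connection(addresses).map(&:to_s)
# => ["192.0.2.10", "2001:db8::1"] (the IPv4 address is dialled first)
```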

# Build an HTTP client configured for communication with a remote instance.
#
# When +ip_address+ is supplied the client is pinned to that specific
# address, bypassing DNS resolution. Callers that iterate over
# multiple resolved addresses should pass each candidate in turn.
#
# @param uri [URI::Generic] target URI describing the remote endpoint.
# @param ip_address [String, nil] explicit IP address to connect to,
# or +nil+ to resolve via DNS and use the first result.
# @return [Net::HTTP] HTTP client ready to execute the request.
def build_remote_http_client(uri)
remote_addresses = resolve_remote_ip_addresses(uri)
def build_remote_http_client(uri, ip_address: nil)
http = Net::HTTP.new(uri.host, uri.port)
if http.respond_to?(:ipaddr=) && remote_addresses.any?
http.ipaddr = remote_addresses.first.to_s
if ip_address
http.ipaddr = ip_address if http.respond_to?(:ipaddr=)
else
remote_addresses = resolve_remote_ip_addresses(uri)
if http.respond_to?(:ipaddr=) && remote_addresses.any?
http.ipaddr = remote_addresses.first.to_s
end
end
http.open_timeout = PotatoMesh::Config.remote_instance_http_timeout
http.read_timeout = PotatoMesh::Config.remote_instance_read_timeout
@@ -1395,8 +1600,9 @@ module PotatoMesh
sql = <<~SQL
INSERT INTO instances (
id, domain, pubkey, name, version, channel, frequency,
latitude, longitude, last_update_time, is_private, nodes_count, contact_link, signature
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
latitude, longitude, last_update_time, is_private, nodes_count,
meshcore_nodes_count, meshtastic_nodes_count, contact_link, signature
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT(id) DO UPDATE SET
domain=excluded.domain,
pubkey=excluded.pubkey,
@@ -1408,7 +1614,9 @@ module PotatoMesh
longitude=excluded.longitude,
last_update_time=excluded.last_update_time,
is_private=excluded.is_private,
nodes_count=excluded.nodes_count,
nodes_count=COALESCE(excluded.nodes_count, instances.nodes_count),
meshcore_nodes_count=COALESCE(excluded.meshcore_nodes_count, instances.meshcore_nodes_count),
meshtastic_nodes_count=COALESCE(excluded.meshtastic_nodes_count, instances.meshtastic_nodes_count),
contact_link=excluded.contact_link,
signature=excluded.signature
SQL
@@ -1427,6 +1635,8 @@ module PotatoMesh
attributes[:last_update_time],
attributes[:is_private] ? 1 : 0,
nodes_count,
coerce_integer(attributes[:meshcore_nodes_count]),
coerce_integer(attributes[:meshtastic_nodes_count]),
attributes[:contact_link],
signature,
]

@@ -71,6 +71,13 @@ module PotatoMesh
# Symbols and Dingbats (U+2600–U+27BF), and Miscellaneous Symbols and
# Arrows (U+2B00–U+2BFF).
#
# Matching is intentionally single-codepoint: callers iterate grapheme
# clusters first and then test each cluster against this pattern, so
# multi-codepoint emoji (country flags like 🇩🇪 = 🇩 + 🇪, ZWJ family
# sequences like 👨👩👧, skin-tone modifiers like 👍🏽, the rainbow flag
# 🏳️🌈) come through intact instead of being shredded into their
# component codepoints.
#
# @type [Regexp]
MESHCORE_COMPANION_EMOJI_PATTERN = /[\u{1F000}-\u{1FFFF}\u{2600}-\u{27BF}\u{2B00}-\u{2BFF}]/u

@@ -79,17 +86,20 @@ module PotatoMesh
# richer, human-readable variant for the API layer without touching the DB.
#
# Algorithm (applied in priority order):
# 1. If the long name contains an emoji character (see
# +MESHCORE_COMPANION_EMOJI_PATTERN+), use the first emoji embedded in a
# 4-column display slot: ``" E "`` (one leading space, emoji, one trailing
# space). Emoji are rendered double-width in monospace fonts, so one leading
# space keeps the badge at four visual columns.
# 2. If the long name contains two or more whitespace-separated words, use
# the capitalised first letters of the first two words: ``" XY "``.
# 3. If the long name is a single word, use its capitalised first letter:
# ``" A "``.
# 4. Return +nil+ when no short name can be derived (blank input, or a
# word without extractable characters).
# 1. If the long name contains an emoji grapheme cluster (anchored by
# +MESHCORE_COMPANION_EMOJI_PATTERN+), use that whole cluster in a
# 4-column display slot: ``" E "`` (one leading space, emoji, one
# trailing space). Emoji are rendered double-width in monospace fonts,
# so one leading space keeps the badge at four visual columns.
# Iterating grapheme clusters (rather than raw codepoints) preserves
# multi-codepoint sequences such as country flags 🇩🇪, ZWJ families
# 👨👩👧, and skin-tone-modified thumbs 👍🏽.
# 2. If the long name contains two or more whitespace-separated words,
# use the capitalised first letters of the first two words: ``" XY "``.
# 3. Return +nil+ — single-word names fall back to the raw short name
# stored in the database (typically the first two bytes of the node
# ID). A single initial looked poor and carried no more information
# than the raw value.
#
# @param long_name [String, nil] long name stored on the node.
# @return [String, nil] derived display short name or +nil+.
@@ -97,10 +107,12 @@ module PotatoMesh
name = string_or_nil(long_name)
return nil unless name

emoji = name.scan(MESHCORE_COMPANION_EMOJI_PATTERN).first
emoji_cluster = name.each_grapheme_cluster.find do |cluster|
cluster.match?(MESHCORE_COMPANION_EMOJI_PATTERN)
end
# Wide emoji occupies two display columns, so use one leading space and
# one trailing space to stay within the four-column badge width.
return " #{emoji} " if emoji
return " #{emoji_cluster} " if emoji_cluster

words = name.strip.split(/\s+/).reject(&:empty?)
return nil if words.empty?
@@ -111,8 +123,7 @@ module PotatoMesh
return " #{first}#{second} " if first && second
end

letter = words[0][0]&.upcase
letter ? " #{letter} " : nil
nil
end
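A console illustration of why the grapheme-cluster walk replaced `scan` for flag emoji (a flag is two regional-indicator codepoints):

```ruby
name = "Node 🇩🇪 Berlin"

name.scan(MESHCORE_COMPANION_EMOJI_PATTERN).first
# => "🇩" (scan splits the flag into a bare regional indicator)

name.each_grapheme_cluster.find { |c| c.match?(MESHCORE_COMPANION_EMOJI_PATTERN) }
# => "🇩🇪" (the whole cluster survives, so the badge renders the flag)
```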

# Recursively coerce hash keys to strings and normalise nested arrays.

@@ -144,6 +144,8 @@ module PotatoMesh
"lastUpdateTime" => last_update_time,
"isPrivate" => private_flag,
"nodesCount" => coerce_integer(row["nodes_count"]),
"meshcoreNodesCount" => coerce_integer(row["meshcore_nodes_count"]),
"meshtasticNodesCount" => coerce_integer(row["meshtastic_nodes_count"]),
"contactLink" => string_or_nil(row["contact_link"]),
"signature" => signature,
}
@@ -175,7 +177,8 @@ module PotatoMesh
min_last_update_time = now - PotatoMesh::Config.week_seconds
sql = <<~SQL
SELECT id, domain, pubkey, name, version, channel, frequency,
latitude, longitude, last_update_time, is_private, nodes_count, contact_link, signature
latitude, longitude, last_update_time, is_private, nodes_count,
meshcore_nodes_count, meshtastic_nodes_count, contact_link, signature
FROM instances
WHERE domain IS NOT NULL AND TRIM(domain) != ''
AND pubkey IS NOT NULL AND TRIM(pubkey) != ''

@@ -0,0 +1,226 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# frozen_string_literal: true

require "kramdown"
require "kramdown-parser-gfm"
require "sanitize"

module PotatoMesh
module App
# Discovers, parses, and renders operator-managed Markdown pages from the
# configured pages directory. Files are named with an optional numeric
# prefix for ordering (e.g. +1-about.md+, +9-contact.md+) and exposed as
# navigable routes under +/pages/:slug+.
module Pages
module_function

# Lightweight value object describing a single static page discovered on
# disk. Fields are populated by {parse_page_filename} and consumed by
# route handlers and layout templates.
#
# @!attribute [r] sort_key
# @return [String] filename stem used for alphabetical ordering.
# @!attribute [r] slug
# @return [String] URL-safe identifier derived from the filename.
# @!attribute [r] title
# @return [String] human-readable nav label.
# @!attribute [r] path
# @return [String] absolute filesystem path to the Markdown source.
PageEntry = Struct.new(:sort_key, :slug, :title, :path, keyword_init: true)

# Pattern matching a safe slug segment: lowercase alphanumeric words
# separated by single hyphens. Used to validate both parsed slugs and
# incoming route parameters.
SLUG_PATTERN = /\A[a-z0-9]+(-[a-z0-9]+)*\z/

# Pattern used to split a page filename into an optional numeric sort
# prefix and the slug portion.
FILENAME_PATTERN = /\A(\d+)-(.+)\z/

# Maximum number of pages loaded from disk. Prevents accidental
# directory-bomb scenarios from consuming unbounded memory.
MAX_PAGES = 50

# Kramdown options shared across all page renders.
KRAMDOWN_OPTIONS = {
input: "GFM",
hard_wrap: false,
}.freeze

# HTML tags allowed in rendered markdown output. Tags not in this list
# are stripped after rendering to prevent XSS from operator content.
ALLOWED_TAGS = Set.new(%w[
h1 h2 h3 h4 h5 h6 p a em strong b i u s del code pre br hr
ul ol li dl dt dd blockquote table thead tbody tfoot tr th td
img span div sup sub abbr mark small details summary
]).freeze

@pages_cache = nil
@pages_cache_mutex = Mutex.new

# Parse a Markdown filename into a {PageEntry} without the filesystem
# path populated.
#
# Filenames are expected to follow the pattern +<digits>-<slug>.md+ where
# the numeric prefix controls navigation order. Files without a prefix
# are accepted, using the full stem as both sort key and slug.
#
# @param basename [String] bare filename (e.g. +"9-contact.md"+).
# @return [PageEntry, nil] parsed entry or +nil+ when the filename is
# invalid or contains an unsafe slug.
def parse_page_filename(basename)
stem = basename.sub(/\.md\z/i, "")
return nil if stem.empty?

match = stem.match(FILENAME_PATTERN)
if match
slug = match[2].downcase
sort_key = stem
else
slug = stem.downcase
sort_key = stem
end

return nil unless slug.match?(SLUG_PATTERN)

title = slug.split("-").map(&:capitalize).join(" ")
PageEntry.new(sort_key: sort_key, slug: slug, title: title, path: nil)
end
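Round-trip examples for the parser (struct output shown informally):

```ruby
parse_page_filename("9-contact.md")
# => PageEntry(sort_key: "9-contact", slug: "contact", title: "Contact", path: nil)

parse_page_filename("about.md")
# => PageEntry(sort_key: "about", slug: "about", title: "About", path: nil)

parse_page_filename("About Us.md")
# => nil (the space fails SLUG_PATTERN, so the file is skipped)
```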

# Scan the pages directory and return a sorted list of page entries.
#
# The directory is read once per call; results are not cached here (see
# {static_pages} for the cached interface). Non-+.md+ files and entries
# with invalid filenames are silently skipped.
#
# @param directory [String] absolute path to the pages directory.
# @return [Array<PageEntry>] frozen, sort-key-ordered list of pages.
def load_static_pages(directory = PotatoMesh::Config.pages_directory)
return [].freeze unless directory && File.directory?(directory)

entries = Dir.glob(File.join(directory, "*.md")).filter_map do |path|
basename = File.basename(path)
entry = parse_page_filename(basename)
next unless entry

PageEntry.new(
sort_key: entry.sort_key,
slug: entry.slug,
title: entry.title,
path: path,
)
end

entries.sort_by!(&:sort_key)
entries.uniq!(&:slug)
entries.take(MAX_PAGES).freeze
end

# Return the current set of static pages, reloading from disk when the
# cache has expired.
#
# The TTL is short in non-production environments (1 second) so that
# newly added files appear almost immediately during development.
#
# @return [Array<PageEntry>] cached page entries.
def static_pages
@pages_cache_mutex.synchronize do
if @pages_cache.nil? || Time.now > @pages_cache[:expires_at]
ttl = production_environment? ? 60 : 1
@pages_cache = {
entries: load_static_pages,
expires_at: Time.now + ttl,
}
end
@pages_cache[:entries]
end
end

# Look up a page entry by its URL slug.
#
# @param slug [String] URL slug to search for.
# @return [PageEntry, nil] matching entry or +nil+.
def find_page_by_slug(slug)
static_pages.find { |entry| entry.slug == slug }
end

# Read and render a page's Markdown source to HTML.
#
# Files exceeding {Config.max_page_file_bytes} are rejected to guard
# against accidental out-of-memory conditions. Raw HTML blocks are
# disabled at the parser level to prevent XSS.
#
# @param page_entry [PageEntry] entry whose +path+ points to the source.
# @return [String, nil] sanitised HTML string, or +nil+ when the file
# cannot be read.
def render_page_content(page_entry)
return nil unless page_entry&.path
return nil unless File.file?(page_entry.path) && File.readable?(page_entry.path)

size = File.size(page_entry.path)
return nil if size > PotatoMesh::Config.max_page_file_bytes

content = File.read(page_entry.path, encoding: "utf-8")
raw_html = Kramdown::Document.new(content, **KRAMDOWN_OPTIONS).to_html
strip_unsafe_html(raw_html)
rescue SystemCallError
nil
end

# Remove HTML tags not present in {ALLOWED_TAGS} and strip dangerous
# attributes (event handlers, javascript: URIs) from the rendered output.
# This provides a safety net against XSS when operators include raw HTML
# in their Markdown source.
#
# @param html [String] raw HTML produced by kramdown.
# @return [String] HTML with disallowed tags and attributes stripped.
def strip_unsafe_html(html)
# Delegate to the sanitize gem for robust HTML and attribute
# sanitization instead of relying on ad-hoc regular expressions.
Sanitize.fragment(
html,
elements: ALLOWED_TAGS,
attributes: {
:all => %w[id class title alt],
"a" => %w[href],
"img" => %w[src width height loading decoding],
},
protocols: {
"a" => { "href" => ["http", "https", "mailto"] },
"img" => { "src" => ["http", "https"] },
},
)
end
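Effect of the sanitiser on hostile operator input, assuming the configuration above (the sanitize gem also drops the contents of `script` elements by default):

```ruby
strip_unsafe_html('<p onclick="steal()">hi <script>alert(1)</script></p>')
# => "<p>hi </p>" (event handler and disallowed tag both dropped)

strip_unsafe_html('<a href="javascript:alert(1)" title="x">link</a>')
# => "<a title=\"x\">link</a>" (non-http/https/mailto hrefs are stripped)
```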

# Invalidate the in-memory page cache so the next call to
# {static_pages} re-scans the directory. Intended for test teardown.
#
# @return [void]
def clear_pages_cache!
@pages_cache_mutex.synchronize { @pages_cache = nil }
end

# Determine whether the application is running in a production-like
# environment.
#
# @return [Boolean] true when +RACK_ENV+ or +APP_ENV+ is +"production"+.
def production_environment?
%w[production].include?(ENV.fetch("RACK_ENV", nil)) ||
%w[production].include?(ENV.fetch("APP_ENV", nil))
end
end
end
end

@@ -64,6 +64,12 @@ module PotatoMesh
SQL
params << limit
rows = db.execute(sql, params)

# Batch-resolve all unique from_id values to canonical node_ids in a
# single query instead of issuing 1-2 SELECTs per message row.
raw_from_ids = rows.filter_map { |r| string_or_nil(r["from_id"]&.to_s&.strip) }.uniq
canonical_map = batch_resolve_node_ids(db, raw_from_ids)

rows.each do |r|
r.delete_if { |key, _| key.is_a?(Integer) }
r["reply_id"] = coerce_integer(r["reply_id"]) if r.key?("reply_id")
@@ -81,7 +87,7 @@ module PotatoMesh
)
end

canonical_from_id = string_or_nil(normalize_node_id(db, r["from_id"]))
canonical_from_id = canonical_map[r["from_id"]&.to_s&.strip]
node_id = canonical_from_id || string_or_nil(r["from_id"])

if canonical_from_id

@@ -133,6 +133,57 @@ module PotatoMesh
coerced
end

# Resolve a collection of raw node reference strings to their canonical
# +node_id+ values in a single batch query. This avoids the N+1 pattern
# of calling +normalize_node_id+ once per row.
#
# @param db [SQLite3::Database] open database handle.
# @param refs [Array<String>] raw node identifiers (hex strings or numeric
# strings) to resolve.
# @return [Hash{String => String}] mapping from each input reference to its
# canonical +node_id+, omitting entries that could not be resolved.
def batch_resolve_node_ids(db, refs)
return {} if refs.nil? || refs.empty?

result = {}
string_refs = []
numeric_refs = []

refs.each do |ref|
next if ref.nil? || ref.strip.empty?
string_refs << ref.strip
begin
numeric_refs << Integer(ref.strip, 10)
rescue ArgumentError
# not a numeric reference — skip the numeric branch
end
end

# Batch lookup by node_id (string match)
unless string_refs.empty?
placeholders = Array.new(string_refs.length, "?").join(", ")
rows = db.execute("SELECT node_id FROM nodes WHERE node_id IN (#{placeholders})", string_refs)
rows.each do |row|
nid = row.is_a?(Hash) ? row["node_id"] : row[0]
result[nid] = nid if nid
end
end

# Batch lookup by num (numeric match) for refs not yet resolved
unresolved_numeric = numeric_refs.select { |n| !result.key?(n.to_s) }
unless unresolved_numeric.empty?
placeholders = Array.new(unresolved_numeric.length, "?").join(", ")
rows = db.execute("SELECT node_id, num FROM nodes WHERE num IN (#{placeholders})", unresolved_numeric)
rows.each do |row|
nid = row.is_a?(Hash) ? row["node_id"] : row[0]
num = row.is_a?(Hash) ? row["num"] : row[1]
result[num.to_s] = nid if nid && num
end
end

result
end
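Usage follows the `query_messages` hunk above: two `IN` queries replace up to two lookups per row. A sketch of the caller side:

```ruby
raw_from_ids = rows.filter_map { |r| string_or_nil(r["from_id"]&.to_s&.strip) }.uniq
canonical_map = batch_resolve_node_ids(db, raw_from_ids) # two queries total

rows.each do |r|
  canonical = canonical_map[r["from_id"]&.to_s&.strip]
  r["node_id"] = canonical || string_or_nil(r["from_id"]) # fall back to raw ref
end
```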

# Normalise a caller-supplied timestamp for API pagination windows.
#
# @param since [Object] requested lower bound expressed as seconds since the epoch.

@@ -37,7 +37,7 @@ module PotatoMesh
params << since_threshold

if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"], db: db)
return [] unless clause
where_clauses << clause.first
params.concat(clause.last)

@@ -70,11 +70,42 @@ module PotatoMesh
}
end

def node_lookup_clause(node_ref, string_columns:, numeric_columns: [])
# Build a WHERE clause fragment for looking up a node across one or more
# columns. When +numeric_columns+ are provided together with an open +db+
# handle the numeric identifiers are resolved to canonical +node_id+
# strings up-front so the resulting SQL uses only string-column +IN+
# predicates. This avoids an +OR+ across heterogeneous columns which
# prevents SQLite from choosing the optimal index.
#
# @param node_ref [String, Integer, nil] raw node reference from the request.
# @param string_columns [Array<String>] SQL column names holding string identifiers.
# @param numeric_columns [Array<String>] SQL column names holding numeric identifiers.
# @param db [SQLite3::Database, nil] open database handle used to resolve
# numeric IDs to canonical strings. When provided and +numeric_columns+
# is non-empty the numeric branch is folded into the string branch.
# @return [Array(String, Array), nil] SQL fragment and bind parameters, or
# +nil+ when no lookup can be constructed.
def node_lookup_clause(node_ref, string_columns:, numeric_columns: [], db: nil)
tokens = node_reference_tokens(node_ref)
string_values = tokens[:string_values]
numeric_values = tokens[:numeric_values]

# When a database handle is available, resolve numeric identifiers to
# canonical node_id strings so the query can use a single indexed column
# instead of an OR across string and numeric columns.
if db && !numeric_columns.empty? && !numeric_values.empty?
numeric_values.each do |num|
resolved = db.get_first_value("SELECT node_id FROM nodes WHERE num = ? LIMIT 1", [num])
if resolved
string_values << resolved unless string_values.include?(resolved)
end
end
# All numeric values have been folded into string_values; drop the
# numeric branch so the generated SQL avoids an OR.
numeric_columns = []
numeric_values = []
end

clauses = []
params = []

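Shape of the fold, sketched with an illustrative numeric reference that resolves to a canonical ID; the exact fragment text depends on the clause builder that follows, so the values shown are indicative only:

```ruby
clause = node_lookup_clause(
  "305419896", # illustrative num; resolves to "!12345678" in this sketch
  string_columns: ["node_id"],
  numeric_columns: ["num"],
  db: db,
)
clause.first # e.g. "node_id IN (?, ?)" (one indexed column, no OR branch)
clause.last  # e.g. ["305419896", "!12345678"]
```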
@@ -117,7 +148,7 @@ module PotatoMesh
where_clauses = []

if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["num"])
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["num"], db: db)
return [] unless clause
where_clauses << clause.first
params.concat(clause.last)
@@ -159,7 +190,16 @@ module PotatoMesh
r["role"] ||= "CLIENT"
if r["role"] == "COMPANION"
derived = meshcore_companion_display_short_name(r["long_name"])
r["short_name"] = derived if derived
if derived
r["short_name"] = derived
elsif r["short_name"].nil? || r["short_name"].strip.empty?
# No derived name and no stored public-key hex — synthesise from
# the node ID (first four hex chars after the leading "!") so the
# badge is stable, unique, and consistent with how the ingestor
# builds short names from public keys.
node_id = r["node_id"].to_s.delete_prefix("!")
r["short_name"] = node_id[0, 4] unless node_id.empty?
end
end
lh = r["last_heard"]&.to_i
pt = r["position_time"]&.to_i
@@ -229,7 +269,8 @@ module PotatoMesh
#
# @param now [Integer] reference unix timestamp in seconds.
# @param db [SQLite3::Database, nil] optional open database handle to reuse.
# @return [Hash{String => Integer}] counts keyed by hour/day/week/month.
# @return [Hash{String => Object}] counts keyed by hour/day/week/month plus
# per-protocol breakdowns under "meshcore" and "meshtastic" sub-hashes.
def query_active_node_stats(now: Time.now.to_i, db: nil)
handle = db || open_database(readonly: true)
handle.results_as_hash = true
@@ -238,22 +279,48 @@ module PotatoMesh
day_cutoff = reference_now - 86_400
week_cutoff = reference_now - PotatoMesh::Config.week_seconds
month_cutoff = reference_now - (30 * 24 * 60 * 60)
private_filter = private_mode? ? " AND (role IS NULL OR role <> 'CLIENT_HIDDEN')" : ""
pf = private_mode? ? " AND (role IS NULL OR role <> 'CLIENT_HIDDEN')" : ""
proto = " AND protocol = ?"
sql = <<~SQL
SELECT
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{private_filter}) AS hour_count,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{private_filter}) AS day_count,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{private_filter}) AS week_count,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{private_filter}) AS month_count
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{pf}) AS hour_count,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{pf}) AS day_count,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{pf}) AS week_count,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{pf}) AS month_count,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{pf}#{proto}) AS mc_hour,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{pf}#{proto}) AS mc_day,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{pf}#{proto}) AS mc_week,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{pf}#{proto}) AS mc_month,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{pf}#{proto}) AS mt_hour,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{pf}#{proto}) AS mt_day,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{pf}#{proto}) AS mt_week,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{pf}#{proto}) AS mt_month
SQL
cutoffs = [hour_cutoff, day_cutoff, week_cutoff, month_cutoff]
# Total counts bind only cutoffs; per-protocol counts bind cutoff + protocol string.
params = cutoffs +
cutoffs.flat_map { |c| [c, "meshcore"] } +
cutoffs.flat_map { |c| [c, "meshtastic"] }
row = with_busy_retry do
handle.get_first_row(sql, [hour_cutoff, day_cutoff, week_cutoff, month_cutoff])
handle.get_first_row(sql, params)
end || {}
{
"hour" => row["hour_count"].to_i,
"day" => row["day_count"].to_i,
"week" => row["week_count"].to_i,
"month" => row["month_count"].to_i,
"meshcore" => {
"hour" => row["mc_hour"].to_i,
"day" => row["mc_day"].to_i,
"week" => row["mc_week"].to_i,
"month" => row["mc_month"].to_i,
},
"meshtastic" => {
"hour" => row["mt_hour"].to_i,
"day" => row["mt_day"].to_i,
"week" => row["mt_week"].to_i,
"month" => row["mt_month"].to_i,
},
}
ensure
handle&.close unless db
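Resulting return shape (counts illustrative):

```ruby
query_active_node_stats
# => {
#      "hour" => 12, "day" => 58, "week" => 140, "month" => 301,
#      "meshcore"   => { "hour" => 2,  "day" => 9,  "week" => 21,  "month" => 44 },
#      "meshtastic" => { "hour" => 10, "day" => 49, "week" => 119, "month" => 257 },
#    }
```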

@@ -37,7 +37,7 @@ module PotatoMesh
params << since_threshold

if node_ref
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"], db: db)
return [] unless clause
where_clauses << clause.first
params.concat(clause.last)

@@ -18,12 +18,34 @@ module PotatoMesh
module App
module Routes
module Api
# Accepted protocol filter values. Unknown values are discarded to
# prevent attacker-controlled strings from polluting the cache keyspace.
KNOWN_PROTOCOLS = Set.new(%w[meshcore meshtastic]).freeze

# Register read-only API endpoints that expose cached mesh data and
# instance metadata. Invoked by Sinatra during extension registration.
#
# @param app [Sinatra::Base] application instance receiving the routes.
# @return [void]
def self.registered(app)
known_protocols = KNOWN_PROTOCOLS

app.helpers do
# Sanitise the protocol query parameter to a known value.
define_method(:sanitize_protocol) do |raw|
val = raw&.to_s&.strip&.downcase
known_protocols.include?(val) ? val : nil
end
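Examples of the normalisation:

```ruby
sanitize_protocol("  MeshCore ")      # => "meshcore"
sanitize_protocol("meshtastic")       # => "meshtastic"
sanitize_protocol("'; DROP TABLE x")  # => nil (never reaches a cache key)
sanitize_protocol(nil)                # => nil
```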

# Set Cache-Control headers appropriate for the current mode.
# Private-mode instances must not allow intermediary caches to
# store responses that may contain filtered data.
define_method(:api_cache_control) do |max_age: 10|
visibility = private_mode? ? :private : :public
cache_control visibility, :must_revalidate, max_age: max_age
end
end

app.before "/api/messages*" do
halt 404 if private_mode?
end
@@ -63,92 +85,213 @@ module PotatoMesh

app.get "/api/nodes" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_nodes(limit, since: params["since"], protocol: string_or_nil(params["protocol"])).to_json
limit = coerce_query_limit(params["limit"])
since = params["since"]
protocol = sanitize_protocol(params["protocol"])
since_val = coerce_integer(since) || 0
priv = private_mode? ? 1 : 0

if since_val > 0
json_body = query_nodes(limit, since: since, protocol: protocol).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
else
cached = PotatoMesh::App::ApiCache.fetch("api:nodes:#{limit}:#{protocol}:#{priv}", ttl_seconds: 15) do
query_nodes(limit, since: since, protocol: protocol).to_json
end
etag cached[:etag], kind: :weak
api_cache_control
cached[:value]
end
end
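The caching contract these routes assume: `ApiCache.fetch` memoises the block result under the key for the TTL and returns a hash with `:value` and `:etag`. A hedged sketch of one lookup, key values illustrative:

```ruby
cached = PotatoMesh::App::ApiCache.fetch("api:nodes:200:meshcore:0", ttl_seconds: 15) do
  query_nodes(200, since: nil, protocol: "meshcore").to_json # runs at most once per TTL
end
cached[:value] # cached JSON body served to every caller within the TTL
cached[:etag]  # stable digest served as the weak ETag header
```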

app.get "/api/stats" do
content_type :json
{
active_nodes: query_active_node_stats,
sampled: false,
}.to_json
priv = private_mode? ? 1 : 0
cached = PotatoMesh::App::ApiCache.fetch("api:stats:#{priv}", ttl_seconds: 15) do
stats = query_active_node_stats
{
active_nodes: {
"hour" => stats["hour"], "day" => stats["day"],
"week" => stats["week"], "month" => stats["month"],
},
meshcore: stats["meshcore"],
meshtastic: stats["meshtastic"],
sampled: false,
}.to_json
end

etag cached[:etag], kind: :weak
api_cache_control
cached[:value]
end

app.get "/api/nodes/:id" do
content_type :json
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
limit = coerce_query_limit(params["limit"])
rows = query_nodes(limit, node_ref: node_ref, since: params["since"])
halt 404, { error: "not found" }.to_json if rows.empty?
rows.first.to_json
json_body = rows.first.to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
end

app.get "/api/ingestors" do
content_type :json
limit = coerce_query_limit(params["limit"])
query_ingestors(limit, since: params["since"], protocol: string_or_nil(params["protocol"])).to_json
protocol = sanitize_protocol(params["protocol"])
since = params["since"]
since_val = coerce_integer(since) || 0

if since_val > 0
json_body = query_ingestors(limit, since: since, protocol: protocol).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
else
cached = PotatoMesh::App::ApiCache.fetch("api:ingestors:#{limit}:#{protocol}", ttl_seconds: 30) do
query_ingestors(limit, since: since, protocol: protocol).to_json
end
etag cached[:etag], kind: :weak
api_cache_control
cached[:value]
end
end

app.get "/api/messages" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
limit = coerce_query_limit(params["limit"])
include_encrypted = coerce_boolean(params["encrypted"]) || false
since = coerce_integer(params["since"])
since = 0 if since.nil? || since.negative?
query_messages(limit, include_encrypted: include_encrypted, since: since, protocol: string_or_nil(params["protocol"])).to_json
protocol = sanitize_protocol(params["protocol"])
enc_key = include_encrypted ? "1" : "0"

if since > 0
json_body = query_messages(limit, include_encrypted: include_encrypted, since: since, protocol: protocol).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
else
cached = PotatoMesh::App::ApiCache.fetch("api:messages:#{limit}:#{enc_key}:#{protocol}", ttl_seconds: 10) do
query_messages(limit, include_encrypted: include_encrypted, since: since, protocol: protocol).to_json
end
etag cached[:etag], kind: :weak
api_cache_control
cached[:value]
end
end

app.get "/api/messages/:id" do
content_type :json
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
limit = coerce_query_limit(params["limit"])
include_encrypted = coerce_boolean(params["encrypted"]) || false
since = coerce_integer(params["since"])
since = 0 if since.nil? || since.negative?
query_messages(
json_body = query_messages(
limit,
node_ref: node_ref,
include_encrypted: include_encrypted,
since: since,
protocol: string_or_nil(params["protocol"]),
protocol: sanitize_protocol(params["protocol"]),
).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
end

app.get "/api/positions" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_positions(limit, since: params["since"], protocol: string_or_nil(params["protocol"])).to_json
limit = coerce_query_limit(params["limit"])
since = params["since"]
protocol = sanitize_protocol(params["protocol"])
since_val = coerce_integer(since) || 0

if since_val > 0
json_body = query_positions(limit, since: since, protocol: protocol).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
else
cached = PotatoMesh::App::ApiCache.fetch("api:positions:#{limit}:#{protocol}", ttl_seconds: 15) do
query_positions(limit, since: since, protocol: protocol).to_json
end
etag cached[:etag], kind: :weak
api_cache_control
cached[:value]
end
end

app.get "/api/positions/:id" do
content_type :json
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_positions(limit, node_ref: node_ref, since: params["since"], protocol: string_or_nil(params["protocol"])).to_json
limit = coerce_query_limit(params["limit"])
json_body = query_positions(limit, node_ref: node_ref, since: params["since"], protocol: sanitize_protocol(params["protocol"])).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
end

app.get "/api/neighbors" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_neighbors(limit, since: params["since"], protocol: string_or_nil(params["protocol"])).to_json
limit = coerce_query_limit(params["limit"])
since = params["since"]
protocol = sanitize_protocol(params["protocol"])
since_val = coerce_integer(since) || 0

if since_val > 0
json_body = query_neighbors(limit, since: since, protocol: protocol).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
else
cached = PotatoMesh::App::ApiCache.fetch("api:neighbors:#{limit}:#{protocol}", ttl_seconds: 30) do
query_neighbors(limit, since: since, protocol: protocol).to_json
end
etag cached[:etag], kind: :weak
api_cache_control
cached[:value]
end
end

app.get "/api/neighbors/:id" do
content_type :json
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_neighbors(limit, node_ref: node_ref, since: params["since"], protocol: string_or_nil(params["protocol"])).to_json
limit = coerce_query_limit(params["limit"])
json_body = query_neighbors(limit, node_ref: node_ref, since: params["since"], protocol: sanitize_protocol(params["protocol"])).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
end

app.get "/api/telemetry" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_telemetry(limit, since: params["since"], protocol: string_or_nil(params["protocol"])).to_json
limit = coerce_query_limit(params["limit"])
since = params["since"]
protocol = sanitize_protocol(params["protocol"])
since_val = coerce_integer(since) || 0

if since_val > 0
json_body = query_telemetry(limit, since: since, protocol: protocol).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
else
cached = PotatoMesh::App::ApiCache.fetch("api:telemetry:#{limit}:#{protocol}", ttl_seconds: 15) do
query_telemetry(limit, since: since, protocol: protocol).to_json
end
etag cached[:etag], kind: :weak
api_cache_control
cached[:value]
end
end

app.get "/api/telemetry/aggregated" do
@@ -179,33 +322,67 @@ module PotatoMesh
halt 400, { error: "bucketSeconds too small for requested window" }.to_json
end

query_telemetry_buckets(
window_seconds: window_seconds,
bucket_seconds: bucket_seconds,
since: params["since"],
).to_json
since = params["since"]
since_val = coerce_integer(since) || 0

if since_val > 0
json_body = query_telemetry_buckets(window_seconds: window_seconds, bucket_seconds: bucket_seconds, since: since).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control(max_age: 30)
json_body
else
cache_key = "api:telemetry_agg:#{window_seconds}:#{bucket_seconds}"
cached = PotatoMesh::App::ApiCache.fetch(cache_key, ttl_seconds: 60) do
query_telemetry_buckets(window_seconds: window_seconds, bucket_seconds: bucket_seconds, since: since).to_json
end
etag cached[:etag], kind: :weak
api_cache_control(max_age: 30)
cached[:value]
end
end

app.get "/api/telemetry/:id" do
content_type :json
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_telemetry(limit, node_ref: node_ref, since: params["since"], protocol: string_or_nil(params["protocol"])).to_json
limit = coerce_query_limit(params["limit"])
json_body = query_telemetry(limit, node_ref: node_ref, since: params["since"], protocol: sanitize_protocol(params["protocol"])).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
end

app.get "/api/traces" do
content_type :json
limit = [params["limit"]&.to_i || 200, 1000].min
query_traces(limit, since: params["since"], protocol: string_or_nil(params["protocol"])).to_json
limit = coerce_query_limit(params["limit"])
since = params["since"]
protocol = sanitize_protocol(params["protocol"])
since_val = coerce_integer(since) || 0

if since_val > 0
json_body = query_traces(limit, since: since, protocol: protocol).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
else
cached = PotatoMesh::App::ApiCache.fetch("api:traces:#{limit}:#{protocol}", ttl_seconds: 30) do
query_traces(limit, since: since, protocol: protocol).to_json
end
etag cached[:etag], kind: :weak
api_cache_control
cached[:value]
end
end

app.get "/api/traces/:id" do
content_type :json
node_ref = string_or_nil(params["id"])
halt 400, { error: "missing node id" }.to_json unless node_ref
limit = [params["limit"]&.to_i || 200, 1000].min
query_traces(limit, node_ref: node_ref, since: params["since"], protocol: string_or_nil(params["protocol"])).to_json
limit = coerce_query_limit(params["limit"])
json_body = query_traces(limit, node_ref: node_ref, since: params["since"], protocol: sanitize_protocol(params["protocol"])).to_json
etag Digest::MD5.hexdigest(json_body), kind: :weak
api_cache_control
json_body
end

app.get "/api/instances" do

@@ -45,6 +45,7 @@ module PotatoMesh
upsert_node(db, node_id, node, protocol: protocol)
end
PotatoMesh::App::Prometheus::NODES_GAUGE.set(query_nodes(1000).length)
PotatoMesh::App::ApiCache.invalidate_prefix("api:nodes:", "api:stats:")
{ status: "ok" }.to_json
ensure
db&.close
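Invalidation is by key prefix, so one call on the write path drops every cached limit/protocol/privacy variant of an endpoint; the keys shown are illustrative:

```ruby
PotatoMesh::App::ApiCache.invalidate_prefix("api:nodes:", "api:stats:")
# drops e.g. "api:nodes:200::0", "api:nodes:1000:meshcore:1", "api:stats:0"
```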
@@ -65,6 +66,7 @@ module PotatoMesh
messages.each do |msg|
insert_message(db, msg, protocol_cache: protocol_cache)
end
PotatoMesh::App::ApiCache.invalidate_prefix("api:messages:", "api:stats:")
{ status: "ok" }.to_json
ensure
db&.close
@@ -84,6 +86,7 @@ module PotatoMesh
db = open_database
stored = upsert_ingestor(db, payload)
halt 400, { error: "invalid payload" }.to_json unless stored
PotatoMesh::App::ApiCache.invalidate_prefix("api:ingestors:")
{ status: "ok" }.to_json
ensure
db&.close
@@ -138,6 +141,9 @@ module PotatoMesh
is_private = coerce_boolean(raw_private)
signature = string_or_nil(payload["signature"])
contact_link = string_or_nil(payload["contactLink"])
nodes_count = coerce_integer(payload["nodesCount"])
meshcore_nodes_count = coerce_integer(payload["meshcoreNodesCount"])
meshtastic_nodes_count = coerce_integer(payload["meshtasticNodesCount"])

attributes = {
id: id,
@@ -152,6 +158,9 @@ module PotatoMesh
last_update_time: last_update_time,
is_private: is_private,
contact_link: contact_link,
nodes_count: nodes_count,
meshcore_nodes_count: meshcore_nodes_count,
meshtastic_nodes_count: meshtastic_nodes_count,
}

if [attributes[:id], attributes[:domain], attributes[:pubkey], signature, attributes[:last_update_time]].any?(&:nil?)
@@ -279,6 +288,31 @@ module PotatoMesh
halt 400, { error: freshness_reason || "stale node data" }.to_json
end

# Recompute node counts from the fetched node list so that
# nodes_count, meshcore_nodes_count, and meshtastic_nodes_count
# stay internally consistent. The announcement payload may carry
# sender-asserted counts, but those are unsigned and could diverge
# from the actual node data — overwriting them here is intentional.
if remote_nodes.is_a?(Array)
cutoff = Time.now.to_i - PotatoMesh::Config.remote_instance_max_node_age
total = 0
meshcore = 0
meshtastic = 0
remote_nodes.each do |n|
next unless n.is_a?(Hash)
ts = coerce_integer(n["lastHeard"] || n["last_heard"])
next unless ts && ts >= cutoff
total += 1
case (n["protocol"] || n["mesh_protocol"]).to_s.downcase
when "meshcore" then meshcore += 1
when "meshtastic" then meshtastic += 1
end
end
attributes[:nodes_count] = total
attributes[:meshcore_nodes_count] = meshcore
attributes[:meshtastic_nodes_count] = meshtastic
end
|
||||
db = open_database
|
||||
upsert_instance_record(db, attributes, signature)
|
||||
enqueued = enqueue_federation_crawl(
|
||||
@@ -314,6 +348,7 @@ module PotatoMesh
|
||||
positions.each do |pos|
|
||||
insert_position(db, pos, protocol_cache: protocol_cache)
|
||||
end
|
||||
PotatoMesh::App::ApiCache.invalidate_prefix("api:positions:", "api:nodes:", "api:stats:")
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
@@ -334,6 +369,7 @@ module PotatoMesh
|
||||
neighbor_payloads.each do |packet|
|
||||
insert_neighbors(db, packet, protocol_cache: protocol_cache)
|
||||
end
|
||||
PotatoMesh::App::ApiCache.invalidate_prefix("api:neighbors:", "api:stats:")
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
@@ -354,6 +390,7 @@ module PotatoMesh
|
||||
telemetry_packets.each do |packet|
|
||||
insert_telemetry(db, packet, protocol_cache: protocol_cache)
|
||||
end
|
||||
PotatoMesh::App::ApiCache.invalidate_prefix("api:telemetry:", "api:stats:")
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
@@ -374,6 +411,7 @@ module PotatoMesh
|
||||
trace_packets.each do |packet|
|
||||
insert_trace(db, packet, protocol_cache: protocol_cache)
|
||||
end
|
||||
PotatoMesh::App::ApiCache.invalidate_prefix("api:traces:", "api:stats:")
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
|
||||
@@ -19,23 +19,12 @@ module PotatoMesh
module Routes
module Root
module Helpers
# Determine the initial theme from the request cookie and persist
# sanitised values back to the client to avoid invalid states.
# Return the fixed dark theme identifier. Light mode is no longer
# supported; theme selection and cookie persistence have been removed.
#
# @return [String] normalised theme value ('dark' or 'light').
# @return [String] always 'dark'.
def resolve_initial_theme
  raw_theme = request.cookies["theme"]
  theme = %w[dark light].include?(raw_theme) ? raw_theme : "dark"
  if raw_theme != theme
    response.set_cookie(
      "theme",
      value: theme,
      path: "/",
      max_age: 60 * 60 * 24 * 7,
      same_site: :lax,
    )
  end
  theme
  "dark"
end

# Render a dashboard-oriented ERB template within the shared layout.
@@ -70,6 +59,7 @@ module PotatoMesh
  initial_theme: theme,
  current_view_mode: view_mode_sym,
  map_zoom: PotatoMesh::Config.map_zoom,
  static_pages: PotatoMesh::App::Pages.static_pages,
}
sanitized_locals = extra_locals.is_a?(Hash) ? extra_locals : {}
merged_locals = base_locals.merge(sanitized_locals)
@@ -191,6 +181,26 @@ module PotatoMesh
  render_root_view(:federation, view_mode: :federation)
end

app.get "/pages/:slug" do
  slug = params.fetch("slug", "")
  halt 400, "Bad Request" unless slug.match?(PotatoMesh::App::Pages::SLUG_PATTERN)

  page = PotatoMesh::App::Pages.find_page_by_slug(slug)
  halt 404, "Not Found" unless page

  page_html = PotatoMesh::App::Pages.render_page_content(page)
  halt 500, "Internal Server Error" unless page_html

  render_root_view(
    :page,
    view_mode: :"page_#{slug}",
    extra_locals: {
      page_title: page.title,
      page_content_html: page_html,
    },
  )
end

app.get "/nodes/:id" do
  node_ref = params.fetch("id", nil)
  reference_payload = build_node_detail_reference(node_ref)

@@ -84,6 +84,26 @@ module PotatoMesh
  value.to_s.strip != "0"
end

# Resolve the absolute path to the operator-managed static pages directory.
#
# The directory defaults to +pages/+ at the application root and can be
# overridden with the +PAGES_DIR+ environment variable.
#
# @return [String] absolute filesystem path to the pages directory.
def pages_directory
  custom = fetch_string("PAGES_DIR", nil)
  return File.expand_path(custom) if custom

  File.join(web_root, "pages")
end

# Maximum file size in bytes accepted when reading a static page.
#
# @return [Integer] byte ceiling for markdown files.
def max_page_file_bytes
  512 * 1024
end

# Resolve the absolute path to the web application root directory.
#
# @return [String] absolute filesystem path of the web folder.
@@ -187,7 +207,7 @@ module PotatoMesh
#
# @return [String] semantic version identifier.
def version_fallback
  "0.6.0"
  "0.6.2"
end

# Default refresh interval for frontend polling routines.

Generated
+2
-2
@@ -1,12 +1,12 @@
{
  "name": "potato-mesh",
  "version": "0.6.0",
  "version": "0.6.1",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "potato-mesh",
      "version": "0.6.0",
      "version": "0.6.1",
      "devDependencies": {
        "istanbul-lib-coverage": "^3.2.2",
        "istanbul-lib-report": "^3.0.1",

+1
-1
@@ -1,6 +1,6 @@
{
  "name": "potato-mesh",
  "version": "0.6.0",
  "version": "0.6.2",
  "type": "module",
  "private": true,
  "scripts": {
@@ -0,0 +1,73 @@
# About This Mesh

Welcome to this [PotatoMesh](https://github.com/l5yth/potato-mesh) instance - a community dashboard for off-grid mesh networks. This is an example page; please modify it before deploying.

## What Is Meshtastic?

[Meshtastic](https://meshtastic.org) is an open-source project that turns
affordable LoRa radios into a decentralised, long-range communication network.
No cellular service or internet connection is required - nodes relay messages
across the mesh automatically.

## What Is Meshcore?

[Meshcore](https://meshcore.co.uk) is a firmware for LoRa radios focused on
reliable, low-power mesh networking. It provides a public channel system and
supports narrow-band presets optimised for long range in dense environments.

## Network Details

| Setting   | Meshtastic      | Meshcore          |
| --------- | --------------- | ----------------- |
| Channel   | #MediumFast     | Public            |
| Frequency | 869.525 MHz     | 869.618 MHz       |
| Bandwidth | 250 kHz         | 62.5 kHz          |
| SF        | 8               | 8                 |
| CR        | 4/5             | 4/8               |
| Preset    | Medium / Fast   | EU/UK Narrow      |

> Adjust this table to match the configuration of your local mesh.

## Contact

- **Public chat:** [#potatomesh:dod.ngo](https://matrix.to/#/#potatomesh:dod.ngo)
- **Source code:** [github.com/l5yth/potato-mesh](https://github.com/l5yth/potato-mesh)

## Custom Pages

Instance operators can add, edit, or remove pages by placing Markdown files in
the `pages/` directory (mounted as a Docker volume at `/app/pages`). Each file
becomes a new entry in the navigation bar.

### Filename Convention

```
<sort-prefix>-<slug>.md
```

- **Sort prefix** - a number that controls the order in the nav bar (e.g. `1`,
  `5`, `10`). Files are sorted alphabetically by their full filename.
- **Slug** - lowercase, hyphen-separated words that become the URL path and nav
  label. `contact` becomes `/pages/contact` with the label "Contact";
  `privacy-policy` becomes `/pages/privacy-policy` labelled "Privacy Policy".

### Examples

| Filename               | Nav Label      | URL                     |
| ---------------------- | -------------- | ----------------------- |
| `1-about.md`           | About          | `/pages/about`          |
| `5-rules.md`           | Rules          | `/pages/rules`          |
| `9-contact.md`         | Contact        | `/pages/contact`        |
| `10-privacy-policy.md` | Privacy Policy | `/pages/privacy-policy` |

### Impressum / Legal Notice

Operators subject to legal disclosure requirements (e.g. the German
Telemediengesetz) can create an `impressum.md` page:

```
20-impressum.md
```

Fill it with your legally required contact details (name, address, email,
phone) and it will appear in the navigation as "Impressum".
@@ -0,0 +1,385 @@
/*
 * Copyright © 2025-26 l5yth & contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';

import { renderChatEntryContent } from '../chat-entry-renderer.js';

// Shared simple mocks used across the tests below.
const esc = v => `ESC(${v})`;
const emoji = v => `EMOJI(${v})`;
const renderShortHtml = (short, role, long /*, source */) => `SHORT(${short ?? '?'}|${role ?? '-'}|${long ?? '-'})`;

function makeNode(overrides = {}) {
  return {
    node_id: '!aabbccdd',
    short_name: 'AL',
    long_name: 'Alice',
    role: 'CLIENT',
    ...overrides,
  };
}

// ---------------------------------------------------------------------------
// MeshCore leading @[Name] as reply (#727 — issue 1)
// ---------------------------------------------------------------------------

test('renderChatEntryContent: MeshCore channel leading @[Name] becomes reply prefix', () => {
  const alice = makeNode({ node_id: '!11111111', short_name: 'AL', long_name: 'Alice' });
  const bob = makeNode({ node_id: '!22222222', short_name: 'BO', long_name: 'Bob' });
  const nodesById = new Map([
    [alice.node_id, alice],
    [bob.node_id, bob],
  ]);
  const message = {
    text: 'Bob: @[Alice] thanks!',
    protocol: 'meshcore',
    to_id: '^all',
  };

  const { html } = renderChatEntryContent({
    message,
    nodesById,
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  });

  assert.ok(html.includes('chat-entry-reply'), 'should include reply prefix span');
  assert.ok(html.includes('ESC(in reply to)'), 'reply prefix label should be escaped');
  assert.ok(html.includes('SHORT(AL|CLIENT|Alice)'), 'reply target should be rendered as short name badge');
  // The @[Alice] should NOT appear inline as a mention badge — it has been
  // consumed into the reply prefix. The remaining text is "thanks!".
  assert.ok(html.includes('ESC(thanks!)'), 'remaining text should appear');
  // Exactly one SHORT(...) appearance: the reply badge (mention was absorbed).
  assert.equal((html.match(/SHORT\(/g) ?? []).length, 1);
});

test('renderChatEntryContent: MeshCore channel leading @[Name] handles name whitespace', () => {
  const timo = makeNode({ node_id: '!6aee769f', short_name: 'TI', long_name: '\u{1F4FA} Timo +' });
  const nodesById = new Map([[timo.node_id, timo]]);
  const message = {
    text: 'Bob: @[ Timo +] vielleicht hat jemand einen tip',
    protocol: 'meshcore',
    to_id: '^all',
  };

  const { html } = renderChatEntryContent({
    message,
    nodesById,
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  });

  // Should resolve despite leading space + emoji prefix on the registry name.
  assert.ok(html.includes('chat-entry-reply'));
  assert.ok(html.includes('SHORT(TI|CLIENT'), 'Timo + should be resolved via fallback match');
  assert.ok(html.includes('ESC(vielleicht hat jemand einen tip)'));
});

test('renderChatEntryContent: MeshCore multi-mention body does NOT emit reply prefix', () => {
  const alice = makeNode({ node_id: '!11111111', short_name: 'AL', long_name: 'Alice' });
  const bob = makeNode({ node_id: '!22222222', short_name: 'BO', long_name: 'Bob' });
  const nodesById = new Map([[alice.node_id, alice], [bob.node_id, bob]]);
  const message = {
    text: 'X: @[Alice] and @[Bob] both',
    protocol: 'meshcore',
    to_id: '^all',
  };

  const { html } = renderChatEntryContent({
    message,
    nodesById,
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  });

  assert.ok(!html.includes('chat-entry-reply'), 'multi-mention should not trigger reply prefix');
  // Both mentions render as inline badges.
  assert.ok(html.includes('SHORT(AL|CLIENT|Alice)'));
  assert.ok(html.includes('SHORT(BO|CLIENT|Bob)'));
});

test('renderChatEntryContent: leading mention with unresolved node still surfaces a reply prefix using the raw name (#727)', () => {
  // Production deployments cap ``/api/nodes`` at 1000 entries, so the global
  // registry can be missing nodes that recent messages reference. In that
  // case the leading-mention-as-reply detection must still emit a reply
  // prefix using the bare mention name, otherwise the body would render as
  // ``@[Name] body...`` and look like an unresolved mention link.
  const nodesById = new Map();
  const message = {
    text: 'X: @[DA6ML/p] ja, klingt sehr gut',
    protocol: 'meshcore',
    to_id: '^all',
  };

  const { html } = renderChatEntryContent({
    message,
    nodesById,
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  });

  assert.ok(html.includes('chat-entry-reply'), 'should include a reply prefix even without a node match');
  assert.ok(html.includes('ESC(in reply to)'), 'reply prefix label is escaped');
  assert.ok(html.includes('ESC(DA6ML/p)'), 'mention name is shown verbatim (escaped)');
  assert.ok(html.includes('ESC(ja, klingt sehr gut)'), 'remaining text rendered after the prefix');
  // The bare ``@[Name]`` form must NOT survive into the body.
  assert.ok(!html.includes('@[ESC('), 'unresolved mention should not leak into the body');
});

test('renderChatEntryContent: inline (non-leading) mentions still render as escaped literals when unresolved', () => {
  // Mentions that are NOT at the start are left as escaped literals — the
  // reply-prefix fallback only applies to leading-mention-as-reply.
  const nodesById = new Map();
  const message = {
    text: 'X: hello @[Unknown] there',
    protocol: 'meshcore',
    to_id: '^all',
  };

  const { html } = renderChatEntryContent({
    message,
    nodesById,
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  });

  assert.ok(!html.includes('chat-entry-reply'), 'mid-text mention must not become reply prefix');
  assert.ok(html.includes('@[ESC(Unknown)]'), 'unresolved inline mention falls back to escaped literal');
});

test('renderChatEntryContent: MeshCore DM leading mention also becomes reply prefix', () => {
  const alice = makeNode({ node_id: '!11111111', short_name: 'AL', long_name: 'Alice' });
  const nodesById = new Map([[alice.node_id, alice]]);
  const message = {
    text: '@[Alice] private reply',
    protocol: 'meshcore',
    to_id: '!22222222', // DM target, not ^all
  };

  const { html } = renderChatEntryContent({
    message,
    nodesById,
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  });

  assert.ok(html.includes('chat-entry-reply'), 'DM should also get reply prefix when leading mention resolves');
  assert.ok(html.includes('ESC(private reply)'));
});

// ---------------------------------------------------------------------------
// Structured reply_id prefix (Meshtastic)
// ---------------------------------------------------------------------------

test('renderChatEntryContent: Meshtastic reply_id takes precedence over mention detection', () => {
  const parent = makeNode({ node_id: '!11111111', short_name: 'PA', long_name: 'Parent' });
  const nodesById = new Map([[parent.node_id, parent]]);
  const parentMsg = { id: 7029, node_id: parent.node_id, node: parent };
  const messagesById = new Map([['7029', parentMsg]]);

  const message = {
    id: 8000,
    reply_id: 7029,
    text: 'ok got it',
    protocol: 'meshtastic',
  };

  const { html } = renderChatEntryContent({
    message,
    nodesById,
    messagesById,
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  });

  assert.ok(html.includes('chat-entry-reply'));
  assert.ok(html.includes('SHORT(PA|CLIENT|Parent)'));
  assert.ok(html.includes('ESC(ok got it)'));
});

// ---------------------------------------------------------------------------
// Non-MeshCore messages: no mention rendering
// ---------------------------------------------------------------------------

test('renderChatEntryContent: Meshtastic messages do NOT render @[Name] as badges', () => {
  const alice = makeNode({ node_id: '!11111111', short_name: 'AL', long_name: 'Alice' });
  const nodesById = new Map([[alice.node_id, alice]]);
  const message = {
    text: 'look at @[Alice] here',
    protocol: 'meshtastic',
  };

  const { html } = renderChatEntryContent({
    message,
    nodesById,
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  });

  // Meshtastic does not carry @[Name] mentions, so the text is escaped literal.
  assert.ok(!html.includes('SHORT(AL'), 'no mention badge for non-MeshCore protocol');
  assert.ok(html.includes('ESC(look at @[Alice] here)'), 'literal text should be escaped verbatim');
});

// ---------------------------------------------------------------------------
// Encrypted messages
// ---------------------------------------------------------------------------

test('renderChatEntryContent: encrypted message uses notice formatter when provided', () => {
  const message = { encrypted: true, text: 'GAA=', protocol: 'meshtastic' };
  const { html } = renderChatEntryContent({
    message,
    nodesById: new Map(),
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
    formatEncryptedMessageNotice: () => ({ content: '[encrypted]', isHtml: false }),
  });
  assert.equal(html, 'ESC([encrypted])');
});

test('renderChatEntryContent: encrypted message without notice formatter returns empty', () => {
  const message = { encrypted: true, text: 'GAA=', protocol: 'meshtastic' };
  const { html } = renderChatEntryContent({
    message,
    nodesById: new Map(),
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  });
  assert.equal(html, '');
});

// ---------------------------------------------------------------------------
// meshcoreSenderNode fallback return value
// ---------------------------------------------------------------------------

test('renderChatEntryContent: returns meshcoreSenderNode when prefix resolves against registry', () => {
  const sender = makeNode({ node_id: '!11111111', short_name: 'SN', long_name: 'Sender' });
  const nodesById = new Map([[sender.node_id, sender]]);
  const message = {
    text: 'Sender: hello everyone',
    protocol: 'meshcore',
    to_id: '^all',
    // Note: no `.node` — simulates ingestor not hydrating the sender.
  };

  const { parsedMeshcorePrefix, meshcoreSenderNode } = renderChatEntryContent({
    message,
    nodesById,
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  });

  assert.ok(parsedMeshcorePrefix);
  assert.equal(parsedMeshcorePrefix.senderName, 'Sender');
  assert.equal(meshcoreSenderNode, sender);
});

test('renderChatEntryContent: does not perform sender lookup when message.node is set', () => {
  const hydrated = makeNode({ node_id: '!existing', short_name: 'HY', long_name: 'Sender' });
  const registry = makeNode({ node_id: '!otherid', short_name: 'XX', long_name: 'Sender' });
  const nodesById = new Map([[registry.node_id, registry]]);
  const message = {
    text: 'Sender: hi',
    protocol: 'meshcore',
    to_id: '^all',
    node: hydrated,
  };

  const { meshcoreSenderNode } = renderChatEntryContent({
    message,
    nodesById,
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  });

  // When the ingestor already hydrated message.node, the helper should NOT
  // override it via a name-based lookup.
  assert.equal(meshcoreSenderNode, null);
});

// ---------------------------------------------------------------------------
// Input validation
// ---------------------------------------------------------------------------

test('renderChatEntryContent: non-object message returns empty html', () => {
  const result = renderChatEntryContent({
    message: null,
    nodesById: new Map(),
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  });
  assert.equal(result.html, '');
});

test('renderChatEntryContent: throws when escapeHtml is not a function', () => {
  assert.throws(() => renderChatEntryContent({
    message: { text: 'hi' },
    nodesById: new Map(),
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: null,
    renderEmojiHtml: emoji,
  }), TypeError);
});

test('renderChatEntryContent: throws when renderShortHtml is not a function', () => {
  assert.throws(() => renderChatEntryContent({
    message: { text: 'hi' },
    nodesById: new Map(),
    messagesById: new Map(),
    renderShortHtml: null,
    escapeHtml: esc,
    renderEmojiHtml: emoji,
  }), TypeError);
});

test('renderChatEntryContent: throws when renderEmojiHtml is not a function', () => {
  assert.throws(() => renderChatEntryContent({
    message: { text: 'hi' },
    nodesById: new Map(),
    messagesById: new Map(),
    renderShortHtml,
    escapeHtml: esc,
    renderEmojiHtml: null,
  }), TypeError);
});
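The renderer implementation itself is not part of this diff. As a reading aid, a minimal sketch of the leading-mention rule the tests above pin down, applied after the MeshCore `Sender: ` prefix has been parsed off; `parseLeadingMention` is a hypothetical name, not the module's API:

```js
// A single @[Name] at the very start of the body becomes a reply prefix,
// even when Name cannot be resolved against the node registry.
function parseLeadingMention(text) {
  const match = /^@\[([^\]]+)\]\s*(.*)$/s.exec(text);
  if (!match) return null;
  const rest = match[2];
  // A further mention in the remainder marks a multi-mention message,
  // not a reply, so the leading mention must stay inline.
  if (/@\[[^\]]+\]/.test(rest)) return null;
  return { name: match[1].trim(), rest };
}

// Example from the unresolved-node test above:
console.log(parseLeadingMention('@[DA6ML/p] ja, klingt sehr gut'));
// -> { name: 'DA6ML/p', rest: 'ja, klingt sehr gut' }
```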
@@ -92,14 +92,15 @@ test('buildChatTabModel returns sorted nodes and channel buckets', () => {
  );

  assert.equal(model.channels.length, 6);
  // All channels have 1 message each; ties are broken alphabetically by label.
  // Primary channels (index 0) come first, secondary channels (index > 0) come last.
  // Within each tier, ties on messageCount are broken alphabetically by label.
  assert.deepEqual(model.channels.map(channel => channel.label), [
    '1',
    'BerlinMesh',
    'EnvDefault',
    'Fallback',
    'MediumFast',
    'ShortFast'
    'ShortFast',
    '1',
    'BerlinMesh',
  ]);

  const channelByLabel = Object.fromEntries(model.channels.map(channel => [channel.label, channel]));
@@ -512,3 +513,70 @@ test('buildChatTabModel breaks messageCount ties alphabetically', () => {
  assert.equal(model.channels[0].label, 'Apple');
  assert.equal(model.channels[1].label, 'Zebra');
});

test('buildChatTabModel puts primary channels (index 0) before secondary channels', () => {
  const model = buildChatTabModel({
    nodes: [],
    messages: [
      // Secondary channels with many messages
      { id: 's1', rx_time: NOW - 30, channel: 2, channel_name: 'SecondaryA' },
      { id: 's2', rx_time: NOW - 28, channel: 2, channel_name: 'SecondaryA' },
      { id: 's3', rx_time: NOW - 26, channel: 2, channel_name: 'SecondaryA' },
      // Primary channel (index 0) with fewer messages
      { id: 'p1', rx_time: NOW - 20, channel: 0, channel_name: 'LongFast' },
    ],
    nowSeconds: NOW,
    windowSeconds: WINDOW
  });
  assert.equal(model.channels.length, 2);
  assert.equal(model.channels[0].label, 'LongFast', 'primary channel must come first regardless of activity');
  assert.equal(model.channels[0].index, 0);
  assert.equal(model.channels[1].label, 'SecondaryA', 'secondary channel must come second');
});

test('buildChatTabModel sorts primary channels by activity then alpha within the primary tier', () => {
  const model = buildChatTabModel({
    nodes: [],
    messages: [
      // LongFast: 1 message
      { id: 'lf1', rx_time: NOW - 30, channel: 0, channel_name: 'LongFast' },
      // MediumFast: 3 messages (most active primary)
      { id: 'mf1', rx_time: NOW - 28, channel: 0, channel_name: 'MediumFast' },
      { id: 'mf2', rx_time: NOW - 26, channel: 0, channel_name: 'MediumFast' },
      { id: 'mf3', rx_time: NOW - 24, channel: 0, channel_name: 'MediumFast' },
      // Public: 2 messages
      { id: 'pb1', rx_time: NOW - 22, channel: 0, channel_name: 'Public' },
      { id: 'pb2', rx_time: NOW - 20, channel: 0, channel_name: 'Public' },
    ],
    nowSeconds: NOW,
    windowSeconds: WINDOW
  });
  assert.equal(model.channels.length, 3);
  assert.equal(model.channels[0].label, 'MediumFast', 'most active primary first');
  assert.equal(model.channels[1].label, 'Public', 'second most active primary second');
  assert.equal(model.channels[2].label, 'LongFast', 'least active primary last');
});

test('buildChatTabModel sorts secondary channels by activity then alpha after all primaries', () => {
  const model = buildChatTabModel({
    nodes: [],
    messages: [
      // Primary with 1 message
      { id: 'p1', rx_time: NOW - 50, channel: 0, channel_name: 'LongFast' },
      // Secondary channels
      { id: 'b1', rx_time: NOW - 40, channel: 3, channel_name: 'Beta' },
      { id: 'a1', rx_time: NOW - 38, channel: 1, channel_name: 'Alpha' },
      { id: 'a2', rx_time: NOW - 36, channel: 1, channel_name: 'Alpha' },
      { id: 'a3', rx_time: NOW - 34, channel: 1, channel_name: 'Alpha' },
      { id: 'g1', rx_time: NOW - 32, channel: 2, channel_name: 'Gamma' },
      { id: 'g2', rx_time: NOW - 30, channel: 2, channel_name: 'Gamma' },
    ],
    nowSeconds: NOW,
    windowSeconds: WINDOW
  });
  assert.equal(model.channels.length, 4);
  assert.equal(model.channels[0].label, 'LongFast', 'primary always first');
  assert.equal(model.channels[1].label, 'Alpha', 'most active secondary first');
  assert.equal(model.channels[2].label, 'Gamma', 'second most active secondary second');
  assert.equal(model.channels[3].label, 'Beta', 'least active secondary last');
});

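The three tests above fix a two-tier ordering. A minimal comparator sketch, assuming each bucket carries the `label`, `index`, and `messageCount` fields the tests use:

```js
// Primary channels (index 0) always sort before secondary channels; within a
// tier, more active channels come first and ties fall back to the label.
function compareChannels(a, b) {
  const tierA = a.index === 0 ? 0 : 1;
  const tierB = b.index === 0 ? 0 : 1;
  if (tierA !== tierB) return tierA - tierB;
  if (b.messageCount !== a.messageCount) return b.messageCount - a.messageCount;
  return a.label.localeCompare(b.label); // alphabetical tie-break
}
```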
@@ -326,6 +326,14 @@ export function createDomEnvironment(options = {}) {
querySelector() {
  return null;
},
querySelectorAll(selector) {
  // Delegate to body when available — MockElement.querySelectorAll supports
  // class selectors which covers the majority of test-time lookups.
  if (document.body && typeof document.body.querySelectorAll === 'function') {
    return document.body.querySelectorAll(selector);
  }
  return [];
},
createElement(tagName) {
  return new MockElement(tagName, registry);
},

@@ -809,3 +809,136 @@ test('federation page sorts by full site names before truncating visible labels'
    cleanup();
  }
});

test('federation table linkifies Matrix room aliases, user IDs, and bare domain paths', async () => {
  const { tbodyEl, cleanup } = (() => {
    const e = createBasicFederationPageHarness();
    return { tbodyEl: e.tbodyEl, cleanup: e.cleanup.bind(e) };
  })();

  const fetchImpl = () => Promise.resolve({
    ok: true,
    json: async () => [
      {
        domain: 'mesh.example',
        name: 'Room Test',
        contactLink: '@jmrplens:matrix.jmrp.io',
        channel: '#mesh:server.tld',
        version: '1.0.0',
        latitude: 0,
        longitude: 0,
        lastUpdateTime: Math.floor(Date.now() / 1000) - 60,
        nodesCount: 3
      }
    ]
  });

  try {
    await initializeFederationPage({ config: {}, fetchImpl, leaflet: createBasicLeafletStub() });

    const rowHtml = tbodyEl.childNodes[0].innerHTML;
    // Matrix user ID: @jmrplens:matrix.jmrp.io → https://matrix.to/#/@jmrplens:matrix.jmrp.io
    assert.match(rowHtml, /href="https:\/\/matrix\.to\/#\/@jmrplens:matrix\.jmrp\.io"/);
    assert.match(rowHtml, /@jmrplens:matrix\.jmrp\.io/);
    // Matrix room alias in channel cell: #mesh:server.tld → https://matrix.to/#/#mesh:server.tld
    assert.match(rowHtml, /href="https:\/\/matrix\.to\/#\/#mesh:server\.tld"/);
    assert.match(rowHtml, /#mesh:server\.tld/);
  } finally {
    cleanup();
  }
});

test('federation table linkifies bare domain-with-path as https', async () => {
  const { tbodyEl, cleanup } = (() => {
    const e = createBasicFederationPageHarness();
    return { tbodyEl: e.tbodyEl, cleanup: e.cleanup.bind(e) };
  })();

  const fetchImpl = () => Promise.resolve({
    ok: true,
    json: async () => [
      {
        domain: 'mesh.example',
        contactLink: 'discord.gg/EGdbRKQnFk',
        version: '1.0.0',
        latitude: 0,
        longitude: 0,
        lastUpdateTime: Math.floor(Date.now() / 1000) - 60,
        nodesCount: 1
      }
    ]
  });

  try {
    await initializeFederationPage({ config: {}, fetchImpl, leaflet: createBasicLeafletStub() });

    const rowHtml = tbodyEl.childNodes[0].innerHTML;
    assert.match(rowHtml, /href="https:\/\/discord\.gg\/EGdbRKQnFk"/);
    assert.match(rowHtml, /discord\.gg\/EGdbRKQnFk/);
  } finally {
    cleanup();
  }
});

test('federation table sanitises <a> tags and strips other HTML in contact field', async () => {
  const { tbodyEl, cleanup } = (() => {
    const e = createBasicFederationPageHarness();
    return { tbodyEl: e.tbodyEl, cleanup: e.cleanup.bind(e) };
  })();

  const contactWithHtml =
    '<a href=https://t.me/+BpSW3no2mJgzM2I8 target=_blank>YO Telegram group</a><b> Contact:</b> YO3IBZ';
  const contactViber =
    '<a href="https://invite.viber.com/?g=64h1QIFIC1Unai6DS6SE2Ot8ks9xoTm6">Viber Group</a>';

  const fetchImpl = () => Promise.resolve({
    ok: true,
    json: async () => [
      {
        domain: 'a.mesh',
        contactLink: contactWithHtml,
        version: '1.0.0',
        latitude: 0,
        longitude: 0,
        lastUpdateTime: Math.floor(Date.now() / 1000) - 60,
        nodesCount: 1
      },
      {
        domain: 'b.mesh',
        contactLink: contactViber,
        version: '1.0.0',
        latitude: 0,
        longitude: 0,
        lastUpdateTime: Math.floor(Date.now() / 1000) - 60,
        nodesCount: 1
      }
    ]
  });

  try {
    await initializeFederationPage({ config: {}, fetchImpl, leaflet: createBasicLeafletStub() });

    const rows = tbodyEl.childNodes;
    const aHtml = rows[0].innerHTML;
    const bHtml = rows[1].innerHTML;

    // Unquoted href extracted and normalised
    assert.match(aHtml, /href="https:\/\/t\.me\/\+BpSW3no2mJgzM2I8"/);
    assert.match(aHtml, /YO Telegram group/);
    // <b> tag stripped, text content preserved
    assert.match(aHtml, /Contact:/);
    assert.doesNotMatch(aHtml, /<b>/);
    // Remaining plain text present
    assert.match(aHtml, /YO3IBZ/);

    // Quoted href passes through correctly
    assert.match(bHtml, /href="https:\/\/invite\.viber\.com\/\?g=64h1QIFIC1Unai6DS6SE2Ot8ks9xoTm6"/);
    assert.match(bHtml, /Viber Group/);

    // No raw HTML from input leaks into output
    assert.doesNotMatch(aHtml, /target=_blank/);
    assert.doesNotMatch(aHtml, /<\/b>/);
  } finally {
    cleanup();
  }
});

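A simplified sketch of the contact-link rules these tests cover: Matrix user IDs and room aliases route through the matrix.to redirector, and a bare domain-with-path is upgraded to https. The shipped code also sanitises embedded HTML, which is omitted here, and these regexes are illustrative rather than the real ones:

```js
function linkifyContact(value) {
  if (/^[@#][^\s:]+:[^\s:]+$/.test(value)) {
    // Matrix user IDs (@user:server) and room aliases (#room:server)
    return `https://matrix.to/#/${value}`;
  }
  if (/^https?:\/\//.test(value)) return value; // already absolute
  if (/^[\w.-]+\.[a-z]{2,}\/\S+$/i.test(value)) {
    return `https://${value}`; // bare domain-with-path defaults to https
  }
  return null; // leave anything else as plain text
}

console.log(linkifyContact('@jmrplens:matrix.jmrp.io')); // https://matrix.to/#/@jmrplens:matrix.jmrp.io
console.log(linkifyContact('discord.gg/EGdbRKQnFk'));    // https://discord.gg/EGdbRKQnFk
```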
@@ -0,0 +1,212 @@
/*
 * Copyright © 2025-26 l5yth & contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';
import { maxRecordTimestamp, mergeById, mergeByCompositeKey, trimToLimit } from '../incremental-helpers.js';

// ---------------------------------------------------------------------------
// maxRecordTimestamp
// ---------------------------------------------------------------------------

test('maxRecordTimestamp returns 0 for an empty array', () => {
  assert.equal(maxRecordTimestamp([]), 0);
});

test('maxRecordTimestamp returns 0 for non-array input', () => {
  assert.equal(maxRecordTimestamp(null), 0);
  assert.equal(maxRecordTimestamp(undefined), 0);
  assert.equal(maxRecordTimestamp('string'), 0);
});

test('maxRecordTimestamp extracts the highest rx_time by default', () => {
  const records = [
    { rx_time: 100 },
    { rx_time: 300 },
    { rx_time: 200 },
  ];
  assert.equal(maxRecordTimestamp(records), 300);
});

test('maxRecordTimestamp inspects last_heard by default', () => {
  const records = [
    { last_heard: 500 },
    { last_heard: 250 },
  ];
  assert.equal(maxRecordTimestamp(records), 500);
});

test('maxRecordTimestamp returns 0 when records lack timestamp fields', () => {
  const records = [{ node_id: '!abc' }, { node_id: '!def' }];
  assert.equal(maxRecordTimestamp(records), 0);
});

test('maxRecordTimestamp accepts custom field names', () => {
  const records = [
    { telemetry_time: 700, rx_time: 600 },
    { telemetry_time: 800 },
  ];
  assert.equal(maxRecordTimestamp(records, ['telemetry_time']), 800);
});

test('maxRecordTimestamp picks the max across multiple fields', () => {
  const records = [
    { rx_time: 100, position_time: 400 },
    { rx_time: 300, position_time: 200 },
  ];
  assert.equal(maxRecordTimestamp(records, ['rx_time', 'position_time']), 400);
});

test('maxRecordTimestamp skips null and non-object entries', () => {
  const records = [null, undefined, 42, { rx_time: 10 }];
  assert.equal(maxRecordTimestamp(records), 10);
});

test('maxRecordTimestamp ignores non-number timestamp values', () => {
  const records = [{ rx_time: 'abc' }, { rx_time: 50 }];
  assert.equal(maxRecordTimestamp(records), 50);
});

// ---------------------------------------------------------------------------
// mergeById
// ---------------------------------------------------------------------------

test('mergeById returns existing when incoming is empty', () => {
  const existing = [{ id: 1, v: 'a' }];
  assert.strictEqual(mergeById(existing, [], 'id'), existing);
  assert.strictEqual(mergeById(existing, null, 'id'), existing);
  assert.strictEqual(mergeById(existing, undefined, 'id'), existing);
});

test('mergeById deduplicates by keyField keeping the incoming value', () => {
  const existing = [
    { id: 1, v: 'old' },
    { id: 2, v: 'keep' },
  ];
  const incoming = [
    { id: 1, v: 'new' },
    { id: 3, v: 'added' },
  ];
  const result = mergeById(existing, incoming, 'id');
  assert.equal(result.length, 3);
  const byId = Object.fromEntries(result.map(r => [r.id, r.v]));
  assert.equal(byId[1], 'new');
  assert.equal(byId[2], 'keep');
  assert.equal(byId[3], 'added');
});

test('mergeById works with string keys', () => {
  const existing = [{ node_id: '!abc', name: 'A' }];
  const incoming = [{ node_id: '!abc', name: 'B' }];
  const result = mergeById(existing, incoming, 'node_id');
  assert.equal(result.length, 1);
  assert.equal(result[0].name, 'B');
});

test('mergeById skips items with null or undefined key', () => {
  const existing = [{ id: 1, v: 'a' }];
  const incoming = [{ v: 'no-id' }, { id: 2, v: 'b' }];
  const result = mergeById(existing, incoming, 'id');
  assert.equal(result.length, 2);
});

test('mergeById returns all incoming when existing is empty', () => {
  const result = mergeById([], [{ id: 1 }, { id: 2 }], 'id');
  assert.equal(result.length, 2);
});

// ---------------------------------------------------------------------------
// mergeByCompositeKey
// ---------------------------------------------------------------------------

test('mergeByCompositeKey deduplicates by composite key', () => {
  const existing = [
    { node_id: '!a', neighbor_id: '!b', snr: 5 },
    { node_id: '!a', neighbor_id: '!c', snr: 3 },
  ];
  const incoming = [
    { node_id: '!a', neighbor_id: '!b', snr: 8 },
    { node_id: '!a', neighbor_id: '!d', snr: 1 },
  ];
  const result = mergeByCompositeKey(existing, incoming, ['node_id', 'neighbor_id']);
  assert.equal(result.length, 3);
  const ab = result.find(r => r.neighbor_id === '!b');
  assert.equal(ab.snr, 8, 'incoming should overwrite existing for same composite key');
});

test('mergeByCompositeKey returns existing when incoming is empty', () => {
  const existing = [{ a: 1, b: 2 }];
  assert.strictEqual(mergeByCompositeKey(existing, [], ['a', 'b']), existing);
  assert.strictEqual(mergeByCompositeKey(existing, null, ['a', 'b']), existing);
});

test('mergeByCompositeKey handles missing key fields gracefully', () => {
  const existing = [{ node_id: '!a' }];
  const incoming = [{ node_id: '!a', neighbor_id: '!b' }];
  const result = mergeByCompositeKey(existing, incoming, ['node_id', 'neighbor_id']);
  assert.equal(result.length, 2, 'different composite keys due to missing field');
});

// ---------------------------------------------------------------------------
// trimToLimit
// ---------------------------------------------------------------------------

test('trimToLimit returns the same array when within limit', () => {
  const records = [{ id: 1, rx_time: 100 }, { id: 2, rx_time: 200 }];
  const result = trimToLimit(records, 5);
  assert.strictEqual(result, records);
});

test('trimToLimit trims to limit keeping newest entries', () => {
  const records = [
    { id: 1, rx_time: 100 },
    { id: 2, rx_time: 300 },
    { id: 3, rx_time: 200 },
    { id: 4, rx_time: 400 },
  ];
  const result = trimToLimit(records, 2);
  assert.equal(result.length, 2);
  const ids = result.map(r => r.id);
  assert.ok(ids.includes(4), 'should keep newest (id=4)');
  assert.ok(ids.includes(2), 'should keep second newest (id=2)');
});

test('trimToLimit uses custom timestamp field', () => {
  const records = [
    { id: 1, last_heard: 100 },
    { id: 2, last_heard: 300 },
    { id: 3, last_heard: 200 },
  ];
  const result = trimToLimit(records, 1, 'last_heard');
  assert.equal(result.length, 1);
  assert.equal(result[0].id, 2);
});

test('trimToLimit returns input for non-array values', () => {
  assert.equal(trimToLimit(null, 10), null);
  assert.equal(trimToLimit(undefined, 10), undefined);
});

test('trimToLimit handles records with missing timestamp fields', () => {
  const records = [
    { id: 1, rx_time: 100 },
    { id: 2 },
    { id: 3, rx_time: 300 },
  ];
  const result = trimToLimit(records, 2);
  assert.equal(result.length, 2);
  assert.equal(result[0].id, 3);
});
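For orientation, reference sketches consistent with the behaviour the tests above assert; the shipped `incremental-helpers.js` may differ in detail:

```js
// Merge two record lists by a key field; incoming entries win on collision,
// keyless entries are skipped, and an empty incoming list returns `existing`
// unchanged (the tests assert reference equality for that case).
function mergeByIdSketch(existing, incoming, keyField) {
  if (!Array.isArray(incoming) || incoming.length === 0) return existing;
  const merged = new Map();
  for (const item of [...existing, ...incoming]) {
    const key = item?.[keyField];
    if (key === null || key === undefined) continue;
    merged.set(key, item); // later (incoming) entries win for a shared key
  }
  return [...merged.values()];
}

// Keep only the newest `limit` records by a timestamp field; records already
// within the limit are returned as the same array reference.
function trimToLimitSketch(records, limit, field = 'rx_time') {
  if (!Array.isArray(records) || records.length <= limit) return records;
  return [...records]
    .sort((a, b) => (b?.[field] ?? 0) - (a?.[field] ?? 0)) // newest first
    .slice(0, limit);
}
```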
@@ -230,7 +230,7 @@ test('initializeInstanceSelector navigates to the chosen instance domain', async
  const fetchImpl = async () => ({
    ok: true,
    async json() {
      return [{ domain: 'mesh.example' }];
      return [{ domain: 'mesh.example' }, { domain: 'other.mesh' }];
    }
  });

@@ -249,7 +249,7 @@ test('initializeInstanceSelector navigates to the chosen instance domain', async
    defaultLabel: 'Select region ...'
  });

  assert.equal(select.options.length, 2);
  assert.equal(select.options.length, 3);
  assert.equal(select.options[1].value, 'mesh.example');

  select.value = 'mesh.example';
@@ -261,6 +261,68 @@ test('initializeInstanceSelector navigates to the chosen instance domain', async
  }
});

test('initializeInstanceSelector hides the selector container when fewer than 2 instances are available', async () => {
  const env = createDomEnvironment();
  const select = setupSelectElement(env.document);

  // Simulate a parent container; mock elements lack closest() so we set
  // parentElement directly so the hide logic falls back to it.
  const container = env.document.createElement('div');
  container.classList.add('header-federation');
  select.parentElement = container;
  env.document.body.appendChild(container);

  const fetchImpl = async () => ({
    ok: true,
    async json() {
      return [{ domain: 'only.mesh' }];
    }
  });

  try {
    await initializeInstanceSelector({
      selectElement: select,
      fetchImpl,
      windowObject: env.window,
      documentObject: env.document
    });

    assert.equal(container.hidden, true, 'container should be hidden with fewer than 2 instances');
  } finally {
    env.cleanup();
  }
});

test('initializeInstanceSelector keeps the selector visible when 2 or more instances are available', async () => {
  const env = createDomEnvironment();
  const select = setupSelectElement(env.document);

  const container = env.document.createElement('div');
  container.classList.add('header-federation');
  select.parentElement = container;
  env.document.body.appendChild(container);

  const fetchImpl = async () => ({
    ok: true,
    async json() {
      return [{ domain: 'alpha.mesh' }, { domain: 'beta.mesh' }];
    }
  });

  try {
    await initializeInstanceSelector({
      selectElement: select,
      fetchImpl,
      windowObject: env.window,
      documentObject: env.document
    });

    assert.ok(!container.hidden, 'container should remain visible with 2 or more instances');
  } finally {
    env.cleanup();
  }
});

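A sketch of the visibility rule the two tests above pin down; the container lookup via `closest()` with a `parentElement` fallback mirrors the test comment, but the function name is hypothetical:

```js
// A selector offering fewer than two reachable instances is pure noise,
// so its surrounding container is hidden entirely.
function updateSelectorVisibility(selectElement, instances) {
  const container =
    selectElement.closest?.('.header-federation') ?? selectElement.parentElement;
  if (container) container.hidden = instances.length < 2;
}
```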
test('initializeInstanceSelector updates federation navigation labels with instance count', async () => {
  const env = createDomEnvironment();
  const select = setupSelectElement(env.document);

@@ -44,8 +44,6 @@ export const MINIMAL_CONFIG = Object.freeze({
 */
export function setupApp() {
  const env = createDomEnvironment({ includeBody: true });
  // themeToggle is accessed without a null guard in initializeApp.
  env.createElement('button', 'themeToggle');
  const { _testUtils } = initializeApp(MINIMAL_CONFIG);
  return { testUtils: _testUtils, cleanup: env.cleanup.bind(env) };
}
@@ -65,6 +63,24 @@ export function withApp(fn) {
  }
}

/**
 * Spin up a DOM environment, optionally pre-register elements by id, then
 * initialise the app with a custom config override. Returns the test utils,
 * the environment (for DOM inspection), and a cleanup handle.
 *
 * @param {{ extraElements?: string[], configOverrides?: Object }} [opts]
 * @returns {{ testUtils: Object, env: Object, cleanup: Function }}
 */
export function setupAppWithOptions({ extraElements = [], configOverrides = {} } = {}) {
  const env = createDomEnvironment({ includeBody: true });
  for (const id of extraElements) {
    env.registerElement(id, env.createElement('span', id));
  }
  const config = { ...MINIMAL_CONFIG, ...configOverrides };
  const { _testUtils } = initializeApp(config);
  return { testUtils: _testUtils, env, cleanup: env.cleanup.bind(env) };
}

/**
 * Extract the serialised HTML string from a DOM element returned by the test
 * utils. The stub environment exposes innerHTML as a plain string; this

@@ -0,0 +1,40 @@
/*
 * Copyright © 2025-26 l5yth & contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';

import { withApp } from './main-app-test-helpers.js';

// ---------------------------------------------------------------------------
// isAutorefreshPaused
// ---------------------------------------------------------------------------

test('isAutorefreshPaused returns false by default', () => {
  withApp((t) => {
    assert.equal(t.isAutorefreshPaused(), false);
  });
});

// ---------------------------------------------------------------------------
// restartAutoRefresh is safe when called without a timer
// ---------------------------------------------------------------------------

test('restartAutoRefresh does not throw when invoked with refreshMs 0', () => {
  withApp((t) => {
    assert.doesNotThrow(() => t.restartAutoRefresh());
  });
});
@@ -66,7 +66,7 @@ test('makeRoleFilterKey SENSOR and REPEATER produce distinct keys across protoco
// matchesRoleFilter — no active filters
// ---------------------------------------------------------------------------

test('matchesRoleFilter returns true when no filters are active', () => {
test('matchesRoleFilter returns true when no roles are hidden', () => {
  withApp((t) => {
    t.activeRoleFilters.clear();
    assert.equal(t.matchesRoleFilter({ role: 'ROUTER', protocol: 'meshtastic' }), true);
@@ -75,56 +75,56 @@ test('matchesRoleFilter returns true when no filters are active', () => {
});

// ---------------------------------------------------------------------------
// matchesRoleFilter — protocol-aware compound key matching
// matchesRoleFilter — exclusion-set semantics (roles in set are hidden)
// ---------------------------------------------------------------------------

test('matchesRoleFilter matches meshtastic SENSOR filter for meshtastic node', () => {
test('matchesRoleFilter hides meshtastic SENSOR when in exclusion set', () => {
  withApp((t) => {
    t.activeRoleFilters.clear();
    t.activeRoleFilters.add('meshtastic:SENSOR');
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: 'meshtastic' }), true);
  });
});

test('matchesRoleFilter does not match meshtastic SENSOR filter for meshcore node', () => {
  withApp((t) => {
    t.activeRoleFilters.clear();
    t.activeRoleFilters.add('meshtastic:SENSOR');
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: 'meshcore' }), false);
  });
});

test('matchesRoleFilter matches meshcore SENSOR filter for meshcore node', () => {
  withApp((t) => {
    t.activeRoleFilters.clear();
    t.activeRoleFilters.add('meshcore:SENSOR');
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: 'meshcore' }), true);
  });
});

test('matchesRoleFilter does not match meshcore SENSOR filter for meshtastic node', () => {
  withApp((t) => {
    t.activeRoleFilters.clear();
    t.activeRoleFilters.add('meshcore:SENSOR');
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: 'meshtastic' }), false);
  });
});

test('matchesRoleFilter matches meshtastic REPEATER filter for meshtastic node', () => {
test('matchesRoleFilter does not hide meshcore SENSOR when meshtastic SENSOR is hidden', () => {
  withApp((t) => {
    t.activeRoleFilters.clear();
    t.activeRoleFilters.add('meshtastic:REPEATER');
    assert.equal(t.matchesRoleFilter({ role: 'REPEATER', protocol: 'meshtastic' }), true);
    assert.equal(t.matchesRoleFilter({ role: 'REPEATER', protocol: 'meshcore' }), false);
    t.activeRoleFilters.add('meshtastic:SENSOR');
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: 'meshcore' }), true);
  });
});

test('matchesRoleFilter matches meshcore REPEATER filter for meshcore node', () => {
test('matchesRoleFilter hides meshcore SENSOR when in exclusion set', () => {
  withApp((t) => {
    t.activeRoleFilters.clear();
    t.activeRoleFilters.add('meshcore:SENSOR');
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: 'meshcore' }), false);
  });
});

test('matchesRoleFilter does not hide meshtastic SENSOR when meshcore SENSOR is hidden', () => {
  withApp((t) => {
    t.activeRoleFilters.clear();
    t.activeRoleFilters.add('meshcore:SENSOR');
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: 'meshtastic' }), true);
  });
});

test('matchesRoleFilter hides meshtastic REPEATER but not meshcore REPEATER', () => {
  withApp((t) => {
    t.activeRoleFilters.clear();
    t.activeRoleFilters.add('meshtastic:REPEATER');
    assert.equal(t.matchesRoleFilter({ role: 'REPEATER', protocol: 'meshtastic' }), false);
    assert.equal(t.matchesRoleFilter({ role: 'REPEATER', protocol: 'meshcore' }), true);
  });
});

test('matchesRoleFilter hides meshcore REPEATER but not meshtastic REPEATER', () => {
  withApp((t) => {
    t.activeRoleFilters.clear();
    t.activeRoleFilters.add('meshcore:REPEATER');
    assert.equal(t.matchesRoleFilter({ role: 'REPEATER', protocol: 'meshcore' }), true);
    assert.equal(t.matchesRoleFilter({ role: 'REPEATER', protocol: 'meshtastic' }), false);
    assert.equal(t.matchesRoleFilter({ role: 'REPEATER', protocol: 'meshcore' }), false);
    assert.equal(t.matchesRoleFilter({ role: 'REPEATER', protocol: 'meshtastic' }), true);
  });
});

@@ -132,27 +132,27 @@ test('matchesRoleFilter matches meshcore REPEATER filter for meshcore node', ()
// matchesRoleFilter — null/absent protocol treated as meshtastic
// ---------------------------------------------------------------------------

test('matchesRoleFilter treats null protocol as meshtastic for filter matching', () => {
test('matchesRoleFilter treats null protocol as meshtastic for exclusion', () => {
  withApp((t) => {
    t.activeRoleFilters.clear();
    t.activeRoleFilters.add('meshtastic:SENSOR');
    // null-protocol node should match the meshtastic SENSOR filter
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: null }), true);
    // but not the meshcore one
    // null-protocol node should be hidden by the meshtastic SENSOR exclusion
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: null }), false);
    // but meshcore SENSOR exclusion should not affect null-protocol nodes
    t.activeRoleFilters.clear();
    t.activeRoleFilters.add('meshcore:SENSOR');
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: null }), false);
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: null }), true);
  });
});

test('matchesRoleFilter with multiple active filters returns true when any matches', () => {
test('matchesRoleFilter with multiple hidden roles hides only those roles', () => {
  withApp((t) => {
    t.activeRoleFilters.clear();
    t.activeRoleFilters.add('meshtastic:SENSOR');
    t.activeRoleFilters.add('meshcore:REPEATER');
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: 'meshtastic' }), true);
    assert.equal(t.matchesRoleFilter({ role: 'REPEATER', protocol: 'meshcore' }), true);
    assert.equal(t.matchesRoleFilter({ role: 'ROUTER', protocol: 'meshtastic' }), false);
    assert.equal(t.matchesRoleFilter({ role: 'SENSOR', protocol: 'meshtastic' }), false);
    assert.equal(t.matchesRoleFilter({ role: 'REPEATER', protocol: 'meshcore' }), false);
    assert.equal(t.matchesRoleFilter({ role: 'ROUTER', protocol: 'meshtastic' }), true);
  });
});

|
||||
@@ -348,7 +348,7 @@ test('buildRoleButtons appends one child per palette entry', () => {
|
||||
withApp((t) => {
|
||||
t.legendRoleButtons.clear();
|
||||
const col = document.createElement('div');
|
||||
t.buildRoleButtons(col, { SENSOR: '#4A7EB4', REPEATER: '#C8D0DC' }, 'meshcore');
|
||||
t.buildRoleButtons(col, { SENSOR: '#40749E', REPEATER: '#B8C4D4' }, 'meshcore');
|
||||
assert.equal(col.childNodes.length, 2);
|
||||
});
|
||||
});
|
||||
@@ -357,7 +357,7 @@ test('buildRoleButtons sets dataset.role and dataset.protocol on each button', (
|
||||
withApp((t) => {
|
||||
t.legendRoleButtons.clear();
|
||||
const col = document.createElement('div');
|
||||
t.buildRoleButtons(col, { SENSOR: '#4A7EB4' }, 'meshcore');
|
||||
t.buildRoleButtons(col, { SENSOR: '#40749E' }, 'meshcore');
|
||||
const btn = t.legendRoleButtons.get('meshcore:SENSOR');
|
||||
assert.ok(btn, 'button should be in legendRoleButtons');
|
||||
assert.equal(btn.dataset.role, 'SENSOR');
|
||||
@@ -369,7 +369,7 @@ test('buildRoleButtons registers compound keys in legendRoleButtons', () => {
|
||||
withApp((t) => {
|
||||
t.legendRoleButtons.clear();
|
||||
const col = document.createElement('div');
|
||||
t.buildRoleButtons(col, { SENSOR: '#4A7EB4', REPEATER: '#C8D0DC' }, 'meshcore');
|
||||
t.buildRoleButtons(col, { SENSOR: '#40749E', REPEATER: '#B8C4D4' }, 'meshcore');
|
||||
assert.ok(t.legendRoleButtons.has('meshcore:SENSOR'));
|
||||
assert.ok(t.legendRoleButtons.has('meshcore:REPEATER'));
|
||||
});
|
||||
@@ -380,8 +380,8 @@ test('buildRoleButtons keeps meshtastic and meshcore SENSOR keys distinct', () =
|
||||
t.legendRoleButtons.clear();
|
||||
const colMc = document.createElement('div');
|
||||
const colMt = document.createElement('div');
|
||||
t.buildRoleButtons(colMc, { SENSOR: '#4A7EB4' }, 'meshcore');
|
||||
t.buildRoleButtons(colMt, { SENSOR: '#B2D880' }, 'meshtastic');
|
||||
t.buildRoleButtons(colMc, { SENSOR: '#40749E' }, 'meshcore');
|
||||
t.buildRoleButtons(colMt, { SENSOR: '#A8D5BA' }, 'meshtastic');
|
||||
assert.ok(t.legendRoleButtons.has('meshcore:SENSOR'));
|
||||
assert.ok(t.legendRoleButtons.has('meshtastic:SENSOR'));
|
||||
assert.notEqual(
|
||||
@@ -391,14 +391,14 @@ test('buildRoleButtons keeps meshtastic and meshcore SENSOR keys distinct', () =
|
||||
});
|
||||
});
|
||||
|
||||
test('buildRoleButtons sets aria-pressed to false initially', () => {
|
||||
test('buildRoleButtons sets aria-pressed to true initially (all visible)', () => {
|
||||
withApp((t) => {
|
||||
t.legendRoleButtons.clear();
|
||||
const col = document.createElement('div');
|
||||
t.buildRoleButtons(col, { ROUTER: '#D44E14' }, 'meshtastic');
|
||||
t.buildRoleButtons(col, { ROUTER: '#ff0019' }, 'meshtastic');
|
||||
const btn = t.legendRoleButtons.get('meshtastic:ROUTER');
|
||||
assert.ok(btn, 'button should be in legendRoleButtons');
|
||||
assert.equal(btn.getAttribute('aria-pressed'), 'false');
|
||||
assert.equal(btn.getAttribute('aria-pressed'), 'true');
|
||||
});
|
||||
});
|
||||
|
||||
@@ -406,7 +406,7 @@ test('buildRoleButtons creates swatch child with background color', () => {
|
||||
withApp((t) => {
|
||||
t.legendRoleButtons.clear();
|
||||
const col = document.createElement('div');
|
||||
t.buildRoleButtons(col, { ROUTER: '#D44E14' }, 'meshtastic');
|
||||
t.buildRoleButtons(col, { ROUTER: '#ff0019' }, 'meshtastic');
|
||||
const btn = t.legendRoleButtons.get('meshtastic:ROUTER');
|
||||
// swatch is the first child of the button
|
||||
const swatch = btn.childNodes[0];
|
||||
@@ -419,7 +419,7 @@ test('buildRoleButtons creates label child with role text', () => {
|
||||
withApp((t) => {
|
||||
t.legendRoleButtons.clear();
|
||||
const col = document.createElement('div');
|
||||
t.buildRoleButtons(col, { ROUTER: '#D44E14' }, 'meshtastic');
|
||||
t.buildRoleButtons(col, { ROUTER: '#ff0019' }, 'meshtastic');
|
||||
const btn = t.legendRoleButtons.get('meshtastic:ROUTER');
|
||||
// label is the second child of the button
|
||||
const label = btn.childNodes[1];
|
||||
@@ -432,55 +432,28 @@ test('buildRoleButtons creates label child with role text', () => {
|
||||
// updateLegendRoleFiltersUI
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
test('updateLegendRoleFiltersUI sets aria-pressed true on active role buttons', () => {
|
||||
test('updateLegendRoleFiltersUI sets aria-pressed false on hidden role buttons', () => {
|
||||
withApp((t) => {
|
||||
t.legendRoleButtons.clear();
|
||||
const col = document.createElement('div');
|
||||
t.buildRoleButtons(col, { SENSOR: '#4A7EB4' }, 'meshcore');
|
||||
t.buildRoleButtons(col, { SENSOR: '#40749E' }, 'meshcore');
|
||||
const btn = t.legendRoleButtons.get('meshcore:SENSOR');
|
||||
t.activeRoleFilters.clear();
|
||||
t.activeRoleFilters.add('meshcore:SENSOR');
|
||||
t.updateLegendRoleFiltersUI();
|
||||
assert.equal(btn.getAttribute('aria-pressed'), 'true');
|
||||
});
|
||||
});
|
||||
|
||||
test('updateLegendRoleFiltersUI sets aria-pressed false on inactive role buttons', () => {
|
||||
withApp((t) => {
|
||||
t.legendRoleButtons.clear();
|
||||
const col = document.createElement('div');
|
||||
t.buildRoleButtons(col, { SENSOR: '#4A7EB4' }, 'meshcore');
|
||||
const btn = t.legendRoleButtons.get('meshcore:SENSOR');
|
||||
t.activeRoleFilters.clear();
|
||||
t.updateLegendRoleFiltersUI();
|
||||
assert.equal(btn.getAttribute('aria-pressed'), 'false');
|
||||
});
|
||||
});
|
||||
|
||||
test('updateLegendRoleFiltersUI updates protocol button text to Show when hidden', () => {
|
||||
test('updateLegendRoleFiltersUI sets aria-pressed true on visible role buttons', () => {
|
||||
withApp((t) => {
|
||||
t.legendProtocolButtons.clear();
|
||||
const fakeBtn = document.createElement('button');
|
||||
fakeBtn.setAttribute('aria-pressed', 'false');
|
||||
t.legendProtocolButtons.set('meshtastic', fakeBtn);
|
||||
t.hiddenProtocols.clear();
|
||||
t.hiddenProtocols.add('meshtastic');
|
||||
t.legendRoleButtons.clear();
|
||||
const col = document.createElement('div');
|
||||
t.buildRoleButtons(col, { SENSOR: '#40749E' }, 'meshcore');
|
||||
const btn = t.legendRoleButtons.get('meshcore:SENSOR');
|
||||
t.activeRoleFilters.clear();
|
||||
t.updateLegendRoleFiltersUI();
|
||||
assert.equal(fakeBtn.getAttribute('aria-pressed'), 'true');
|
||||
assert.ok(fakeBtn.textContent.startsWith('Show'));
|
||||
});
|
||||
});
|
||||
|
||||
test('updateLegendRoleFiltersUI updates protocol button text to Hide when visible', () => {
|
||||
withApp((t) => {
|
||||
t.legendProtocolButtons.clear();
|
||||
const fakeBtn = document.createElement('button');
|
||||
fakeBtn.setAttribute('aria-pressed', 'true');
|
||||
t.legendProtocolButtons.set('meshcore', fakeBtn);
|
||||
t.hiddenProtocols.clear();
|
||||
t.updateLegendRoleFiltersUI();
|
||||
assert.equal(fakeBtn.getAttribute('aria-pressed'), 'false');
|
||||
assert.ok(fakeBtn.textContent.startsWith('Hide'));
|
||||
assert.equal(btn.getAttribute('aria-pressed'), 'true');
|
||||
});
|
||||
});
|
||||
|
||||
@@ -490,3 +463,81 @@ test('updateLegendRoleFiltersUI is safe when legendContainer is null', () => {
|
||||
assert.doesNotThrow(() => t.updateLegendRoleFiltersUI());
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// adjustStatsForHiddenProtocols
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
test('adjustStatsForHiddenProtocols returns original stats when nothing is hidden', () => {
|
||||
withApp((t) => {
|
||||
t.hiddenProtocols.clear();
|
||||
const stats = { hour: 10, day: 50, week: 100, month: 200, meshcore: { hour: 2, day: 10, week: 20, month: 40 }, meshtastic: { hour: 8, day: 40, week: 80, month: 160 } };
|
||||
const result = t.adjustStatsForHiddenProtocols(stats);
|
||||
assert.equal(result, stats);
|
||||
});
|
||||
});
|
||||
|
||||
test('adjustStatsForHiddenProtocols subtracts meshcore counts when meshcore hidden', () => {
|
||||
withApp((t) => {
|
||||
t.hiddenProtocols.clear();
|
||||
t.hiddenProtocols.add('meshcore');
|
||||
const stats = { hour: 10, day: 50, week: 100, month: 200, meshcore: { hour: 2, day: 10, week: 20, month: 40 }, meshtastic: { hour: 8, day: 40, week: 80, month: 160 } };
|
||||
const result = t.adjustStatsForHiddenProtocols(stats);
|
||||
assert.equal(result.week, 80);
|
||||
assert.equal(result.day, 40);
|
||||
assert.equal(result.month, 160);
|
||||
assert.equal(result.hour, 8);
|
||||
});
|
||||
});
|
||||
|
||||
test('adjustStatsForHiddenProtocols subtracts meshtastic counts when meshtastic hidden', () => {
|
||||
withApp((t) => {
|
||||
t.hiddenProtocols.clear();
|
||||
t.hiddenProtocols.add('meshtastic');
|
||||
const stats = { hour: 10, day: 50, week: 100, month: 200, meshcore: { hour: 2, day: 10, week: 20, month: 40 }, meshtastic: { hour: 8, day: 40, week: 80, month: 160 } };
|
||||
const result = t.adjustStatsForHiddenProtocols(stats);
|
||||
assert.equal(result.week, 20);
|
||||
assert.equal(result.day, 10);
|
||||
});
|
||||
});
|
||||
|
||||
test('adjustStatsForHiddenProtocols subtracts both when both hidden', () => {
|
||||
withApp((t) => {
|
||||
t.hiddenProtocols.clear();
|
||||
t.hiddenProtocols.add('meshcore');
|
||||
t.hiddenProtocols.add('meshtastic');
|
||||
const stats = { hour: 10, day: 50, week: 100, month: 200, meshcore: { hour: 2, day: 10, week: 20, month: 40 }, meshtastic: { hour: 8, day: 40, week: 80, month: 160 } };
|
||||
const result = t.adjustStatsForHiddenProtocols(stats);
|
||||
assert.equal(result.week, 0);
|
||||
assert.equal(result.day, 0);
|
||||
});
|
||||
});
|
||||
|
||||
test('adjustStatsForHiddenProtocols floors at zero', () => {
|
||||
withApp((t) => {
|
||||
t.hiddenProtocols.clear();
|
||||
t.hiddenProtocols.add('meshcore');
|
||||
const stats = { hour: 1, day: 5, week: 10, month: 20, meshcore: { hour: 50, day: 50, week: 50, month: 50 } };
|
||||
const result = t.adjustStatsForHiddenProtocols(stats);
|
||||
assert.equal(result.week, 0);
|
||||
assert.equal(result.day, 0);
|
||||
});
|
||||
});
|
||||
|
||||
test('adjustStatsForHiddenProtocols handles null stats gracefully', () => {
|
||||
withApp((t) => {
|
||||
t.hiddenProtocols.add('meshcore');
|
||||
assert.equal(t.adjustStatsForHiddenProtocols(null), null);
|
||||
assert.equal(t.adjustStatsForHiddenProtocols(undefined), undefined);
|
||||
});
|
||||
});
|
||||
|
||||
test('adjustStatsForHiddenProtocols handles missing protocol bucket', () => {
|
||||
withApp((t) => {
|
||||
t.hiddenProtocols.clear();
|
||||
t.hiddenProtocols.add('meshcore');
|
||||
const stats = { hour: 10, day: 50, week: 100, month: 200 };
|
||||
const result = t.adjustStatsForHiddenProtocols(stats);
|
||||
assert.equal(result.week, 100);
|
||||
});
|
||||
});
|
||||
|
||||
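Taken together, the tests above pin down a small contract for the stats adjuster. As a reading aid only, here is a minimal sketch consistent with those assertions — the function name matches the suite, but the explicit `hiddenProtocols` parameter is illustrative (the app reads it from shared state), and the shipped implementation may differ:

```js
// Reading aid, not the shipped code: subtract each hidden protocol's bucket
// from the totals, floor at zero, and pass null/undefined/no-op cases through.
function adjustStatsForHiddenProtocols(stats, hiddenProtocols) {
  if (!stats || hiddenProtocols.size === 0) return stats; // identity when nothing is hidden
  const adjusted = { ...stats };
  for (const protocol of hiddenProtocols) {
    const bucket = stats[protocol];
    if (!bucket) continue; // missing bucket leaves the totals untouched
    for (const key of ['hour', 'day', 'week', 'month']) {
      adjusted[key] = Math.max(0, (adjusted[key] ?? 0) - (bucket[key] ?? 0));
    }
  }
  return adjusted;
}
```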
@@ -0,0 +1,240 @@
/*
 * Copyright © 2025-26 l5yth & contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';
import { createDomEnvironment } from './dom-environment.js';
import { initializeApp } from '../main.js';

/** Minimal config that disables auto-refresh so we control timing. */
const BASE_CONFIG = Object.freeze({
  channel: 'Primary',
  frequency: '915MHz',
  refreshMs: 0,
  refreshIntervalSeconds: 0,
  chatEnabled: true,
  mapCenter: { lat: 0, lon: 0 },
  mapZoom: null,
  maxDistanceKm: 0,
  tileFilters: { light: '', dark: '' },
  instancesFeatureEnabled: false,
  instanceDomain: null,
  snapshotWindowSeconds: 3600,
});

/**
 * Build a stubbed fetch that records every call and responds with canned data.
 *
 * @param {Object} responsesByEndpoint Map of URL prefix to JSON response body.
 * @returns {{ fetch: Function, calls: Array<{ url: string, options: Object }> }}
 */
function buildStubFetch(responsesByEndpoint = {}) {
  const calls = [];

  function stubFetch(url, options = {}) {
    calls.push({ url, options });
    for (const [prefix, body] of Object.entries(responsesByEndpoint)) {
      if (url.includes(prefix)) {
        return Promise.resolve({
          ok: true,
          status: 200,
          json: () => Promise.resolve(
            typeof body === 'function' ? body() : body,
          ),
        });
      }
    }
    return Promise.resolve({
      ok: true,
      status: 200,
      json: () => Promise.resolve([]),
    });
  }

  return { fetch: stubFetch, calls };
}

/**
 * Run test body with a fetch-stubbed app instance.
 *
 * @param {Object} stubResponses Response map for the stub fetch.
 * @param {function(Object): Promise<void>} fn Receives { testUtils, calls }.
 */
async function withStubFetchApp(stubResponses, fn) {
  const env = createDomEnvironment({ includeBody: true });
  const originalFetch = globalThis.fetch;
  const { fetch: stubFetch, calls } = buildStubFetch(stubResponses);
  globalThis.fetch = stubFetch;
  try {
    const { _testUtils } = initializeApp(BASE_CONFIG);
    // Allow the initial refresh() to settle (it is async).
    await new Promise(r => setTimeout(r, 50));
    await fn({ testUtils: _testUtils, calls });
  } finally {
    globalThis.fetch = originalFetch;
    env.cleanup();
  }
}

// ---------------------------------------------------------------------------
// Verify fetch functions append since parameter
// ---------------------------------------------------------------------------

test('first refresh does not include since parameter in fetch URLs', async () => {
  await withStubFetchApp({}, ({ calls }) => {
    const apiCalls = calls.filter(c => c.url.startsWith('/api/'));
    assert.ok(apiCalls.length > 0, 'should have made API calls');
    for (const call of apiCalls) {
      assert.ok(
        !call.url.includes('since='),
        `first refresh should not pass since: ${call.url}`,
      );
    }
  });
});

test('second refresh includes since parameter for endpoints with timestamp data', async () => {
  const now = Math.floor(Date.now() / 1000);
  const stubResponses = {
    '/api/nodes': [{ node_id: '!aabb', last_heard: now, short_name: 'AB', role: 'CLIENT' }],
    '/api/messages': [{ id: 1, rx_time: now, from_id: '!aabb', text: 'hello' }],
    '/api/positions': [{ id: 1, node_id: '!aabb', rx_time: now, latitude: 52.5, longitude: 13.4 }],
    '/api/telemetry': [{ id: 1, node_id: '!aabb', rx_time: now, battery_level: 90 }],
    '/api/neighbors': [{ node_id: '!aabb', neighbor_id: '!ccdd', rx_time: now, snr: 10 }],
    '/api/traces': [{ id: 1, rx_time: now, src: 1, dest: 2 }],
  };

  await withStubFetchApp(stubResponses, async ({ testUtils, calls }) => {
    // Verify first refresh completed without since params
    const firstRoundCalls = [...calls];
    const firstApiCalls = firstRoundCalls.filter(c => c.url.startsWith('/api/'));
    assert.ok(firstApiCalls.length > 0, 'initial refresh should have fired');
    for (const call of firstApiCalls) {
      assert.ok(
        !call.url.includes('since='),
        `first refresh should not pass since: ${call.url}`,
      );
    }

    // Clear call log and trigger a second refresh
    calls.length = 0;
    await testUtils.refresh();
    await new Promise(r => setTimeout(r, 50));

    // Second refresh should include since= on all data endpoints
    const secondApiCalls = calls.filter(c => c.url.startsWith('/api/'));
    assert.ok(secondApiCalls.length > 0, 'second refresh should have fired');

    const nodeCall = secondApiCalls.find(c => c.url.includes('/api/nodes?'));
    assert.ok(nodeCall, 'should have made a nodes call');
    assert.ok(nodeCall.url.includes('since='), `nodes should include since: ${nodeCall.url}`);

    const posCall = secondApiCalls.find(c => c.url.includes('/api/positions?'));
    assert.ok(posCall, 'should have made a positions call');
    assert.ok(posCall.url.includes('since='), `positions should include since: ${posCall.url}`);

    const telCall = secondApiCalls.find(c => c.url.includes('/api/telemetry?'));
    assert.ok(telCall, 'should have made a telemetry call');
    assert.ok(telCall.url.includes('since='), `telemetry should include since: ${telCall.url}`);

    const nbCall = secondApiCalls.find(c => c.url.includes('/api/neighbors?'));
    assert.ok(nbCall, 'should have made a neighbors call');
    assert.ok(nbCall.url.includes('since='), `neighbors should include since: ${nbCall.url}`);

    const trCall = secondApiCalls.find(c => c.url.includes('/api/traces?'));
    assert.ok(trCall, 'should have made a traces call');
    assert.ok(trCall.url.includes('since='), `traces should include since: ${trCall.url}`);

    const msgCalls = secondApiCalls.filter(c => c.url.includes('/api/messages?'));
    assert.ok(msgCalls.length > 0, 'should have made message calls');
    for (const mc of msgCalls) {
      assert.ok(mc.url.includes('since='), `messages should include since: ${mc.url}`);
    }
  });
});

test('second refresh merges incremental data into existing state', async () => {
  const now = Math.floor(Date.now() / 1000);
  let callCount = 0;

  // First call returns node A, second call returns node B
  const stubResponses = {
    '/api/nodes': () => {
      callCount++;
      if (callCount <= 1) {
        return [{ node_id: '!aaaa', last_heard: now, short_name: 'AA', role: 'CLIENT' }];
      }
      return [{ node_id: '!bbbb', last_heard: now + 60, short_name: 'BB', role: 'CLIENT' }];
    },
  };

  await withStubFetchApp(stubResponses, async ({ testUtils, calls }) => {
    // After first refresh, call count should be 1
    assert.ok(callCount >= 1, 'first refresh should have fetched nodes');

    // Trigger second refresh
    calls.length = 0;
    await testUtils.refresh();
    await new Promise(r => setTimeout(r, 50));

    // The second refresh should have merged data
    assert.ok(callCount >= 2, 'second refresh should have fetched nodes again');
  });
});

test('fetch functions use cache: default option', async () => {
  await withStubFetchApp({}, ({ calls }) => {
    const apiCalls = calls.filter(c => c.url.startsWith('/api/'));
    for (const call of apiCalls) {
      assert.equal(
        call.options.cache,
        'default',
        `${call.url} should use cache:default`,
      );
    }
  });
});

test('messages fetch sends encrypted parameter when requested', async () => {
  await withStubFetchApp({}, ({ calls }) => {
    const encryptedCalls = calls.filter(
      c => c.url.includes('/api/messages') && c.url.includes('encrypted=true'),
    );
    assert.ok(encryptedCalls.length > 0, 'should have made encrypted message call');
  });
});

test('since parameter uses a 1-second overlap to avoid missing rows', async () => {
  const now = Math.floor(Date.now() / 1000);
  const stubResponses = {
    '/api/nodes': [{ node_id: '!test', last_heard: now, short_name: 'T', role: 'CLIENT' }],
  };

  await withStubFetchApp(stubResponses, async ({ testUtils, calls }) => {
    calls.length = 0;
    await testUtils.refresh();
    await new Promise(r => setTimeout(r, 50));

    const nodeCall = calls.find(c => c.url.includes('/api/nodes?'));
    assert.ok(nodeCall, 'should have nodes call on second refresh');
    // The since value should be (now - 1) to create the overlap
    const expectedSince = now - 1;
    assert.ok(
      nodeCall.url.includes(`since=${expectedSince}`),
      `expected since=${expectedSince} in URL: ${nodeCall.url}`,
    );
  });
});
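The suite above encodes an incremental-fetch contract: the first refresh pulls everything, later refreshes pass `since=` with a one-second overlap, and every call uses `cache: 'default'`. A hedged sketch of that pattern — the helper name and signature are illustrative, not the app's actual API:

```js
// Illustrative only: the first call omits since; later calls overlap by one
// second so rows that share the boundary timestamp are not skipped.
async function fetchIncremental(endpoint, newestSeenSeconds) {
  const url = newestSeenSeconds == null
    ? endpoint
    : `${endpoint}?since=${newestSeenSeconds - 1}`;
  const response = await fetch(url, { cache: 'default' });
  if (!response.ok) throw new Error(`HTTP ${response.status} from ${endpoint}`);
  return response.json();
}
```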
@@ -346,3 +346,46 @@ test('createMessageChatEntry: meshtastic message with @[Name] is NOT resolved as
    assert.ok(shortNameCount <= 1, 'only the sender badge should be present, no mention badge');
  });
});

// --- renderShortHtml badge padding ---

test('renderShortHtml leaves 4-char ASCII names unpadded', () => {
  withApp(() => {
    const html = globalThis.PotatoMesh.renderShortHtml('0ac7', 'CLIENT');
    assert.ok(!html.includes(' 0ac7'), 'should not add leading space');
    assert.ok(!html.includes('0ac7 '), 'should not add trailing space');
  });
});

test('renderShortHtml adds single space padding for short emoji names', () => {
  withApp(() => {
    const html = globalThis.PotatoMesh.renderShortHtml('\u26A1', 'CLIENT');
    // Should produce " ⚡ " — one leading, one trailing space (as &nbsp;)
    assert.ok(html.includes(' \u26A1 '), 'emoji should have one space on each side');
    // Should NOT have double leading spaces
    assert.ok(!html.includes('  \u26A1'), 'should not double-pad emoji');
  });
});

test('renderShortHtml adds single space padding for surrogate pair emoji', () => {
  withApp(() => {
    const html = globalThis.PotatoMesh.renderShortHtml('\uD83D\uDE43', 'CLIENT');
    // 🙃 is a surrogate pair (length 2 in JS) but 1 grapheme
    assert.ok(html.includes(' \uD83D\uDE43 '), 'surrogate emoji should have one space on each side');
  });
});

test('renderShortHtml adds single space padding for ZWJ emoji sequence', () => {
  withApp(() => {
    const zwj = '\u{1F3C3}\u{200D}\u{2642}\u{FE0F}'; // 🏃‍♂️ — length 5, 1 grapheme
    const html = globalThis.PotatoMesh.renderShortHtml(zwj, 'CLIENT');
    assert.ok(html.includes(` ${zwj} `), 'ZWJ emoji should have one space on each side');
  });
});

test('renderShortHtml adds single space padding for plain 2-char name', () => {
  withApp(() => {
    const html = globalThis.PotatoMesh.renderShortHtml('ab', 'CLIENT');
    assert.ok(html.includes(' ab '), '2-char name should have one space on each side');
  });
});
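These padding tests hinge on counting graphemes rather than UTF-16 code units: '0ac7' has four graphemes and stays unpadded, while ⚡ (one unit), 🙃 (a two-unit surrogate pair) and 🏃‍♂️ (a five-unit ZWJ sequence) are each a single grapheme and get one space per side. A sketch of that rule using `Intl.Segmenter` — the threshold of four and the helper itself are assumptions for illustration, not the renderer's real code:

```js
// Assumed rule for illustration: pad names shorter than 4 graphemes.
function padShortName(name) {
  const segmenter = new Intl.Segmenter(undefined, { granularity: 'grapheme' });
  const graphemeCount = [...segmenter.segment(name)].length; // ZWJ emoji count as 1
  return graphemeCount < 4 ? ` ${name} ` : name;
}
```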
@@ -0,0 +1,235 @@
/*
 * Copyright © 2025-26 l5yth & contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';

import { withApp } from './main-app-test-helpers.js';

/**
 * Build a stub Leaflet ``L`` that implements ``point({x, y})``. The renderer
 * uses ``L.point`` to construct an offset point in layer-pixel space and the
 * spider helper does the same.
 *
 * @returns {{ point: Function }}
 */
function makeStubLeaflet() {
  return {
    point(x, y) {
      return { x, y };
    }
  };
}

/**
 * Build a stub Leaflet map that implements the projection methods used by
 * ``projectColocatedOffsetLatLng``. The pseudo projection is the identity:
 * ``[lat, lon]`` → ``{ x: lon, y: lat }``. This keeps assertions easy while
 * still exercising both projection round-trip calls.
 *
 * @returns {{ latLngToLayerPoint: Function, layerPointToLatLng: Function, calls: { project: Array, unproject: Array } }}
 */
function makeStubMap() {
  const calls = { project: [], unproject: [] };
  return {
    calls,
    latLngToLayerPoint(latLng) {
      calls.project.push(latLng);
      return { x: latLng[1], y: latLng[0] };
    },
    layerPointToLatLng(point) {
      calls.unproject.push(point);
      return { lat: point.y, lng: point.x };
    }
  };
}

test('projectColocatedOffsetLatLng short-circuits the singleton case', () => {
  withApp((t) => {
    // No map injected → if the function reached the projection path it would
    // crash on a null reference. The early-return branch keeps it safe.
    const result = t.projectColocatedOffsetLatLng(10, 20, 0, 0);
    assert.deepEqual(result, [10, 20]);
  });
});

test('projectColocatedOffsetLatLng routes through the map projection for real offsets', () => {
  withApp((t) => {
    const previousL = globalThis.L;
    globalThis.L = makeStubLeaflet();
    const stubMap = makeStubMap();
    t._setMapForTests(stubMap);
    try {
      const result = t.projectColocatedOffsetLatLng(10, 20, 5, -3);
      // Identity projection: input [10, 20] → {x:20, y:10};
      // offset by (5, -3) → {x:25, y:7}; back-projection → {lat:7, lng:25}.
      assert.deepEqual(result, [7, 25]);
      assert.deepEqual(stubMap.calls.project, [[10, 20]]);
      assert.deepEqual(stubMap.calls.unproject, [{ x: 25, y: 7 }]);
    } finally {
      t._setMapForTests(null);
      globalThis.L = previousL;
    }
  });
});

test('refreshColocatedSpiderState bails out when no map is available', () => {
  withApp((t) => {
    let setLatLngCalls = 0;
    t._setColocatedSpiderStateForTests([
      {
        marker: { setLatLng() { setLatLngCalls += 1; } },
        line: null,
        lat: 0,
        lon: 0,
        dx: 1,
        dy: 1
      }
    ]);
    // map starts as null in the test harness; the guard must skip the work.
    t.refreshColocatedSpiderState();
    assert.equal(setLatLngCalls, 0);
    t._setColocatedSpiderStateForTests([]);
  });
});

test('refreshColocatedSpiderState updates marker and line through injected map', () => {
  withApp((t) => {
    const previousL = globalThis.L;
    globalThis.L = makeStubLeaflet();
    t._setMapForTests(makeStubMap());
    const markerLatLngs = [];
    const lineLatLngs = [];
    t._setColocatedSpiderStateForTests([
      {
        marker: { setLatLng(value) { markerLatLngs.push(value); } },
        line: { setLatLngs(value) { lineLatLngs.push(value); } },
        lat: 1,
        lon: 2,
        dx: 4,
        dy: -6
      }
    ]);
    try {
      t.refreshColocatedSpiderState();
      // [1,2] → {x:2,y:1}; offset (4,-6) → {x:6,y:-5}; back → [-5, 6].
      assert.deepEqual(markerLatLngs, [[-5, 6]]);
      assert.deepEqual(lineLatLngs, [[[1, 2], [-5, 6]]]);
    } finally {
      t._setMapForTests(null);
      t._setColocatedSpiderStateForTests([]);
      globalThis.L = previousL;
    }
  });
});

test('scheduleColocatedSpiderRefresh calls immediately when requestAnimationFrame is unavailable', () => {
  withApp((t) => {
    const previousRaf = globalThis.requestAnimationFrame;
    globalThis.requestAnimationFrame = undefined;
    let invocations = 0;
    t._setColocatedSpiderStateForTests([
      {
        // Stub map remains null → refreshColocatedSpiderState short-circuits;
        // we count how many times the public scheduler dispatches by adding
        // a marker that records setLatLng — but with no map it never runs.
        // Instead we observe invocations indirectly: replace the state with
        // one whose marker counts setLatLng if the projector ever runs.
        marker: { setLatLng() { invocations += 1; } },
        line: null,
        lat: 0,
        lon: 0,
        dx: 1,
        dy: 1
      }
    ]);
    try {
      // Even without rAF the function must not throw and must reach the
      // immediate-call branch (which itself short-circuits because there is
      // no map; the assertion is "did not throw" + invocations stays 0).
      assert.doesNotThrow(() => t.scheduleColocatedSpiderRefresh());
      assert.equal(invocations, 0);
    } finally {
      globalThis.requestAnimationFrame = previousRaf;
      t._setColocatedSpiderStateForTests([]);
    }
  });
});

test('scheduleColocatedSpiderRefresh coalesces multiple calls within one frame', () => {
  withApp((t) => {
    const previousRaf = globalThis.requestAnimationFrame;
    const previousL = globalThis.L;
    globalThis.L = makeStubLeaflet();
    t._setMapForTests(makeStubMap());

    let scheduled = 0;
    let pending = null;
    globalThis.requestAnimationFrame = (cb) => {
      scheduled += 1;
      pending = cb;
      return scheduled;
    };

    let setLatLngCalls = 0;
    t._setColocatedSpiderStateForTests([
      {
        marker: { setLatLng() { setLatLngCalls += 1; } },
        line: null,
        lat: 0,
        lon: 0,
        dx: 2,
        dy: 3
      }
    ]);
    try {
      t.scheduleColocatedSpiderRefresh();
      t.scheduleColocatedSpiderRefresh();
      t.scheduleColocatedSpiderRefresh();
      // All three calls must be coalesced into a single rAF schedule and the
      // refresh callback must not yet have fired.
      assert.equal(scheduled, 1);
      assert.equal(setLatLngCalls, 0);

      // Fire the queued frame; the refresh runs once and the next call
      // schedules a fresh frame (proving the pending handle was reset).
      pending();
      assert.equal(setLatLngCalls, 1);
      t.scheduleColocatedSpiderRefresh();
      assert.equal(scheduled, 2);
    } finally {
      globalThis.requestAnimationFrame = previousRaf;
      globalThis.L = previousL;
      t._setMapForTests(null);
      t._setColocatedSpiderStateForTests([]);
    }
  });
});
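The scheduler tests above describe a classic requestAnimationFrame coalescing pattern: repeated calls within one frame arm a single callback, the pending handle is cleared before the refresh runs so the next call can re-arm, and a missing rAF falls back to an immediate synchronous call. In miniature, with illustrative names:

```js
// Minimal coalescing scheduler matching the behaviour the tests assert.
let pendingFrame = null;
function scheduleRefresh(refreshFn) {
  if (pendingFrame !== null) return; // already armed for this frame
  if (typeof requestAnimationFrame !== 'function') {
    refreshFn(); // no rAF (e.g. under Node): run immediately
    return;
  }
  pendingFrame = requestAnimationFrame(() => {
    pendingFrame = null; // reset first so refreshFn may schedule again
    refreshFn();
  });
}
```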
test('_setColocatedSpiderStateForTests returns the previous state and rejects non-arrays', () => {
  withApp((t) => {
    const replacement = [{ marker: null, line: null, lat: 0, lon: 0, dx: 0, dy: 0 }];
    const initial = t._setColocatedSpiderStateForTests(replacement);
    // Initial state at fresh init is an empty array.
    assert.deepEqual(initial, []);
    assert.equal(t._getColocatedSpiderStateForTests(), replacement);
    // Passing a non-array clears the state to an empty array, leaving the
    // previous (replacement) value as the return.
    const previous = t._setColocatedSpiderStateForTests('not-an-array');
    assert.equal(previous, replacement);
    assert.deepEqual(t._getColocatedSpiderStateForTests(), []);
  });
});
@@ -26,24 +26,24 @@ import {

const NOW = 1_700_000_000;

test('computeLocalActiveNodeStats calculates local hour/day/week/month counts', () => {
test('computeLocalActiveNodeStats calculates local hour/day/week/month counts with per-protocol data', () => {
  const nodes = [
    { last_heard: NOW - 60 },
    { last_heard: NOW - 4_000 },
    { last_heard: NOW - 90_000 },
    { last_heard: NOW - (8 * 86_400) },
    { last_heard: NOW - (20 * 86_400) },
    { last_heard: NOW - 60, protocol: 'meshtastic' },
    { last_heard: NOW - 4_000, protocol: 'meshcore' },
    { last_heard: NOW - 90_000, protocol: 'meshtastic' },
    { last_heard: NOW - (8 * 86_400), protocol: 'meshcore' },
    { last_heard: NOW - (20 * 86_400), protocol: 'meshtastic' },
  ];

  const stats = computeLocalActiveNodeStats(nodes, NOW);

  assert.deepEqual(stats, {
    hour: 1,
    day: 2,
    week: 3,
    month: 5,
    sampled: true,
  });
  assert.equal(stats.hour, 1);
  assert.equal(stats.day, 2);
  assert.equal(stats.week, 3);
  assert.equal(stats.month, 5);
  assert.equal(stats.sampled, true);
  assert.deepEqual(stats.meshcore, { hour: 0, day: 1, week: 1, month: 2 });
  assert.deepEqual(stats.meshtastic, { hour: 1, day: 1, week: 2, month: 3 });
});

test('normaliseActiveNodeStatsPayload validates and normalizes API payload', () => {
@@ -57,17 +57,27 @@ test('normaliseActiveNodeStatsPayload validates and normalizes API payload', ()
    sampled: false,
  };

  assert.deepEqual(normaliseActiveNodeStatsPayload(payload), {
    hour: 11,
    day: 22,
    week: 33,
    month: 44,
    sampled: false,
  });
  const result = normaliseActiveNodeStatsPayload(payload);
  assert.equal(result.hour, 11);
  assert.equal(result.day, 22);
  assert.equal(result.week, 33);
  assert.equal(result.month, 44);
  assert.equal(result.sampled, false);

  assert.equal(normaliseActiveNodeStatsPayload({}), null);
});

test('normaliseActiveNodeStatsPayload includes per-protocol buckets when present', () => {
  const result = normaliseActiveNodeStatsPayload({
    active_nodes: { hour: 10, day: 20, week: 30, month: 40 },
    meshcore: { hour: 3, day: 8, week: 12, month: 15 },
    meshtastic: { hour: 7, day: 12, week: 18, month: 25 },
    sampled: false,
  });
  assert.deepEqual(result.meshcore, { hour: 3, day: 8, week: 12, month: 15 });
  assert.deepEqual(result.meshtastic, { hour: 7, day: 12, week: 18, month: 25 });
});

test('normaliseActiveNodeStatsPayload rejects malformed stat values', () => {
  assert.equal(
    normaliseActiveNodeStatsPayload({ active_nodes: { hour: 'x', day: 1, week: 1, month: 1 } }),
@@ -140,8 +150,8 @@ test('fetchActiveNodeStats reuses cached /api/stats response for repeated calls'

test('fetchActiveNodeStats falls back to local counts when stats fetch fails', async () => {
  const nodes = [
    { last_heard: NOW - 120 },
    { last_heard: NOW - (10 * 86_400) },
    { last_heard: NOW - 120, protocol: 'meshtastic' },
    { last_heard: NOW - (10 * 86_400), protocol: 'meshcore' },
  ];
  const fetchImpl = async () => {
    throw new Error('network down');
@@ -149,13 +159,13 @@ test('fetchActiveNodeStats falls back to local counts when stats fetch fails', a

  const stats = await fetchActiveNodeStats({ nodes, nowSeconds: NOW, fetchImpl });

  assert.deepEqual(stats, {
    hour: 1,
    day: 1,
    week: 1,
    month: 2,
    sampled: true,
  });
  assert.equal(stats.hour, 1);
  assert.equal(stats.day, 1);
  assert.equal(stats.week, 1);
  assert.equal(stats.month, 2);
  assert.equal(stats.sampled, true);
  assert.ok(stats.meshcore != null, 'fallback should include meshcore');
  assert.ok(stats.meshtastic != null, 'fallback should include meshtastic');
});

test('fetchActiveNodeStats falls back to local counts on non-OK HTTP responses', async () => {
@@ -183,28 +193,10 @@ test('fetchActiveNodeStats falls back to local counts on invalid payloads', asyn
  assert.equal(stats.month, 0);
});

test('formatActiveNodeStatsText emits expected dashboard string', () => {
test('formatActiveNodeStatsText emits compact day/week/month footer string', () => {
  const text = formatActiveNodeStatsText({
    channel: 'LongFast',
    frequency: '868MHz',
    stats: { hour: 1, day: 2, week: 3, month: 4, sampled: false },
    stats: { day: 2, week: 3, month: 4, sampled: false },
  });

  assert.equal(
    text,
    'LongFast (868MHz) — active nodes: 1/hour, 2/day, 3/week, 4/month.'
  );
});

test('formatActiveNodeStatsText appends sampled marker when local fallback is used', () => {
  const text = formatActiveNodeStatsText({
    channel: 'LongFast',
    frequency: '868MHz',
    stats: { hour: 9, day: 8, week: 7, month: 6, sampled: true },
  });

  assert.equal(
    text,
    'LongFast (868MHz) — active nodes: 9/hour, 8/day, 7/week, 6/month (sampled).'
  );
  assert.equal(text, '2/day \u00b7 3/week \u00b7 4/month');
});
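The rewritten expectation shows the footer string collapsing from the verbose dashboard sentence to a compact `day · week · month` triple separated by U+00B7 middle dots. A sketch that satisfies the new assertion — ignoring the `sampled` flag, which the updated test no longer checks:

```js
// Matches the asserted output '2/day · 3/week · 4/month' for the test input.
function formatActiveNodeStatsText({ stats }) {
  return `${stats.day}/day \u00b7 ${stats.week}/week \u00b7 ${stats.month}/month`;
}
```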
@@ -0,0 +1,321 @@
/*
 * Copyright © 2025-26 l5yth & contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';

import { setupApp, setupAppWithOptions } from './main-app-test-helpers.js';

const NOW = 1_700_000_000;

// ---------------------------------------------------------------------------
// updateTitleCount
// ---------------------------------------------------------------------------

test('updateTitleCount does not throw when title and header elements are absent', () => {
  const { testUtils, cleanup } = setupApp();
  try {
    assert.doesNotThrow(() => {
      testUtils.updateTitleCount({ hour: 5, day: 20, week: 42, month: 100, sampled: false });
    });
  } finally {
    cleanup();
  }
});

test('updateTitleCount handles null and undefined stats gracefully', () => {
  const { testUtils, cleanup } = setupApp();
  try {
    assert.doesNotThrow(() => testUtils.updateTitleCount(null));
    assert.doesNotThrow(() => testUtils.updateTitleCount(undefined));
    assert.doesNotThrow(() => testUtils.updateTitleCount({}));
  } finally {
    cleanup();
  }
});

// ---------------------------------------------------------------------------
// updateLegendProtocolCounts
// ---------------------------------------------------------------------------

test('updateLegendProtocolCounts returns early when both count elements are null', () => {
  const { testUtils, cleanup } = setupApp();
  try {
    // Default state: meshcoreCountEl and meshtasticCountEl are null — should not throw.
    assert.doesNotThrow(() => {
      testUtils.updateLegendProtocolCounts({
        week: 10,
        meshcore: { hour: 1, day: 2, week: 3, month: 4 },
        meshtastic: { hour: 5, day: 6, week: 7, month: 8 },
      });
    });
  } finally {
    cleanup();
  }
});

test('updateLegendProtocolCounts sets per-protocol counts when elements are present', () => {
  const { testUtils, cleanup } = setupApp();
  try {
    const mcEl = { textContent: '' };
    const mtEl = { textContent: '' };
    testUtils._setProtocolCountElements(mcEl, mtEl);

    testUtils.updateLegendProtocolCounts({
      week: 3,
      meshcore: { hour: 1, day: 1, week: 2, month: 3 },
      meshtastic: { hour: 0, day: 1, week: 1, month: 2 },
    });

    assert.equal(mcEl.textContent, ' (2)', 'meshcore count should be 2');
    assert.equal(mtEl.textContent, ' (1)', 'meshtastic count should be 1');
  } finally {
    cleanup();
  }
});

test('updateLegendProtocolCounts handles missing per-protocol data gracefully', () => {
  const { testUtils, cleanup } = setupApp();
  try {
    const mcEl = { textContent: '' };
    const mtEl = { textContent: '' };
    testUtils._setProtocolCountElements(mcEl, mtEl);

    // Stats without per-protocol breakdowns (e.g. from an old instance).
    testUtils.updateLegendProtocolCounts({ week: 5 });

    assert.equal(mcEl.textContent, ' (0)');
    assert.equal(mtEl.textContent, ' (0)');
  } finally {
    cleanup();
  }
});

test('updateLegendProtocolCounts works when only meshcoreCountEl is present', () => {
  const { testUtils, cleanup } = setupApp();
  try {
    const mcEl = { textContent: '' };
    testUtils._setProtocolCountElements(mcEl, null);

    testUtils.updateLegendProtocolCounts({
      week: 5,
      meshcore: { hour: 0, day: 0, week: 1, month: 2 },
    });
    assert.equal(mcEl.textContent, ' (1)');
  } finally {
    cleanup();
  }
});

test('updateLegendProtocolCounts works when only meshtasticCountEl is present', () => {
  const { testUtils, cleanup } = setupApp();
  try {
    const mtEl = { textContent: '' };
    testUtils._setProtocolCountElements(null, mtEl);

    testUtils.updateLegendProtocolCounts({
      week: 5,
      meshtastic: { hour: 0, day: 0, week: 1, month: 2 },
    });
    assert.equal(mtEl.textContent, ' (1)');
  } finally {
    cleanup();
  }
});

// ---------------------------------------------------------------------------
// updateFooterStats
// ---------------------------------------------------------------------------

test('updateFooterStats is a no-op when footerActiveNodes element is absent', () => {
  const { testUtils, cleanup } = setupApp();
  try {
    assert.doesNotThrow(() => {
      testUtils.updateFooterStats({ day: 1, week: 2, month: 3, sampled: false });
    });
  } finally {
    cleanup();
  }
});

test('updateFooterStats populates the active-stats element when present', () => {
  const { testUtils, env, cleanup } = setupAppWithOptions({
    extraElements: ['footerActiveNodes'],
  });
  try {
    const el = env.document.getElementById('footerActiveNodes');
    testUtils.updateFooterStats({ day: 10, week: 20, month: 30, sampled: false });

    assert.ok(
      el.textContent.includes('/day'),
      `expected footerActiveNodes to contain "/day", got: ${el.textContent}`,
    );
    assert.ok(
      el.textContent.includes('10/day'),
      `expected footerActiveNodes to contain "10/day", got: ${el.textContent}`,
    );
  } finally {
    cleanup();
  }
});

// ---------------------------------------------------------------------------
// applyProtocolVisibility
// ---------------------------------------------------------------------------

test('applyProtocolVisibility hides meshcore column when meshcore week is 0', () => {
  const { testUtils, cleanup } = setupApp();
  try {
    const mcCol = { style: { display: '' } };
    const mtCol = { style: { display: '' } };
    testUtils._setProtocolColElements(mcCol, mtCol);

    testUtils.applyProtocolVisibility({
      meshcore: { hour: 0, day: 0, week: 0, month: 0 },
      meshtastic: { hour: 1, day: 5, week: 10, month: 20 },
    });

    assert.equal(mcCol.style.display, 'none', 'meshcore column should be hidden');
    assert.equal(mtCol.style.display, '', 'meshtastic column should remain visible');
  } finally {
    cleanup();
  }
});

test('applyProtocolVisibility hides meshtastic column when meshtastic week is 0', () => {
  const { testUtils, cleanup } = setupApp();
  try {
    const mcCol = { style: { display: '' } };
    const mtCol = { style: { display: '' } };
    testUtils._setProtocolColElements(mcCol, mtCol);

    testUtils.applyProtocolVisibility({
      meshcore: { hour: 1, day: 5, week: 10, month: 20 },
      meshtastic: { hour: 0, day: 0, week: 0, month: 0 },
    });

    assert.equal(mcCol.style.display, '', 'meshcore column should remain visible');
    assert.equal(mtCol.style.display, 'none', 'meshtastic column should be hidden');
  } finally {
    cleanup();
  }
});

test('applyProtocolVisibility shows both columns when both protocols have active nodes', () => {
  const { testUtils, cleanup } = setupApp();
  try {
    const mcCol = { style: { display: 'none' } };
    const mtCol = { style: { display: 'none' } };
    testUtils._setProtocolColElements(mcCol, mtCol);

    testUtils.applyProtocolVisibility({
      meshcore: { hour: 1, day: 2, week: 5, month: 10 },
      meshtastic: { hour: 2, day: 3, week: 8, month: 15 },
    });

    assert.equal(mcCol.style.display, '', 'meshcore column should be visible');
    assert.equal(mtCol.style.display, '', 'meshtastic column should be visible');
  } finally {
    cleanup();
  }
});

test('applyProtocolVisibility handles missing per-protocol data gracefully', () => {
  const { testUtils, cleanup } = setupApp();
  try {
    const mcCol = { style: { display: '' } };
    const mtCol = { style: { display: '' } };
    testUtils._setProtocolColElements(mcCol, mtCol);

    // No per-protocol data at all — treat as 0.
    testUtils.applyProtocolVisibility({ week: 5 });

    assert.equal(mcCol.style.display, 'none');
    assert.equal(mtCol.style.display, 'none');
  } finally {
    cleanup();
  }
});
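These visibility tests reduce to one rule: a protocol's legend column is shown only when that protocol had at least one active node in the past week, with a missing bucket treated as zero. A minimal sketch, with element handles passed in for clarity (the app resolves them from the DOM):

```js
// Hide a legend column when its protocol had no active nodes this week.
function applyProtocolVisibility(stats, meshcoreCol, meshtasticCol) {
  const weekCount = bucket => (bucket && bucket.week) || 0;
  if (meshcoreCol) meshcoreCol.style.display = weekCount(stats.meshcore) > 0 ? '' : 'none';
  if (meshtasticCol) meshtasticCol.style.display = weekCount(stats.meshtastic) > 0 ? '' : 'none';
}
```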
// ---------------------------------------------------------------------------
// restartAutoRefresh
// ---------------------------------------------------------------------------

test('restartAutoRefresh does not start a timer when refreshMs is 0', () => {
  // MINIMAL_CONFIG has refreshMs: 0 — timer must not be armed.
  const origSetInterval = globalThis.setInterval;
  const calls = [];
  globalThis.setInterval = (...args) => { calls.push(args); return origSetInterval(...args); };
  try {
    const { cleanup } = setupApp(); // uses refreshMs: 0
    // restartAutoRefresh is called during init; no timer should have been started.
    assert.equal(calls.length, 0, 'setInterval should not be called with refreshMs=0');
    cleanup();
  } finally {
    globalThis.setInterval = origSetInterval;
  }
});

test('restartAutoRefresh starts a timer when refreshMs > 0', () => {
  const timers = [];
  const origSetInterval = globalThis.setInterval;
  const origClearInterval = globalThis.clearInterval;
  globalThis.setInterval = (fn, ms) => {
    const id = Symbol('timer');
    timers.push({ fn, ms, id });
    return id;
  };
  globalThis.clearInterval = () => {};

  try {
    const { cleanup } = setupAppWithOptions({ configOverrides: { refreshMs: 30_000 } });
    assert.equal(timers.length, 1, 'setInterval should be called once during init');
    assert.equal(timers[0].ms, 30_000, 'interval should match configured refreshMs');
    cleanup();
  } finally {
    globalThis.setInterval = origSetInterval;
    globalThis.clearInterval = origClearInterval;
  }
});

test('restartAutoRefresh clears the existing timer before starting a new one', () => {
  const cleared = [];
  const timers = [];
  const origSetInterval = globalThis.setInterval;
  const origClearInterval = globalThis.clearInterval;
  globalThis.setInterval = (fn, ms) => {
    const id = Symbol('timer');
    timers.push(id);
    return id;
  };
  globalThis.clearInterval = id => { cleared.push(id); };

  try {
    const { testUtils, cleanup } = setupAppWithOptions({ configOverrides: { refreshMs: 30_000 } });
    // One timer started during init.
    assert.equal(timers.length, 1);

    // Calling restartAutoRefresh again must clear the first timer and start a new one.
    testUtils.restartAutoRefresh();
    assert.equal(cleared.length, 1, 'existing timer should be cleared');
    assert.equal(cleared[0], timers[0], 'the original timer id should be cleared');
    assert.equal(timers.length, 2, 'a new timer should be started');
    cleanup();
  } finally {
    globalThis.setInterval = origSetInterval;
    globalThis.clearInterval = origClearInterval;
  }
});
@@ -0,0 +1,407 @@
/*
 * Copyright © 2025-26 l5yth & contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';

import {
  buildRenderableEntries,
  computeColocatedOffsets,
  isOffsetSignificant,
  refreshSpiderPositions,
  __testUtils
} from '../map-colocated-offset.js';

const {
  DEFAULT_PRECISION,
  DEFAULT_BASE_RADIUS_PX,
  DEFAULT_RADIUS_GROWTH_PX,
  MAX_PRECISION,
  OFFSET_EPSILON_PX,
  coordinateKey,
  normalisePrecision,
  normalisePositive
} = __testUtils;

/**
 * Build a canonical entry shape for tests.
 *
 * @param {string} id Node identifier used for stable ordering.
 * @param {number} lat Latitude in degrees.
 * @param {number} lon Longitude in degrees.
 * @returns {{node: {node_id: string}, lat: number, lon: number}} Entry record.
 */
function makeEntry(id, lat, lon) {
  return { node: { node_id: id }, lat, lon };
}

/**
 * Assert that two floating-point numbers are within a small epsilon.
 *
 * @param {number} actual Observed value.
 * @param {number} expected Reference value.
 * @param {number} [epsilon=1e-9] Permitted absolute difference.
 * @returns {void}
 */
function approximatelyEqual(actual, expected, epsilon = 1e-9) {
  assert.ok(
    Math.abs(actual - expected) <= epsilon,
    `${actual} is not within ${epsilon} of ${expected}`
  );
}

test('returns empty array for empty/invalid input', () => {
  assert.deepEqual(computeColocatedOffsets([]), []);
  assert.deepEqual(computeColocatedOffsets(null), []);
  assert.deepEqual(computeColocatedOffsets(undefined), []);
  assert.deepEqual(computeColocatedOffsets('not-an-array'), []);
});

test('singleton group passes through with zero offset', () => {
  const entries = [makeEntry('a', 52.5, 13.4)];
  const result = computeColocatedOffsets(entries);
  assert.equal(result.length, 1);
  assert.equal(result[0].entry, entries[0]);
  assert.equal(result[0].dx, 0);
  assert.equal(result[0].dy, 0);
});

test('two co-located entries get opposite offsets at base radius', () => {
  const entries = [makeEntry('a', 1.23456, 4.56789), makeEntry('b', 1.23456, 4.56789)];
  const result = computeColocatedOffsets(entries);
  assert.equal(result.length, 2);
  for (const slot of result) {
    const magnitude = Math.hypot(slot.dx, slot.dy);
    approximatelyEqual(magnitude, DEFAULT_BASE_RADIUS_PX);
  }
  // Two slots, 180° apart → dx values are negatives of each other (and dy too).
  approximatelyEqual(result[0].dx + result[1].dx, 0);
  approximatelyEqual(result[0].dy + result[1].dy, 0);
});

test('three+ co-located entries are evenly spaced on a single circle', () => {
  const entries = [
    makeEntry('a', 10, 20),
    makeEntry('b', 10, 20),
    makeEntry('c', 10, 20)
  ];
  const result = computeColocatedOffsets(entries);
  // Group of 3 → radius grows by one growth step beyond the base ring.
  const expectedRadius = DEFAULT_BASE_RADIUS_PX + DEFAULT_RADIUS_GROWTH_PX;
  for (const slot of result) {
    approximatelyEqual(Math.hypot(slot.dx, slot.dy), expectedRadius);
  }
  // Sum of vectors at evenly spaced angles should cancel to (≈0, ≈0).
  const sumX = result.reduce((acc, slot) => acc + slot.dx, 0);
  const sumY = result.reduce((acc, slot) => acc + slot.dy, 0);
  approximatelyEqual(sumX, 0, 1e-9);
  approximatelyEqual(sumY, 0, 1e-9);
});

test('groups of five or more grow the offset radius by radiusGrowthPx per extra node', () => {
  const entries = [];
  for (let i = 0; i < 5; i += 1) {
    entries.push(makeEntry(`n${i}`, 0, 0));
  }
  const result = computeColocatedOffsets(entries);
  const expectedRadius = DEFAULT_BASE_RADIUS_PX + DEFAULT_RADIUS_GROWTH_PX * 3;
  for (const slot of result) {
    approximatelyEqual(Math.hypot(slot.dx, slot.dy), expectedRadius);
  }
});
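The radius assertions above all follow one formula: a co-located group of n ≥ 2 markers sits on a ring of `basePx + growthPx * (n - 2)` pixels, so a pair uses the base ring, three nodes add one growth step, and five nodes add three (matching the `DEFAULT_RADIUS_GROWTH_PX * 3` check). As a one-liner derived from the tests:

```js
// Ring radius implied by the tests: pairs at basePx, +growthPx per extra node.
function ringRadius(groupSize, basePx, growthPx) {
  return basePx + growthPx * Math.max(0, groupSize - 2);
}
// ringRadius(2, b, g) === b; ringRadius(3, b, g) === b + g;
// ringRadius(5, b, g) === b + 3 * g — matching the five-node test.
```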
test('entries at distinct coordinates are not offset', () => {
|
||||
const entries = [
|
||||
makeEntry('a', 10, 20),
|
||||
makeEntry('b', 11, 20),
|
||||
makeEntry('c', 10, 21)
|
||||
];
|
||||
const result = computeColocatedOffsets(entries);
|
||||
for (const slot of result) {
|
||||
assert.equal(slot.dx, 0);
|
||||
assert.equal(slot.dy, 0);
|
||||
}
|
||||
});
|
||||
|
||||
test('precision option controls bucket granularity', () => {
|
||||
const closeEntries = [makeEntry('a', 1.000001, 2.000001), makeEntry('b', 1.000002, 2.000002)];
|
||||
const closeResult = computeColocatedOffsets(closeEntries);
|
||||
// At default precision (5dp) these round to identical keys → both offset.
|
||||
assert.notEqual(closeResult[0].dx, 0);
|
||||
assert.notEqual(closeResult[1].dx, 0);
|
||||
|
||||
const farEntries = [makeEntry('a', 1.0001, 2.0001), makeEntry('b', 1.0009, 2.0001)];
|
||||
const farResult = computeColocatedOffsets(farEntries);
|
||||
// At default precision the 4th decimal differs → distinct buckets, no offset.
|
||||
for (const slot of farResult) {
|
||||
assert.equal(slot.dx, 0);
|
||||
assert.equal(slot.dy, 0);
|
||||
}
|
||||
});
|
||||
|
||||
test('custom baseRadiusPx and radiusGrowthPx override defaults', () => {
|
||||
const entries = [
|
||||
makeEntry('a', 0, 0),
|
||||
makeEntry('b', 0, 0),
|
||||
makeEntry('c', 0, 0),
|
||||
makeEntry('d', 0, 0)
|
||||
];
|
||||
const result = computeColocatedOffsets(entries, { baseRadiusPx: 20, radiusGrowthPx: 10 });
|
||||
// 4 entries → radius = 20 + 10 * (4 - 2) = 40.
|
||||
for (const slot of result) {
|
||||
approximatelyEqual(Math.hypot(slot.dx, slot.dy), 40);
|
||||
}
|
||||
});

test('custom precision overrides default bucketing', () => {
  // At precision=5 these are distinct buckets (no offset); at precision=2
  // both round to "1.00,2.00" and merge into a single co-located group.
  const entries = [makeEntry('a', 1.001, 2.001), makeEntry('b', 1.004, 2.004)];
  const defaultResult = computeColocatedOffsets(entries);
  for (const slot of defaultResult) {
    assert.equal(slot.dx, 0);
    assert.equal(slot.dy, 0);
  }
  const coarseResult = computeColocatedOffsets(entries, { precision: 2 });
  assert.notEqual(coarseResult[0].dx, 0);
  assert.notEqual(coarseResult[1].dx, 0);
});

test('invalid option values fall back to defaults', () => {
  // NaN / negative values must not corrupt geometry — fall back to defaults.
  const entries = [makeEntry('a', 0, 0), makeEntry('b', 0, 0)];
  const result = computeColocatedOffsets(entries, {
    baseRadiusPx: Number.NaN,
    radiusGrowthPx: -3,
    precision: -1
  });
  for (const slot of result) {
    approximatelyEqual(Math.hypot(slot.dx, slot.dy), DEFAULT_BASE_RADIUS_PX);
  }
});

test('angular slot assignment is stable across input shuffles', () => {
  const baseA = makeEntry('a', 5, 5);
  const baseB = makeEntry('b', 5, 5);
  const baseC = makeEntry('c', 5, 5);
  const orderedResult = computeColocatedOffsets([baseA, baseB, baseC]);
  const shuffledResult = computeColocatedOffsets([baseC, baseA, baseB]);
  // Build a node_id → offset map for both calls and ensure they match.
  const orderedOffsets = new Map(orderedResult.map(slot => [slot.entry.node.node_id, slot]));
  const shuffledOffsets = new Map(shuffledResult.map(slot => [slot.entry.node.node_id, slot]));
  for (const id of ['a', 'b', 'c']) {
    approximatelyEqual(orderedOffsets.get(id).dx, shuffledOffsets.get(id).dx);
    approximatelyEqual(orderedOffsets.get(id).dy, shuffledOffsets.get(id).dy);
  }
});

test('result order matches input order', () => {
  const entries = [
    makeEntry('z', 1, 1),
    makeEntry('a', 0, 0),
    makeEntry('m', 1, 1),
    makeEntry('b', 0, 0)
  ];
  const result = computeColocatedOffsets(entries);
  assert.equal(result.length, entries.length);
  for (let i = 0; i < entries.length; i += 1) {
    assert.equal(result[i].entry, entries[i]);
  }
});

test('entries without node_id still receive deterministic slots', () => {
  // Missing node_id falls back to '' in the comparator — ensure no exception
  // is thrown and both entries still get base-radius offsets.
  const entries = [
    { node: {}, lat: 0, lon: 0 },
    { node: null, lat: 0, lon: 0 }
  ];
  const result = computeColocatedOffsets(entries);
  for (const slot of result) {
    approximatelyEqual(Math.hypot(slot.dx, slot.dy), DEFAULT_BASE_RADIUS_PX);
  }
});

test('coordinateKey formats lat/lon at requested precision', () => {
  assert.equal(coordinateKey(1.234567, 7.654321, 3), '1.235,7.654');
  assert.equal(coordinateKey(0, 0, DEFAULT_PRECISION), '0.00000,0.00000');
});
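
// A sketch of the bucket key the assertions above imply (assumed shape, not
// copied from the module): fix both coordinates to the requested precision
// and join them, so equal keys mean "same co-location bucket".
function coordinateKeySketch(lat, lon, precision) {
  return `${lat.toFixed(precision)},${lon.toFixed(precision)}`;
}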

test('normalisePrecision sanitises invalid inputs', () => {
  assert.equal(normalisePrecision(3), 3);
  assert.equal(normalisePrecision(0), 0);
  assert.equal(normalisePrecision(2.7), 2);
  assert.equal(normalisePrecision(-1), DEFAULT_PRECISION);
  assert.equal(normalisePrecision(Number.NaN), DEFAULT_PRECISION);
  // Above MAX_PRECISION the value is clamped so toFixed cannot throw.
  assert.equal(normalisePrecision(MAX_PRECISION + 50), MAX_PRECISION);
  assert.doesNotThrow(() => (0).toFixed(normalisePrecision(1e6)));
});
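
// One implementation consistent with every case above (a sketch, reusing the
// module's DEFAULT_PRECISION / MAX_PRECISION constants): floor valid values,
// clamp to the toFixed-safe maximum, fall back for negatives and NaN.
function normalisePrecisionSketch(value) {
  if (!Number.isFinite(value) || value < 0) return DEFAULT_PRECISION;
  return Math.min(Math.floor(value), MAX_PRECISION);
}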

test('entries sharing identical node_id fall back to input index for ordering', () => {
  // Repeated calls with the same input must produce identical offsets even
  // when ids tie — the secondary index tie-break makes this independent of
  // the host engine's sort stability guarantees.
  const entries = [
    { node: { node_id: 'dup' }, lat: 0, lon: 0 },
    { node: { node_id: 'dup' }, lat: 0, lon: 0 },
    { node: { node_id: 'dup' }, lat: 0, lon: 0 }
  ];
  const first = computeColocatedOffsets(entries);
  const second = computeColocatedOffsets(entries);
  for (let i = 0; i < entries.length; i += 1) {
    approximatelyEqual(first[i].dx, second[i].dx);
    approximatelyEqual(first[i].dy, second[i].dy);
  }
  // The first entry by index should land at angle 0 (dy ≈ 0, dx > 0).
  approximatelyEqual(first[0].dy, 0);
  assert.ok(first[0].dx > 0);
});

test('normalisePositive sanitises invalid inputs', () => {
  assert.equal(normalisePositive(5, 10), 5);
  assert.equal(normalisePositive(0, 10), 0);
  assert.equal(normalisePositive(-1, 10), 10);
  assert.equal(normalisePositive(Number.NaN, 10), 10);
});
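
// The matching one-liner (a sketch of the assumed contract): accept finite
// non-negative numbers, otherwise return the caller-provided fallback.
function normalisePositiveSketch(value, fallback) {
  return Number.isFinite(value) && value >= 0 ? value : fallback;
}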

test('buildRenderableEntries returns empty for null/undefined/non-iterable input', () => {
  assert.deepEqual(buildRenderableEntries(null), []);
  assert.deepEqual(buildRenderableEntries(undefined), []);
  assert.deepEqual(buildRenderableEntries(42), []);
  assert.deepEqual(buildRenderableEntries('string'), []);
});

test('buildRenderableEntries parses lat/lon and skips invalid nodes', () => {
  const nodes = [
    { latitude: '10', longitude: '20' },
    null,
    { latitude: null, longitude: '5' },
    { latitude: '', longitude: '' },
    { latitude: 'NaN', longitude: '3' },
    { latitude: 30, longitude: 40 }
  ];
  const result = buildRenderableEntries(nodes);
  assert.equal(result.length, 2);
  assert.equal(result[0].lat, 10);
  assert.equal(result[0].lon, 20);
  assert.equal(result[0].node, nodes[0]);
  assert.equal(result[1].lat, 30);
  assert.equal(result[1].lon, 40);
  assert.equal(result[1].node, nodes[5]);
});

test('buildRenderableEntries respects maxDistanceKm', () => {
  const nodes = [
    { latitude: 1, longitude: 2, distance_km: 5 },
    { latitude: 3, longitude: 4, distance_km: 15 },
    { latitude: 5, longitude: 6 } // no distance → always included
  ];
  const result = buildRenderableEntries(nodes, { maxDistanceKm: 10 });
  assert.equal(result.length, 2);
  assert.equal(result[0].lat, 1);
  assert.equal(result[1].lat, 5);
});

test('buildRenderableEntries ignores maxDistanceKm when zero/negative/non-finite', () => {
  const nodes = [
    { latitude: 1, longitude: 2, distance_km: 1000 }
  ];
  assert.equal(buildRenderableEntries(nodes, { maxDistanceKm: 0 }).length, 1);
  assert.equal(buildRenderableEntries(nodes, { maxDistanceKm: -5 }).length, 1);
  assert.equal(buildRenderableEntries(nodes, { maxDistanceKm: NaN }).length, 1);
  assert.equal(buildRenderableEntries(nodes).length, 1);
});

test('isOffsetSignificant reports true for real offsets and false near zero', () => {
  // Strict equality would treat radius * sin(π) (~1.7e-15) as non-zero;
  // the hypotenuse-based check rejects values below OFFSET_EPSILON_PX.
  assert.equal(isOffsetSignificant(0, 0), false);
  assert.equal(isOffsetSignificant(OFFSET_EPSILON_PX / 2, OFFSET_EPSILON_PX / 2), false);
  assert.equal(isOffsetSignificant(14, 0), true);
  assert.equal(isOffsetSignificant(0, -14), true);
  assert.equal(isOffsetSignificant(14 * Math.cos(Math.PI), 14 * Math.sin(Math.PI)), true);
});
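
// The epsilon check the comment above describes, sketched (assuming the
// exported OFFSET_EPSILON_PX constant): compare the offset's magnitude, not
// its components, so floating-point residue like 14 * sin(π) still counts
// as a real offset while genuine near-zero vectors do not.
function isOffsetSignificantSketch(dx, dy) {
  return Math.hypot(dx, dy) > OFFSET_EPSILON_PX;
}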

test('refreshSpiderPositions ignores empty / non-array / missing-projector input', () => {
  // Should not throw and should not mutate anything when there is no work
  // to do. The marker stub asserts `setLatLng` is never invoked.
  let setLatLngCalls = 0;
  const stubMarker = { setLatLng() { setLatLngCalls += 1; } };
  refreshSpiderPositions([], () => [0, 0]);
  refreshSpiderPositions(null, () => [0, 0]);
  refreshSpiderPositions([{ marker: stubMarker, lat: 0, lon: 0, dx: 1, dy: 1 }], null);
  refreshSpiderPositions([{ marker: stubMarker, lat: 0, lon: 0, dx: 1, dy: 1 }], 'not-a-fn');
  assert.equal(setLatLngCalls, 0);
});

test('refreshSpiderPositions invokes projector and updates marker + line', () => {
  const projectorCalls = [];
  const project = (lat, lon, dx, dy) => {
    projectorCalls.push([lat, lon, dx, dy]);
    return [lat + dx / 1000, lon + dy / 1000];
  };
  const markerCalls = [];
  const lineCalls = [];
  const state = [
    {
      marker: { setLatLng(latLng) { markerCalls.push(latLng); } },
      line: { setLatLngs(latLngs) { lineCalls.push(latLngs); } },
      lat: 10,
      lon: 20,
      dx: 5,
      dy: -7
    }
  ];
  refreshSpiderPositions(state, project);
  assert.deepEqual(projectorCalls, [[10, 20, 5, -7]]);
  assert.deepEqual(markerCalls, [[10.005, 19.993]]);
  assert.deepEqual(lineCalls, [[[10, 20], [10.005, 19.993]]]);
});

test('refreshSpiderPositions tolerates missing marker / line / item', () => {
  // The renderer may legitimately skip the spider line for some entries
  // (e.g. when spiderLinesLayer is absent at init time). The helper must
  // not throw when `marker` or `line` is missing or lacks the expected
  // setter, and must skip falsy entries entirely.
  const sink = [];
  const state = [
    null,
    { lat: 0, lon: 0, dx: 1, dy: 1 }, // no marker, no line
    { marker: {}, line: {}, lat: 0, lon: 0, dx: 1, dy: 1 }, // wrong shape
    {
      marker: { setLatLng(value) { sink.push(['marker', value]); } },
      line: null,
      lat: 1,
      lon: 2,
      dx: 3,
      dy: 4
    },
    {
      marker: null,
      line: { setLatLngs(value) { sink.push(['line', value]); } },
      lat: 5,
      lon: 6,
      dx: 7,
      dy: 8
    }
  ];
  assert.doesNotThrow(() => refreshSpiderPositions(state, (lat, lon) => [lat, lon]));
  assert.deepEqual(sink, [
    ['marker', [1, 2]],
    ['line', [[5, 6], [5, 6]]]
  ]);
});
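
// The guard structure all three tests force, as a sketch (not the shipped
// function): validate inputs up front, then duck-type each setter before
// calling it, so partially-built Leaflet state never throws mid-render.
function refreshSpiderPositionsSketch(state, project) {
  if (!Array.isArray(state) || state.length === 0 || typeof project !== 'function') return;
  for (const item of state) {
    if (!item) continue; // skip falsy entries entirely
    const projected = project(item.lat, item.lon, item.dx, item.dy);
    if (item.marker && typeof item.marker.setLatLng === 'function') {
      item.marker.setLatLng(projected);
    }
    if (item.line && typeof item.line.setLatLngs === 'function') {
      item.line.setLatLngs([[item.lat, item.lon], projected]);
    }
  }
}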

@@ -20,6 +20,7 @@ import assert from 'node:assert/strict';
import {
  parseMeshcoreSenderPrefix,
  findNodeByLongName,
  extractLeadingMentionAsReply,
} from '../meshcore-chat-helpers.js';

// ---------------------------------------------------------------------------
@@ -166,3 +167,120 @@ test('findNodeByLongName: node with null long_name is skipped', () => {
  const map = new Map([['!aabbccdd', node]]);
  assert.equal(findNodeByLongName('Alice', map), null);
});

// ---------------------------------------------------------------------------
// findNodeByLongName — whitespace trimming and emoji-prefix fallback (#727)
// ---------------------------------------------------------------------------

test('findNodeByLongName: trims trailing whitespace in input', () => {
  const node = { node_id: '!deadbeef', long_name: 'T-deck NK' };
  const map = new Map([['!deadbeef', node]]);
  assert.equal(findNodeByLongName('T-deck NK ', map), node);
});

test('findNodeByLongName: trims leading whitespace in input', () => {
  const node = { node_id: '!deadbeef', long_name: 'Alice' };
  const map = new Map([['!deadbeef', node]]);
  assert.equal(findNodeByLongName(' Alice', map), node);
});

test('findNodeByLongName: trims whitespace from candidate long_name', () => {
  const node = { node_id: '!deadbeef', long_name: ' T-deck NK ' };
  const map = new Map([['!deadbeef', node]]);
  assert.equal(findNodeByLongName('T-deck NK', map), node);
});

test('findNodeByLongName: matches when candidate has leading emoji prefix', () => {
  const node = { node_id: '!6aee769f', long_name: '\u{1F4FA} Timo +' };
  const map = new Map([['!6aee769f', node]]);
  // "Timo +" (from @[Timo +]) should match "\u{1F4FA} Timo +" via the
  // leading-non-letter-stripping fallback pass.
  assert.equal(findNodeByLongName('Timo +', map), node);
});

test('findNodeByLongName: emoji-prefix fallback combined with trimming', () => {
  const node = { node_id: '!6aee769f', long_name: '\u{1F4FA} Timo +' };
  const map = new Map([['!6aee769f', node]]);
  assert.equal(findNodeByLongName(' Timo +', map), node);
});

test('findNodeByLongName: emoji-prefix fallback preserves exact-match precedence', () => {
  // When both a prefixed and non-prefixed node match, the exact match wins.
  const prefixed = { node_id: '!11111111', long_name: '\u{1F4FA} Alice' };
  const exact = { node_id: '!22222222', long_name: 'Alice' };
  const map = new Map([['!11111111', prefixed], ['!22222222', exact]]);
  assert.equal(findNodeByLongName('Alice', map), exact);
});

test('findNodeByLongName: whitespace-only input returns null', () => {
  const { map } = makeAliceMap();
  assert.equal(findNodeByLongName(' ', map), null);
});

// ---------------------------------------------------------------------------
// extractLeadingMentionAsReply — MeshCore leading-mention detection (#727)
// ---------------------------------------------------------------------------

test('extractLeadingMentionAsReply: single leading mention with body', () => {
  assert.deepEqual(
    extractLeadingMentionAsReply('@[Alice] hello world'),
    { mentionName: 'Alice', remainingText: 'hello world' },
  );
});

test('extractLeadingMentionAsReply: single leading mention with no body', () => {
  assert.deepEqual(
    extractLeadingMentionAsReply('@[Alice]'),
    { mentionName: 'Alice', remainingText: null },
  );
});

test('extractLeadingMentionAsReply: trims mention name whitespace', () => {
  assert.deepEqual(
    extractLeadingMentionAsReply('@[ Timo +] hello'),
    { mentionName: 'Timo +', remainingText: 'hello' },
  );
});

test('extractLeadingMentionAsReply: trims trailing whitespace in mention name', () => {
  assert.deepEqual(
    extractLeadingMentionAsReply('@[T-deck NK ] some text'),
    { mentionName: 'T-deck NK', remainingText: 'some text' },
  );
});

test('extractLeadingMentionAsReply: mention not at start returns null', () => {
  assert.equal(extractLeadingMentionAsReply('hello @[Alice]'), null);
});

test('extractLeadingMentionAsReply: multiple mentions returns null', () => {
  assert.equal(extractLeadingMentionAsReply('@[Alice] hi @[Bob]'), null);
});

test('extractLeadingMentionAsReply: empty string returns null', () => {
  assert.equal(extractLeadingMentionAsReply(''), null);
});

test('extractLeadingMentionAsReply: null input returns null', () => {
  assert.equal(extractLeadingMentionAsReply(null), null);
});

test('extractLeadingMentionAsReply: non-string input returns null', () => {
  assert.equal(extractLeadingMentionAsReply(42), null);
  assert.equal(extractLeadingMentionAsReply({}), null);
});

test('extractLeadingMentionAsReply: plain text returns null', () => {
  assert.equal(extractLeadingMentionAsReply('just a plain message'), null);
});

test('extractLeadingMentionAsReply: empty mention name returns null', () => {
  assert.equal(extractLeadingMentionAsReply('@[ ] body'), null);
});

test('extractLeadingMentionAsReply: leading whitespace before mention is allowed', () => {
  assert.deepEqual(
    extractLeadingMentionAsReply(' @[Alice] hello'),
    { mentionName: 'Alice', remainingText: 'hello' },
  );
});
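
// A pattern consistent with every case above — a sketch, not the module's
// actual regex: optional leading whitespace, exactly one @[...] mention, a
// trimmed non-empty name, and no second mention anywhere in the remainder.
function extractLeadingMentionSketch(text) {
  if (typeof text !== 'string') return null;
  const match = text.match(/^\s*@\[([^\]]*)\]\s*([\s\S]*)$/);
  if (!match) return null;
  const mentionName = match[1].trim();
  if (!mentionName) return null;
  if (/@\[[^\]]*\]/.test(match[2])) return null; // a second mention disqualifies
  return { mentionName, remainingText: match[2].trim() || null };
}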

@@ -20,7 +20,9 @@ import assert from 'node:assert/strict';
import {
  buildMessageBody,
  buildMessageIndex,
  normaliseEmojiValue,
  normaliseMessageId,
  renderLiteralWithLinks,
  resolveReplyPrefix
} from '../message-replies.js';

@@ -204,6 +206,32 @@ test('buildMessageBody with renderMentionHtml handles multiple mentions', () =>
  assert.equal(body, 'BADGE(A)ESC( and )BADGE(B)');
});

test('buildMessageBody trims mention name whitespace before callback (#727)', () => {
  const calls = [];
  const body = buildMessageBody({
    message: { text: '@[ Timo +] hello' },
    escapeHtml: esc,
    renderEmojiHtml: emoji,
    renderMentionHtml: (name) => { calls.push(name); return `BADGE(${name})`; },
  });
  // The callback should receive the trimmed mention name so that whitespace
  // typed by MeshCore users (e.g. "@[ Timo +]" or "@[T-deck NK ]") matches
  // the canonical long name stored on the node record.
  assert.deepEqual(calls, ['Timo +']);
  assert.equal(body, 'BADGE(Timo +)ESC( hello)');
});

test('buildMessageBody trims trailing whitespace in mention name (#727)', () => {
  const calls = [];
  buildMessageBody({
    message: { text: '@[T-deck NK ] ping' },
    escapeHtml: esc,
    renderEmojiHtml: emoji,
    renderMentionHtml: (name) => { calls.push(name); return `BADGE(${name})`; },
  });
  assert.deepEqual(calls, ['T-deck NK']);
});

test('buildMessageBody with renderMentionHtml escapes literal segments', () => {
  const body = buildMessageBody({
    message: { text: '<b> @[Alice]' },
@@ -279,3 +307,194 @@ test('buildMessageBody with renderMentionHtml: unclosed @[ treated as literal',
  // @[ without closing ] does not match the pattern — treated as literal
  assert.equal(body, 'ESC(hello @[unclosed)');
});

// ---------------------------------------------------------------------------
// renderLiteralWithLinks — URL detection
// ---------------------------------------------------------------------------

const e = v => `E(${v})`;

test('renderLiteralWithLinks passes plain text through escapeHtml', () => {
  assert.equal(renderLiteralWithLinks('hello world', e), 'E(hello world)');
});

test('renderLiteralWithLinks wraps http:// URL in an anchor element', () => {
  const result = renderLiteralWithLinks('check http://example.com out', e);
  assert.equal(result, 'E(check )<a href="E(http://example.com)" target="_blank" rel="noopener noreferrer">E(http://example.com)</a>E( out)');
});

test('renderLiteralWithLinks wraps https:// URL in an anchor element', () => {
  const result = renderLiteralWithLinks('see https://example.com/path?q=1', e);
  assert.ok(result.includes('<a href='), 'should produce an anchor');
  assert.ok(result.includes('target="_blank"'), 'should open in new tab');
  assert.ok(result.includes('rel="noopener noreferrer"'), 'should include noopener rel');
});

test('renderLiteralWithLinks strips trailing period from URL', () => {
  const result = renderLiteralWithLinks('visit https://example.com.', e);
  assert.ok(result.includes('href="E(https://example.com)"'), 'period should not be in href');
  assert.ok(result.includes('>E(https://example.com)<'), 'period should not be in link text');
  assert.ok(result.endsWith('E(.)'), 'trailing period should appear as escaped text after the link');
});

test('renderLiteralWithLinks strips trailing comma from URL', () => {
  const result = renderLiteralWithLinks('go to https://example.com, then stop', e);
  assert.ok(result.includes('href="E(https://example.com)"'), 'comma must not be in href');
});

test('renderLiteralWithLinks handles URL at the start of text', () => {
  const result = renderLiteralWithLinks('https://example.com is great', e);
  assert.ok(result.startsWith('<a href='), 'anchor should be at start');
  assert.ok(result.endsWith('E( is great)'), 'text after URL should be escaped');
});

test('renderLiteralWithLinks handles URL at the end of text', () => {
  const result = renderLiteralWithLinks('see https://example.com', e);
  assert.ok(result.startsWith('E(see )'), 'text before URL should be escaped');
  assert.ok(result.includes('<a href='), 'URL should be linked');
});

test('renderLiteralWithLinks handles multiple URLs in text', () => {
  const result = renderLiteralWithLinks('a https://foo.com b https://bar.com c', e);
  const matches = result.match(/<a href=/g) || [];
  assert.equal(matches.length, 2, 'should produce two anchors');
});

test('renderLiteralWithLinks does not linkify non-http schemes', () => {
  const result = renderLiteralWithLinks('ftp://example.com', e);
  assert.ok(!result.includes('<a href='), 'ftp:// should not be linkified');
  assert.equal(result, 'E(ftp://example.com)');
});

test('renderLiteralWithLinks returns empty string for empty input', () => {
  assert.equal(renderLiteralWithLinks('', e), '');
});
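
// A rough sketch of the linkifier behaviour pinned above (assumed shape —
// the stripped punctuation set beyond '.' and ',' is a guess): split on
// http(s):// runs, peel trailing punctuation back into literal text, and
// route every fragment through the caller's escapeHtml.
function linkifySketch(text, escapeHtml) {
  if (!text) return '';
  return text.split(/(https?:\/\/\S+)/g).map(part => {
    if (!part) return '';
    if (!/^https?:\/\//.test(part)) return escapeHtml(part);
    const url = part.replace(/[.,;:!?]+$/, '');
    const tail = part.slice(url.length);
    const anchor = `<a href="${escapeHtml(url)}" target="_blank" rel="noopener noreferrer">${escapeHtml(url)}</a>`;
    return tail ? anchor + escapeHtml(tail) : anchor;
  }).join('');
}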

test('buildMessageBody linkifies URLs in message text without renderMentionHtml', () => {
  const body = buildMessageBody({
    message: { text: 'visit https://example.com now' },
    escapeHtml: e,
    renderEmojiHtml: v => `EMOJI(${v})`,
  });
  assert.ok(body.includes('<a href='), 'URL should be linkified');
  assert.ok(body.includes('target="_blank"'), 'should open in new tab');
});

test('buildMessageBody linkifies URLs alongside @[Name] mentions', () => {
  const body = buildMessageBody({
    message: { text: '@[Alice] see https://example.com' },
    escapeHtml: e,
    renderEmojiHtml: v => `EMOJI(${v})`,
    renderMentionHtml: name => `BADGE(${name})`,
  });
  assert.ok(body.startsWith('BADGE(Alice)'), 'mention should be rendered as badge');
  assert.ok(body.includes('<a href='), 'URL should be linkified');
});

// ---------------------------------------------------------------------------
// normaliseEmojiValue — codepoint conversion
// ---------------------------------------------------------------------------

test('normaliseEmojiValue converts integer codepoint above 127 to emoji', () => {
  assert.equal(normaliseEmojiValue(128077), '\u{1F44D}');
});

test('normaliseEmojiValue converts string codepoint above 127 to emoji', () => {
  assert.equal(normaliseEmojiValue('128077'), '\u{1F44D}');
});

test('normaliseEmojiValue preserves small integer as string', () => {
  assert.equal(normaliseEmojiValue(49), '49');
});

test('normaliseEmojiValue preserves small digit string as-is', () => {
  assert.equal(normaliseEmojiValue('1'), '1');
});

test('normaliseEmojiValue passes through emoji character unchanged', () => {
  assert.equal(normaliseEmojiValue('\u{1F44D}'), '\u{1F44D}');
});

test('normaliseEmojiValue returns null for null', () => {
  assert.equal(normaliseEmojiValue(null), null);
});

test('normaliseEmojiValue returns null for empty string', () => {
  assert.equal(normaliseEmojiValue(''), null);
});
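
// One normaliser consistent with all seven cases (a sketch; 127 is the
// ASCII boundary the test names imply): numeric codepoints above ASCII
// become the emoji character, everything else round-trips as a string.
function normaliseEmojiValueSketch(value) {
  if (value == null || value === '') return null;
  const code = Number(value);
  if (Number.isInteger(code) && code > 127) return String.fromCodePoint(code);
  return String(value);
}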

// ---------------------------------------------------------------------------
// isReactionMessage — tightened classification (bug #699)
// ---------------------------------------------------------------------------

test('buildMessageBody does not treat reply with emoji and substantial text as reaction', () => {
  const message = {
    text: 'Great job!',
    emoji: '\u{1F44D}',
    reply_id: 123,
    portnum: 'TEXT_MESSAGE_APP'
  };
  const body = buildMessageBody({
    message,
    escapeHtml: v => `ESC(${v})`,
    renderEmojiHtml: v => `EMOJI(${v})`
  });
  // Text should be rendered as a normal message, not suppressed into a reaction.
  assert.ok(body.includes('ESC(Great job!)'), 'text content should be visible');
});

test('buildMessageBody treats reply with emoji and no text as reaction', () => {
  const message = {
    emoji: '\u{1F44D}',
    reply_id: 123,
  };
  const body = buildMessageBody({
    message,
    escapeHtml: v => `ESC(${v})`,
    renderEmojiHtml: v => `EMOJI(${v})`
  });
  assert.equal(body, 'EMOJI(\u{1F44D})');
});

test('buildMessageBody treats reply with emoji and whitespace text as reaction', () => {
  const message = {
    text: ' ',
    emoji: '\u{1F44D}',
    reply_id: 123,
  };
  const body = buildMessageBody({
    message,
    escapeHtml: v => `ESC(${v})`,
    renderEmojiHtml: v => `EMOJI(${v})`
  });
  assert.equal(body, 'EMOJI(\u{1F44D})');
});

test('buildMessageBody treats reply with emoji and digit text as reaction', () => {
  const message = {
    text: '3',
    emoji: '\u{2728}',
    reply_id: 456,
  };
  const body = buildMessageBody({
    message,
    escapeHtml: v => `ESC(${v})`,
    renderEmojiHtml: v => `EMOJI(${v})`
  });
  assert.equal(body, 'EMOJI(\u{2728}) ESC(\u00d73)');
});

test('buildMessageBody renders emoji from numeric codepoint in reaction', () => {
  const message = {
    text: '2',
    emoji: 128077,
    reply_id: 789,
    portnum: 'REACTION_APP'
  };
  const body = buildMessageBody({
    message,
    escapeHtml: v => `ESC(${v})`,
    renderEmojiHtml: v => `EMOJI(${v})`
  });
  assert.equal(body, 'EMOJI(\u{1F44D}) ESC(\u00d72)');
});

@@ -109,7 +109,7 @@ test('refreshNodeInformation merges telemetry metrics when the base node lacks t

  assert.equal(calls.length, 4);
  calls.forEach(call => {
    assert.deepEqual(call.options, { cache: 'no-store' });
    assert.deepEqual(call.options, { cache: 'default' });
  });
});

@@ -17,7 +17,7 @@
import test from 'node:test';
import assert from 'node:assert/strict';

import { fetchMessages, fetchTracesForNode } from '../node-page-data.js';
import { fetchMessages, fetchNodesById, fetchTracesForNode } from '../node-page-data.js';

// ---------------------------------------------------------------------------
// fetchMessages
@@ -174,3 +174,97 @@ test('fetchTracesForNode accepts numeric identifier', async () => {
  await fetchTracesForNode(12345, { fetchImpl });
  assert.ok(calls[0].includes('12345'), 'numeric identifier should appear in URL');
});

// ---------------------------------------------------------------------------
// fetchNodesById
// ---------------------------------------------------------------------------

test('fetchNodesById returns Map keyed by node_id on success', async () => {
  const fetchImpl = async () => ({
    ok: true,
    status: 200,
    async json() {
      return [
        { node_id: '!11111111', short_name: 'A1', long_name: 'Alpha' },
        { node_id: '!22222222', short_name: 'B2', long_name: 'Bravo' },
      ];
    },
  });
  const result = await fetchNodesById({ fetchImpl });
  assert.ok(result instanceof Map);
  assert.equal(result.size, 2);
  assert.equal(result.get('!11111111').short_name, 'A1');
  assert.equal(result.get('!22222222').long_name, 'Bravo');
});

test('fetchNodesById accepts camelCase nodeId fallback', async () => {
  const fetchImpl = async () => ({
    ok: true,
    status: 200,
    async json() { return [{ nodeId: '!33333333', short_name: 'C3' }]; },
  });
  const result = await fetchNodesById({ fetchImpl });
  assert.equal(result.get('!33333333').short_name, 'C3');
});

test('fetchNodesById returns empty Map when response is not OK', async () => {
  const fetchImpl = async () => ({ ok: false, status: 500, async json() { return []; } });
  const result = await fetchNodesById({ fetchImpl });
  assert.ok(result instanceof Map);
  assert.equal(result.size, 0);
});

test('fetchNodesById returns empty Map when payload is not an array', async () => {
  const fetchImpl = async () => ({ ok: true, status: 200, async json() { return { nodes: [] }; } });
  const result = await fetchNodesById({ fetchImpl });
  assert.ok(result instanceof Map);
  assert.equal(result.size, 0);
});

test('fetchNodesById skips entries lacking an identifier', async () => {
  const fetchImpl = async () => ({
    ok: true,
    status: 200,
    async json() {
      return [
        { short_name: 'X', long_name: 'no id' },
        { node_id: '!aabbccdd', short_name: 'AB' },
        null,
        'not-an-object',
      ];
    },
  });
  const result = await fetchNodesById({ fetchImpl });
  assert.equal(result.size, 1);
  assert.equal(result.get('!aabbccdd').short_name, 'AB');
});

test('fetchNodesById returns empty Map when fetch throws', async () => {
  const fetchImpl = async () => { throw new Error('network down'); };
  const result = await fetchNodesById({ fetchImpl });
  assert.ok(result instanceof Map);
  assert.equal(result.size, 0);
});

test('fetchNodesById returns empty Map when fetch implementation is unavailable', async () => {
  const savedFetch = globalThis.fetch;
  try {
    delete globalThis.fetch;
    const result = await fetchNodesById();
    assert.ok(result instanceof Map);
    assert.equal(result.size, 0);
  } finally {
    if (savedFetch !== undefined) globalThis.fetch = savedFetch;
  }
});

test('fetchNodesById issues a request with the configured limit', async () => {
  const calls = [];
  const fetchImpl = async (url) => {
    calls.push(url);
    return { ok: true, status: 200, async json() { return []; } };
  };
  await fetchNodesById({ fetchImpl });
  assert.equal(calls.length, 1);
  assert.ok(calls[0].startsWith('/api/nodes?limit='), 'should call /api/nodes with a limit query');
});
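
// The resilient contract above, sketched under assumptions (the default
// limit value and options shape are illustrative): every failure mode —
// bad status, non-array payload, thrown fetch, missing fetch — degrades to
// an empty Map rather than an exception.
async function fetchNodesByIdSketch({ fetchImpl = globalThis.fetch, limit = 1000 } = {}) {
  const nodes = new Map();
  if (typeof fetchImpl !== 'function') return nodes;
  try {
    const response = await fetchImpl(`/api/nodes?limit=${limit}`);
    if (!response.ok) return nodes;
    const payload = await response.json();
    if (!Array.isArray(payload)) return nodes;
    for (const node of payload) {
      if (!node || typeof node !== 'object') continue;
      const id = node.node_id ?? node.nodeId; // snake_case first, camelCase fallback
      if (id) nodes.set(id, node);
    }
  } catch {
    // Network/parse failure → empty Map, matching the tests above.
  }
  return nodes;
}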

@@ -756,6 +756,68 @@ test('renderMessages shows no protocol icon when node protocol is absent', () =>
  assert.ok(!html.includes('meshcore.svg'), 'absent node protocol chat entry should show no meshcore icon');
});

test('renderMessages: channel name fallback uses message.channel_label when metadata is empty (#727 coverage)', () => {
  // Exercises the ``fallbackChannel`` branch in renderMessages: when
  // extractChatMessageMetadata does not return a channelName (because the
  // message has neither ``channel_name`` nor ``channelName``) but does
  // carry a legacy ``channel_label`` field, that string is promoted into
  // the metadata object so the channel tag still renders.
  const renderShortHtml = (short) => `<span class="short-name">${short}</span>`;
  const html = renderMessages(
    [{ text: 'hi', rx_time: 1_700_000_000, channel_label: 'legacy-label' }],
    renderShortHtml,
    { node_id: '!self', short_name: 'NODE', long_name: 'Node', role: 'CLIENT' },
  );
  assert.ok(html.includes('[legacy-label]'), 'channel_label fallback should appear in tag');
});

test('renderMessages: channel name fallback uses numeric message.channel when no string label exists (#727 coverage)', () => {
  // Exercises the ``numberOrNull(message.channel)`` branch: a numeric
  // channel index without an associated channel_name is stringified into
  // the channel tag.
  const renderShortHtml = (short) => `<span class="short-name">${short}</span>`;
  const html = renderMessages(
    [{ text: 'hi', rx_time: 1_700_000_000, channel: 7 }],
    renderShortHtml,
    { node_id: '!self', short_name: 'NODE', long_name: 'Node', role: 'CLIENT' },
  );
  assert.ok(html.includes('[7]'), 'numeric channel fallback should appear in tag');
});

test('renderMessages: channel name fallback uses string message.channel when neither label nor number is present (#727 coverage)', () => {
  // Exercises the final ``stringOrNull(message.channel)`` branch. This
  // covers the path where ``channel`` is a non-numeric string label that
  // ``numberOrNull`` rejects but ``stringOrNull`` accepts.
  const renderShortHtml = (short) => `<span class="short-name">${short}</span>`;
  const html = renderMessages(
    [{ text: 'hi', rx_time: 1_700_000_000, channel: 'alpha' }],
    renderShortHtml,
    { node_id: '!self', short_name: 'NODE', long_name: 'Node', role: 'CLIENT' },
  );
  assert.ok(html.includes('[alpha]'), 'string channel fallback should appear in tag');
});

test('renderMessages: skips invalid entries in the global node registry (#727 coverage)', () => {
  // Covers the ``if (id && node && typeof node === 'object')`` guard inside
  // buildNodesById. Bad entries (null values, non-id keys) must be ignored
  // without breaking the registry build.
  const renderShortHtml = (short) => `<span class="short-name">${short}</span>`;
  const globalNodesById = new Map([
    ['', { node_id: '', short_name: 'EMPTY' }], // empty-id entry — skipped
    ['!ok', { node_id: '!ok', short_name: 'OK', long_name: 'OK Node' }],
    ['!bad', null], // null value — skipped
  ]);
  const html = renderMessages(
    [{ text: '@[OK Node] hi', rx_time: 1_700_000_000, protocol: 'meshcore', to_id: '^all' }],
    renderShortHtml,
    { node_id: '!self', short_name: 'NODE', long_name: 'Node', role: 'CLIENT' },
    globalNodesById,
  );
  // The mention should resolve via the surviving entry, demonstrating the
  // registry build skipped the bad ones without throwing.
  assert.ok(html.includes('OK'), 'valid registry entry should still resolve mention');
});

test('renderMessages omits meshtastic icon for meshcore node protocol', () => {
  const nodeContext = {
    shortName: 'MC',
@@ -919,7 +981,7 @@ test('fetchMessages handles HTTP responses and uses defaults', async () => {
  };
  const messages = await fetchMessages('!node', { fetchImpl });
  assert.equal(messages.length, 1);
  assert.equal(calls[0].options.cache, 'no-store');
  assert.equal(calls[0].options.cache, 'default');
});

test('fetchMessages returns an empty list when the endpoint is missing', async () => {
@@ -1002,7 +1064,7 @@ test('fetchTracesForNode requests traceroutes for the node', async () => {
  const traces = await fetchTracesForNode('!abc', { fetchImpl });
  assert.equal(traces.length, 1);
  assert.equal(calls[0].url.includes('/api/traces/!abc'), true);
  assert.equal(calls[0].options.cache, 'no-store');
  assert.equal(calls[0].options.cache, 'default');
});

test('fetchTracesForNode returns empty when identifier is missing', async () => {

@@ -62,35 +62,35 @@ test('render priority uses canonical role keys and defaults to zero for unknowns
});

test('render priority is protocol-aware for shared roles', () => {
  // SENSOR: meshtastic=2, meshcore=3
  // SENSOR: meshtastic=2, meshcore=9
  assert.equal(getRoleRenderPriority('SENSOR', 'meshtastic'), 2);
  assert.equal(getRoleRenderPriority('SENSOR', 'meshcore'), 3);
  assert.equal(getRoleRenderPriority('SENSOR', 'meshcore'), 9);
  assert.ok(getRoleRenderPriority('SENSOR', 'meshcore') > getRoleRenderPriority('SENSOR', 'meshtastic'));
  // REPEATER: meshtastic=11, meshcore=12
  // REPEATER: meshtastic=11, meshcore=3
  assert.equal(getRoleRenderPriority('REPEATER', 'meshtastic'), 11);
  assert.equal(getRoleRenderPriority('REPEATER', 'meshcore'), 12);
  assert.ok(getRoleRenderPriority('REPEATER', 'meshcore') > getRoleRenderPriority('REPEATER', 'meshtastic'));
  assert.equal(getRoleRenderPriority('REPEATER', 'meshcore'), 3);
  assert.ok(getRoleRenderPriority('REPEATER', 'meshtastic') > getRoleRenderPriority('REPEATER', 'meshcore'));
});

test('render priority meshcore-exclusive roles have defined priorities', () => {
  assert.equal(getRoleRenderPriority('COMPANION', 'meshcore'), 7);
  assert.equal(getRoleRenderPriority('ROOM_SERVER', 'meshcore'), 9);
  assert.equal(getRoleRenderPriority('COMPANION', 'meshcore'), 12);
  assert.equal(getRoleRenderPriority('ROOM_SERVER', 'meshcore'), 7);
});

test('render priority respects the full bottom-to-top order', () => {
  const order = [
    ['CLIENT_HIDDEN', null],
    ['SENSOR', 'meshtastic'],
    ['SENSOR', 'meshcore'],
    ['REPEATER', 'meshcore'],
    ['TRACKER', null],
    ['CLIENT_MUTE', null],
    ['CLIENT', null],
    ['COMPANION', 'meshcore'],
    ['CLIENT_BASE', null],
    ['ROOM_SERVER', 'meshcore'],
    ['CLIENT_BASE', null],
    ['SENSOR', 'meshcore'],
    ['ROUTER_LATE', null],
    ['REPEATER', 'meshtastic'],
    ['REPEATER', 'meshcore'],
    ['COMPANION', 'meshcore'],
    ['ROUTER', null],
    ['LOST_AND_FOUND', null],
  ];

@@ -32,39 +32,41 @@ const NOW = 1_700_000_000;

test('computeLocalActiveNodeStats counts nodes within each window', () => {
  const nodes = [
    { last_heard: NOW - 60 }, // within hour, day, week, month
    { last_heard: NOW - 4_000 }, // within day, week, month
    { last_heard: NOW - 90_000 }, // within week, month
    { last_heard: NOW - (8 * 86_400) }, // within month only
    { last_heard: NOW - (20 * 86_400) }, // within month only
    { last_heard: NOW - 60, protocol: 'meshtastic' }, // within hour, day, week, month
    { last_heard: NOW - 4_000, protocol: 'meshcore' }, // within day, week, month
    { last_heard: NOW - 90_000, protocol: 'meshtastic' }, // within week, month
    { last_heard: NOW - (8 * 86_400), protocol: 'meshcore' }, // within month only
    { last_heard: NOW - (20 * 86_400), protocol: 'meshtastic' }, // within month only
  ];

  assert.deepEqual(computeLocalActiveNodeStats(nodes, NOW), {
    hour: 1,
    day: 2,
    week: 3,
    month: 5,
    sampled: true,
  });
  const result = computeLocalActiveNodeStats(nodes, NOW);
  assert.equal(result.hour, 1);
  assert.equal(result.day, 2);
  assert.equal(result.week, 3);
  assert.equal(result.month, 5);
  assert.equal(result.sampled, true);
  assert.deepEqual(result.meshcore, { hour: 0, day: 1, week: 1, month: 2 });
  assert.deepEqual(result.meshtastic, { hour: 1, day: 1, week: 2, month: 3 });
});

test('computeLocalActiveNodeStats returns zero counts for empty node array', () => {
  assert.deepEqual(computeLocalActiveNodeStats([], NOW), {
    hour: 0,
    day: 0,
    week: 0,
    month: 0,
    sampled: true,
  });
  const result = computeLocalActiveNodeStats([], NOW);
  assert.equal(result.hour, 0);
  assert.equal(result.day, 0);
  assert.equal(result.week, 0);
  assert.equal(result.month, 0);
  assert.equal(result.sampled, true);
  assert.deepEqual(result.meshcore, { hour: 0, day: 0, week: 0, month: 0 });
  assert.deepEqual(result.meshtastic, { hour: 0, day: 0, week: 0, month: 0 });
});

test('computeLocalActiveNodeStats handles non-array nodes gracefully', () => {
  assert.deepEqual(computeLocalActiveNodeStats(null, NOW), {
    hour: 0, day: 0, week: 0, month: 0, sampled: true,
  });
  assert.deepEqual(computeLocalActiveNodeStats(undefined, NOW), {
    hour: 0, day: 0, week: 0, month: 0, sampled: true,
  });
  const result = computeLocalActiveNodeStats(null, NOW);
  assert.equal(result.hour, 0);
  assert.deepEqual(result.meshcore, { hour: 0, day: 0, week: 0, month: 0 });
  const result2 = computeLocalActiveNodeStats(undefined, NOW);
  assert.equal(result2.hour, 0);
  assert.deepEqual(result2.meshcore, { hour: 0, day: 0, week: 0, month: 0 });
});

test('computeLocalActiveNodeStats ignores nodes with missing last_heard', () => {
@@ -74,9 +76,10 @@ test('computeLocalActiveNodeStats ignores nodes with missing last_heard', () =>
    { last_heard: undefined },
    { last_heard: 'not-a-number' },
  ];
  assert.deepEqual(computeLocalActiveNodeStats(nodes, NOW), {
    hour: 0, day: 0, week: 0, month: 0, sampled: true,
  });
  const result = computeLocalActiveNodeStats(nodes, NOW);
  assert.equal(result.hour, 0);
  assert.deepEqual(result.meshcore, { hour: 0, day: 0, week: 0, month: 0 });
  assert.deepEqual(result.meshtastic, { hour: 0, day: 0, week: 0, month: 0 });
});

test('computeLocalActiveNodeStats uses Date.now() when nowSeconds is non-finite', () => {
@@ -84,13 +87,27 @@ test('computeLocalActiveNodeStats uses Date.now() when nowSeconds is non-finite'
  const result = computeLocalActiveNodeStats([{ last_heard: Date.now() / 1000 - 60 }], NaN);
  assert.equal(typeof result.hour, 'number');
  assert.ok(result.hour >= 0);
  assert.ok(result.meshcore != null);
});

test('computeLocalActiveNodeStats counts nodes exactly at window boundary', () => {
  // A node whose last_heard equals exactly now - 3600 is within the hour window (<=).
  const nodes = [{ last_heard: NOW - 3600 }];
  const nodes = [{ last_heard: NOW - 3600, protocol: 'meshtastic' }];
  const result = computeLocalActiveNodeStats(nodes, NOW);
  assert.equal(result.hour, 1);
  assert.equal(result.meshtastic.hour, 1);
  assert.equal(result.meshcore.hour, 0);
});

test('computeLocalActiveNodeStats bins unknown protocols into meshtastic bucket', () => {
  const nodes = [
    { last_heard: NOW - 100, protocol: 'reticulum' },
    { last_heard: NOW - 200, protocol: 'meshcore' },
  ];
  const result = computeLocalActiveNodeStats(nodes, NOW);
  assert.equal(result.hour, 2);
  assert.equal(result.meshcore.hour, 1);
  assert.equal(result.meshtastic.hour, 1);
});
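
// Bucketing rule implied by the tests (a sketch): anything not explicitly
// 'meshcore' — including unknown protocols and missing fields — lands in
// the meshtastic bucket, so the two buckets always sum to the totals.
function protocolBucketSketch(node) {
  return node && node.protocol === 'meshcore' ? 'meshcore' : 'meshtastic';
}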

// ---------------------------------------------------------------------------
@@ -98,13 +115,47 @@ test('computeLocalActiveNodeStats counts nodes exactly at window boundary', () =
// ---------------------------------------------------------------------------

test('normaliseActiveNodeStatsPayload validates and normalises API payload', () => {
  assert.deepEqual(
    normaliseActiveNodeStatsPayload({
      active_nodes: { hour: '11', day: 22, week: 33, month: 44 },
      sampled: false,
    }),
    { hour: 11, day: 22, week: 33, month: 44, sampled: false }
  );
  const result = normaliseActiveNodeStatsPayload({
    active_nodes: { hour: '11', day: 22, week: 33, month: 44 },
    sampled: false,
  });
  assert.equal(result.hour, 11);
  assert.equal(result.day, 22);
  assert.equal(result.week, 33);
  assert.equal(result.month, 44);
  assert.equal(result.sampled, false);
});

test('normaliseActiveNodeStatsPayload includes per-protocol buckets when present', () => {
  const result = normaliseActiveNodeStatsPayload({
    active_nodes: { hour: 10, day: 20, week: 30, month: 40 },
    meshcore: { hour: 3, day: 8, week: 12, month: 15 },
    meshtastic: { hour: 7, day: 12, week: 18, month: 25 },
    sampled: false,
  });
  assert.deepEqual(result.meshcore, { hour: 3, day: 8, week: 12, month: 15 });
  assert.deepEqual(result.meshtastic, { hour: 7, day: 12, week: 18, month: 25 });
});

test('normaliseActiveNodeStatsPayload omits per-protocol buckets when absent', () => {
  const result = normaliseActiveNodeStatsPayload({
    active_nodes: { hour: 1, day: 2, week: 3, month: 4 },
    sampled: false,
  });
  assert.equal(result.meshcore, undefined);
  assert.equal(result.meshtastic, undefined);
});

test('normaliseActiveNodeStatsPayload ignores malformed per-protocol buckets', () => {
  const result = normaliseActiveNodeStatsPayload({
    active_nodes: { hour: 1, day: 2, week: 3, month: 4 },
    meshcore: { hour: 'bad', day: 1, week: 1, month: 1 },
    meshtastic: 'not-an-object',
    sampled: false,
  });
  assert.equal(result.hour, 1);
  assert.equal(result.meshcore, undefined);
  assert.equal(result.meshtastic, undefined);
});

test('normaliseActiveNodeStatsPayload returns null for missing active_nodes', () => {
@@ -157,13 +208,22 @@ test('fetchActiveNodeStats returns remote stats when /api/stats succeeds', async
});

test('fetchActiveNodeStats falls back to local counts on network error', async () => {
  const nodes = [{ last_heard: NOW - 120 }, { last_heard: NOW - (10 * 86_400) }];
  const nodes = [
    { last_heard: NOW - 120, protocol: 'meshtastic' },
    { last_heard: NOW - (10 * 86_400), protocol: 'meshcore' },
  ];
  const stats = await fetchActiveNodeStats({
    nodes,
    nowSeconds: NOW,
    fetchImpl: async () => { throw new Error('network down'); },
  });
  assert.deepEqual(stats, { hour: 1, day: 1, week: 1, month: 2, sampled: true });
  assert.equal(stats.hour, 1);
  assert.equal(stats.day, 1);
  assert.equal(stats.week, 1);
  assert.equal(stats.month, 2);
  assert.equal(stats.sampled, true);
  assert.ok(stats.meshcore != null, 'fallback should include meshcore');
  assert.ok(stats.meshtastic != null, 'fallback should include meshtastic');
});

test('fetchActiveNodeStats falls back to local counts on non-OK status', async () => {
@@ -237,29 +297,16 @@ test('fetchActiveNodeStats concurrent calls share a single in-flight request', a
// formatActiveNodeStatsText
// ---------------------------------------------------------------------------

test('formatActiveNodeStatsText emits expected dashboard sentence', () => {
test('formatActiveNodeStatsText emits compact day/week/month string', () => {
  assert.equal(
    formatActiveNodeStatsText({
      channel: 'LongFast',
      frequency: '868MHz',
      stats: { hour: 1, day: 2, week: 3, month: 4, sampled: false },
      stats: { day: 2, week: 3, month: 4, sampled: false },
    }),
    'LongFast (868MHz) \u2014 active nodes: 1/hour, 2/day, 3/week, 4/month.'
  );
});

test('formatActiveNodeStatsText appends sampled marker', () => {
  assert.equal(
    formatActiveNodeStatsText({
      channel: 'LongFast',
      frequency: '868MHz',
      stats: { hour: 9, day: 8, week: 7, month: 6, sampled: true },
    }),
    'LongFast (868MHz) \u2014 active nodes: 9/hour, 8/day, 7/week, 6/month (sampled).'
    '2/day \u00b7 3/week \u00b7 4/month'
  );
});

test('formatActiveNodeStatsText handles missing or null stats gracefully', () => {
  const text = formatActiveNodeStatsText({ channel: 'X', frequency: 'Y', stats: null });
  assert.ok(text.includes('0/hour'), 'defaults to zero counts for null stats');
  const text = formatActiveNodeStatsText({ stats: null });
  assert.equal(text, '0/day \u00b7 0/week \u00b7 0/month', 'defaults to zero counts for null stats');
});
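
// The formatter the new assertions pin down, as a sketch (zero-defaulting
// shape assumed): hour counts and the channel/frequency decoration are gone
// in favour of a compact middle-dot separated day/week/month string.
function formatActiveNodeStatsTextSketch({ stats } = {}) {
  const s = stats ?? {};
  const n = v => (Number.isFinite(v) ? v : 0);
  return `${n(s.day)}/day \u00b7 ${n(s.week)}/week \u00b7 ${n(s.month)}/month`;
}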

@@ -57,8 +57,6 @@ function executeInDom(source, url, env) {
test('theme and background modules behave correctly across scenarios', async t => {
  const env = createDomEnvironment({ readyState: 'complete', cookie: '' });
  try {
    const toggle = env.createElement('button', 'themeToggle');
    env.registerElement('themeToggle', toggle);
    let filterInvocations = 0;
    env.window.applyFiltersToAllTiles = () => {
      filterInvocations += 1;
@@ -72,52 +70,27 @@ test('theme and background modules behave correctly across scenarios', async t =
    const backgroundHelpers = env.window.__potatoBackground;
    const backgroundHooks = backgroundHelpers.__testHooks;

    await t.test('initialises with a dark theme and persists cookies', () => {
    await t.test('initialises with a dark theme', () => {
      assert.equal(env.document.documentElement.getAttribute('data-theme'), 'dark');
      assert.equal(env.document.body.classList.contains('dark'), true);
      assert.equal(toggle.textContent, '☀️');
      themeHelpers.persistTheme('light');
      themeHelpers.setCookie('bare', '1');
      themeHooks.exerciseSetCookieGuard();
      themeHelpers.setCookie('flag', 'true', { Secure: true });
      const cookieString = env.getCookieString();
      assert.equal(themeHelpers.getCookie('flag'), 'true');
      assert.equal(themeHelpers.getCookie('missing'), null);
      assert.match(cookieString, /theme=light/);
      assert.match(cookieString, /; path=\//);
      assert.match(cookieString, /; SameSite=Lax/);
      assert.match(cookieString, /; Secure/);
    });

    await t.test('serializeCookieOptions covers boolean and string attributes', () => {
      const withAttributes = themeHooks.serializeCookieOptions({ Secure: true, HttpOnly: '1' });
      assert.equal(withAttributes.includes('; Secure'), true);
      assert.equal(withAttributes.includes('; HttpOnly=1'), true);
      const secureOnly = themeHooks.serializeCookieOptions({ Secure: true });
      assert.equal(secureOnly.trim(), '; Secure');
      assert.equal(themeHooks.formatCookieOption(['HttpOnly', '1']), '; HttpOnly=1');
      assert.equal(themeHooks.formatCookieOption(['Secure', true]), '; Secure');
      assert.equal(themeHooks.serializeCookieOptions({}), '');
      assert.equal(themeHooks.serializeCookieOptions(), '');
    });

    await t.test('re-bootstrap handles DOMContentLoaded flow and filter hooks', () => {
      env.document.readyState = 'loading';
      filterInvocations = 0;
      env.setCookieString('theme=light');
      themeHooks.bootstrap();
      env.triggerDOMContentLoaded();
      assert.equal(env.document.documentElement.getAttribute('data-theme'), 'light');
      assert.equal(env.document.body.classList.contains('dark'), false);
      assert.equal(toggle.textContent, '🌙');
      assert.equal(env.document.documentElement.getAttribute('data-theme'), 'dark');
      assert.equal(env.document.body.classList.contains('dark'), true);
      assert.equal(filterInvocations, 1);
      env.document.removeEventListener('DOMContentLoaded', themeHooks.handleReady);
    });

    await t.test('handleReady tolerates missing toggle button', () => {
      env.registerElement('themeToggle', null);
    await t.test('handleReady calls applyFiltersToAllTiles', () => {
      filterInvocations = 0;
      env.document.readyState = 'complete';
      themeHooks.handleReady();
      env.registerElement('themeToggle', toggle);
      assert.equal(filterInvocations, 1);
    });

    await t.test('applyTheme copes with absent DOM nodes', () => {
@@ -125,10 +98,10 @@ test('theme and background modules behave correctly across scenarios', async t =
      const originalRoot = env.document.documentElement;
      env.document.body = null;
      env.document.documentElement = null;
      assert.equal(themeHooks.applyTheme('dark'), true);
      // Should not throw even when DOM nodes are absent
      assert.doesNotThrow(() => themeHooks.applyTheme());
      env.document.body = originalBody;
      env.document.documentElement = originalRoot;
      assert.equal(themeHooks.applyTheme('light'), false);
    });

    await t.test('background bootstrap waits for DOM readiness', () => {
@@ -161,12 +134,12 @@ test('theme and background modules behave correctly across scenarios', async t =
      env.document.body = originalBody;
    });

    await t.test('theme changes trigger background updates', () => {
      env.document.body.classList.remove('dark');
      themeHooks.setTheme('light');
    await t.test('themechange event triggers background update', () => {
      env.document.body.classList.add('dark');
      backgroundHooks.init();
      env.dispatchWindowEvent('themechange');
      assert.equal(env.document.documentElement.style.backgroundColor, '#f6f3ee');
      // Background should reflect dark mode
      assert.ok(env.document.documentElement.style.backgroundColor !== '');
    });

    env.window.removeEventListener('themechange', backgroundHelpers.applyBackground);

@@ -179,7 +179,7 @@ export async function fetchAggregatedTelemetry({
  const bucketSecondsSafe = bucketSecondsCandidate > 0 ? bucketSecondsCandidate : TELEMETRY_BUCKET_SECONDS;
  const response = await fetchFn(
    `/api/telemetry/aggregated?windowSeconds=${windowSeconds}&bucketSeconds=${bucketSecondsSafe}`,
    { cache: 'no-store' },
    { cache: 'default' },
  );
  if (!response.ok) {
    throw new Error(`Failed to fetch aggregated telemetry (HTTP ${response.status})`);

@@ -0,0 +1,250 @@
/*
 * Copyright © 2025-26 l5yth & contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { buildMessageBody, resolveReplyPrefix } from './message-replies.js';
import {
  extractLeadingMentionAsReply,
  findNodeByLongName,
  parseMeshcoreSenderPrefix,
} from './meshcore-chat-helpers.js';
import { isMeshcoreProtocol } from './protocol-helpers.js';

/**
 * Pick the first defined property value from a list of candidate objects.
 *
 * Used to resolve protocol from either the message itself or its hydrated
 * node, whichever is populated first.
 *
 * @param {Array<?Object>} sources Objects to probe in order.
 * @param {string} key Property name to read.
 * @returns {*} The first non-nullish value or ``undefined``.
 */
function pickFirst(sources, key) {
  for (const source of sources) {
    if (source && typeof source === 'object') {
      const value = source[key];
      if (value != null) return value;
    }
  }
  return undefined;
}

/**
 * Render the shared badge HTML for a node, tolerant of both snake_case and
 * camelCase property names.
 *
 * @param {Function} renderShortHtml Badge renderer.
 * @param {Object} node Node record.
 * @returns {string} HTML fragment.
 */
function renderNodeBadge(renderShortHtml, node) {
  return renderShortHtml(
    node.short_name ?? node.shortName,
    node.role,
    node.long_name ?? node.longName,
    node,
  );
}

/**
 * Build a reply prefix HTML fragment for a resolved reply target node.
 *
 * @param {string} label Label text (already raw, will be escaped).
 * @param {string} badgeHtml Rendered badge for the reply target.
 * @param {Function} escapeHtml HTML-escape helper.
 * @returns {string} ``<span class="chat-entry-reply">...</span>`` HTML.
 */
function formatReplyPrefixHtml(label, badgeHtml, escapeHtml) {
  return `<span class="chat-entry-reply">[${escapeHtml(label)} ${badgeHtml}]</span>`;
}

/**
 * Render the text content of a chat entry (reply prefix + body) using shared
 * message formatting helpers.
 *
 * This extracts the rendering pipeline that lives inside the dashboard chat
 * panel so that both the dashboard (``main.js``) and the node detail page
 * (``node-page.js``) produce identical message bodies.
 *
 * The function performs:
 *
 * 1. Resolution of the standard ``reply_id``-based reply prefix.
 * 2. MeshCore ``"SenderName: body"`` prefix parsing for channel messages.
 * 3. MeshCore leading-``@[Name]`` detection, surfacing it as an ``[in reply
 *    to BADGE]`` prefix when no structured reply is already present.
 * 4. Mention rendering for MeshCore messages, mapping ``@[Name]`` to either
 *    a badge (when the named node is known) or an escaped literal fallback.
 * 5. ``buildMessageBody()`` invocation, which handles URL linkification,
 *    emoji rendering, and reaction detection.
 * 6. Encrypted-message notices when available from the caller.
 *
 * The returned ``meshcoreSenderNode`` lets callers render the sender badge
 * correctly when the ingestor could not resolve ``m.node`` for a MeshCore
 * channel message.
 *
 * @param {{
 *   message: Object,
 *   nodesById: ?Map<string, Object>,
 *   messagesById: ?Map<string, Object>,
 *   renderShortHtml: Function,
 *   escapeHtml: Function,
 *   renderEmojiHtml: Function,
 *   formatEncryptedMessageNotice?: ?Function,
 * }} params Rendering dependencies.
 * @returns {{
 *   html: string,
 *   parsedMeshcorePrefix: ?{ senderName: string, bodyText: string },
 *   meshcoreSenderNode: ?Object
 * }} Rendered HTML plus MeshCore metadata for caller-side badge fallbacks.
 */
export function renderChatEntryContent({
  message,
  nodesById,
  messagesById,
  renderShortHtml,
  escapeHtml,
  renderEmojiHtml,
  formatEncryptedMessageNotice = null,
}) {
  if (typeof escapeHtml !== 'function') {
    throw new TypeError('escapeHtml must be a function');
  }
  if (typeof renderEmojiHtml !== 'function') {
    throw new TypeError('renderEmojiHtml must be a function');
  }
  if (typeof renderShortHtml !== 'function') {
    throw new TypeError('renderShortHtml must be a function');
  }
  if (!message || typeof message !== 'object') {
    return { html: '', parsedMeshcorePrefix: null, meshcoreSenderNode: null };
  }

  const protocol = pickFirst([message, message.node], 'protocol');
  const isMeshcore = isMeshcoreProtocol(protocol);
  const toId = message.to_id ?? message.toId;
  const isMeshcoreChannelMsg = isMeshcore && toId === '^all';

  // ------------------------------------------------------------------
  // MeshCore sender prefix (channel messages only)
  // ------------------------------------------------------------------
  let parsedMeshcorePrefix = null;
  let meshcoreSenderNode = null;
  if (isMeshcoreChannelMsg && typeof message.text === 'string') {
    parsedMeshcorePrefix = parseMeshcoreSenderPrefix(message.text);
    if (parsedMeshcorePrefix && !message.node) {
      meshcoreSenderNode = findNodeByLongName(parsedMeshcorePrefix.senderName, nodesById);
    }
  }
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Encrypted messages take precedence over body rendering
|
||||
// ------------------------------------------------------------------
|
||||
if (message.encrypted) {
|
||||
let bodyHtml = '';
|
||||
if (typeof formatEncryptedMessageNotice === 'function') {
|
||||
const notice = formatEncryptedMessageNotice(message);
|
||||
if (notice && typeof notice === 'object') {
|
||||
const content = notice.content ?? '';
|
||||
bodyHtml = notice.isHtml ? content : escapeHtml(content);
|
||||
}
|
||||
}
|
||||
return { html: bodyHtml, parsedMeshcorePrefix, meshcoreSenderNode };
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Structured reply_id prefix (Meshtastic style)
|
||||
// ------------------------------------------------------------------
|
||||
const replyPrefix = resolveReplyPrefix({
|
||||
message,
|
||||
messagesById,
|
||||
nodesById,
|
||||
renderShortHtml,
|
||||
escapeHtml,
|
||||
});
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// MeshCore leading @[Name] as reply (substitute when no reply_id)
|
||||
// ------------------------------------------------------------------
|
||||
let meshcoreReplyPrefix = '';
|
||||
// The text we actually hand to ``buildMessageBody``; starts as the body
|
||||
// text from the sender prefix (when present) and is further stripped if
|
||||
// a leading-mention-as-reply is detected.
|
||||
let effectiveBodyText = parsedMeshcorePrefix
|
||||
? parsedMeshcorePrefix.bodyText
|
||||
: (typeof message.text === 'string' ? message.text : null);
|
||||
|
||||
if (!replyPrefix && isMeshcore && effectiveBodyText) {
|
||||
const leading = extractLeadingMentionAsReply(effectiveBodyText);
|
||||
if (leading) {
|
||||
const replyNode = findNodeByLongName(leading.mentionName, nodesById);
|
||||
let badgeHtml = '';
|
||||
if (replyNode) {
|
||||
badgeHtml = renderNodeBadge(renderShortHtml, replyNode);
|
||||
}
|
||||
// Graceful degradation: when the registry doesn't contain the
|
||||
// mention target (common on large deployments where ``/api/nodes``
|
||||
// caps at 1000 entries by recency), still surface the leading
|
||||
// mention as a reply prefix using the raw name. Without this
|
||||
// fallback the body would render as bare ``@[Name] body...`` which
|
||||
// looks like an unresolved mention link to the user.
|
||||
if (typeof badgeHtml !== 'string' || badgeHtml.length === 0) {
|
||||
badgeHtml = `<span class="short-name">${escapeHtml(leading.mentionName)}</span>`;
|
||||
}
|
||||
meshcoreReplyPrefix = formatReplyPrefixHtml('in reply to', badgeHtml, escapeHtml);
|
||||
effectiveBodyText = leading.remainingText ?? '';
|
||||
}
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Mention rendering for MeshCore messages
|
||||
// ------------------------------------------------------------------
|
||||
const renderMentionHtml = isMeshcore
|
||||
? (mentionedName) => {
|
||||
const mentionNode = findNodeByLongName(mentionedName, nodesById);
|
||||
if (mentionNode) {
|
||||
return renderNodeBadge(renderShortHtml, mentionNode);
|
||||
}
|
||||
return `@[${escapeHtml(mentionedName)}]`;
|
||||
}
|
||||
: null;
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Build body HTML via the shared buildMessageBody helper
|
||||
// ------------------------------------------------------------------
|
||||
const bodyMsg = (parsedMeshcorePrefix || effectiveBodyText !== message.text)
|
||||
? { ...message, text: effectiveBodyText }
|
||||
: message;
|
||||
|
||||
const bodyHtml = buildMessageBody({
|
||||
message: bodyMsg,
|
||||
escapeHtml,
|
||||
renderEmojiHtml,
|
||||
renderMentionHtml,
|
||||
});
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Combine prefix + body
|
||||
// ------------------------------------------------------------------
|
||||
const segments = [];
|
||||
const prefix = replyPrefix || meshcoreReplyPrefix;
|
||||
if (prefix) segments.push(prefix);
|
||||
if (bodyHtml) segments.push(bodyHtml);
|
||||
return {
|
||||
html: segments.join(' '),
|
||||
parsedMeshcorePrefix,
|
||||
meshcoreSenderNode,
|
||||
};
|
||||
}
|
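A usage sketch for the shared renderer. The module path and the stub helpers are assumptions (the diff view does not show the new file's name), and the exact body HTML depends on buildMessageBody, so the expected output is approximate:

import { renderChatEntryContent } from './chat-entry-renderer.js'; // path assumed

const escapeHtml = (s) => String(s).replace(/[&<>"']/g, (c) =>
  ({ '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;' }[c]));
const renderShortHtml = (shortName) => `<span class="short-name">${escapeHtml(shortName ?? '?')}</span>`;
const renderEmojiHtml = (s) => s; // pass-through stub

const { html } = renderChatEntryContent({
  message: { protocol: 'meshcore', to_id: '^all', text: '@[Bob] see you at the tower' },
  nodesById: new Map([['!bob', { node_id: '!bob', short_name: 'BOB', long_name: 'Bob' }]]),
  messagesById: new Map(),
  renderShortHtml,
  escapeHtml,
  renderEmojiHtml,
});
// html ≈ '<span class="chat-entry-reply">[in reply to <span class="short-name">BOB</span>]</span> see you at the tower'
// The leading @[Bob] mention was resolved against the registry and promoted
// to a reply prefix; the remaining text became the message body.
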
@@ -311,10 +311,17 @@ export function buildChatTabModel({
    channel.entries.sort((a, b) => a.ts - b.ts);
    channel.messageCount = channel.entries.length;
  }
  // Sort channels by activity (most messages first), then alphabetically on ties.
  const channels = Array.from(channelBuckets.values()).sort((a, b) =>
    b.messageCount - a.messageCount || a.label.localeCompare(b.label)
  );
  // Sort channels into two tiers:
  // 1. Primary channels (channel index 0 — LongFast, MediumFast, Public, etc.)
  //    ordered by activity desc so the most-active protocol leads within the tier.
  // 2. Secondary channels (index > 0) ordered by activity desc, then alpha.
  // Within each tier, ties on messageCount are broken alphabetically by label.
  const channels = Array.from(channelBuckets.values()).sort((a, b) => {
    const aTier = a.index === 0 ? 0 : 1;
    const bTier = b.index === 0 ? 0 : 1;
    if (aTier !== bTier) return aTier - bTier;
    return b.messageCount - a.messageCount || a.label.localeCompare(b.label);
  });

  return { logEntries, channels };
}

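A worked example of the two-tier comparator on hypothetical channels:

const channels = [
  { label: 'Berlin',   index: 2, messageCount: 90 },
  { label: 'LongFast', index: 0, messageCount: 40 },
  { label: 'Public',   index: 0, messageCount: 75 },
  { label: 'Admin',    index: 1, messageCount: 90 },
];
channels.sort((a, b) => {
  const aTier = a.index === 0 ? 0 : 1;
  const bTier = b.index === 0 ? 0 : 1;
  if (aTier !== bTier) return aTier - bTier;
  return b.messageCount - a.messageCount || a.label.localeCompare(b.label);
});
// → Public, LongFast, Admin, Berlin: index-0 channels lead regardless of raw
//   activity, and the 90-message tie breaks alphabetically (Admin before Berlin).
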
@@ -23,6 +23,7 @@ import {
import { resolveLegendVisibility } from './map-legend-visibility.js';
import { mergeConfig } from './settings.js';
import { roleColors } from './role-helpers.js';
import { meshcoreIconHtml, meshtasticIconHtml } from './protocol-helpers.js';

/**
 * Escape HTML special characters to prevent XSS.
@@ -113,38 +114,135 @@ function colorForNodeCount(count) {
}

/**
 * Render arbitrary contact text while hyperlinking recognised URL-like segments.
 * Matches recognised link-like segments in plain text:
 * - Absolute URLs (https?://, mailto:, matrix:)
 * - Matrix room aliases (#room:domain.tld)
 * - Matrix user IDs (@user:domain.tld)
 * - Bare domain-with-path (discord.gg/..., t.me/...)
 *
 * @param {*} contact Raw contact value from the API.
 * @returns {string} HTML markup safe for insertion.
 * Character classes use possessive-style atomic groupings (no overlap) so the
 * regex engine cannot backtrack into super-linear runtime.
 */
function renderContactHtml(contact) {
  if (typeof contact !== 'string') return '';
  const trimmed = contact.trim();
  if (!trimmed) return '';
  const urlPattern = /(https?:\/\/[^\s]+|mailto:[^\s]+|matrix:[^\s]+)/gi;
const CONTACT_LINK_PATTERN =
  /(https?:\/\/\S+|mailto:\S+|matrix:\S+|[@#][a-zA-Z0-9._/-]+:[a-zA-Z0-9._-]+\.[a-zA-Z]{2,}|[a-zA-Z0-9-]+\.[a-zA-Z]{2,}\/\S+)/gi;

/**
 * Regex matching `<a>` elements (including malformed unquoted attributes) and
 * any other HTML tags so they can be handled separately from plain text.
 *
 * The `<a>` branch uses `[^<]*` (no nesting) instead of `[\s\S]*?` to avoid
 * super-linear backtracking when nested or malformed tags are present.
 */
const HTML_SEGMENT_PATTERN = /(<a\b[^>]*>[^<]*<\/a\s*>|<[^>]+>)/gi;

/** Extracts the href value from an `<a>` opening tag, quoted or unquoted. */
const HREF_ATTR_PATTERN = /\bhref\s*=\s*(?:"([^"]*)"|'([^']*)'|([^\s>]*))/i;

/** Protocols allowed in whitelisted `<a>` hrefs. */
const SAFE_HREF_RE = /^(?:https?:\/\/|matrix:|mailto:)/i;

/**
 * Resolve a raw matched token to an href string.
 *
 * @param {string} raw Matched token from CONTACT_LINK_PATTERN.
 * @returns {string} Absolute href suitable for use in an anchor element.
 */
function resolveHref(raw) {
  if (raw.startsWith('#') || raw.startsWith('@')) {
    // Matrix room alias or user ID → matrix.to permalink
    return `https://matrix.to/#/${raw}`;
  }
  if (/^[a-zA-Z0-9-]+\./.test(raw) && !raw.includes('://')) {
    // Bare domain-with-path (e.g. discord.gg/…) — prepend https://
    return `https://${raw}`;
  }
  return raw;
}

/**
 * Linkify URL-like tokens in a plain-text (already HTML-free) segment.
 *
 * @param {string} text Plain text with no HTML tags.
 * @returns {string} HTML-safe string with recognised links wrapped in anchors.
 */
function renderPlainSegment(text) {
  const parts = [];
  let lastIndex = 0;
  const pattern = new RegExp(CONTACT_LINK_PATTERN.source, 'gi');
  let match;
  while ((match = pattern.exec(text)) !== null) {
    const before = text.slice(lastIndex, match.index);
    if (before) parts.push(escapeHtml(before));
    const raw = match[0];
    const href = resolveHref(raw);
    parts.push(`<a href="${escapeHtml(href)}" target="_blank" rel="noopener noreferrer">${escapeHtml(raw)}</a>`);
    lastIndex = match.index + raw.length;
  }
  const trailing = text.slice(lastIndex);
  if (trailing) parts.push(escapeHtml(trailing));
  return parts.join('');
}

/**
 * Render arbitrary contact or channel text as safe HTML.
 *
 * - Existing `<a>` tags are sanitised: href is validated against an allowlist
 *   of safe protocols (https, http, matrix:, mailto:), link text is escaped,
 *   and `target="_blank" rel="noopener noreferrer"` is always applied.
 * - Other HTML tags (e.g. `<b>`) are stripped; their text content is kept.
 * - Plain-text segments are scanned for URLs, Matrix aliases/user-IDs, and
 *   bare domain-with-path references (e.g. discord.gg/…) and linkified.
 * - Line breaks are converted to `<br>`.
 *
 * @param {*} text Raw value from the API (may contain HTML).
 * @returns {string} HTML markup safe for insertion.
 */
function renderContactHtml(text) {
  if (typeof text !== 'string') return '';
  const trimmed = text.trim();
  if (!trimmed) return '';

  const parts = [];
  let lastIndex = 0;
  const segPattern = new RegExp(HTML_SEGMENT_PATTERN.source, 'gi');
  let match;

  while ((match = urlPattern.exec(trimmed)) !== null) {
    const textBefore = trimmed.slice(lastIndex, match.index);
    if (textBefore) {
      parts.push(escapeHtml(textBefore));
  while ((match = segPattern.exec(trimmed)) !== null) {
    // Linkify plain text before this HTML segment
    const before = trimmed.slice(lastIndex, match.index);
    if (before) parts.push(renderPlainSegment(before));

    const tag = match[0];
    if (/^<a\b/i.test(tag)) {
      // Whitelisted <a> tag — extract href, validate, re-render safely
      const hrefMatch = HREF_ATTR_PATTERN.exec(tag);
      const href = hrefMatch ? (hrefMatch[1] ?? hrefMatch[2] ?? hrefMatch[3] ?? '') : '';
      // Strip HTML tags to derive plain link text; content is still escaped below.
      // The replace runs in a loop to handle residual tags left after the first
      // pass — this is safe because a single-pass replace of `<…>` can leave
      // behind a reconstructed tag when the input contains e.g. `<<script>`.
      let linkText = tag;
      let prev;
      do { prev = linkText; linkText = linkText.replace(/<[^>]*>/g, ''); } while (linkText !== prev);
      linkText = linkText.trim();
      if (SAFE_HREF_RE.test(href)) {
        parts.push(`<a href="${escapeHtml(href)}" target="_blank" rel="noopener noreferrer">${escapeHtml(linkText || href)}</a>`);
      } else {
        // Unsafe or missing href — render link text only
        parts.push(escapeHtml(linkText));
      }
    }
    const url = match[0];
    const safeUrl = escapeHtml(url);
    parts.push(`<a href="${safeUrl}" target="_blank" rel="noopener noreferrer">${safeUrl}</a>`);
    lastIndex = match.index + url.length;
    // Other tags (<b>, </b>, etc.) are stripped; their text content falls
    // through as plain text in subsequent iterations.

    lastIndex = match.index + tag.length;
  }

  // Linkify any remaining plain text after the last HTML segment
  const trailing = trimmed.slice(lastIndex);
  if (trailing) {
    parts.push(escapeHtml(trailing));
  }
  if (trailing) parts.push(renderPlainSegment(trailing));

  const html = parts.join('');
  return html.replace(/\r?\n/g, '<br>');
  return parts.join('').replace(/\r?\n/g, '<br>');
}

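Illustrative input → output pairs for the sanitiser above. renderContactHtml is module-internal, so these are stated expectations derived from the code rather than a runnable import:

// Plain text with a Matrix alias and a bare invite path:
//   'Join #mesh:example.org or discord.gg/abc123'
//   → Join <a href="https://matrix.to/#/#mesh:example.org" …>#mesh:example.org</a>
//     or <a href="https://discord.gg/abc123" …>discord.gg/abc123</a>
//
// An anchor with an unsafe protocol is reduced to its text content:
//   '<a href="javascript:alert(1)">click</a>'  →  click
//
// A safe anchor is re-rendered with hardened attributes:
//   '<a href="https://example.org">Site</a>'
//   →  <a href="https://example.org" target="_blank" rel="noopener noreferrer">Site</a>
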
/**
@@ -296,6 +394,18 @@ export async function initializeFederationPage(options = {}) {
      hasValue: hasNumberValue,
      defaultDirection: 'desc'
    },
    meshcoreNodesCount: {
      getValue: inst => toFiniteNumber(inst.meshcoreNodesCount),
      compare: compareNumber,
      hasValue: hasNumberValue,
      defaultDirection: 'desc'
    },
    meshtasticNodesCount: {
      getValue: inst => toFiniteNumber(inst.meshtasticNodesCount),
      compare: compareNumber,
      hasValue: hasNumberValue,
      defaultDirection: 'desc'
    },
    latitude: { getValue: inst => toFiniteNumber(inst.latitude), compare: compareNumber, hasValue: hasNumberValue, defaultDirection: 'asc' },
    longitude: { getValue: inst => toFiniteNumber(inst.longitude), compare: compareNumber, hasValue: hasNumberValue, defaultDirection: 'asc' },
    lastUpdateTime: {
@@ -381,15 +491,21 @@ export async function initializeFederationPage(options = {}) {
    const contactHtml = renderContactHtml(instance.contactLink);
    const nodesCountValue = toFiniteNumber(instance.nodesCount ?? instance.nodes_count);
    const nodesCountText = nodesCountValue == null ? '<em>—</em>' : escapeHtml(String(nodesCountValue));
    const mcNodesVal = toFiniteNumber(instance.meshcoreNodesCount);
    const mcNodesText = mcNodesVal == null ? '<em>—</em>' : `${meshcoreIconHtml()} ${escapeHtml(String(mcNodesVal))}`;
    const mtNodesVal = toFiniteNumber(instance.meshtasticNodesCount);
    const mtNodesText = mtNodesVal == null ? '<em>—</em>' : `${meshtasticIconHtml()} ${escapeHtml(String(mtNodesVal))}`;

    tr.innerHTML = `
      <td class="instances-col instances-col--name">${nameHtml}</td>
      <td class="instances-col instances-col--domain mono">${domainHtml}</td>
      <td class="instances-col instances-col--contact">${contactHtml || '<em>—</em>'}</td>
      <td class="instances-col instances-col--version mono">${escapeHtml(instance.version || '')}</td>
      <td class="instances-col instances-col--channel">${escapeHtml(instance.channel || '')}</td>
      <td class="instances-col instances-col--channel">${renderContactHtml(instance.channel) || ''}</td>
      <td class="instances-col instances-col--frequency">${escapeHtml(instance.frequency || '')}</td>
      <td class="instances-col instances-col--nodes mono">${nodesCountText}</td>
      <td class="instances-col instances-col--meshcore-nodes mono">${mcNodesText}</td>
      <td class="instances-col instances-col--meshtastic-nodes mono">${mtNodesText}</td>
      <td class="instances-col instances-col--latitude mono">${fmtCoords(instance.latitude)}</td>
      <td class="instances-col instances-col--longitude mono">${fmtCoords(instance.longitude)}</td>
      <td class="instances-col instances-col--last-update mono">${timeAgo(instance.lastUpdateTime, nowSec)}</td>

@@ -0,0 +1,108 @@
/*
 * Copyright © 2025-26 l5yth & contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Extract the maximum timestamp from an array of API records.
 *
 * Inspects the specified fields on each record and returns the highest
 * value found. Returns 0 when the array is empty or contains no valid
 * timestamps.
 *
 * @param {Array<Object>} records API response rows.
 * @param {Array<string>} [fields] Timestamp field names to inspect.
 * @returns {number} Maximum unix timestamp across all records.
 */
export function maxRecordTimestamp(records, fields = ['rx_time', 'last_heard']) {
  let max = 0;
  if (!Array.isArray(records)) return max;
  for (const record of records) {
    if (!record || typeof record !== 'object') continue;
    for (const field of fields) {
      const val = record[field];
      if (typeof val === 'number' && val > max) max = val;
    }
  }
  return max;
}

/**
 * Merge incremental rows into an existing collection, deduplicating by a
 * key field. New rows replace existing entries with the same key.
 *
 * @param {Array<Object>} existing Previous full dataset.
 * @param {Array<Object>} incoming New incremental rows.
 * @param {string} keyField Property used for deduplication.
 * @returns {Array<Object>} Merged array.
 */
export function mergeById(existing, incoming, keyField) {
  if (!incoming || incoming.length === 0) return existing;
  const map = new Map();
  for (const item of existing) {
    const key = item[keyField];
    if (key != null) map.set(key, item);
  }
  for (const item of incoming) {
    const key = item[keyField];
    if (key != null) map.set(key, item);
  }
  return Array.from(map.values());
}

/**
 * Merge incremental rows using a composite key built from multiple fields.
 *
 * Behaves like {@link mergeById} but joins the values of several fields
 * into a single string key so records with a composite primary key (e.g.
 * ``node_id`` + ``neighbor_id``) are deduplicated correctly.
 *
 * @param {Array<Object>} existing Previous full dataset.
 * @param {Array<Object>} incoming New incremental rows.
 * @param {Array<string>} keyFields Properties whose values form the composite key.
 * @returns {Array<Object>} Merged array.
 */
export function mergeByCompositeKey(existing, incoming, keyFields) {
  if (!incoming || incoming.length === 0) return existing;

  function buildKey(item) {
    return keyFields.map(f => String(item[f] ?? '')).join('\0');
  }

  const map = new Map();
  for (const item of existing) {
    map.set(buildKey(item), item);
  }
  for (const item of incoming) {
    map.set(buildKey(item), item);
  }
  return Array.from(map.values());
}

/**
 * Trim an array to at most ``limit`` entries, keeping the ones with the
 * highest timestamp value. Prevents unbounded growth from incremental
 * merges over a long-running browser tab.
 *
 * @param {Array<Object>} records Merged record array.
 * @param {number} limit Maximum number of entries to retain.
 * @param {string} [tsField] Timestamp field name used for sorting.
 * @returns {Array<Object>} Trimmed array (may be the same reference if
 *   already within the limit).
 */
export function trimToLimit(records, limit, tsField = 'rx_time') {
  if (!Array.isArray(records) || records.length <= limit) return records;
  const sorted = records.slice().sort((a, b) => (b[tsField] || 0) - (a[tsField] || 0));
  return sorted.slice(0, limit);
}
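A hypothetical polling loop wiring these helpers together. The module path, the `since` query parameter, and the state names are illustrative assumptions, not taken from this diff:

import { maxRecordTimestamp, mergeById, trimToLimit } from './incremental-merge.js'; // path assumed

let telemetry = [];
let sinceTs = 0;

async function refreshTelemetry() {
  // First call fetches everything; later calls only ask for newer rows.
  const url = sinceTs > 0 ? `/api/telemetry?since=${sinceTs}` : '/api/telemetry';
  const rows = await (await fetch(url, { cache: 'default' })).json();
  telemetry = mergeById(telemetry, rows, 'id');          // newer rows win on key collisions
  telemetry = trimToLimit(telemetry, 1000);              // cap growth in long-lived tabs
  sinceTs = Math.max(sinceTs, maxRecordTimestamp(rows)); // advance the incremental cursor
}
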
@@ -205,6 +205,18 @@ export async function initializeInstanceSelector(options) {
  const visibleEntries = filterDisplayableFederationInstances(payload);
  updateFederationNavCount({ documentObject: doc, count: visibleEntries.length });

  // Hide the selector when fewer than two instances are available — a single
  // entry (only the current instance) offers no meaningful navigation choice.
  if (visibleEntries.length < 2) {
    const selectorContainer = (typeof selectElement.closest === 'function'
      ? selectElement.closest('.header-federation')
      : null) || selectElement.parentElement;
    if (selectorContainer) {
      selectorContainer.hidden = true;
    }
    return;
  }

  const sanitizedDomain = typeof instanceDomain === 'string' ? instanceDomain.trim().toLowerCase() : null;

  const sortedEntries = visibleEntries

+577
-401
File diff suppressed because it is too large
@@ -0,0 +1,282 @@
/*
 * Copyright © 2025-26 l5yth & contributors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Default coordinate-bucketing precision. Five decimal places is roughly 1.1
 * metres at the equator, which is well below the typical GPS uncertainty for
 * Meshtastic / MeshCore position reports — coordinates that match to this
 * precision are treated as the "same" location for offset purposes.
 */
const DEFAULT_PRECISION = 5;

/**
 * Default offset ring radius in pixels for a co-located group of two nodes.
 * Chosen slightly larger than the standard map marker radius so the markers
 * read as adjacent rather than overlapping. Callers may pass ``0`` to
 * intentionally collapse all members of a group back onto the shared centre
 * — the value is honoured rather than substituted with the default so that
 * the offset feature can be disabled without touching the call sites that
 * still want grouping for other purposes.
 */
const DEFAULT_BASE_RADIUS_PX = 14;

/**
 * Tolerance (in pixels) below which an offset is considered effectively zero.
 * ``radius * Math.sin(Math.PI)`` is ~1.7e-15, not exactly ``0``, so the
 * ``isOffsetSignificant`` check uses a small epsilon rather than strict
 * equality to avoid producing zero-length spider lines for those slots.
 */
const OFFSET_EPSILON_PX = 1e-9;

/**
 * Additional pixels added to the offset ring radius for every node beyond the
 * second. Keeps groups of five or more visually legible without growing
 * unbounded for any single pair.
 */
const DEFAULT_RADIUS_GROWTH_PX = 4;

/**
 * Build a string key used to bucket entries that share the same coordinate at
 * the requested precision.
 *
 * @param {number} lat Latitude in degrees.
 * @param {number} lon Longitude in degrees.
 * @param {number} precision Number of fractional digits to retain.
 * @returns {string} Stable bucket key.
 */
function coordinateKey(lat, lon, precision) {
  return `${lat.toFixed(precision)},${lon.toFixed(precision)}`;
}

/**
 * Upper bound on coordinate precision: ``Number.prototype.toFixed`` throws a
 * ``RangeError`` for values outside ``0..100``.
 */
const MAX_PRECISION = 100;

/**
 * Normalise the precision option to a non-negative integer in the range
 * accepted by ``Number.prototype.toFixed``, falling back to the module default
 * when the caller passes an invalid value.
 *
 * @param {number} value Caller-provided precision.
 * @returns {number} Precision used to format coordinate keys.
 */
function normalisePrecision(value) {
  if (!Number.isFinite(value) || value < 0) return DEFAULT_PRECISION;
  return Math.min(Math.floor(value), MAX_PRECISION);
}

/**
 * Normalise a positive numeric option, falling back to the supplied default
 * when the caller passes an invalid (non-finite or negative) value.
 *
 * @param {number} value Caller-provided value.
 * @param {number} fallback Default applied when ``value`` is invalid.
 * @returns {number} Sanitised numeric option.
 */
function normalisePositive(value, fallback) {
  if (!Number.isFinite(value) || value < 0) return fallback;
  return value;
}

/**
 * Filter and parse a list of node payloads down to the entries that should
 * actually be rendered as map markers. The parsing rules mirror the legacy
 * inline loop in ``renderMap`` so behaviour stays identical:
 *
 * - ``null`` / ``undefined`` / empty-string lat or lon → skipped.
 * - lat or lon that does not parse as a finite number → skipped.
 * - When a positive ``maxDistanceKm`` is supplied, nodes whose ``distance_km``
 *   is finite and exceeds the limit are skipped. Nodes with no ``distance_km``
 *   value are always kept regardless of the limit, matching the renderer's
 *   "show by default if we don't know" behaviour.
 *
 * The function lives in the helper module so it can be unit-tested without
 * spinning up Leaflet or the DOM environment used by the renderer.
 *
 * @param {Iterable<Object>} nodes Iterable of node payloads.
 * @param {Object} [options] Optional limits.
 * @param {number} [options.maxDistanceKm] When finite and positive, drop nodes
 *   whose ``distance_km`` field exceeds this value.
 * @returns {Array<{node: Object, lat: number, lon: number}>} Renderable
 *   entries in input order.
 */
export function buildRenderableEntries(nodes, options = {}) {
  if (!nodes || typeof nodes[Symbol.iterator] !== 'function') return [];
  const limit = Number.isFinite(options.maxDistanceKm) && options.maxDistanceKm > 0
    ? options.maxDistanceKm
    : null;
  const entries = [];
  for (const node of nodes) {
    if (!node) continue;
    const latRaw = node.latitude;
    const lonRaw = node.longitude;
    if (latRaw == null || latRaw === '' || lonRaw == null || lonRaw === '') continue;
    const lat = Number(latRaw);
    const lon = Number(lonRaw);
    if (!Number.isFinite(lat) || !Number.isFinite(lon)) continue;
    if (limit !== null && node.distance_km != null && node.distance_km > limit) continue;
    entries.push({ node, lat, lon });
  }
  return entries;
}

/**
 * Compute pixel-space offsets that spread co-located map markers around their
 * shared coordinate so each node remains individually visible and clickable.
 *
 * The function is deliberately dependency-free and synchronous: it operates
 * entirely on plain data so that callers can unit-test the geometry without a
 * live Leaflet map in scope. The caller is expected to translate the returned
 * ``{dx, dy}`` deltas from layer-pixel space into the final ``LatLng`` using
 * the active map projection.
 *
 * Output preserves the order of ``entries`` so that any prior render-priority
 * sort applied by the caller continues to drive draw order. Within each
 * co-located group, the angular slot assigned to each node is determined by a
 * stable sort on ``node.node_id`` which ensures repeated renders place the
 * same node in the same slot.
 *
 * @param {Array<{node: Object, lat: number, lon: number}>} entries Renderable
 *   nodes paired with their parsed numeric coordinates.
 * @param {Object} [options] Optional tuning parameters.
 * @param {number} [options.precision=5] Decimal places used to bucket nearby
 *   coordinates into the same group.
 * @param {number} [options.baseRadiusPx=14] Pixel radius applied to a group
 *   of two nodes.
 * @param {number} [options.radiusGrowthPx=4] Pixel radius added per extra
 *   node beyond the second.
 * @returns {Array<{entry: {node: Object, lat: number, lon: number}, dx: number, dy: number}>}
 *   One result per input entry, in the original input order. Singleton
 *   groups receive ``{dx: 0, dy: 0}``.
 */
export function computeColocatedOffsets(entries, options = {}) {
  if (!Array.isArray(entries) || entries.length === 0) return [];
  const precision = normalisePrecision(options.precision ?? DEFAULT_PRECISION);
  const baseRadiusPx = normalisePositive(options.baseRadiusPx ?? DEFAULT_BASE_RADIUS_PX, DEFAULT_BASE_RADIUS_PX);
  const radiusGrowthPx = normalisePositive(options.radiusGrowthPx ?? DEFAULT_RADIUS_GROWTH_PX, DEFAULT_RADIUS_GROWTH_PX);

  // Group entries by rounded coordinate so identical (or near-identical)
  // positions can be spread around a shared centre. We retain each entry's
  // original index so the final result can be returned in input order.
  const groups = new Map();
  entries.forEach((entry, index) => {
    const key = coordinateKey(entry.lat, entry.lon, precision);
    let bucket = groups.get(key);
    if (!bucket) {
      bucket = [];
      groups.set(key, bucket);
    }
    bucket.push({ entry, index });
  });

  const results = new Array(entries.length);
  for (const bucket of groups.values()) {
    if (bucket.length === 1) {
      const { entry, index } = bucket[0];
      results[index] = { entry, dx: 0, dy: 0 };
      continue;
    }

    // Sort within the group by node_id so the angular slot is deterministic
    // across renders even if the caller-supplied entry order changes. When
    // two entries share an identical id (e.g. two records with empty/missing
    // ids), fall back to the original input index so the comparator never
    // returns 0 — making the ordering portable across sort implementations
    // rather than relying on engine-specific stable-sort semantics.
    const ordered = bucket.slice().sort((a, b) => {
      const idA = a.entry?.node?.node_id ?? '';
      const idB = b.entry?.node?.node_id ?? '';
      if (idA !== idB) return idA < idB ? -1 : 1;
      return a.index - b.index;
    });

    const radius = baseRadiusPx + Math.max(0, ordered.length - 2) * radiusGrowthPx;
    const angularStep = (2 * Math.PI) / ordered.length;
    ordered.forEach((member, slot) => {
      const theta = slot * angularStep;
      results[member.index] = {
        entry: member.entry,
        dx: radius * Math.cos(theta),
        dy: radius * Math.sin(theta)
      };
    });
  }

  return results;
}

/**
 * Test whether a ``{dx, dy}`` pixel offset is large enough to materially
 * change a marker's on-screen position. Used by the renderer to decide
 * whether to draw a spider leader line and to bypass an unnecessary
 * projection round-trip for singleton (or near-singleton) groups.
 *
 * @param {number} dx Pixel offset along the layer-point X axis.
 * @param {number} dy Pixel offset along the layer-point Y axis.
 * @returns {boolean} True when the offset magnitude exceeds ``OFFSET_EPSILON_PX``.
 */
export function isOffsetSignificant(dx, dy) {
  return Math.hypot(dx, dy) > OFFSET_EPSILON_PX;
}

/**
 * Re-position every entry in a previously-recorded spider state by re-running
 * the supplied projector and pushing the result back onto the marker / leader
 * line. Pulled out of the renderer so it can be unit-tested without a live
 * Leaflet map: the caller injects whatever projector + Leaflet-marker shapes
 * make sense for the current host.
 *
 * Each ``state`` entry is expected to look like
 * ``{ marker, line, lat, lon, dx, dy }`` where ``lat``/``lon`` are the
 * original (un-offset) coordinates and ``marker``/``line`` may be ``null``.
 * Markers / lines that do not expose ``setLatLng`` / ``setLatLngs`` methods
 * are silently skipped so the helper is tolerant of stub objects supplied
 * by tests and of Leaflet objects whose API surface evolves over time.
 *
 * @param {Array<{marker: ?Object, line: ?Object, lat: number, lon: number, dx: number, dy: number}>} state
 *   Per-render record produced by the renderer when it places offset markers.
 * @param {(lat: number, lon: number, dx: number, dy: number) => [number, number]} project
 *   Function that converts an original coordinate plus a pixel offset into
 *   the corresponding display ``[lat, lng]`` for the current map projection.
 * @returns {void}
 */
export function refreshSpiderPositions(state, project) {
  if (!Array.isArray(state) || state.length === 0) return;
  if (typeof project !== 'function') return;
  for (const item of state) {
    if (!item) continue;
    const offsetLatLng = project(item.lat, item.lon, item.dx, item.dy);
    if (item.marker && typeof item.marker.setLatLng === 'function') {
      item.marker.setLatLng(offsetLatLng);
    }
    if (item.line && typeof item.line.setLatLngs === 'function') {
      item.line.setLatLngs([[item.lat, item.lon], offsetLatLng]);
    }
  }
}

export const __testUtils = {
  DEFAULT_PRECISION,
  DEFAULT_BASE_RADIUS_PX,
  DEFAULT_RADIUS_GROWTH_PX,
  MAX_PRECISION,
  OFFSET_EPSILON_PX,
  coordinateKey,
  normalisePrecision,
  normalisePositive
};
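A quick geometry check of the helpers above, with three hypothetical nodes that round into the same 5-decimal-place bucket (the module path is an assumption):

import { buildRenderableEntries, computeColocatedOffsets } from './map-spiderfy-helpers.js'; // path assumed

const nodes = [
  { node_id: 'a', latitude: 52.52,     longitude: 13.405 },
  { node_id: 'b', latitude: '52.52',   longitude: '13.405' },  // string coordinates parse too
  { node_id: 'c', latitude: 52.520004, longitude: 13.405004 }, // rounds into the same bucket
];
const offsets = computeColocatedOffsets(buildRenderableEntries(nodes));
// radius = 14 + (3 - 2) * 4 = 18 px, slots at 0°, 120°, 240°, ordered by node_id:
// offsets[0] → { dx: 18, dy: 0 }
// offsets[1] → { dx: -9, dy: ≈15.59 }
// offsets[2] → { dx: -9, dy: ≈-15.59 }
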
@@ -59,9 +59,68 @@ export function parseMeshcoreSenderPrefix(text) {
export function findNodeByLongName(longName, nodesById) {
  if (!longName || typeof longName !== 'string') return null;
  if (!(nodesById instanceof Map)) return null;
  const trimmed = longName.trim();
  if (!trimmed) return null;

  // Two-pass scan: O(2N) worst case (no match found). Fine for typical
  // mesh sizes (hundreds of nodes); if registries grow into the thousands
  // and lookup becomes hot, consider building a normalised long-name index
  // alongside the id-keyed map at hydration time.

  // First pass: exact match on trimmed candidate long names.
  for (const node of nodesById.values()) {
    const candidate = node.long_name ?? node.longName;
    if (typeof candidate === 'string' && candidate === longName) return node;
    const raw = node.long_name ?? node.longName;
    if (typeof raw !== 'string') continue;
    if (raw.trim() === trimmed) return node;
  }

  // Second pass: fallback match after stripping leading non-letter/non-digit
  // characters (emoji, punctuation, spaces) from the candidate. Handles
  // messages that reference a node by its semantic name without the emoji
  // prefix the node carries in the registry — e.g. @[Timo +] matching
  // a node whose long_name is "📺 Timo +".
  for (const node of nodesById.values()) {
    const raw = node.long_name ?? node.longName;
    if (typeof raw !== 'string') continue;
    const stripped = raw.replace(/^[^\p{L}\p{N}]+/u, '').trim();
    if (stripped && stripped === trimmed) return node;
  }

  return null;
}

/**
 * Extract a leading ``@[Name]`` mention from text if it looks like a reply.
 *
 * MeshCore does not carry a structured ``reply_id`` on replies; instead, the
 * sender's client prepends ``@[Author]`` to the body when quoting a previous
 * message. When the body starts with exactly one mention and no other
 * mentions appear in the text, we treat that as a reply and surface it as an
 * ``[in reply to BADGE]`` prefix, similar to Meshtastic's reply rendering.
 *
 * Names captured from ``@[...]`` are trimmed so that ``@[ Timo +]`` or
 * ``@[T-deck NK ]`` resolve correctly against the registry.
 *
 * @param {?string} text Message text to inspect.
 * @returns {{ mentionName: string, remainingText: ?string }|null}
 *   Parsed components, or ``null`` when the text does not begin with a single
 *   mention.
 */
export function extractLeadingMentionAsReply(text) {
  if (!text || typeof text !== 'string') return null;
  const trimmed = text.trim();
  if (!trimmed.startsWith('@[')) return null;

  // Total mention count must be exactly one for the message to qualify as a
  // single-mention reply (multiple mentions are ambiguous — treat as regular
  // mentions instead).
  const allMentions = trimmed.match(/@\[[^\]]+\]/g);
  if (!allMentions || allMentions.length !== 1) return null;

  const match = trimmed.match(/^@\[([^\]]+)\]\s*([\s\S]*)$/);
  if (!match) return null;
  const mentionName = match[1].trim();
  if (!mentionName) return null;
  const rest = match[2].trim();
  return { mentionName, remainingText: rest.length > 0 ? rest : null };
}

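Behaviour sketch for the reply heuristic, derived directly from the code above:

extractLeadingMentionAsReply('@[Bob] on my way');
// → { mentionName: 'Bob', remainingText: 'on my way' }

extractLeadingMentionAsReply('@[ Timo +]');           // name trimmed, no body left
// → { mentionName: 'Timo +', remainingText: null }

extractLeadingMentionAsReply('@[Bob] ping @[Carol]'); // two mentions → ambiguous
// → null

extractLeadingMentionAsReply('hello @[Bob]');         // mention not leading
// → null
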
@@ -262,23 +262,81 @@ export function resolveReplyPrefix({
/**
 * Normalise an emoji candidate into a trimmed string.
 *
 * Numeric values above 127 are treated as Unicode codepoints and converted to
 * the corresponding character (e.g. ``128077`` → ``"👍"``). Small values
 * (≤ 127) are kept as digit strings so that slot markers like ``"1"`` pass
 * through unchanged.
 *
 * @param {*} value Emoji candidate.
 * @returns {?string} Emoji string when valid.
 */
function normaliseEmojiValue(value) {
export function normaliseEmojiValue(value) {
  if (value == null) return null;
  if (typeof value === 'string') {
    const trimmed = value.trim();
    return trimmed.length > 0 ? trimmed : null;
    if (!trimmed) return null;
    if (/^\d+$/.test(trimmed)) {
      const cp = Number(trimmed);
      if (cp > 127 && Number.isFinite(cp)) {
        try { return String.fromCodePoint(cp); } catch { /* fall through */ }
      }
    }
    return trimmed;
  }
  if (typeof value === 'number') {
    if (!Number.isFinite(value)) return null;
    if (value > 127) {
      try { return String.fromCodePoint(value); } catch { /* fall through */ }
    }
    return String(value);
  }
  const str = String(value).trim();
  return str.length > 0 ? str : null;
}

/**
 * Maximum Unicode codepoint length for text that may still qualify as a
 * reaction placeholder. A bare emoji (single grapheme) is at most 2
 * codepoints — base character plus an optional variation selector
 * (U+FE0F). Multi-codepoint ZWJ families (👨👩👧, 🏳️🌈) are intentionally
 * NOT accepted here: matching them would also let through short CJK
 * messages like "你好世界吗" (5 codepoints, no ASCII letters), causing real
 * prose to be misclassified as a reaction.
 *
 * MUST stay aligned with the Python ingestor's
 * ``_REACTION_PLACEHOLDER_MAX_CODEPOINTS`` (``handlers/generic.py``);
 * changing one side without the other re-introduces ingest/render
 * disagreement (a packet stored as a reaction but rendered as text, or
 * vice versa).
 *
 * @type {number}
 */
const REACTION_PLACEHOLDER_MAX_CODEPOINTS = 2;

/**
 * Return whether ``text`` looks like a reaction placeholder rather than
 * substantive message content.
 *
 * Reaction packets carry either no text, a small numeric count/slot marker
 * (e.g. ``"1"``, ``"3"``), or occasionally a bare emoji. Anything that reads
 * as real prose should cause the message to be classified as a regular text
 * message, not a reaction.
 *
 * @param {?string} text Trimmed message text (may be ``null``).
 * @returns {boolean} ``true`` when *text* is absent or a placeholder.
 */
function isReactionPlaceholderText(text) {
  if (!text) return true;
  const trimmed = text.trim();
  if (!trimmed) return true;
  if (/^\d+$/.test(trimmed)) return true;
  // Bare emoji heuristic — see REACTION_PLACEHOLDER_MAX_CODEPOINTS.
  if ([...trimmed].length <= REACTION_PLACEHOLDER_MAX_CODEPOINTS && !/[a-zA-Z]/.test(trimmed)) {
    return true;
  }
  return false;
}

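Spot checks derived from the two helpers above (normaliseEmojiValue is exported; isReactionPlaceholderText stays module-internal, so those lines are stated expectations):

normaliseEmojiValue(128077);    // → '👍'  (codepoint above 127 is decoded)
normaliseEmojiValue('128077');  // → '👍'  (digit strings get the same treatment)
normaliseEmojiValue('1');       // → '1'   (≤ 127 stays a numeric slot marker)
normaliseEmojiValue('   ');     // → null

// isReactionPlaceholderText('3')     → true   (numeric slot marker)
// isReactionPlaceholderText('👍')    → true   (≤ 2 codepoints, no ASCII letters)
// isReactionPlaceholderText('ok 👍') → false  (real prose stays a text message)
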
/**
 * Identify whether ``message`` represents a reaction payload.
 *
@@ -299,7 +357,11 @@ function isReactionMessage(message) {
    return false;
  }
  const hasReplyId = message.reply_id != null || message.replyId != null;
  return hasReplyId || !!portnum;
  if (!hasReplyId) {
    return false;
  }
  const text = toTrimmedString(message.text);
  return isReactionPlaceholderText(text);
}

/**
@@ -339,12 +401,55 @@ function resolveMessageTextSegment(message, isReaction) {
}

/**
 * Render a text segment, replacing ``@[Name]`` mention patterns with the
 * output of ``renderMentionHtml`` when provided. Segments between mentions
 * are passed through ``escapeHtml`` to prevent XSS.
 * Regex with a single capturing group that matches http:// and https:// URLs.
 * Used by {@link renderLiteralWithLinks} to split text into URL and non-URL
 * segments while preserving the matched URL in the resulting array.
 * @type {RegExp}
 */
const URL_SPLIT_PATTERN = /(https?:\/\/[^\s<>"'[\]]{1,2048})/;

/**
 * Strip trailing punctuation characters that are typically sentence
 * delimiters rather than part of a URL (e.g. a period at end of sentence).
 *
 * When ``renderMentionHtml`` is ``null`` the function behaves identically to
 * ``escapeHtml(text)``, preserving backward compatibility.
 * @param {string} url Raw URL candidate.
 * @returns {string} URL with trailing punctuation trimmed.
 */
function trimUrlTrailingPunctuation(url) {
  return url.replace(/[.,;!?)]+$/, '');
}

/**
 * Render a single raw text segment, converting any ``http://`` or
 * ``https://`` URLs into ``<a>`` elements that open in a new tab.
 * Non-URL text is passed through ``escapeHtml`` unchanged.
 *
 * @param {string} text Raw (unescaped) literal text.
 * @param {Function} escapeHtml HTML-escape function.
 * @returns {string} Safe HTML with URLs wrapped in anchor elements.
 */
export function renderLiteralWithLinks(text, escapeHtml) {
  // split() with a capturing group interleaves plain text (even indices)
  // and matched URLs (odd indices): ["before", "https://x", " after", ...]
  const parts = text.split(URL_SPLIT_PATTERN);
  return parts.map((part, i) => {
    if (i % 2 === 0) {
      return part ? escapeHtml(part) : '';
    }
    // URL segment — strip trailing punctuation then linkify.
    const url = trimUrlTrailingPunctuation(part);
    const trailing = part.slice(url.length);
    return `<a href="${escapeHtml(url)}" target="_blank" rel="noopener noreferrer">${escapeHtml(url)}</a>${trailing ? escapeHtml(trailing) : ''}`;
  }).join('');
}

/**
 * Render a text segment, replacing ``@[Name]`` mention patterns with the
 * output of ``renderMentionHtml`` when provided. Literal text segments are
 * passed through {@link renderLiteralWithLinks} so that URLs become clickable.
 *
 * When ``renderMentionHtml`` is ``null`` the function is equivalent to
 * calling {@link renderLiteralWithLinks} on the whole string.
 *
 * @param {string} text Raw message text segment.
 * @param {Function} escapeHtml HTML-escape function.
@@ -353,20 +458,26 @@ function resolveMessageTextSegment(message, isReaction) {
 * @returns {string} HTML string safe for insertion into the DOM.
 */
function renderTextWithMentions(text, escapeHtml, renderMentionHtml) {
  if (typeof renderMentionHtml !== 'function') return escapeHtml(text);
  if (typeof renderMentionHtml !== 'function') return renderLiteralWithLinks(text, escapeHtml);
  // split() with a capturing group interleaves literal segments (even indices)
  // and captured mention names (odd indices): ["before", "Alice", "after", ...]
  const parts = text.split(/@\[([^\]]+)\]/);
  return parts.map((part, i) => {
    if (i % 2 === 1) return renderMentionHtml(part);
    // Mention names are trimmed before being passed to the callback so that
    // captures like "@[ Timo +]" or "@[T-deck NK ]" (with stray whitespace)
    // resolve against the registry; the callback is responsible for falling
    // back to a plain-text rendering when the name does not match.
    if (i % 2 === 1) return renderMentionHtml(part.trim());
    // Empty literal segments (e.g. when a mention is at the start or end) can
    // be skipped to avoid unnecessary escapeHtml calls on empty strings.
    return part ? escapeHtml(part) : '';
    // be skipped to avoid unnecessary renderLiteralWithLinks calls.
    return part ? renderLiteralWithLinks(part, escapeHtml) : '';
  }).join('');
}

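renderLiteralWithLinks is exported, so its behaviour can be checked directly (the escapeHtml stub is an assumption):

const escapeHtml = (s) => s.replace(/[&<>"']/g, (c) =>
  ({ '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;' }[c]));

renderLiteralWithLinks('docs at https://example.org/guide.', escapeHtml);
// → 'docs at <a href="https://example.org/guide" target="_blank" rel="noopener noreferrer">https://example.org/guide</a>.'
// trimUrlTrailingPunctuation keeps the sentence-final period outside the anchor.
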
/**
 * Build the rendered message body containing text and optional emoji.
 * ``http://`` and ``https://`` URLs in the message text are automatically
 * converted to ``<a>`` elements that open in a new tab.
 *
 * @param {{
 *   message: Object,

@@ -24,7 +24,7 @@ import {
  aggregateTelemetrySnapshots,
} from './snapshot-aggregator.js';

const DEFAULT_FETCH_OPTIONS = Object.freeze({ cache: 'no-store' });
const DEFAULT_FETCH_OPTIONS = Object.freeze({ cache: 'default' });
const TELEMETRY_LIMIT = 1000;
const POSITION_LIMIT = SNAPSHOT_WINDOW;
const NEIGHBOR_LIMIT = 1000;

@@ -24,8 +24,8 @@
 * @module node-page-data
 */

/** Shared fetch options that disable the browser HTTP cache for all API calls. */
const DEFAULT_FETCH_OPTIONS = Object.freeze({ cache: 'no-store' });
/** Shared fetch options for API calls, allowing conditional ETag revalidation. */
const DEFAULT_FETCH_OPTIONS = Object.freeze({ cache: 'default' });

/** Maximum number of messages to request from the messages API. */
const MESSAGE_LIMIT = 50;
@@ -33,6 +33,46 @@ const MESSAGE_LIMIT = 50;
/** Maximum number of traceroute records to request from the traces API. */
const TRACE_LIMIT = 200;

/** Maximum number of nodes to request from the nodes API for the registry. */
const NODES_LIMIT = 1000;

/**
 * Fetch the global node registry and return it as a Map keyed by node id.
 *
 * The node detail page uses this registry to resolve MeshCore mentions
 * (``@[Name]``) and reply targets that appear in messages but reference
 * nodes other than the page's own node. The result is consumed by the
 * shared chat-entry renderer, mirroring the dashboard's behaviour.
 *
 * Returns an empty Map on any failure so the page still renders messages
 * without crashing — mentions and reply badges simply degrade to plain
 * fallback text in that case.
 *
 * @param {{ fetchImpl?: Function }} [options] Fetch options.
 * @returns {Promise<Map<string, Object>>} Lookup map keyed by node id.
 */
export async function fetchNodesById({ fetchImpl } = {}) {
  const fetchFn = typeof fetchImpl === 'function' ? fetchImpl : globalThis.fetch;
  if (typeof fetchFn !== 'function') return new Map();
  try {
    const response = await fetchFn(`/api/nodes?limit=${NODES_LIMIT}`, DEFAULT_FETCH_OPTIONS);
    if (!response.ok) return new Map();
    const payload = await response.json();
    if (!Array.isArray(payload)) return new Map();
    const map = new Map();
    for (const node of payload) {
      if (!node || typeof node !== 'object') continue;
      const id = node.node_id ?? node.nodeId ?? null;
      if (id) map.set(id, node);
    }
    return map;
  } catch (error) {
    // Network/JSON failures degrade gracefully — the page still renders.
    console.warn('Failed to load nodes registry for chat rendering', error);
    return new Map();
  }
}

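The fetchImpl seam makes the registry loader testable without a network; a minimal stub with hypothetical rows:

const fakeFetch = async () => ({
  ok: true,
  json: async () => [
    { node_id: '!a1b2c3d4', short_name: 'AB', long_name: 'Alpha Base' },
    { nodeId: '!deadbeef', short_name: 'DB' }, // camelCase ids are accepted too
    null,                                      // malformed rows are skipped
  ],
});

const nodesById = await fetchNodesById({ fetchImpl: fakeFetch });
// nodesById.size === 2; nodesById.get('!a1b2c3d4').long_name === 'Alpha Base'
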
/**
 * Fetch recent messages for a node.
 *
Some files were not shown because too many files have changed in this diff