Compare commits

..

214 Commits

Author SHA1 Message Date
l5y e1d43cec57 Added comprehensive helper unit tests (#457)
* Added comprehensive helper unit tests

* run black
2025-11-16 16:47:57 +01:00
l5y cd7bced827 Added reaction-aware handling (#455) 2025-11-16 15:31:17 +01:00
l5y b298f2f22c env: add map zoom (#454)
* chore: bump version to 0.5.5 everywhere

* add MAP_ZOOM variable

* run black
2025-11-16 12:57:47 +01:00
l5y 9304a99745 charts: render aggregated telemetry charts for all nodes (#453) 2025-11-15 17:09:55 +01:00
l5y 4a03e17886 nodes: render charts detail pages as overlay (#452) 2025-11-15 12:13:06 +01:00
l5y e502ddd436 fix telemetry parsing for charts (#451) 2025-11-14 21:18:37 +01:00
l5y 12f1801ed2 nodes: improve charts on detail pages (#450)
* nodes: add charts to detail pages

* nodes: improve charts on detail pages

* fix ignored packet debug logging

* run rufo

* address review comments
2025-11-14 20:17:58 +01:00
l5y a6a63bf12e nodes: add charts to detail pages (#449) 2025-11-14 16:24:09 +01:00
l5y 631455237f Aggregate frontend snapshots across views (#447) 2025-11-13 22:02:42 +01:00
Alexkurd 382e2609c9 Remove added 1 if reply with emoji (#443)
In a reply, message.text contains the emoji, and message.emoji is 1.
2025-11-13 21:15:35 +01:00
l5y 05efbc5f20 Refine node detail view layout (#442)
* Refine node detail view layout

* Refine node detail controls and formatting

* Improve node detail neighbor roles and message metadata

* Fix node detail neighbor metadata hydration
2025-11-13 19:59:07 +01:00
l5y 9a45430321 Enable map centering from node table coordinates (#439)
* Enable map centering from node table coordinates

* Replace node coordinate buttons with links
2025-11-13 17:23:35 +01:00
l5y cb843d5774 Add node detail route and page (#441) 2025-11-13 17:19:20 +01:00
l5y c823347175 Ensure nodeinfo patch runs before importing interfaces (#440) 2025-11-13 17:16:59 +01:00
l5y d87c0cc226 Filter zero-valued fields from API responses (#438)
* Filter zero-value fields from API responses

* Restore zero-valued API fields (#438)

* Clarify compact_api_row documentation
2025-11-13 17:10:46 +01:00
l5y 9c957a4a14 Add debug payload tracing and ignored packet logging (#437) 2025-11-13 17:06:35 +01:00
l5y 16442bab08 Tighten map auto-fit behaviour (#435) 2025-11-12 20:49:03 +01:00
l5y e479983d38 Fetch encrypted chat log entries for log tab (#434)
* Fetch encrypted chat log entries for log tab

* Guard log-only chat log merge from plaintext
2025-11-12 14:13:46 +01:00
l5y 70fca17230 Add encrypted filter to messages API (#432) 2025-11-12 12:46:34 +01:00
l5y 2107d6790d Guard NodeInfo handler against missing IDs (#426) (#431) 2025-11-12 12:39:36 +01:00
l5y 8823b7cb48 Add standalone full-screen map, chat, and nodes views (#429)
* Add dedicated full-screen dashboard views

* Simplify full-screen routes layout

* Restore refresh controls on full-screen views

* Polish standalone view layout

* Streamline standalone layouts
2025-11-12 11:38:26 +01:00
l5y e40c0d9078 Ensure chat history fetches full message limit (#428) 2025-11-11 22:33:30 +01:00
l5y 8b090cb238 Handle nodeinfo packets without identifiers (#426) (#427) 2025-11-11 20:45:32 +01:00
l5y 2bb8e3fd66 Chore: update license headers (#424) 2025-11-08 10:41:57 +01:00
l5y deb7263c3e Chore: bump version to 0.5.5 (#423) 2025-11-08 09:15:52 +00:00
l5y 3daadc4f68 handle naming when primary channel has a name (#422) 2025-11-08 09:44:41 +01:00
l5y 6b72b1b3da handle edge case when primary channel has a name (#421) 2025-11-07 21:39:26 +01:00
l5y 52486d82ad Add preset mode to logs (#420) 2025-11-07 17:56:27 +01:00
l5y 487d618e00 Parallelize federation tasks with worker pool (#419)
* Parallelize federation work with worker pool

* Handle worker pool shutdown fallback during federation announcements
2025-11-07 17:24:37 +01:00
l5y 9239805129 allow filtering chat and logs by node name (#417) 2025-11-07 15:55:11 +01:00
l5y 554b2abd82 gem: add erb as dependency removed from std (#416)
* gem: add erb as dependency removed from std

* Relax erb dependency for Ruby 3.3 compatibility
2025-11-07 15:11:05 +01:00
l5y 8bb98f65d6 implement support for replies and reactions app (#411)
* implement support for replies and reactions app

* Allow numeric reaction port packets

* allow reaction packets through main channel filter
2025-11-06 20:58:35 +01:00
l5y 71c0f8b21e ingestor: ignore direct messages on default channel (#414)
* ingestor: ignore direct messages on default channel

* tests: run black formatter
2025-11-06 20:14:32 +01:00
l5y aa2bc68544 agents: add instructions (#410) 2025-11-03 22:23:20 +00:00
l5y a8394effdc display encrypted messages in frontend log window (#409)
* display encrypted messages in frontend log window

* render recipient by known node name short id
2025-11-03 22:51:20 +01:00
l5y e27d5ab53c Add chat log entries for telemetry, position, and neighbor events (#408)
* Add telemetry and neighbor chat log events

* Refine chat log highlights for telemetry and position updates

* Add emoji prefixes to chat log events

* Fix telemetry highlights and emoji styling

* Remove italic chat copy and drop zero-valued highlights

* address style and formatting issues
2025-11-03 12:33:02 +01:00
l5y 6af272c01f Handle missing instance domain outside production (#405) 2025-10-31 12:36:53 +01:00
l5y 03e2fe6a72 Add tabbed chat panel with channel grouping (#404)
* feat: add tabbed chat panel with channel grouping

* Handle ISO-only chat timestamps in dashboard renderer

* Remove redundant chat channel tag
2025-10-31 12:24:17 +01:00
l5y 87b4cd79e7 Normalize numeric client roles using Meshtastic CLI enums (#402)
* Normalize firmware client roles using CLI enums

* Prioritize CLI role lookup before protobuf fallbacks
2025-10-31 11:43:48 +01:00
l5y d94d75e605 Ensure Docker images publish versioned tags (#403) 2025-10-31 11:43:30 +01:00
l5y c965d05229 Document environment configuration variables (#400)
* Document environment configuration variables

* Escape sed replacements when updating .env values
2025-10-31 11:08:06 +01:00
l5y ba80fac36c Document federation refresh cadence (#401) 2025-10-31 11:05:08 +01:00
l5y 3c2c7611ee docs: document prometheus metrics (#399) 2025-10-31 11:04:20 +01:00
Nic Jansma 49e0f39ca9 Config: Read PROM_REPORT_IDS from environment (#398) 2025-10-29 09:22:33 +01:00
KenADev 625df7982d feat: Mesh-Ingestor: Ability to provide already-existing interface instance (#395)
* feat: Mesh-Ingestor: Ability to provide already-existing interface instance

* Prevent Signal-Registration if not main thread (causes exception)

* fix redundant ternary operator

---------

Co-authored-by: Ken Ahr <ken.a.iphone@googlemail.com>
2025-10-26 20:47:23 +01:00
KenADev 8eeb13166b fix: Ingestor: Fix error for non-existing datetime.UTC reference (#396)
Co-authored-by: Ken Ahr <ken.a.iphone@googlemail.com>
2025-10-26 20:46:31 +01:00
l5y 80645990cb Chore: bump version to 0.5.4 (#388)
Co-authored-by: l5yth <d220195275+l5yth@users.noreply.github.com>
2025-10-19 10:36:09 +00:00
l5y 96a3bb86e9 Add telemetry formatting module and overlay metrics (#387) 2025-10-19 12:13:32 +02:00
l5y 6775de3cca Prune blank values from API responses (#386) 2025-10-18 20:16:14 +02:00
l5y 8143fbd8f7 Add full support to telemetry schema and API (#385)
* feat: auto-upgrade telemetry schema

* Ensure numeric metrics fallback to valid values

* Format data processing numeric metric lookup
2025-10-18 15:19:33 +02:00
l5y cf3949ef95 Respect PORT environment override (#384) 2025-10-18 13:01:48 +02:00
l5y 32d9da2865 Add instance selector dropdown for federation deployments (#382)
* Add instance selector for federation regions

* Avoid HTML insertion when seeding instance selector
2025-10-18 10:53:26 +02:00
l5y 61e8c92f62 Harden federation announcements (#381) 2025-10-18 10:38:28 +02:00
l5y d954df6294 Ensure private mode disables federation (#380) 2025-10-18 09:48:40 +02:00
l5y 30d535bd43 Ensure private mode disables chat messaging (#378) 2025-10-17 22:47:54 +02:00
l5y d06aa42ab2 Respect FEDERATION flag for federation endpoints (#379) 2025-10-17 22:47:41 +02:00
l5y 108fc93ca1 Expose PRIVATE environment configuration (#377) 2025-10-17 22:43:42 +02:00
l5y 427479c1e6 Fix frontend coverage export for Codecov (#376)
* fix: export frontend coverage for codecov

* Merge V8 file coverages across workers
2025-10-17 22:43:23 +02:00
l5y ee05f312e8 Restrict instance API to recent updates (#374) 2025-10-17 22:17:49 +02:00
l5y c4193e38dc Document and expose federation configuration (#375) 2025-10-17 22:17:32 +02:00
l5y cb9b081606 Chore: bump version to 0.5.3 (#372) 2025-10-17 19:47:18 +00:00
l5y cc8fec6d05 Align theme and info controls (#371)
* Align theme and info controls

* design tweaks
2025-10-17 19:27:14 +00:00
l5y 01665b6e3a Fixes POST request 403 errors on instances behind Cloudflare proxy (#368)
* Add full headers to ingestor POST requests to avoid CF bans

* run black

* Guard Authorization header when token absent

---------

Co-authored-by: varna9000 <milen@aeroisk.com>
2025-10-16 22:29:04 +02:00
l5y 1898a99789 Delay initial federation announcements (#366) 2025-10-16 21:50:43 +02:00
l5y 3eefda9205 Ensure well-known document stays in sync (#365) 2025-10-16 21:43:11 +02:00
l5y a6ba9a8227 Guard federation DNS resolution against restricted networks (#362)
* Guard federation DNS resolution against restricted networks

* Pin federation HTTP clients to vetted IPs
2025-10-16 21:15:34 +02:00
l5y 7055444c4b Add federation ingestion limits and tests (#364) 2025-10-16 21:15:18 +02:00
l5y 4bfc0e25cb Prefer reported primary channel names (#363) 2025-10-16 20:35:24 +02:00
l5y 81335cbf7b Decouple messages API from node joins (#360) 2025-10-16 13:19:29 +02:00
l5y 76b57c08c6 Fix ingestor reconnection detection (#361) 2025-10-16 13:06:32 +02:00
l5y 926b5591b0 Harden instance domain validation (#359) 2025-10-16 10:51:34 +02:00
l5y 957e597004 Ensure INSTANCE_DOMAIN propagates to containers (#358) 2025-10-15 23:22:46 +02:00
l5y 68cfbf139f chore: bump version to 0.5.2 (#356)
Co-authored-by: l5yth <d220195275+l5yth@users.noreply.github.com>
2025-10-15 23:16:30 +02:00
l5y b2f4fcaaa5 Gracefully retry federation announcements over HTTP (#355) 2025-10-15 23:11:59 +02:00
l5y dc2fa9d247 Recursively ingest federated instances (#353)
* Recursively ingest federated instances

* Keep absent is_private nil during signature verification
2025-10-15 21:35:37 +02:00
l5y a32125996c Remove federation timeout environment overrides (#352) 2025-10-15 20:04:19 +02:00
l5y 506a1ab5f6 Close unrelated short info overlays when opening short info (#351)
* Close unrelated overlays when opening short info

* Ensure map overlays respect nested short overlay closing
2025-10-15 16:35:38 +00:00
l5y db7b67d859 Improve federation instance error diagnostics (#350) 2025-10-15 18:35:22 +02:00
l5y 49f08a7f75 Harden federation domain validation and tests (#347)
* Harden federation domain validation and tests

* Preserve domain casing for signature verification

* Forward sanitize helper keyword argument

* Handle mixed-case domains during signature verification
2025-10-15 18:14:31 +02:00
l5y b2d35d3edf Handle malformed instance records (#348) 2025-10-15 17:08:24 +02:00
l5y a9d618cdbc Fix ingestor device mounting for non-serial connections (#346)
* Adjust ingestor device handling

* Restore serial device permissions for ingestor
2025-10-15 16:52:37 +02:00
l5y 6a65abd2e3 Persist instance config assets across Docker restarts (#345) 2025-10-15 16:14:59 +02:00
l5y a3aef8cadd Add modem preset display to node overlay (#340)
* Add modem metadata line to node overlays

* Ensure modem metadata loads for all overlays
2025-10-14 20:59:47 +02:00
l5y cff89a8c88 Display message frequency and channel in chat log (#339)
* Display message frequency and channel in chat log

* Ensure chat prefixes display consistent metadata brackets

* Ensure chat prefixes show non-breaking frequency placeholder

* Adjust chat channel tag placement
2025-10-14 20:56:42 +02:00
l5y 26c1366412 Bump fallback version to v0.5.1 (#338) 2025-10-14 16:51:04 +00:00
l5y 28f5b49f4d docs: update changelog for 0.5.0 (#337) 2025-10-14 16:48:36 +00:00
l5y a46da284e5 Fix ingestor package layout in Docker image (#336) 2025-10-14 18:47:54 +02:00
l5y 22a31b6c80 Ensure node overlays appear above fullscreen map (#333)
* Increase overlay z-index to surface node info

* Ensure short info overlays attach to fullscreen host

* Ensure info overlay participates in fullscreen mode
2025-10-14 15:52:26 +02:00
l5y b7ef0bbfcd Adjust node table columns responsively (#332) 2025-10-14 14:59:47 +02:00
l5y 03b5a10fe4 Add LoRa metadata fields to nodes and messages (#331)
* Add LoRa metadata fields to nodes and messages

* Filter numeric SQLite keys from message rows
2025-10-14 14:51:28 +02:00
l5y e97498d09f Add channel metadata capture for message tagging (#329) 2025-10-13 23:10:01 +02:00
l5y 7db76ec2fc Capture radio metadata for ingestor payloads (#327)
* Capture radio metadata and tag ingestor payloads

* Log captured LoRa metadata when initializing radio config
2025-10-13 22:35:06 +02:00
l5y 63beb2ea6b Avoid mutating frozen node query results (#324) 2025-10-13 17:22:34 +02:00
l5y ffad84f18a Ensure frontend reports git-aware version strings (#321)
* Ensure frontend reports git-aware version strings

* Keep footer fixed across viewport widths
2025-10-13 16:26:57 +02:00
l5y 2642ff7a95 Fix web Docker image to include application code (#322) 2025-10-13 16:25:44 +02:00
l5y 40b6eda096 Refine stacked short info overlays on the map (#319)
* Refine map overlays to use stacked short info panels

* Allow stacked overlays to pass neighbor clicks
2025-10-13 14:53:43 +02:00
l5y dee6ad7e4a Refine environment configuration defaults (#318) 2025-10-13 14:06:14 +02:00
l5y ea9c633eff Fix legacy configuration migration to XDG directories (#317)
* Handle legacy config migration for XDG assets

* Ensure legacy key migration precedes identity load

* Apply rufo formatting to identity module
2025-10-13 14:02:17 +02:00
l5y 9c73fceea7 Adopt XDG base directories for app data and config (#316)
* Support XDG base directories

* Keep Docker MESH_DB on persistent volume
2025-10-13 12:29:56 +02:00
l5y 5133e9d498 refactor: streamline ingestor environment variables (#314)
* refactor: streamline ingestor environment variables

* fix: set connection env var in docker test
2025-10-13 11:02:33 +02:00
l5y b63e5328b1 Reduce auto-fit padding and increase default zoom (#315) 2025-10-13 10:57:54 +02:00
l5y d66b09ddee Ensure APIs filter stale data and refresh node details from latest sources (#312)
* Ensure fresh API data and richer node refresh details

* Refresh map markers with latest node data
2025-10-13 10:54:47 +02:00
l5y 009965f2fb Handle offline tile layer creation failures (#307) 2025-10-13 09:27:03 +02:00
l5y 51e6479ab6 Handle offline tile rendering failures (#306) 2025-10-13 09:26:49 +02:00
l5y 874c8fd73c Fix map auto-fit handling and add controller (#311) 2025-10-13 09:26:35 +02:00
l5y e4c48682b0 Fix map initialization bounds and add coverage (#305)
* Fix map initialization bounds and add coverage

* Handle antimeridian bounds when clustering map points

* Fix dateline-aware map bounds
2025-10-12 19:22:17 +02:00
l5y 00444f7611 test: expand config and sanitizer coverage (#303) 2025-10-12 14:41:20 +02:00
l5y 511e6d377c Add comprehensive theme and background front-end tests (#302) 2025-10-12 14:35:53 +02:00
l5y e6974a683a Document sanitization and helper modules (#301) 2025-10-12 10:09:42 +02:00
l5y c0d68b23d4 Add protobuf stubs for mesh tests (#300) 2025-10-12 10:09:13 +02:00
l5y ee904633a8 Handle CRL lookup failures during federation TLS (#299) 2025-10-12 09:56:53 +02:00
l5y 4329605e6f Ensure JavaScript workflow runs tests with output (#298) 2025-10-12 09:46:42 +02:00
l5y 772c5888c3 Fix ingestor debug timestamps for structured logging (#296) 2025-10-12 09:40:57 +02:00
l5y f04e917cd9 Add Apache license headers to missing sources (#297) 2025-10-12 09:38:04 +02:00
l5y 9e939194ba Update workflows for ingestor, sinatra, and frontend (#295) 2025-10-12 09:36:02 +02:00
l5y e328a20929 Fix IPv6 instance domain canonicalization (#294) 2025-10-12 09:33:03 +02:00
l5y aba94b197d Handle federation HTTPS CRL verification failures (#293) 2025-10-12 09:22:54 +02:00
l5y 80f2bbdb25 Adjust federation announcement cadence (#292) 2025-10-12 09:08:50 +02:00
l5y 522213c040 Restore modular app functionality (#291)
* Restore modular app functionality

* Fix federation thread settings and add coverage

* Use Sinatra set for federation threads

* Restore 41447 as default web port
2025-10-12 08:54:11 +02:00
l5y 58998ba274 Refactor config and metadata helpers into PotatoMesh modules (#290) 2025-10-11 23:19:25 +02:00
l5y 4ad718e164 Update default site configuration environment values (#288) 2025-10-11 21:20:36 +02:00
l5y 707786e222 Add test for draining queue with concurrent enqueue (#287) 2025-10-11 20:38:55 +02:00
l5y 868bf08fd1 Ensure config directories exist in web image (#286) 2025-10-11 20:38:47 +02:00
l5y 1316d4f2d1 Clarify network target parsing (#285) 2025-10-11 20:38:40 +02:00
l5y 9be390ee09 Ensure queue deactivates when empty (#284) 2025-10-11 20:38:27 +02:00
l5y d9ed006b4c Clarify BLE connection phrasing (#283) 2025-10-11 20:31:12 +02:00
l5y d09fc842b8 Configure web container for production (#282) 2025-10-11 19:39:22 +02:00
l5y 73bdd809bd Normalize INSTANCE_DOMAIN configuration to require hostnames (#280)
* Ensure INSTANCE_DOMAIN configuration uses hostname

* Define ip_from_domain before use
2025-10-11 19:39:05 +02:00
l5y f1dba89d4b Run initial federation announcement asynchronously (#281) 2025-10-11 19:38:12 +02:00
l5y 131a63845c Add production build Dockerfile and compose contexts (#279) 2025-10-11 18:23:51 +02:00
l5y 2240be1f2d Improve instance domain detection logic (#278) 2025-10-11 18:22:50 +02:00
l5y a048a83c6c Implement federation announcements and instances API (#277) 2025-10-11 18:01:08 +02:00
l5y 4ef1e29034 Fix federation signature handling and IP guard (#276)
* Fix federation signature handling and IP guard

* Avoid defaulting isPrivate before signature verification

* Normalize instance domain host handling for restricted IP check

* ignore web app credentials

---------

Co-authored-by: l5yth <d220195275+l5yth@users.noreply.github.com>
2025-10-11 17:39:52 +02:00
l5y b21df3de5c Add persistent federation metadata endpoint (#274)
* Add federated metadata endpoint

* Fix configure-time database access

* Fix well-known refresh bypassed by static files

* run rufo

---------

Co-authored-by: l5yth <d220195275+l5yth@users.noreply.github.com>
2025-10-11 13:04:33 +00:00
l5y 678af5e55b Add configurable instance domain with reverse DNS fallback (#272)
* Add instance domain resolution with reverse DNS fallback

* run rufo

---------

Co-authored-by: l5yth <d220195275+l5yth@users.noreply.github.com>
2025-10-11 12:21:55 +00:00
l5y c4fd59626f Add production configuration guidance (#273) 2025-10-11 14:01:53 +02:00
l5y 0a26e4252a Add targeted API endpoints and expose version metadata (#271)
* Add per-node API endpoints and version route

* Adjust version metadata and node lookup route
2025-10-11 12:36:28 +02:00
Nic Jansma d19e032b40 Prometheus metrics updates on startup and for position/telemetry (#270)
* Prometheus metrics updates on startup and for position/telemetry

* Fix per rufo

* CoPilot feedback

* CR feedback
2025-10-11 09:24:12 +02:00
l5y ab9ae796f3 Add hourly reconnect handling for inactive mesh interface (#267)
* Add hourly reconnect handling for inactive mesh interface

* Reset inactivity timestamp after reconnect
2025-10-07 16:41:16 +02:00
Nic Jansma 0f2f2f447c Dockerfile fixes (#268) 2025-10-07 16:34:45 +02:00
Nic Jansma 3a031694db Added prometheus /metrics endpoint (#262)
* Added prometheus /metrics endpoint

* Fixes per CoPilot suggestions

* More Copilot fixes

* Rufo formatted
2025-10-07 16:32:45 +02:00
l5y 3cfbffc155 Add fullscreen toggle to map view (#263)
* Add fullscreen mode controls for map

* Improve fullscreen map scaling and control icon

* Improve fullscreen map sizing and icon
2025-10-07 15:53:18 +02:00
l5y 4f5aec45b3 Relocate JavaScript coverage export under web (#266) 2025-10-07 15:49:40 +02:00
Nic Jansma 2acfca20d9 v4.0.0 version string in web UI (#265) 2025-10-07 13:34:23 +00:00
l5y f2ed5f5c03 Add energy saving mode to ingestor (#256) 2025-10-07 15:28:41 +02:00
l5y db04b85134 chore: restore apache headers (#260) 2025-10-07 10:28:24 +02:00
l5y ba66ac5cea docs: add matrix to readme (#259) 2025-10-07 07:29:12 +00:00
l5y a592b655c4 Force dark theme default based on sanitized cookie (#252)
* Ensure dark theme defaults cleanly on initial load

* Ensure form controls respond to theme
2025-10-07 09:00:55 +02:00
l5y a5a2ae5edc Document mesh ingestor modules with PDoc-style docstrings (#255) 2025-10-07 08:59:38 +02:00
l5y 363b4c5525 Handle missing node IDs in Meshtastic nodeinfo packets (#251)
* Handle Meshtastic nodeinfo packets without IDs

* Guard BLE reader against missing payloads
2025-10-07 08:56:36 +02:00
l5y 16e1304ded Add comprehensive RDoc comments to Ruby helpers (#254) 2025-10-07 08:53:39 +02:00
l5y b89347938a docs: expand jsdoc coverage (#253) 2025-10-07 08:53:26 +02:00
l5y 6969ae6c4a Fix mesh ingestor telemetry and neighbor handling (#249)
* Refine mesh ingestor modularization

* Handle script execution in mesh wrapper

* Ensure mesh shim finds package when run as script

* Ensure queue state resets after send errors
2025-10-07 08:40:28 +02:00
l5y 64f8862676 Refactor front-end assets into external modules (#245)
* Refactor front-end assets into external modules

* Restore chat flag inline script

* Declare legend toggle control variable

* Remove dynamic background generation

* Restore background script with theme-based color

* run rufo
2025-10-07 08:33:06 +02:00
l5y 6660986211 Add tests for helper utilities and asset routes (#243)
* test: expand coverage for helpers and assets

* Adjust failing helper and asset specs

* Adjust specs for fallback node lookup and missing logo
2025-10-07 07:07:23 +02:00
l5y 5dfcc1a5fe docs: add ingestor inline docstrings (#244) 2025-10-07 00:06:42 +02:00
l5y 2efd28766b Add comprehensive coverage tests for mesh ingestor (#241) 2025-10-07 00:04:33 +02:00
l5y c9bba25e5a Add comprehensive inline documentation (#240) 2025-10-07 00:01:31 +02:00
l5y 41976a3b43 Update changelog (#238)
* Update changelog

* Update readme
2025-10-06 08:36:13 +02:00
l5y 5a47a8f8e4 Reformat neighbor overlay details (#237) 2025-10-06 08:08:24 +02:00
l5y c13f3c913f Add neighbor lines toggle to map legend (#236) 2025-10-06 08:05:44 +02:00
l5y 2e9b54b6cf Hide Air Util Tx column on mobile (#235) 2025-10-06 08:04:07 +02:00
l5y 7e844be627 Add overlay for clickable neighbor links on map (#234)
* Add overlay for clickable neighbor links on map

* Fix neighbor overlays and include SNR details

* Prevent map neighbor overlay clicks from closing immediately
2025-10-06 07:41:11 +02:00
l5y b37e55c29a Hide humidity and pressure on mobile (#232) 2025-10-06 06:34:48 +02:00
l5y 332ba044f2 Remove last position timestamp from map info overlay (#233) 2025-10-06 06:34:37 +02:00
l5y 09a2d849ec Improve live node positions and expose precision metadata (#231)
* Fetch latest node positions and precision metadata

* Stop showing position source and precision in UI

* Guard node positions against stale merges
2025-10-05 23:08:57 +02:00
l5y a3fb9b0d5c Show neighbor short names in info overlays (#228)
* Show neighbor short names in info overlays

* Adjust neighbor info placement
2025-10-05 22:04:29 +02:00
l5y 192978acf9 Add telemetry environmental data to node UI (#227) 2025-10-05 21:49:28 +02:00
l5y 581aaea93b Reduce neighbor line opacity (#226) 2025-10-05 21:45:05 +02:00
l5y 299752a4f1 Visualize neighbor connections on map canvas (#224)
* Visualize neighbor connections on map

* Gracefully handle neighbor fetch failures
2025-10-05 21:27:41 +02:00
l5y 142c0aa539 Add clear control to filter input (#225) 2025-10-05 21:26:37 +02:00
l5y 78168ce3db Handle Bluetooth shutdown hangs gracefully (#221)
* Handle Bluetooth shutdown hangs gracefully

* Make interface close guard compatible with patched Event
2025-10-05 21:07:19 +02:00
l5y 332abbc183 Adjust mesh priorities and receive topics (#220) 2025-10-05 20:50:34 +02:00
l5y c136c5cf26 Add BLE and fallback mesh interface handling (#219)
* Add BLE and fallback mesh interface support

* Handle SIGINT by propagating KeyboardInterrupt

* Guard optional BLE dependency

* run black
2025-10-05 20:48:23 +02:00
l5y 2a65e89eee Add neighbor info ingestion and API endpoints (#218)
* Add neighbor info ingestion and API support

* Fix neighbor spec and add fixture

* run black

* run rufo
2025-10-05 12:35:13 +02:00
l5y d6f1e7bc80 Add debug logs for unknown node creation and last-heard updates (#214)
* Add debug logging for unknown nodes and last-heard updates

* Fix debug log syntax
2025-10-04 21:25:23 +02:00
l5y 5ac5f3ec3f Update node last seen when events are received (#212)
* Update node last seen timestamps from event receive times

* run rufo

* fix tests
2025-10-04 21:11:16 +02:00
l5y bb4cbfa62c Improve debug logging for node and telemetry data (#213)
* Improve debug logging for node and telemetry data

* run black
2025-10-04 21:03:03 +02:00
l5y f0d600e5d7 Improve stored message debug logging (#211) 2025-10-04 20:53:54 +02:00
l5y e0f0a6390d Stop repeating ingestor node info snapshot and timestamp debug logs (#210)
* Adjust ingestor node snapshot cadence and debug logging

* Ensure node snapshot waits for data

* run black
2025-10-04 20:41:53 +02:00
l5y d4a27dccf7 Add telemetry API and ingestion support (#205)
* Add telemetry ingestion and API support

* Flatten telemetry storage and API responses

* Fix telemetry insert placeholder count

* Adjust telemetry node updates

* run black

* run rufo
2025-10-04 18:28:18 +02:00
l5y 74c4596dc5 Add private mode to hide chat and message APIs (#204)
* Add private mode to hide chat and message APIs

* run rufo
2025-10-04 09:36:43 +02:00
l5y 1f2328613c Handle offline-ready map fallback (#202) 2025-10-03 11:24:18 +02:00
l5y eeca67f6ea Add linux/armv7 images and configuration support (#201) 2025-10-03 11:11:14 +02:00
l5y 4ae8a1cfca Update Docker documentation (#200)
* Update Docker documentation

* docs: reference compose file
2025-10-03 11:03:25 +02:00
l5y ff06129a6f Update node last seen when ingesting encrypted messages (#198)
* Update node last seen for encrypted messages

* run rufo
2025-10-03 10:59:12 +02:00
l5y 6d7aa4dd56 fix api in readme (#197) 2025-10-01 14:16:54 +00:00
l5y 4548f750d3 Add connection recovery for TCP interface (#186)
* Add connection recovery for TCP interface

* run black
2025-09-27 18:52:56 +02:00
l5y 31f02010d3 bump version to 0.3 (#191)
* bump version to 0.3

* update readme
2025-09-27 18:52:41 +02:00
l5y ec1ea5cbba Upgrade styles and fix interface issues (#190) 2025-09-27 18:46:56 +02:00
l5y 8500c59755 some updates in the front (#188)
* ok, I added the correct image loader

* and some css

* make zebra in a table and add a background and some little changes in app

* for example you can check how it works on https://vrs.kdd2105.ru

* fix ai comments

---------

Co-authored-by: dkorotkih2014-hub <d.korotkih2014@gmail.com>
2025-09-27 18:18:02 +02:00
l5y 556dd6b51c Update last heard on node entry change (#185) 2025-09-26 20:43:53 +02:00
l5y 3863e2d63d Populate chat metadata for unknown nodes (#182)
* Populate chat metadata for unknown nodes

* run rufo

* fix comments

* run rufo
2025-09-26 16:45:42 +02:00
l5y 9e62621819 Update role colors to new palette (#183) 2025-09-26 16:08:14 +02:00
l5y c8c7c8cc05 Add placeholder nodes for unknown senders (#181)
* Add placeholder nodes for unknown senders

* run rufo
2025-09-26 14:24:30 +02:00
l5y 5116313ab0 fix: update role colors and ordering for firmware 2.7.10 (#180) 2025-09-26 13:30:34 +02:00
l5y 66389dd27c Handle plain IP addresses in mesh TCP detection (#154)
* Fix TCP target detection for plain IPs

* run black
2025-09-26 13:25:42 +02:00
l5y ee6501243f Handle encrypted messages (#173)
* Handle encrypted messages

* Remove redundant message node columns

* Preserve original numeric message senders

* Normalize message sender IDs in API responses

* Exclude encrypted messages from API responses

* run rufo
2025-09-24 07:34:28 +02:00
l5y 8dd912175d Add fallback display names for unnamed nodes (#171) 2025-09-23 19:06:28 +02:00
l5y 02f9fb45e2 Ensure routers render above other node types (#169) 2025-09-23 18:59:34 +02:00
l5y 4254dbda91 Reorder lint steps after tests in CI (#168) 2025-09-23 18:31:38 +02:00
l5y a46bed1c33 Handle proto values in nodeinfo payloads (#167) 2025-09-23 18:31:22 +02:00
l5y d711300442 Remove raw payload storage from database schema (#166) 2025-09-23 17:29:08 +02:00
l5y 98a8203591 Add POSITION_APP ingestion and API support (#160)
* Add POSITION_APP ingestion and API support

* Adjust mesh receive subscriptions and priorities

* run linters
2025-09-23 16:42:51 +02:00
l5y 084c5ae158 Add support for NODEINFO_APP packets (#159)
* Add support for NODEINFO_APP packets

* run black
2025-09-23 14:40:35 +02:00
l5y 17018aeb19 Derive SEO metadata from existing config (#153) 2025-09-23 08:20:42 +02:00
l5y 74b3da6f00 tests: create helper script to dump all mesh data from serial (#152)
* tests: create helper script to dump all mesh data from serial

* tests: use public callbacks for dump script
2025-09-23 08:09:31 +02:00
l5y ab1217a8bf Limit chat log to recent entries (#151) 2025-09-22 18:54:09 +02:00
l5y 62de1480f7 Require time library before formatting ISO timestamps (#149)
* Require time library for ISO timestamp formatting

* Default to host networking in Compose
2025-09-22 09:21:04 +02:00
l5y ab2e9b06e1 Define potatomesh network (#148) 2025-09-22 08:58:39 +02:00
l5y e91ad24cf9 Fix sqlite3 native extension on Alpine (#146) 2025-09-22 08:12:48 +02:00
l5y 2e543b7cd4 Allow binding to all interfaces in app.sh (#147) 2025-09-22 08:11:36 +02:00
l5y db4353ccdc Force building sqlite3 gem on Alpine (#145) 2025-09-22 08:10:00 +02:00
l5y 5a610cf08a Support mock serial interface in CI (#143) 2025-09-21 10:00:30 +02:00
165 changed files with 42448 additions and 3056 deletions
+14
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
coverage:
status:
project:
+35 -21
View File
@@ -9,12 +9,14 @@
# Generate a secure token: openssl rand -hex 32
API_TOKEN=your-secure-api-token-here
# Meshtastic device path (required for ingestor)
# Common paths:
# Meshtastic connection target (required for ingestor)
# Common serial paths:
# - Linux: /dev/ttyACM0, /dev/ttyUSB0
# - macOS: /dev/cu.usbserial-*
# - Windows (WSL): /dev/ttyS*
MESH_SERIAL=/dev/ttyACM0
# You may also provide an IP:PORT pair (e.g. 192.168.1.20:4403) or a
# Bluetooth address (e.g. ED:4D:9E:95:CF:60).
CONNECTION=/dev/ttyACM0
# =============================================================================
# SITE CUSTOMIZATION
@@ -24,29 +26,34 @@ MESH_SERIAL=/dev/ttyACM0
SITE_NAME=My Meshtastic Network
# Default Meshtastic channel
DEFAULT_CHANNEL=#MediumFast
CHANNEL=#LongFast
# Default frequency for your region
# Common frequencies: 868MHz (Europe), 915MHz (US), 433MHz (Worldwide)
DEFAULT_FREQUENCY=868MHz
FREQUENCY=915MHz
# Map center coordinates (latitude, longitude)
# Berlin, Germany: 52.502889, 13.404194
# Denver, Colorado: 39.7392, -104.9903
# London, UK: 51.5074, -0.1278
MAP_CENTER_LAT=52.502889
MAP_CENTER_LON=13.404194
MAP_CENTER="38.761944,-27.090833"
# Maximum distance to show nodes (kilometers)
MAX_NODE_DISTANCE_KM=50
MAX_DISTANCE=42
# =============================================================================
# OPTIONAL INTEGRATIONS
# =============================================================================
# Matrix chat room for your community (optional)
# Format: !roomid:matrix.org
MATRIX_ROOM='#meshtastic-berlin:matrix.org'
# Community chat link or Matrix room for your community (optional)
# Matrix aliases (e.g. #meshtastic-berlin:matrix.org) will be linked via matrix.to automatically.
CONTACT_LINK='#potatomesh:dod.ngo'
# Enable or disable PotatoMesh federation features (1=enabled, 0=disabled)
FEDERATION=1
# Hide public mesh messages from unauthenticated visitors (1=hidden, 0=public)
PRIVATE=0
# =============================================================================
@@ -56,16 +63,23 @@ MATRIX_ROOM='#meshtastic-berlin:matrix.org'
# Debug mode (0=off, 1=on)
DEBUG=0
# Meshtastic snapshot interval (seconds)
MESH_SNAPSHOT_SECS=60
# Public domain name for this PotatoMesh instance
# Provide a hostname (with optional port) that resolves to the web service.
# Example: mesh.example.org or mesh.example.org:41447
INSTANCE_DOMAIN=mesh.example.org
# Docker image architecture (linux-amd64, linux-arm64, linux-armv7)
POTATOMESH_IMAGE_ARCH=linux-amd64
# Docker image tag (use "latest" for the newest release or pin to vX.Y)
POTATOMESH_IMAGE_TAG=latest
# Docker Compose networking profile
# Leave unset for Linux hosts (default host networking).
# Set to "bridge" on Docker Desktop (macOS/Windows) if host networking
# is unavailable.
# COMPOSE_PROFILES=bridge
# Meshtastic channel index (0=primary, 1=secondary, etc.)
MESH_CHANNEL_INDEX=0
CHANNEL_INDEX=0
# Database settings
DB_BUSY_TIMEOUT_MS=5000
DB_BUSY_MAX_RETRIES=5
DB_BUSY_RETRY_DELAY=0.05
# Application settings
MAX_JSON_BODY_BYTES=1048576
+14
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
version: 2
updates:
- package-ecosystem: "ruby"
+3 -2
View File
@@ -4,8 +4,9 @@
- **`docker.yml`** - Build and push Docker images to GHCR
- **`codeql.yml`** - Security scanning
- **`python.yml`** - Python testing
- **`ruby.yml`** - Ruby testing
- **`python.yml`** - Python ingestor pipeline
- **`ruby.yml`** - Ruby Sinatra app testing
- **`javascript.yml`** - Frontend test suite
## Usage
+14
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: "CodeQL Advanced"
on:
+36 -8
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Build and Push Docker Images
on:
@@ -33,6 +47,7 @@ jobs:
architecture:
- { name: linux-amd64, platform: linux/amd64, label: "Linux x86_64" }
- { name: linux-arm64, platform: linux/arm64, label: "Linux ARM64" }
- { name: linux-armv7, platform: linux/arm/v7, label: "Linux ARMv7" }
steps:
- name: Checkout repository
@@ -55,12 +70,17 @@ jobs:
id: version
run: |
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
VERSION="${{ github.event.inputs.version }}"
RAW_VERSION="${{ github.event.inputs.version }}"
else
VERSION=${GITHUB_REF#refs/tags/v}
RAW_VERSION=${GITHUB_REF#refs/tags/}
fi
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "Published version: $VERSION"
STRIPPED_VERSION=${RAW_VERSION#v}
echo "version=$STRIPPED_VERSION" >> $GITHUB_OUTPUT
echo "version_with_v=v$STRIPPED_VERSION" >> $GITHUB_OUTPUT
echo "raw_version=$RAW_VERSION" >> $GITHUB_OUTPUT
echo "Published version: $STRIPPED_VERSION"
- name: Build and push ${{ matrix.service }} for ${{ matrix.architecture.name }}
uses: docker/build-push-action@v5
@@ -73,6 +93,7 @@ jobs:
tags: |
${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service }}-${{ matrix.architecture.name }}:latest
${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service }}-${{ matrix.architecture.name }}:${{ steps.version.outputs.version }}
${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service }}-${{ matrix.architecture.name }}:${{ steps.version.outputs.version_with_v }}
labels: |
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.description=PotatoMesh ${{ matrix.service == 'web' && 'Web Application' || 'Python Ingestor' }} for ${{ matrix.architecture.label }}
@@ -110,12 +131,15 @@ jobs:
- name: Extract version from tag
id: version
run: |
VERSION=${GITHUB_REF#refs/tags/v}
echo "version=$VERSION" >> $GITHUB_OUTPUT
RAW_VERSION=${GITHUB_REF#refs/tags/}
STRIPPED_VERSION=${RAW_VERSION#v}
echo "version=$STRIPPED_VERSION" >> $GITHUB_OUTPUT
echo "version_with_v=v$STRIPPED_VERSION" >> $GITHUB_OUTPUT
- name: Test web application (Linux AMD64)
run: |
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-amd64:${{ steps.version.outputs.version }}
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-amd64:${{ steps.version.outputs.version_with_v }}
docker run --rm -d --name web-test -p 41447:41447 \
-e API_TOKEN=test-token \
-e DEBUG=1 \
@@ -127,9 +151,11 @@ jobs:
- name: Test ingestor (Linux AMD64)
run: |
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:${{ steps.version.outputs.version }}
docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:${{ steps.version.outputs.version_with_v }}
docker run --rm --name ingestor-test \
-e POTATOMESH_INSTANCE=http://localhost:41447 \
-e API_TOKEN=test-token \
-e CONNECTION=mock \
-e DEBUG=1 \
${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:${{ steps.version.outputs.version }} &
sleep 5
@@ -160,11 +186,13 @@ jobs:
echo "### 🌐 Web Application" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-amd64:latest\` - Linux x86_64" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-arm64:latest\` - Linux ARM64" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-armv7:latest\` - Linux ARMv7" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
# Ingestor images
echo "### 📡 Ingestor Service" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:latest\` - Linux x86_64" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-arm64:latest\` - Linux ARM64" >> $GITHUB_STEP_SUMMARY
echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-armv7:latest\` - Linux ARMv7" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
+57
View File
@@ -0,0 +1,57 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: JavaScript
on:
push:
branches: [ "main" ]
pull_request:
branches: [ "main" ]
permissions:
contents: read
jobs:
frontend:
runs-on: ubuntu-latest
defaults:
run:
working-directory: web
steps:
- uses: actions/checkout@v5
- name: Set up Node.js 22
uses: actions/setup-node@v4
with:
node-version: '22'
- name: Install dependencies
run: npm ci
- name: Run JavaScript tests
run: npm test
- name: Upload coverage to Codecov
if: always()
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: web/reports/javascript-coverage.json
flags: frontend
name: frontend
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- name: Upload test results to Codecov
uses: codecov/test-results-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: web/reports/javascript-junit.xml
flags: frontend
+19 -5
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Python
on:
@@ -10,21 +24,18 @@ permissions:
contents: read
jobs:
test:
ingestor:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- name: Set up Python 3.13
uses: actions/setup-python@v3
uses: actions/setup-python@v5
with:
python-version: "3.13"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install black pytest pytest-cov meshtastic
- name: Lint with black
run: |
black --check ./
- name: Test with pytest and coverage
run: |
mkdir -p reports
@@ -45,3 +56,6 @@ jobs:
token: ${{ secrets.CODECOV_TOKEN }}
files: reports/python-junit.xml
flags: python-ingestor
- name: Lint with black
run: |
black --check ./
+19 -5
View File
@@ -1,3 +1,17 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
name: Ruby
on:
@@ -10,7 +24,7 @@ permissions:
contents: read
jobs:
test:
sinatra:
defaults:
run:
working-directory: ./web
@@ -29,8 +43,6 @@ jobs:
working-directory: ./web
- name: Set up dependencies
run: bundle install
- name: Run rufo
run: bundle exec rufo --check .
- name: Run tests
run: |
mkdir -p tmp/test-results
@@ -44,12 +56,14 @@ jobs:
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./web/tmp/test-results/rspec.xml
flags: ruby-${{ matrix.ruby-version }}
flags: sinatra-${{ matrix.ruby-version }}
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
fail_ci_if_error: false
flags: ruby-${{ matrix.ruby-version }}
flags: sinatra-${{ matrix.ruby-version }}
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- name: Run rufo
run: bundle exec rufo --check .
+11
View File
@@ -65,3 +65,14 @@ reports/
# AI planning and documentation
ai_docs/
*.log
# Generated credentials for the instance
web/.config
# JavaScript dependencies
node_modules/
web/node_modules/
# Debug symbols
ignored.txt
+39
View File
@@ -0,0 +1,39 @@
# Repository Guidelines
Keep code well structured, modular, and not monolithic. If modules get too big, consider a submodule structure.
Make sure all tests pass for Python (`pytest`), Ruby (`rspec`), and JavaScript (`npm test`).
Make sure all code is properly inline documented (PDoc, RDoc, JSDoc, etc.). We do not want any undocumented code.
Make sure all code is 100% unit tested. We want all lines, units, and branches to be thoroughly covered by tests.
New source files should have Apache v2 license headers using the exact string `Copyright © 2025-26 l5yth & contributors`.
Run linters for Python (`black`) and Ruby (`rufo`) to ensure consistent code formatting.
## Project Structure & Module Organization
The repository splits runtime and ingestion logic. `web/` holds the Sinatra dashboard (Ruby code in `lib/potato_mesh`, views in `views/`, static bundles in `public/`).
`data/` hosts the Python Meshtastic ingestor plus migrations and CLI scripts. API fixtures and end-to-end harnesses live in `tests/`. Dockerfiles and compose files support containerized workflows.
## Build, Test, and Development Commands
Run dependency installs inside `web/`: `bundle install` for gems and `npm ci` for JavaScript tooling. Start the app with `cd web && API_TOKEN=dev ./app.sh` for local work or `bundle exec rackup -p 41447` when integrating elsewhere.
Prep ingestion with `python -m venv .venv && pip install -r data/requirements.txt`; `./data/mesh.sh` streams from live radios. `docker-compose -f docker-compose.dev.yml up` brings up the full stack.
## Coding Style & Naming Conventions
Use two-space indentation for Ruby and keep `# frozen_string_literal: true` at the top of new files. Keep Ruby classes/modules in `CamelCase`, filenames in `snake_case.rb`, and feature specs in `*_spec.rb`.
JavaScript follows ES modules under `public/assets/js`; co-locate components with `__tests__` folders and use kebab-case filenames. Format Ruby via `bundle exec rufo .` and Python via `black`. Skip committing generated coverage artifacts.
## Testing Guidelines
Ruby specs run with `cd web && bundle exec rspec`, producing SimpleCov output in `coverage/`. Front-end behaviour is verified through Node's test runner: `cd web && npm test` writes V8 coverage and JUnit XML under `reports/`.
The ingestion layer is guarded by `pytest -q tests/test_mesh.py`; leave fixtures in `tests/` untouched so CI can replay them. New features should ship with matching specs and updated integration checks.
## Commit & Pull Request Guidelines
Commits should stay imperative and reference issues the way history does (`Add chat log entries... (#408)`). Squash noisy work-in-progress commits before pushing. Pull requests need a concise summary, screenshots or curl traces for UI/API tweaks, and links to tracked issues. Paste the command output for the test suites you ran and mention configuration toggles (`API_TOKEN`, `PRIVATE`) reviewers must set.
## Security & Configuration Tips
Never commit real API tokens or `.sqlite` dumps; use `.env.local` files ignored by Git. Confirm env defaults (`API_TOKEN`, `INSTANCE_DOMAIN`, `PRIVATE`) before deploying, and set `FEDERATION=0` when staging private nodes. Review `PROMETHEUS.md` when exposing metrics so scrape endpoints stay internal.
+220 -1
View File
@@ -1,8 +1,227 @@
# CHANGELOG
## v0.5.4
* Handle naming when primary channel has a name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/422>
* Handle edge case when primary channel has a name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/421>
* Add preset mode to logs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/420>
* Parallelize federation tasks with worker pool by @l5yth in <https://github.com/l5yth/potato-mesh/pull/419>
* Allow filtering chat and logs by node name by @l5yth in <https://github.com/l5yth/potato-mesh/pull/417>
* Gem: Add erb as dependency removed from std by @l5yth in <https://github.com/l5yth/potato-mesh/pull/416>
* Implement support for replies and reactions app by @l5yth in <https://github.com/l5yth/potato-mesh/pull/411>
* Ingestor: Ignore direct messages on default channel by @l5yth in <https://github.com/l5yth/potato-mesh/pull/414>
* Agents: Add instructions by @l5yth in <https://github.com/l5yth/potato-mesh/pull/410>
* Display encrypted messages in frontend log window by @l5yth in <https://github.com/l5yth/potato-mesh/pull/409>
* Add chat log entries for telemetry, position, and neighbor events by @l5yth in <https://github.com/l5yth/potato-mesh/pull/408>
* Handle missing instance domain outside production by @l5yth in <https://github.com/l5yth/potato-mesh/pull/405>
* Add tabbed chat panel with channel grouping by @l5yth in <https://github.com/l5yth/potato-mesh/pull/404>
* Normalize numeric client roles using Meshtastic CLI enums by @l5yth in <https://github.com/l5yth/potato-mesh/pull/402>
* Ensure Docker images publish versioned tags by @l5yth in <https://github.com/l5yth/potato-mesh/pull/403>
* Document environment configuration variables by @l5yth in <https://github.com/l5yth/potato-mesh/pull/400>
* Document federation refresh cadence by @l5yth in <https://github.com/l5yth/potato-mesh/pull/401>
* Add Prometheus monitoring documentation by @l5yth in <https://github.com/l5yth/potato-mesh/pull/399>
* Config: Read PROM_REPORT_IDS from environment by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/398>
* Feat: Mesh-Ingestor: Ability to provide already-existing interface instance by @KenADev in <https://github.com/l5yth/potato-mesh/pull/395>
* Fix: Mesh-Ingestor: Fix error for non-existing datetime.UTC reference by @KenADev in <https://github.com/l5yth/potato-mesh/pull/396>
* Chore: bump version to 0.5.4 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/388>
## v0.5.3
* Add telemetry formatting utilities and extend node overlay by @l5yth in <https://github.com/l5yth/potato-mesh/pull/387>
* Prune blank values from API responses by @l5yth in <https://github.com/l5yth/potato-mesh/pull/386>
* Add full support to telemetry schema and API by @l5yth in <https://github.com/l5yth/potato-mesh/pull/385>
* Respect PORT environment override by @l5yth in <https://github.com/l5yth/potato-mesh/pull/384>
* Add instance selector dropdown for federation deployments by @l5yth in <https://github.com/l5yth/potato-mesh/pull/382>
* Harden federation announcements by @l5yth in <https://github.com/l5yth/potato-mesh/pull/381>
* Ensure private mode disables federation by @l5yth in <https://github.com/l5yth/potato-mesh/pull/380>
* Ensure private mode disables chat messaging by @l5yth in <https://github.com/l5yth/potato-mesh/pull/378>
* Disable federation features when FEDERATION=0 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/379>
* Expose PRIVATE environment configuration across tooling by @l5yth in <https://github.com/l5yth/potato-mesh/pull/377>
* Fix frontend coverage export for Codecov by @l5yth in <https://github.com/l5yth/potato-mesh/pull/376>
* Restrict /api/instances results to recent records by @l5yth in <https://github.com/l5yth/potato-mesh/pull/374>
* Expose FEDERATION environment option across tooling by @l5yth in <https://github.com/l5yth/potato-mesh/pull/375>
* Chore: bump version to 0.5.3 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/372>
## v0.5.2
* Align theme and info controls by @l5yth in <https://github.com/l5yth/potato-mesh/pull/371>
* Fixes POST request 403 errors on instances behind Cloudflare proxy by @varna9000 in <https://github.com/l5yth/potato-mesh/pull/368>
* Delay initial federation announcements by @l5yth in <https://github.com/l5yth/potato-mesh/pull/366>
* Ensure well-known document stays in sync on startup by @l5yth in <https://github.com/l5yth/potato-mesh/pull/365>
* Guard federation DNS resolution against restricted networks by @l5yth in <https://github.com/l5yth/potato-mesh/pull/362>
* Add federation ingestion limits and tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/364>
* Prefer reported primary channel names by @l5yth in <https://github.com/l5yth/potato-mesh/pull/363>
* Decouple message API node hydration by @l5yth in <https://github.com/l5yth/potato-mesh/pull/360>
* Fix ingestor reconnection detection by @l5yth in <https://github.com/l5yth/potato-mesh/pull/361>
* Harden instance domain validation by @l5yth in <https://github.com/l5yth/potato-mesh/pull/359>
* Ensure INSTANCE_DOMAIN propagates to containers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/358>
* Chore: bump version to 0.5.2 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/356>
* Gracefully retry federation announcements over HTTP by @l5yth in <https://github.com/l5yth/potato-mesh/pull/355>
## v0.5.1
* Recursively ingest federated instances by @l5yth in <https://github.com/l5yth/potato-mesh/pull/353>
* Remove federation timeout environment overrides by @l5yth in <https://github.com/l5yth/potato-mesh/pull/352>
* Close unrelated short info overlays when opening short info by @l5yth in <https://github.com/l5yth/potato-mesh/pull/351>
* Improve federation instance error diagnostics by @l5yth in <https://github.com/l5yth/potato-mesh/pull/350>
* Harden federation domain validation and tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/347>
* Handle malformed instance records gracefully by @l5yth in <https://github.com/l5yth/potato-mesh/pull/348>
* Fix ingestor device mounting for non-serial connections by @l5yth in <https://github.com/l5yth/potato-mesh/pull/346>
* Ensure Docker deployments persist keyfile and well-known assets by @l5yth in <https://github.com/l5yth/potato-mesh/pull/345>
* Add modem preset display to node overlay by @l5yth in <https://github.com/l5yth/potato-mesh/pull/340>
* Display message frequency and channel in chat log by @l5yth in <https://github.com/l5yth/potato-mesh/pull/339>
* Bump fallback version string to v0.5.1 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/338>
* Docs: update changelog for 0.5.0 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/337>
* Fix ingestor docker import path by @l5yth in <https://github.com/l5yth/potato-mesh/pull/336>
## v0.5.0
* Ensure node overlays appear above fullscreen map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/333>
* Adjust node table columns responsively by @l5yth in <https://github.com/l5yth/potato-mesh/pull/332>
* Add LoRa metadata fields to nodes and messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/331>
* Add channel metadata capture for message tagging by @l5yth in <https://github.com/l5yth/potato-mesh/pull/329>
* Capture radio metadata for ingestor payloads by @l5yth in <https://github.com/l5yth/potato-mesh/pull/327>
* Fix FrozenError when filtering node query results by @l5yth in <https://github.com/l5yth/potato-mesh/pull/324>
* Ensure frontend reports git-aware version strings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/321>
* Ensure web Docker image ships application sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/322>
* Refine stacked short info overlays on the map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/319>
* Refine environment configuration defaults by @l5yth in <https://github.com/l5yth/potato-mesh/pull/318>
* Fix legacy configuration migration to XDG directories by @l5yth in <https://github.com/l5yth/potato-mesh/pull/317>
* Adopt XDG base directories for app data and config by @l5yth in <https://github.com/l5yth/potato-mesh/pull/316>
* Refactor: streamline ingestor environment variables by @l5yth in <https://github.com/l5yth/potato-mesh/pull/314>
* Adjust map auto-fit padding and default zoom by @l5yth in <https://github.com/l5yth/potato-mesh/pull/315>
* Ensure APIs filter stale data and refresh node details from latest sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/312>
* Improve offline tile fallback initialization by @l5yth in <https://github.com/l5yth/potato-mesh/pull/307>
* Add fallback for offline tile rendering errors by @l5yth in <https://github.com/l5yth/potato-mesh/pull/306>
* Fix map auto-fit handling and add controller by @l5yth in <https://github.com/l5yth/potato-mesh/pull/311>
* Fix map initialization bounds and add coverage by @l5yth in <https://github.com/l5yth/potato-mesh/pull/305>
* Increase coverage for configuration and sanitizer helpers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/303>
* Add comprehensive theme and background front-end tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/302>
* Document sanitization and helper modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/301>
* Add in-repo Meshtastic protobuf stubs for tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/300>
* Handle CRL lookup failures during federation TLS by @l5yth in <https://github.com/l5yth/potato-mesh/pull/299>
* Ensure JavaScript workflow runs frontend tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/298>
* Unify structured logging across application and ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/296>
* Add Apache license headers to missing sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/297>
* Update workflows for ingestor, sinatra, and frontend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/295>
* Fix IPv6 instance domain canonicalization by @l5yth in <https://github.com/l5yth/potato-mesh/pull/294>
* Handle federation HTTPS CRL verification failures by @l5yth in <https://github.com/l5yth/potato-mesh/pull/293>
* Adjust federation announcement interval to eight hours by @l5yth in <https://github.com/l5yth/potato-mesh/pull/292>
* Restore modular app functionality by @l5yth in <https://github.com/l5yth/potato-mesh/pull/291>
* Refactor config and metadata helpers into PotatoMesh modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/290>
* Update default site configuration defaults by @l5yth in <https://github.com/l5yth/potato-mesh/pull/288>
* Add regression test for queue drain concurrency by @l5yth in <https://github.com/l5yth/potato-mesh/pull/287>
* Ensure Docker config directories are created for non-root user by @l5yth in <https://github.com/l5yth/potato-mesh/pull/286>
* Clarify numeric address requirement for network target parsing by @l5yth in <https://github.com/l5yth/potato-mesh/pull/285>
* Ensure mesh ingestor queue resets active flag when idle by @l5yth in <https://github.com/l5yth/potato-mesh/pull/284>
* Clarify BLE connection description in README by @l5yth in <https://github.com/l5yth/potato-mesh/pull/283>
* Configure web container for production mode by @l5yth in <https://github.com/l5yth/potato-mesh/pull/282>
* Normalize INSTANCE_DOMAIN configuration to require hostnames by @l5yth in <https://github.com/l5yth/potato-mesh/pull/280>
* Avoid blocking startup on federation announcements by @l5yth in <https://github.com/l5yth/potato-mesh/pull/281>
* Fix production Docker builds for web and ingestor images by @l5yth in <https://github.com/l5yth/potato-mesh/pull/279>
* Improve instance domain detection logic by @l5yth in <https://github.com/l5yth/potato-mesh/pull/278>
* Implement federation announcements and instances API by @l5yth in <https://github.com/l5yth/potato-mesh/pull/277>
* Fix federation signature handling and IP guard by @l5yth in <https://github.com/l5yth/potato-mesh/pull/276>
* Add persistent federation metadata endpoint by @l5yth in <https://github.com/l5yth/potato-mesh/pull/274>
* Add configurable instance domain with reverse DNS fallback by @l5yth in <https://github.com/l5yth/potato-mesh/pull/272>
* Document production deployment configuration by @l5yth in <https://github.com/l5yth/potato-mesh/pull/273>
* Add targeted API endpoints and expose version metadata by @l5yth in <https://github.com/l5yth/potato-mesh/pull/271>
* Prometheus metrics updates on startup and for position/telemetry by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/270>
* Add hourly reconnect handling for inactive mesh interface by @l5yth in <https://github.com/l5yth/potato-mesh/pull/267>
* Dockerfile fixes by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/268>
* Added prometheus /metrics endpoint by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/262>
* Add fullscreen toggle to map view by @l5yth in <https://github.com/l5yth/potato-mesh/pull/263>
* Relocate JS coverage export script into web directory by @l5yth in <https://github.com/l5yth/potato-mesh/pull/266>
* V0.4.0 version string in web UI by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/265>
* Add energy saving cycle to ingestor daemon by @l5yth in <https://github.com/l5yth/potato-mesh/pull/256>
* Chore: restore apache headers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/260>
* Docs: add matrix to readme by @l5yth in <https://github.com/l5yth/potato-mesh/pull/259>
* Force dark theme default based on sanitized cookie by @l5yth in <https://github.com/l5yth/potato-mesh/pull/252>
* Document mesh ingestor modules with PDoc-style docstrings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/255>
* Handle missing node IDs in Meshtastic nodeinfo packets by @l5yth in <https://github.com/l5yth/potato-mesh/pull/251>
* Document Ruby helper methods with RDoc comments by @l5yth in <https://github.com/l5yth/potato-mesh/pull/254>
* Add JSDoc documentation across client scripts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/253>
* Fix mesh ingestor telemetry and neighbor handling by @l5yth in <https://github.com/l5yth/potato-mesh/pull/249>
* Refactor front-end assets into external modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/245>
* Add tests for helper utilities and asset routes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/243>
* Docs: add ingestor inline docstrings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/244>
* Add comprehensive coverage tests for mesh ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/241>
* Add inline documentation to config helpers and frontend scripts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/240>
* Update changelog by @l5yth in <https://github.com/l5yth/potato-mesh/pull/238>
## v0.4.0
* Reformat neighbor overlay layout by @l5yth in <https://github.com/l5yth/potato-mesh/pull/237>
* Add legend toggle for neighbor lines by @l5yth in <https://github.com/l5yth/potato-mesh/pull/236>
* Hide Air Util Tx column on mobile by @l5yth in <https://github.com/l5yth/potato-mesh/pull/235>
* Add overlay for clickable neighbor links on map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/234>
* Hide humidity and pressure columns on mobile by @l5yth in <https://github.com/l5yth/potato-mesh/pull/232>
* Remove last position timestamp from map info overlay by @l5yth in <https://github.com/l5yth/potato-mesh/pull/233>
* Improve live node positions and expose precision metadata by @l5yth in <https://github.com/l5yth/potato-mesh/pull/231>
* Show neighbor short names in info overlays by @l5yth in <https://github.com/l5yth/potato-mesh/pull/228>
* Add telemetry environment metrics to node UI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/227>
* Reduce neighbor line opacity by @l5yth in <https://github.com/l5yth/potato-mesh/pull/226>
* Visualize neighbor connections on map canvas by @l5yth in <https://github.com/l5yth/potato-mesh/pull/224>
* Add clear control to filter input by @l5yth in <https://github.com/l5yth/potato-mesh/pull/225>
* Handle Bluetooth shutdown hangs gracefully by @l5yth in <https://github.com/l5yth/potato-mesh/pull/221>
* Adjust mesh priorities and receive topics by @l5yth in <https://github.com/l5yth/potato-mesh/pull/220>
* Add BLE and fallback mesh interface handling by @l5yth in <https://github.com/l5yth/potato-mesh/pull/219>
* Add neighbor info ingestion and API endpoints by @l5yth in <https://github.com/l5yth/potato-mesh/pull/218>
* Add debug logs for unknown node creation and last-heard updates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/214>
* Update node last seen when events are received by @l5yth in <https://github.com/l5yth/potato-mesh/pull/212>
* Improve debug logging for node and telemetry data by @l5yth in <https://github.com/l5yth/potato-mesh/pull/213>
* Normalize stored message debug output by @l5yth in <https://github.com/l5yth/potato-mesh/pull/211>
* Stop repeating ingestor node info snapshot and timestamp debug logs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/210>
* Add telemetry API and ingestion support by @l5yth in <https://github.com/l5yth/potato-mesh/pull/205>
* Add private mode to hide chat and message APIs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/204>
* Handle offline-ready map fallback by @l5yth in <https://github.com/l5yth/potato-mesh/pull/202>
* Add linux/armv7 container builds and configuration options by @l5yth in <https://github.com/l5yth/potato-mesh/pull/201>
* Update Docker documentation by @l5yth in <https://github.com/l5yth/potato-mesh/pull/200>
* Update node last seen when ingesting encrypted messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/198>
* Fix api in readme by @l5yth in <https://github.com/l5yth/potato-mesh/pull/197>
## v0.3.0
* Add comprehensive Docker support with multi-architecture builds and automated CI/CD by @trose in <https://github.com/l5yth/potato-mesh/pull/122>
* Add connection recovery for TCP interface by @l5yth in <https://github.com/l5yth/potato-mesh/pull/186>
* Bump version to 0.3 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/191>
* Upgrade styles and fix interface issues by @l5yth in <https://github.com/l5yth/potato-mesh/pull/190>
* Some updates in the front by @dkorotkih2014-hub in <https://github.com/l5yth/potato-mesh/pull/188>
* Update last heard on node entry change by @l5yth in <https://github.com/l5yth/potato-mesh/pull/185>
* Populate chat metadata for unknown nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/182>
* Update role color theme to latest palette by @l5yth in <https://github.com/l5yth/potato-mesh/pull/183>
* Add placeholder nodes for unknown senders by @l5yth in <https://github.com/l5yth/potato-mesh/pull/181>
* Update role colors and ordering for firmware 2.7.10 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/180>
* Handle plain IP addresses in mesh TCP detection by @l5yth in <https://github.com/l5yth/potato-mesh/pull/154>
* Handle encrypted messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/173>
* Add fallback display names for unnamed nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/171>
* Ensure routers render above other node types by @l5yth in <https://github.com/l5yth/potato-mesh/pull/169>
* Move lint checks after tests in CI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/168>
* Handle proto values in nodeinfo payloads by @l5yth in <https://github.com/l5yth/potato-mesh/pull/167>
* Remove raw payload storage from database schema by @l5yth in <https://github.com/l5yth/potato-mesh/pull/166>
* Add POSITION_APP ingestion and API support by @l5yth in <https://github.com/l5yth/potato-mesh/pull/160>
* Add support for NODEINFO_APP packets by @l5yth in <https://github.com/l5yth/potato-mesh/pull/159>
* Derive SEO metadata from existing config values by @l5yth in <https://github.com/l5yth/potato-mesh/pull/153>
* Tests: create helper script to dump all mesh data from serial by @l5yth in <https://github.com/l5yth/potato-mesh/pull/152>
* Limit chat log to recent entries by @l5yth in <https://github.com/l5yth/potato-mesh/pull/151>
* Require time library before formatting ISO timestamps by @l5yth in <https://github.com/l5yth/potato-mesh/pull/149>
* Define docker compose network by @l5yth in <https://github.com/l5yth/potato-mesh/pull/148>
* Fix sqlite3 native extension on Alpine by @l5yth in <https://github.com/l5yth/potato-mesh/pull/146>
* Fix web app startup binding by @l5yth in <https://github.com/l5yth/potato-mesh/pull/147>
* Ensure sqlite3 builds from source on Alpine by @l5yth in <https://github.com/l5yth/potato-mesh/pull/145>
* Support mock serial interface in CI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/143>
* Fix Docker workflow matrix for supported platforms by @l5yth in <https://github.com/l5yth/potato-mesh/pull/142>
* Add clickable role filters to the map legend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/140>
* Rebuild chat log on each refresh by @l5yth in <https://github.com/l5yth/potato-mesh/pull/139>
* Fix: retain alpine runtime libs after removing build deps by @l5yth in <https://github.com/l5yth/potato-mesh/pull/138>
* Fix: support windows ingestor build by @l5yth in <https://github.com/l5yth/potato-mesh/pull/136>
* Fix: use supported ruby image by @l5yth in <https://github.com/l5yth/potato-mesh/pull/135>
* Feat: Add comprehensive Docker support by @trose in <https://github.com/l5yth/potato-mesh/pull/122>
* Chore: bump version to 0.2.1 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/134>
* Fix dark mode tile styling on new map tiles by @l5yth in <https://github.com/l5yth/potato-mesh/pull/132>
* Switch map tiles to OSM HOT and add theme filters by @l5yth in <https://github.com/l5yth/potato-mesh/pull/130>
* Add footer version display by @l5yth in <https://github.com/l5yth/potato-mesh/pull/128>
* Add responsive controls for map legend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/129>
* Update changelog by @l5yth in <https://github.com/l5yth/potato-mesh/pull/119>
## v0.2.0
+85 -66
View File
@@ -1,88 +1,107 @@
# PotatoMesh Docker Setup
# PotatoMesh Docker Guide
## Quick Start
PotatoMesh publishes ready-to-run container images to the GitHub Packages container
registry (GHCR). You do not need to clone the repository to deploy them—Compose
will pull the latest release images for you.
```bash
./configure.sh
docker-compose up -d
docker-compose logs -f
## Prerequisites
- Docker Engine 24+ or Docker Desktop with the Compose plugin
- Access to `/dev/ttyACM*` (or equivalent) if you plan to attach a Meshtastic
device to the ingestor container
- An API token that authorises the ingestor to post to your PotatoMesh instance
## Images on GHCR
| Service | Image |
|----------|---------------------------------------------------------------------------------------------------------------|
| Web UI | `ghcr.io/l5yth/potato-mesh-web-linux-amd64:<tag>` (e.g. `latest`, `3.0`, or `v3.0`) |
| Ingestor | `ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:<tag>` (e.g. `latest`, `3.0`, or `v3.0`) |
Images are published for every tagged release. Each build receives both semantic
version tags (for example `3.0`) and a matching `v`-prefixed tag (for example
`v3.0`). `latest` always points to the newest release, so pin one of the version
tags when you need a specific build.
## Configure environment
Create a `.env` file alongside your Compose file and populate the variables you
need. At a minimum you must set `API_TOKEN` so the ingestor can authenticate
against the web API.
```env
API_TOKEN=replace-with-a-strong-token
SITE_NAME=PotatoMesh Demo
CONNECTION=/dev/ttyACM0
INSTANCE_DOMAIN=mesh.example.org
```
Access at `http://localhost:41447`
Additional environment variables are optional:
## Configuration
| Variable | Default | Purpose |
| --- | --- | --- |
| `API_TOKEN` | _required_ | Shared secret used by the ingestor and API clients for authenticated `POST` requests. |
| `INSTANCE_DOMAIN` | _auto-detected_ | Public hostname (optionally with port) advertised by the web UI, metadata, and API responses. |
| `SITE_NAME` | `"PotatoMesh Demo"` | Title and branding surfaced in the web UI. |
| `CHANNEL` | `"#LongFast"` | Default LoRa channel label displayed on the dashboard. |
| `FREQUENCY` | `"915MHz"` | Default LoRa frequency description shown in the UI. |
| `CONTACT_LINK` | `"#potatomesh:dod.ngo"` | Chat link or Matrix room alias rendered in UI footers and overlays. |
| `MAP_CENTER` | `38.761944,-27.090833` | Latitude and longitude that centre the map view. |
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom (disables the auto-fit checkbox when set). |
| `MAX_DISTANCE` | `42` | Maximum relationship distance (km) before edges are hidden. |
| `DEBUG` | `0` | Enables verbose logging across services when set to `1`. |
| `FEDERATION` | `1` | Controls whether the instance announces itself and crawls peers (`1`) or stays isolated (`0`). |
| `PRIVATE` | `0` | Restricts public visibility and disables chat/message endpoints when set to `1`. |
| `CONNECTION` | `/dev/ttyACM0` | Serial device, TCP endpoint, or Bluetooth target used by the ingestor to reach the radio. |
Edit `.env` file or run `./configure.sh` to set:
The ingestor also respects supporting variables such as `POTATOMESH_INSTANCE`
(defaults to `http://web:41447`) for remote posting and `CHANNEL_INDEX` when
selecting a LoRa channel on serial or Bluetooth connections.
- `API_TOKEN` - Required for ingestor authentication
- `MESH_SERIAL` - Your Meshtastic device path (e.g., `/dev/ttyACM0`)
- `SITE_NAME` - Your mesh network name
- `MAP_CENTER_LAT/LON` - Map center coordinates
## Docker Compose file
## Device Setup
Use the `docker-compose.yml` file provided in the repository (or download the
[raw file from GitHub](https://raw.githubusercontent.com/l5yth/potato-mesh/main/docker-compose.yml)).
It already references the published GHCR images, defines persistent volumes for
data, configuration, and logs, and includes optional bridge-profile services for
environments that require classic port mapping. Place this file in the same
directory as your `.env` file so Compose can pick up both.
The dedicated configuration volume binds to `/app/.config/potato-mesh` inside
the container. This path stores the instance private key and staged
`/.well-known/potato-mesh` documents. Because the volume persists independently
of container lifecycle events, generated credentials are not replaced on reboot
or re-deploy.
## Start the stack
From the directory containing the Compose file:
**Find your device:**
```bash
# Linux
ls /dev/ttyACM* /dev/ttyUSB*
# macOS
ls /dev/cu.usbserial-*
# Windows (via WSL; native Windows exposes COM ports instead)
ls /dev/ttyS*
docker compose up -d
```
**Set permissions (Linux/macOS):**
Docker automatically pulls the GHCR images when they are not present locally.
The dashboard becomes available at `http://127.0.0.1:41447`. Use the bridge
profile when you need to map the port explicitly:
```bash
sudo chmod 666 /dev/ttyACM0
# Or add user to dialout group
sudo usermod -a -G dialout $USER
COMPOSE_PROFILES=bridge docker compose up -d
```
## Common Commands
## Updating
```bash
# Start services
docker-compose up -d
# View logs
docker-compose logs -f
# Stop services
docker-compose down
# Stop and remove data
docker-compose down -v
# Update images
docker-compose pull && docker-compose up -d
docker compose pull
docker compose up -d
```
## Troubleshooting
**Device access issues:**
```bash
# Check device exists and permissions
ls -la /dev/ttyACM0
- **Serial device permissions (Linux/macOS):** grant access with `sudo chmod 666
/dev/ttyACM0` or add your user to the `dialout` group.
- **Port already in use:** identify the conflicting service with `sudo lsof -i
:41447`.
- **Viewing logs:** `docker compose logs -f` tails output from both services.
# Fix permissions
sudo chmod 666 /dev/ttyACM0
```
**Port conflicts:**
```bash
# Find what's using port 41447
sudo lsof -i :41447
```
**Container issues:**
```bash
# Check logs
docker-compose logs
# Restart services
docker-compose restart
```
For more Docker help, see [Docker Compose documentation](https://docs.docker.com/compose/).
For general Docker support, consult the [Docker Compose documentation](https://docs.docker.com/compose/).
+93
View File
@@ -0,0 +1,93 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: This Dockerfile is kept for backward compatibility. The canonical build
# instructions live in `web/Dockerfile`; keep the two files in sync.
# Main application builder stage
FROM ruby:3.3-alpine AS builder
# Ensure native extensions are built against musl libc rather than
# using glibc precompiled binaries (which fail on Alpine).
ENV BUNDLE_FORCE_RUBY_PLATFORM=true
# Install build dependencies and SQLite3
RUN apk add --no-cache \
build-base \
sqlite-dev \
linux-headers \
pkgconfig
# Set working directory
WORKDIR /app
# Copy Gemfile and install dependencies
COPY web/Gemfile web/Gemfile.lock* ./
# Install gems with SQLite3 support
RUN bundle config set --local force_ruby_platform true && \
bundle config set --local without 'development test' && \
bundle install --jobs=4 --retry=3
# Production stage
FROM ruby:3.3-alpine AS production
# Install runtime dependencies
RUN apk add --no-cache \
sqlite \
tzdata \
curl
# Create non-root user
RUN addgroup -g 1000 -S potatomesh && \
adduser -u 1000 -S potatomesh -G potatomesh
# Set working directory
WORKDIR /app
# Copy installed gems from builder stage
COPY --from=builder /usr/local/bundle /usr/local/bundle
# Copy application code (exclude Dockerfile from web directory)
COPY --chown=potatomesh:potatomesh web/app.rb web/app.sh web/Gemfile web/Gemfile.lock* web/spec/ ./
COPY --chown=potatomesh:potatomesh web/public ./public
COPY --chown=potatomesh:potatomesh web/views/ ./views/
# Copy SQL schema files from data directory
COPY --chown=potatomesh:potatomesh data/*.sql /data/
# Create data directory for SQLite database
RUN mkdir -p /app/data /app/.local/share/potato-mesh && \
chown -R potatomesh:potatomesh /app/data /app/.local
# Switch to non-root user
USER potatomesh
# Expose port
EXPOSE 41447
# Default environment variables (can be overridden by host)
ENV APP_ENV=production \
RACK_ENV=production \
SITE_NAME="PotatoMesh Demo" \
CHANNEL="#LongFast" \
FREQUENCY="915MHz" \
MAP_CENTER="38.761944,-27.090833" \
MAP_ZOOM="" \
MAX_DISTANCE=42 \
CONTACT_LINK="#potatomesh:dod.ngo" \
DEBUG=0
# Start the application
CMD ["ruby", "app.rb", "-p", "41447", "-o", "0.0.0.0"]
+100
View File
@@ -0,0 +1,100 @@
# Prometheus Monitoring for PotatoMesh
PotatoMesh exposes runtime telemetry through a dedicated Prometheus endpoint so you can
observe message flow, node health, and geospatial metadata alongside the rest of your
infrastructure. This guide explains how the exporter is wired into the web
application, which metrics are available, and how to integrate the endpoint with a
Prometheus server.
## Runtime integration
The Sinatra application automatically loads the `prometheus-client` gem and mounts the
collector and exporter middlewares during boot. No additional configuration is
required to enable the `/metrics` endpoint—running the web application is enough to
serve Prometheus data on the same port as the dashboard. The middleware pair both
collects default Rack statistics and publishes PotatoMesh-specific gauges and
counters that are updated whenever the ingestors process new node records.
A background refresh is triggered during start-up via
`update_all_prometheus_metrics_from_nodes`, which seeds the gauges based on the latest
state in the database. Subsequent POST requests to the ingest APIs update each metric
in near real time.
## Selecting which nodes are exported
To avoid creating high-cardinality time series, PotatoMesh does not export per-node
metrics unless you opt in by providing node identifiers. Control this behaviour with
the `PROM_REPORT_IDS` environment variable:
- Leave the variable unset or blank to only export aggregate gauges such as the total
node count.
- Set `PROM_REPORT_IDS=*` to export metrics for every node in the database.
- Provide a comma-separated list (for example `PROM_REPORT_IDS=ABCD1234,EFGH5678`) to
expose metrics for specific nodes.
The selection applies to both the initial refresh and the incremental updates handled
by the ingest pipeline.
## Available metrics
| Metric name | Type | Labels | Description |
| --- | --- | --- | --- |
| `meshtastic_messages_total` | Counter | _none_ | Increments each time the ingest pipeline accepts a new message payload. |
| `meshtastic_nodes` | Gauge | _none_ | Tracks the number of nodes currently stored in the database. |
| `meshtastic_node` | Gauge | `node`, `short_name`, `long_name`, `hw_model`, `role` | Reports a node as present (value `1`) along with identity metadata. |
| `meshtastic_node_battery_level` | Gauge | `node` | Most recent battery percentage reported by the node. |
| `meshtastic_node_voltage` | Gauge | `node` | Most recent battery voltage reading. |
| `meshtastic_node_uptime_seconds` | Gauge | `node` | Uptime reported by the device in seconds. |
| `meshtastic_node_channel_utilization` | Gauge | `node` | Latest channel utilization ratio supplied by the node. |
| `meshtastic_node_transmit_air_utilization` | Gauge | `node` | Proportion of on-air time spent transmitting. |
| `meshtastic_node_latitude` | Gauge | `node` | Latitude component of the last known position. |
| `meshtastic_node_longitude` | Gauge | `node` | Longitude component of the last known position. |
| `meshtastic_node_altitude` | Gauge | `node` | Altitude (in metres) of the last known position. |
All per-node gauges are only emitted for identifiers included in `PROM_REPORT_IDS`.
Some values require telemetry packets to be present—for example, devices must provide
metrics or positional updates before the related gauges appear.
## Accessing the `/metrics` endpoint
Once the application is running, query the exporter directly:
```bash
curl http://localhost:41447/metrics
```
Use any HTTP client capable of plain-text requests. Prometheus scrapers should target
the same URL. The endpoint returns data in the standard exposition format produced by
`prometheus-client`.
## Prometheus scrape configuration
Add a job to your Prometheus server configuration that points to the PotatoMesh
instance. This example polls an instance running locally on the default port every 15
seconds:
```yaml
scrape_configs:
- job_name: potatomesh
scrape_interval: 15s
static_configs:
- targets:
- localhost:41447
```
If your deployment requires authentication or runs behind a reverse proxy, configure
Prometheus to match your network topology (for example by adding basic authentication
credentials, custom headers, or TLS settings).
## Troubleshooting
- **No per-node metrics appear.** Ensure that `PROM_REPORT_IDS` is set and that the
specified nodes exist in the database. Set the value to `*` if you want to export
every node during initial validation.
- **Metrics look stale after a restart.** Confirm that the ingestor is still posting
telemetry. The exporter only reflects data stored in the PotatoMesh database.
- **Scrapes time out.** Verify that the Prometheus server can reach the PotatoMesh
HTTP port and that no reverse proxy is blocking the `/metrics` path.
With the endpoint configured, you can build Grafana dashboards or alerting rules to
keep track of community mesh health in real time.
+127 -30
View File
@@ -1,14 +1,15 @@
# 🥔 PotatoMesh
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/l5yth/potato-mesh/ruby.yml?branch=main)](https://github.com/l5yth/potato-mesh/actions)
[![GitHub release (latest by date)](https://img.shields.io/github/v/release/l5yth/potato-mesh)](https://github.com/l5yth/potato-mesh/releases)
[![GitHub release](https://img.shields.io/github/v/release/l5yth/potato-mesh)](https://github.com/l5yth/potato-mesh/releases)
[![codecov](https://codecov.io/gh/l5yth/potato-mesh/branch/main/graph/badge.svg?token=FS7252JVZT)](https://codecov.io/gh/l5yth/potato-mesh)
[![Open-Source License](https://img.shields.io/github/license/l5yth/potato-mesh)](LICENSE)
[![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/l5yth/potato-mesh/issues)
[![Matrix Chat](https://img.shields.io/badge/matrix-%23potatomesh:dod.ngo-blue)](https://matrix.to/#/#potatomesh:dod.ngo)
A simple Meshtastic-powered node dashboard for your local community. _No MQTT clutter, just local LoRa aether._
* Web app with chat window and map view showing nodes and messages.
* Web app with chat window and map view showing nodes, neighbors, telemetry, and messages.
* API to POST (authenticated) and to GET nodes and messages.
* Supplemental Python ingestor to feed the POST APIs of the Web app with data remotely.
* Shows new node notifications (first seen) in chat.
@@ -16,15 +17,7 @@ A simple Meshtastic-powered node dashboard for your local community. _No MQTT cl
Live demo for Berlin #MediumFast: [potatomesh.net](https://potatomesh.net)
![screenshot of the second version](./scrot-0.2.png)
## 🐳 Quick Start with Docker
```bash
./configure.sh # Configure your setup
docker-compose up -d # Start services
docker-compose logs -f # View logs
```
![screenshot of the fourth version](./scrot-0.4.png)
## Web App
@@ -32,7 +25,7 @@ Requires Ruby for the Sinatra web app and SQLite3 for the app's database.
```bash
pacman -S ruby sqlite3
gem install sinatra sqlite3 rackup puma rspec rack-test rufo
gem install sinatra sqlite3 rackup puma rspec rack-test rufo prometheus-client
cd ./web
bundle install
```
@@ -54,32 +47,110 @@ Puma starting in single mode...
Check [127.0.0.1:41447](http://127.0.0.1:41447/) for the development preview
of the node map. Set `API_TOKEN` required for authorizations on the API's POST endpoints.
### Production
When promoting the app to production, run the server with the minimum required
configuration to ensure secure access and proper routing:
```bash
RACK_ENV="production" \
APP_ENV="production" \
API_TOKEN="SuperSecureTokenReally" \
INSTANCE_DOMAIN="https://potatomesh.net" \
exec ruby app.rb -p 41447 -o 0.0.0.0
```
* `RACK_ENV` and `APP_ENV` must be set to `production` to enable optimized
settings suited for live deployments.
* Bind the server to a production port and all interfaces (`-p 41447 -o 0.0.0.0`)
so that clients can reach the dashboard over the network.
* Provide a strong `API_TOKEN` value to authorize POST requests against the API.
* Configure `INSTANCE_DOMAIN` with the public URL of your deployment so vanity
links and generated metadata resolve correctly.
The web app can be configured with environment variables (defaults shown):
* `SITE_NAME` - title and header shown in the ui (default: "Meshtastic Berlin")
* `DEFAULT_CHANNEL` - default channel shown in the ui (default: "#MediumFast")
* `DEFAULT_FREQUENCY` - default channel shown in the ui (default: "868MHz")
* `MAP_CENTER_LAT` / `MAP_CENTER_LON` - default map center coordinates (default: `52.502889` / `13.404194`)
* `MAX_NODE_DISTANCE_KM` - hide nodes farther than this distance from the center (default: `137`)
* `MATRIX_ROOM` - matrix room id for a footer link (default: `#meshtastic-berlin:matrix.org`)
| Variable | Default | Purpose |
| --- | --- | --- |
| `API_TOKEN` | _required_ | Shared secret that authorizes ingestors and API clients making `POST` requests. |
| `INSTANCE_DOMAIN` | _auto-detected_ | Public hostname (optionally with port) used for metadata, federation, and generated API links. |
| `SITE_NAME` | `"PotatoMesh Demo"` | Title and header displayed in the UI. |
| `CHANNEL` | `"#LongFast"` | Default channel name displayed in the UI. |
| `FREQUENCY` | `"915MHz"` | Default frequency description displayed in the UI. |
| `CONTACT_LINK` | `"#potatomesh:dod.ngo"` | Chat link or Matrix alias rendered in the footer and overlays. |
| `MAP_CENTER` | `38.761944,-27.090833` | Latitude and longitude that centre the map on load. |
| `MAP_ZOOM` | _unset_ | Fixed Leaflet zoom applied on first load; disables auto-fit when provided. |
| `MAX_DISTANCE` | `42` | Maximum distance (km) before node relationships are hidden on the map. |
| `DEBUG` | `0` | Set to `1` for verbose logging in the web and ingestor services. |
| `FEDERATION` | `1` | Set to `1` to announce your instance and crawl peers, or `0` to disable federation. Private mode overrides this. |
| `PRIVATE` | `0` | Set to `1` to hide the chat UI, disable message APIs, and exclude hidden clients from public listings. |
| `CONNECTION` | `/dev/ttyACM0` | Serial device, TCP endpoint, or Bluetooth target used by the ingestor to reach the Meshtastic radio. |
The application derives SEO-friendly document titles, descriptions, and social
preview tags from these existing configuration values and reuses the bundled
logo for Open Graph and Twitter cards.
Example:
```bash
SITE_NAME="Meshtastic Berlin" MAP_CENTER_LAT=52.502889 MAP_CENTER_LON=13.404194 MAX_NODE_DISTANCE_KM=137 MATRIX_ROOM="#meshtastic-berlin:matrix.org" ./app.sh
SITE_NAME="PotatoMesh Demo" MAP_CENTER=38.761944,-27.090833 MAP_ZOOM=11 MAX_DISTANCE=42 CONTACT_LINK="#potatomesh:dod.ngo" ./app.sh
```
### Configuration & Storage
PotatoMesh stores its runtime assets using the XDG base directory specification.
When XDG directories are not provided the application falls back
to the repository root.
The key is written to `$XDG_CONFIG_HOME/potato-mesh/keyfile` and the
well-known document is staged in
`$XDG_CONFIG_HOME/potato-mesh/well-known/potato-mesh`.
The database can be found in `$XDG_DATA_HOME/potato-mesh`.
### Federation
PotatoMesh instances can optionally federate by publishing signed metadata and
discovering peers. Federation is enabled by default and controlled with the
`FEDERATION` environment variable. Set `FEDERATION=1` (default) to announce your
instance, respond to remote crawlers, and crawl the wider network. Set
`FEDERATION=0` to keep your deployment isolated—federation requests will be
ignored and the ingestor will skip discovery tasks. Private mode still takes
precedence; when `PRIVATE=1`, federation features remain disabled regardless of
the `FEDERATION` value.
When federation is enabled, PotatoMesh automatically refreshes entries from
known peers every eight hours to keep the directory current. Instances that
stop responding are considered stale and are removed from the web frontend after
72 hours, ensuring visitors only see active deployments in the public
directory.
### API
The web app contains an API:
* GET `/api/nodes?limit=100` - returns the latest 100 nodes reported to the app
* GET `/api/messages?limit=100` - returns the latest 100 messages
* GET `/api/positions?limit=100` - returns the latest 100 position records
* GET `/api/messages?limit=100` - returns the latest 100 messages (disabled when `PRIVATE=1`)
* GET `/api/telemetry?limit=100` - returns the latest 100 telemetry records
* GET `/api/neighbors?limit=100` - returns the latest 100 neighbor tuples
* GET `/api/instances` - returns known potato-mesh instances in other locations
* GET `/metrics` - metrics for the Prometheus endpoint
* GET `/version` - information about the potato-mesh instance
* POST `/api/nodes` - upserts nodes provided as JSON object mapping node ids to node data (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/messages` - appends messages provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/positions` - appends positions provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/messages` - appends messages provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`; disabled when `PRIVATE=1`)
* POST `/api/telemetry` - appends telemetry provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/neighbors` - appends neighbor tuples provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
The `API_TOKEN` environment variable must be set to a non-empty value and match the token supplied in the `Authorization` header for `POST` requests.
### Observability
PotatoMesh ships with a Prometheus exporter mounted at `/metrics`. Consult
[`PROMETHEUS.md`](./PROMETHEUS.md) for deployment guidance, metric details, and
scrape configuration examples.
## Python Ingestor
The web app is not meant to be run locally connected to a Meshtastic node but rather
@@ -88,8 +159,9 @@ accepts data through the API POST endpoints. Benefit is, here multiple nodes acr
community can feed the dashboard with data. The web app handles messages and nodes
by ID and there will be no duplication.
For convenience, the directory `./data` contains a Python ingestor. It connects to a local
Meshtastic node via serial port to gather nodes and messages seen by the node.
For convenience, the directory `./data` contains a Python ingestor. It connects to a
Meshtastic node via serial port or to a remote device that exposes the Meshtastic TCP
or Bluetooth (BLE) interfaces to gather nodes and messages seen by the node.
```bash
pacman -S python
@@ -105,19 +177,44 @@ to the configured potato-mesh instance.
Check out `mesh.sh` ingestor script in the `./data` directory.
```bash
POTATOMESH_INSTANCE=http://127.0.0.1:41447 API_TOKEN=1eb140fd-cab4-40be-b862-41c607762246 MESH_SERIAL=/dev/ttyACM0 DEBUG=1 ./mesh.sh
Mesh daemon: nodes+messages → http://127.0.0.1 | port=41447 | channel=0
POTATOMESH_INSTANCE=http://127.0.0.1:41447 API_TOKEN=1eb140fd-cab4-40be-b862-41c607762246 CONNECTION=/dev/ttyACM0 DEBUG=1 ./mesh.sh
[2025-02-20T12:34:56.789012Z] [potato-mesh] [info] channel=0 context=daemon.main port='41447' target='http://127.0.0.1' Mesh daemon starting
[...]
[debug] upserted node !849b7154 shortName='7154'
[debug] upserted node !ba653ae8 shortName='3ae8'
[debug] upserted node !16ced364 shortName='Pat'
[debug] stored message from '!9ee71c38' to '^all' ch=0 text='Guten Morgen!'
[2025-02-20T12:34:57.012345Z] [potato-mesh] [debug] context=handlers.upsert_node node_id=!849b7154 short_name='7154' long_name='7154' Queued node upsert payload
[2025-02-20T12:34:57.456789Z] [potato-mesh] [debug] context=handlers.upsert_node node_id=!ba653ae8 short_name='3ae8' long_name='3ae8' Queued node upsert payload
[2025-02-20T12:34:58.001122Z] [potato-mesh] [debug] context=handlers.store_packet_dict channel=0 from_id='!9ee71c38' payload='Guten Morgen!' to_id='^all' Queued message payload
```
Run the script with `POTATOMESH_INSTANCE` and `API_TOKEN` to keep updating
node records and parsing new incoming messages. Enable debug output with `DEBUG=1`,
specify the serial port with `MESH_SERIAL` (default `/dev/ttyACM0`), etc.
specify the connection target with `CONNECTION` (default `/dev/ttyACM0`) or set it to
an IP address (for example `192.168.1.20:4403`) to use the Meshtastic TCP
interface. `CONNECTION` also accepts Bluetooth device addresses (e.g.,
`ED:4D:9E:95:CF:60`) and the script attempts a BLE connection if available.
## Demos
Post your nodes here:
* <https://github.com/l5yth/potato-mesh/discussions/258>
## Docker
Docker images are published on Github for each release:
```bash
docker pull ghcr.io/l5yth/potato-mesh/web:latest # newest release
docker pull ghcr.io/l5yth/potato-mesh/web:v3.0 # pinned historical release
docker pull ghcr.io/l5yth/potato-mesh/ingestor:latest
```
Set `POTATOMESH_IMAGE_TAG` in your `.env` (or environment) to deploy a specific
tagged release with Docker Compose. See the [Docker guide](DOCKER.md) for more
details and custom deployment instructions.
## License
Apache v2.0, Contact <COM0@l5y.tech>
Join our community chat to discuss the dashboard or ask for technical support:
[#potatomesh:dod.ngo](https://matrix.to/#/#potatomesh:dod.ngo)
+131 -29
View File
@@ -1,4 +1,17 @@
#!/bin/bash
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PotatoMesh Configuration Script
# This script helps you configure your PotatoMesh instance with your local settings
@@ -43,10 +56,14 @@ read_with_default() {
update_env() {
local key="$1"
local value="$2"
local escaped_value
# Escape characters that would break the sed replacement delimiter or introduce backreferences
escaped_value=$(printf '%s' "$value" | sed -e 's/[&|]/\\&/g')
if grep -q "^$key=" .env; then
# Update existing value
sed -i.bak "s/^$key=.*/$key=$value/" .env
sed -i.bak "s|^$key=.*|$key=$escaped_value|" .env
else
# Add new value
echo "$key=$value" >> .env
@@ -54,32 +71,77 @@ update_env() {
}
# Get current values from .env if they exist
SITE_NAME=$(grep "^SITE_NAME=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "My Meshtastic Network")
DEFAULT_CHANNEL=$(grep "^DEFAULT_CHANNEL=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "#MediumFast")
DEFAULT_FREQUENCY=$(grep "^DEFAULT_FREQUENCY=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "868MHz")
MAP_CENTER_LAT=$(grep "^MAP_CENTER_LAT=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "52.502889")
MAP_CENTER_LON=$(grep "^MAP_CENTER_LON=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "13.404194")
MAX_NODE_DISTANCE_KM=$(grep "^MAX_NODE_DISTANCE_KM=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "50")
MATRIX_ROOM=$(grep "^MATRIX_ROOM=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
SITE_NAME=$(grep "^SITE_NAME=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "PotatoMesh Demo")
CHANNEL=$(grep "^CHANNEL=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "#LongFast")
FREQUENCY=$(grep "^FREQUENCY=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "915MHz")
FEDERATION=$(grep "^FEDERATION=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "1")
PRIVATE=$(grep "^PRIVATE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "0")
MAP_CENTER=$(grep "^MAP_CENTER=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "38.761944,-27.090833")
MAP_ZOOM=$(grep "^MAP_ZOOM=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
MAX_DISTANCE=$(grep "^MAX_DISTANCE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "42")
CONTACT_LINK=$(grep "^CONTACT_LINK=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "#potatomesh:dod.ngo")
API_TOKEN=$(grep "^API_TOKEN=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
POTATOMESH_IMAGE_ARCH=$(grep "^POTATOMESH_IMAGE_ARCH=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "linux-amd64")
POTATOMESH_IMAGE_TAG=$(grep "^POTATOMESH_IMAGE_TAG=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "latest")
INSTANCE_DOMAIN=$(grep "^INSTANCE_DOMAIN=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
DEBUG=$(grep "^DEBUG=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "0")
CONNECTION=$(grep "^CONNECTION=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "/dev/ttyACM0")
echo "📍 Location Settings"
echo "-------------------"
read_with_default "Site Name (your mesh network name)" "$SITE_NAME" SITE_NAME
read_with_default "Map Center Latitude" "$MAP_CENTER_LAT" MAP_CENTER_LAT
read_with_default "Map Center Longitude" "$MAP_CENTER_LON" MAP_CENTER_LON
read_with_default "Max Node Distance (km)" "$MAX_NODE_DISTANCE_KM" MAX_NODE_DISTANCE_KM
read_with_default "Map Center (lat,lon)" "$MAP_CENTER" MAP_CENTER
read_with_default "Default map zoom (leave blank to auto-fit)" "$MAP_ZOOM" MAP_ZOOM
read_with_default "Max Distance (km)" "$MAX_DISTANCE" MAX_DISTANCE
echo ""
echo "📡 Meshtastic Settings"
echo "---------------------"
read_with_default "Default Channel" "$DEFAULT_CHANNEL" DEFAULT_CHANNEL
read_with_default "Default Frequency (868MHz, 915MHz, etc.)" "$DEFAULT_FREQUENCY" DEFAULT_FREQUENCY
read_with_default "Channel" "$CHANNEL" CHANNEL
read_with_default "Frequency (868MHz, 915MHz, etc.)" "$FREQUENCY" FREQUENCY
echo ""
echo "💬 Optional Settings"
echo "-------------------"
read_with_default "Matrix Room (optional, e.g., #meshtastic-berlin:matrix.org)" "$MATRIX_ROOM" MATRIX_ROOM
read_with_default "Chat link or Matrix room (optional)" "$CONTACT_LINK" CONTACT_LINK
read_with_default "Debug logging (1=enabled, 0=disabled)" "$DEBUG" DEBUG
echo ""
echo "🤝 Federation Settings"
echo "----------------------"
echo "Federation shares instance metadata with other PotatoMesh deployments."
echo "Set to 1 to enable discovery or 0 to keep your instance isolated."
read_with_default "Enable federation (1=yes, 0=no)" "$FEDERATION" FEDERATION
echo ""
echo "🙈 Privacy Settings"
echo "-------------------"
echo "Private mode hides public mesh messages from unauthenticated visitors."
echo "Set to 1 to hide public feeds or 0 to keep them visible."
read_with_default "Enable private mode (1=yes, 0=no)" "$PRIVATE" PRIVATE
echo ""
echo "🛠 Docker Settings"
echo "------------------"
echo "Specify the Docker image architecture for your host (linux-amd64, linux-arm64, linux-armv7)."
read_with_default "Docker image architecture" "$POTATOMESH_IMAGE_ARCH" POTATOMESH_IMAGE_ARCH
echo "Enter the Docker image tag to deploy (use 'latest' for the newest release or pin a version such as v3.0)."
read_with_default "Docker image tag (latest, vX.Y, etc.)" "$POTATOMESH_IMAGE_TAG" POTATOMESH_IMAGE_TAG
echo ""
echo "🔌 Ingestor Connection"
echo "----------------------"
echo "Define how the mesh ingestor connects to your Meshtastic device."
echo "Use serial devices like /dev/ttyACM0, TCP endpoints such as tcp://host:port,"
echo "or Bluetooth addresses when supported."
read_with_default "Connection target" "$CONNECTION" CONNECTION
echo ""
echo "🌐 Domain Settings"
echo "------------------"
echo "Provide the public hostname that clients should use to reach this PotatoMesh instance."
echo "Leave blank to allow automatic detection via reverse DNS."
read_with_default "Instance domain (e.g. mesh.example.org)" "$INSTANCE_DOMAIN" INSTANCE_DOMAIN
echo ""
echo "🔐 Security Settings"
@@ -117,17 +179,41 @@ echo "📝 Updating .env file..."
# Update .env file
update_env "SITE_NAME" "\"$SITE_NAME\""
update_env "DEFAULT_CHANNEL" "\"$DEFAULT_CHANNEL\""
update_env "DEFAULT_FREQUENCY" "\"$DEFAULT_FREQUENCY\""
update_env "MAP_CENTER_LAT" "$MAP_CENTER_LAT"
update_env "MAP_CENTER_LON" "$MAP_CENTER_LON"
update_env "MAX_NODE_DISTANCE_KM" "$MAX_NODE_DISTANCE_KM"
update_env "MATRIX_ROOM" "\"$MATRIX_ROOM\""
update_env "CHANNEL" "\"$CHANNEL\""
update_env "FREQUENCY" "\"$FREQUENCY\""
update_env "MAP_CENTER" "\"$MAP_CENTER\""
if [ -n "$MAP_ZOOM" ]; then
update_env "MAP_ZOOM" "$MAP_ZOOM"
else
sed -i.bak '/^MAP_ZOOM=.*/d' .env
fi
update_env "MAX_DISTANCE" "$MAX_DISTANCE"
update_env "CONTACT_LINK" "\"$CONTACT_LINK\""
update_env "DEBUG" "$DEBUG"
update_env "API_TOKEN" "$API_TOKEN"
update_env "POTATOMESH_IMAGE_ARCH" "$POTATOMESH_IMAGE_ARCH"
update_env "POTATOMESH_IMAGE_TAG" "$POTATOMESH_IMAGE_TAG"
update_env "FEDERATION" "$FEDERATION"
update_env "PRIVATE" "$PRIVATE"
update_env "CONNECTION" "$CONNECTION"
if [ -n "$INSTANCE_DOMAIN" ]; then
update_env "INSTANCE_DOMAIN" "$INSTANCE_DOMAIN"
else
sed -i.bak '/^INSTANCE_DOMAIN=.*/d' .env
fi
# Add other common settings if they don't exist
if ! grep -q "^MESH_SERIAL=" .env; then
echo "MESH_SERIAL=/dev/ttyACM0" >> .env
# Migrate legacy connection settings and ensure defaults exist
if grep -q "^MESH_SERIAL=" .env; then
legacy_connection=$(grep "^MESH_SERIAL=" .env | head -n1 | cut -d'=' -f2-)
if [ -n "$legacy_connection" ] && ! grep -q "^CONNECTION=" .env; then
echo "♻️ Migrating legacy MESH_SERIAL value to CONNECTION"
update_env "CONNECTION" "$legacy_connection"
fi
sed -i.bak '/^MESH_SERIAL=.*/d' .env
fi
if ! grep -q "^CONNECTION=" .env; then
echo "CONNECTION=/dev/ttyACM0" >> .env
fi
if ! grep -q "^DEBUG=" .env; then
@@ -142,12 +228,28 @@ echo "✅ Configuration complete!"
echo ""
echo "📋 Your settings:"
echo " Site Name: $SITE_NAME"
echo " Map Center: $MAP_CENTER_LAT, $MAP_CENTER_LON"
echo " Max Distance: ${MAX_NODE_DISTANCE_KM}km"
echo " Channel: $DEFAULT_CHANNEL"
echo " Frequency: $DEFAULT_FREQUENCY"
echo " Matrix Room: ${MATRIX_ROOM:-'Not set'}"
echo " Map Center: $MAP_CENTER"
if [ -n "$MAP_ZOOM" ]; then
echo " Map Zoom: $MAP_ZOOM"
else
echo " Map Zoom: Auto-fit"
fi
echo " Max Distance: ${MAX_DISTANCE}km"
echo " Channel: $CHANNEL"
echo " Frequency: $FREQUENCY"
echo " Chat: ${CONTACT_LINK:-'Not set'}"
echo " Debug Logging: ${DEBUG}"
echo " Connection: ${CONNECTION}"
echo " API Token: ${API_TOKEN:0:8}..."
echo " Docker Image Arch: $POTATOMESH_IMAGE_ARCH"
echo " Docker Image Tag: $POTATOMESH_IMAGE_TAG"
echo " Private Mode: ${PRIVATE}"
echo " Instance Domain: ${INSTANCE_DOMAIN:-'Auto-detected'}"
if [ "${FEDERATION:-1}" = "0" ]; then
echo " Federation: Disabled"
else
echo " Federation: Enabled"
fi
echo ""
echo "🚀 You can now start PotatoMesh with:"
echo " docker-compose up -d"
+21 -10
View File
@@ -1,4 +1,17 @@
# syntax=docker/dockerfile:1.6
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ARG TARGETOS=linux
ARG PYTHON_VERSION=3.12.6
@@ -26,7 +39,7 @@ RUN set -eux; \
python -m pip install --no-cache-dir -r requirements.txt; \
apk del .build-deps
COPY data/ .
COPY data /app/data
RUN addgroup -S potatomesh && \
adduser -S potatomesh -G potatomesh && \
adduser potatomesh dialout && \
@@ -34,14 +47,13 @@ RUN addgroup -S potatomesh && \
USER potatomesh
ENV MESH_SERIAL=/dev/ttyACM0 \
MESH_SNAPSHOT_SECS=60 \
MESH_CHANNEL_INDEX=0 \
ENV CONNECTION=/dev/ttyACM0 \
CHANNEL_INDEX=0 \
DEBUG=0 \
POTATOMESH_INSTANCE="" \
API_TOKEN=""
CMD ["python", "mesh.py"]
CMD ["python", "-m", "data.mesh"]
# Windows production image
FROM python:${PYTHON_VERSION}-windowsservercore-ltsc2022 AS production-windows
@@ -56,17 +68,16 @@ WORKDIR /app
COPY data/requirements.txt ./
RUN python -m pip install --no-cache-dir -r requirements.txt
COPY data/ .
COPY data /app/data
USER ContainerUser
ENV MESH_SERIAL=/dev/ttyACM0 \
MESH_SNAPSHOT_SECS=60 \
MESH_CHANNEL_INDEX=0 \
ENV CONNECTION=/dev/ttyACM0 \
CHANNEL_INDEX=0 \
DEBUG=0 \
POTATOMESH_INSTANCE="" \
API_TOKEN=""
CMD ["python", "mesh.py"]
CMD ["python", "-m", "data.mesh"]
FROM production-${TARGETOS} AS production
+6 -1
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,3 +17,8 @@
The ``data.mesh`` module exposes helpers for reading Meshtastic node and
message information before forwarding it to the accompanying web application.
"""
VERSION = "0.5.5"
"""Semantic version identifier shared with the dashboard and front-end."""
__version__ = VERSION
+32
View File
@@ -0,0 +1,32 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- Enable write-ahead logging for better concurrency between readers and the writer.
PRAGMA journal_mode=WAL;
-- One row per known PotatoMesh deployment (federation registry).
CREATE TABLE IF NOT EXISTS instances (
id TEXT PRIMARY KEY,
domain TEXT NOT NULL, -- public hostname; uniquely indexed below
pubkey TEXT NOT NULL, -- NOTE(review): presumably verifies `signature` — confirm against federation code
name TEXT,
version TEXT,
channel TEXT,
frequency TEXT,
latitude REAL,
longitude REAL,
last_update_time INTEGER, -- NOTE(review): assumed epoch seconds — confirm with writer
is_private BOOLEAN NOT NULL DEFAULT 0, -- 1 hides public feeds for this instance
signature TEXT
);
-- Enforce at most one registry row per domain.
CREATE UNIQUE INDEX IF NOT EXISTS idx_instances_domain ON instances(domain);
+26 -454
View File
@@ -1,6 +1,5 @@
#!/usr/bin/env python3
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,458 +13,31 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mesh daemon helpers for synchronising Meshtastic data.
This module wraps the Meshtastic serial interface and exposes helper
functions that serialise nodes and text messages to JSON before forwarding
them to the accompanying web API. It also provides the long-running daemon
entry point that performs these synchronisation tasks.
"""
import dataclasses
import heapq
import itertools
import json, os, time, threading, signal, urllib.request, urllib.error
from collections.abc import Mapping
from meshtastic.serial_interface import SerialInterface
from pubsub import pub
from google.protobuf.json_format import MessageToDict
from google.protobuf.message import Message as ProtoMessage
# --- Config (env overrides) ---------------------------------------------------
PORT = os.environ.get("MESH_SERIAL", "/dev/ttyACM0")
SNAPSHOT_SECS = int(os.environ.get("MESH_SNAPSHOT_SECS", "60"))
CHANNEL_INDEX = int(os.environ.get("MESH_CHANNEL_INDEX", "0"))
DEBUG = os.environ.get("DEBUG") == "1"
INSTANCE = os.environ.get("POTATOMESH_INSTANCE", "").rstrip("/")
API_TOKEN = os.environ.get("API_TOKEN", "")
# --- POST queue ----------------------------------------------------------------
_POST_QUEUE_LOCK = threading.Lock()
_POST_QUEUE = []
_POST_QUEUE_COUNTER = itertools.count()
_POST_QUEUE_ACTIVE = False
_NODE_POST_PRIORITY = 0
_MESSAGE_POST_PRIORITY = 10
_DEFAULT_POST_PRIORITY = 50
def _get(obj, key, default=None):
"""Return a key or attribute value from ``obj``.
Args:
obj: Mapping or object containing the desired value.
key: Key or attribute name to look up.
default: Value returned when the key is missing.
Returns:
The resolved value if present, otherwise ``default``.
"""
if isinstance(obj, dict):
return obj.get(key, default)
return getattr(obj, key, default)
# --- HTTP helpers -------------------------------------------------------------
def _post_json(path: str, payload: dict):
    """POST ``payload`` as JSON to the configured web API (best effort).

    Args:
        path: API path appended to the configured ``INSTANCE`` base URL.
        payload: Mapping serialised to JSON for the request body.
    """
    if not INSTANCE:
        return  # no API target configured; drop silently
    target = f"{INSTANCE}{path}"
    headers = {"Content-Type": "application/json"}
    if API_TOKEN:
        headers["Authorization"] = f"Bearer {API_TOKEN}"
    body = json.dumps(payload).encode("utf-8")
    request = urllib.request.Request(target, data=body, headers=headers)
    try:
        with urllib.request.urlopen(request, timeout=10) as response:
            response.read()
    except Exception as exc:
        # Delivery is best-effort; failures are only surfaced in debug mode.
        if DEBUG:
            print(f"[warn] POST {target} failed: {exc}")
def _enqueue_post_json(path: str, payload: dict, priority: int):
    """Push one pending POST onto the shared priority heap.

    The monotonically increasing counter breaks ties between entries that
    share the same priority, keeping FIFO order within a priority level.
    """
    with _POST_QUEUE_LOCK:
        sequence = next(_POST_QUEUE_COUNTER)
        heapq.heappush(_POST_QUEUE, (priority, sequence, path, payload))
def _drain_post_queue():
    """Send queued POSTs in priority order until the heap is empty.

    Clears ``_POST_QUEUE_ACTIVE`` under the lock before returning so the
    next caller of ``_queue_post_json`` starts a fresh drain.
    """
    global _POST_QUEUE_ACTIVE
    while True:
        with _POST_QUEUE_LOCK:
            if not _POST_QUEUE:
                _POST_QUEUE_ACTIVE = False
                return
            _, _, path, payload = heapq.heappop(_POST_QUEUE)
        # Network I/O happens outside the lock so producers are not blocked.
        _post_json(path, payload)
def _queue_post_json(
    path: str, payload: dict, *, priority: int = _DEFAULT_POST_PRIORITY
):
    """Enqueue a POST request and start draining unless a drain is running.

    Args:
        path: API path for the request.
        payload: JSON-serialisable request body.
        priority: Heap priority (lower values are sent first).
    """
    global _POST_QUEUE_ACTIVE
    _enqueue_post_json(path, payload, priority)
    with _POST_QUEUE_LOCK:
        already_draining = _POST_QUEUE_ACTIVE
        if not already_draining:
            _POST_QUEUE_ACTIVE = True  # claim the drain before releasing the lock
    if not already_draining:
        _drain_post_queue()
def _clear_post_queue():
    """Drop all pending POSTs and reset the active flag (test helper)."""
    global _POST_QUEUE_ACTIVE
    with _POST_QUEUE_LOCK:
        del _POST_QUEUE[:]
        _POST_QUEUE_ACTIVE = False
# --- Node upsert --------------------------------------------------------------
def _node_to_dict(n) -> dict:
    """Recursively turn a Meshtastic node/user structure into JSON-safe data.

    Args:
        n: ``dict``, dataclass or protobuf message describing a node or user.

    Returns:
        A JSON-serialisable representation of ``n``.
    """

    def _coerce(item):
        """Convert one value, recursing into containers and nested records."""
        if isinstance(item, dict):
            return {key: _coerce(val) for key, val in item.items()}
        if isinstance(item, (list, tuple, set)):
            return [_coerce(element) for element in item]
        if dataclasses.is_dataclass(item):
            field_names = item.__dataclass_fields__
            return {name: _coerce(getattr(item, name)) for name in field_names}
        if isinstance(item, ProtoMessage):
            return MessageToDict(
                item, preserving_proto_field_name=True, use_integers_for_enums=False
            )
        if isinstance(item, bytes):
            try:
                return item.decode()
            except Exception:
                return item.hex()  # non-UTF-8 payloads become hex strings
        if item is None or isinstance(item, (str, int, float, bool)):
            return item
        # Last resort: JSON round-trip with str() coercion, else plain str().
        try:
            return json.loads(json.dumps(item, default=str))
        except Exception:
            return str(item)

    return _coerce(n)
def upsert_node(node_id, n):
    """Serialise one node snapshot and queue it for upload to the web API.

    Args:
        node_id: Unique identifier of the node in the mesh.
        n: Node object obtained from the Meshtastic interface.
    """
    payload = _node_to_dict(n)
    _queue_post_json("/api/nodes", {node_id: payload}, priority=_NODE_POST_PRIORITY)
    if DEBUG:
        user_info = _get(payload, "user") or {}
        short = _get(user_info, "shortName")
        print(f"[debug] upserted node {node_id} shortName={short!r}")
# --- Message logging via PubSub -----------------------------------------------
def _iso(ts: int | float) -> str:
"""Return an ISO-8601 timestamp string for ``ts``.
Args:
ts: POSIX timestamp as ``int`` or ``float``.
Returns:
Timestamp formatted with a trailing ``Z`` to denote UTC.
"""
import datetime
return (
datetime.datetime.fromtimestamp(int(ts), datetime.UTC)
.isoformat()
.replace("+00:00", "Z")
)
def _first(d, *names, default=None):
"""Return the first non-empty key from ``names`` (supports nested lookups).
Keys that resolve to ``None`` or an empty string are skipped so callers can
provide multiple potential field names without accidentally capturing an
explicit ``null`` value.
Args:
d: Mapping or object to query.
*names: Candidate field names using dotted paths for nesting.
default: Value returned when all candidates are missing.
Returns:
The first matching value or ``default`` if none resolve to content.
"""
def _mapping_get(obj, key):
if isinstance(obj, Mapping) and key in obj:
return True, obj[key]
if hasattr(obj, "__getitem__"):
try:
return True, obj[key]
except Exception:
pass
if hasattr(obj, key):
return True, getattr(obj, key)
return False, None
for name in names:
cur = d
ok = True
for part in name.split("."):
ok, cur = _mapping_get(cur, part)
if not ok:
break
if ok:
if cur is None:
continue
if isinstance(cur, str) and cur == "":
continue
return cur
return default
def _pkt_to_dict(packet) -> dict:
"""Normalise a received packet into a JSON-friendly dictionary.
Args:
packet: Protobuf ``MeshPacket`` or dictionary received from the daemon.
Returns:
Packet data ready for JSON serialisation.
"""
if isinstance(packet, dict):
return packet
if isinstance(packet, ProtoMessage):
return MessageToDict(
packet, preserving_proto_field_name=True, use_integers_for_enums=False
)
# Last resort: try to read attributes
try:
return json.loads(json.dumps(packet, default=lambda o: str(o)))
except Exception:
return {"_unparsed": str(packet)}
def store_packet_dict(p: dict):
    """Forward a decoded text packet to the web API message endpoint.

    Only packets carrying text on the ``TEXT_MESSAGE_APP`` port (and with
    a packet id) are kept. Field lookups accept both camelCase and
    snake_case spellings for compatibility across Meshtastic releases.

    Args:
        p: Packet dictionary produced by ``_pkt_to_dict``.
    """
    decoded = p.get("decoded") or {}
    text = _first(decoded, "payload.text", "text", default=None)
    if not text:
        return  # not a text message (telemetry, position, ...)

    # Port filter: keep only TEXT_MESSAGE_APP traffic.
    portnum_raw = _first(decoded, "portnum", default=None)
    portnum = None if portnum_raw is None else str(portnum_raw).upper()
    if portnum and portnum not in {"1", "TEXT_MESSAGE_APP"}:
        return

    # Channel: prefer decoded.channel, then the top-level field, default 0.
    channel = _first(decoded, "channel", default=None)
    if channel is None:
        channel = _first(p, "channel", default=0)
    try:
        channel = int(channel)
    except Exception:
        channel = 0

    # Identifiers and timestamps.
    pkt_id = _first(p, "id", "packet_id", "packetId", default=None)
    if pkt_id is None:
        return  # cannot store a message without an id
    rx_time = int(_first(p, "rxTime", "rx_time", default=time.time()))
    from_id = _first(p, "fromId", "from_id", "from", default=None)
    to_id = _first(p, "toId", "to_id", "to", default=None)
    if DEBUG and (from_id is None or str(from_id) == ""):
        try:
            raw = json.dumps(p, default=str)
        except Exception:
            raw = str(p)
        print(f"[debug] packet missing from_id: {raw}")

    # Radio link metrics (optional).
    snr = _first(p, "snr", "rx_snr", "rxSnr", default=None)
    rssi = _first(p, "rssi", "rx_rssi", "rxRssi", default=None)
    hop = _first(p, "hopLimit", "hop_limit", default=None)

    msg = {
        "id": int(pkt_id),
        "rx_time": rx_time,
        "rx_iso": _iso(rx_time),
        "from_id": from_id,
        "to_id": to_id,
        "channel": channel,
        "portnum": str(portnum) if portnum is not None else None,
        "text": text,
        "snr": float(snr) if snr is not None else None,
        "rssi": int(rssi) if rssi is not None else None,
        "hop_limit": int(hop) if hop is not None else None,
    }
    _queue_post_json("/api/messages", msg, priority=_MESSAGE_POST_PRIORITY)
    if DEBUG:
        print(
            f"[debug] stored message from {from_id!r} to {to_id!r} ch={channel} text={text!r}"
        )
# PubSub receive handler
def on_receive(packet, interface):
    """PubSub callback: normalise ``packet`` and persist text messages.

    Args:
        packet: Raw packet received from the Meshtastic interface.
        interface: Originating interface (unused; required by PubSub).
    """
    normalised = None
    try:
        normalised = _pkt_to_dict(packet)
        store_packet_dict(normalised)
    except Exception as exc:
        # Report the dict keys when available, else the raw packet type.
        if isinstance(normalised, dict):
            detail = list(normalised.keys())
        else:
            detail = type(packet)
        print(f"[warn] failed to store packet: {exc} | info: {detail}")
# --- Main ---------------------------------------------------------------------
def _node_items_snapshot(nodes_obj, retries: int = 3):
"""Return a snapshot list of ``(node_id, node)`` pairs.
The Meshtastic ``SerialInterface`` updates ``iface.nodes`` from another
thread. When that happens during iteration Python raises ``RuntimeError``.
To keep the daemon quiet we retry a few times and, if it keeps changing,
bail out for this loop.
Args:
nodes_obj: Container mapping node IDs to node objects.
retries: Number of attempts performed before giving up.
Returns:
Snapshot of node entries or ``None`` when retries were exhausted because
the container kept mutating.
"""
if not nodes_obj:
return []
items_callable = getattr(nodes_obj, "items", None)
if callable(items_callable):
for _ in range(max(1, retries)):
try:
return list(items_callable())
except RuntimeError as err:
if "dictionary changed size during iteration" not in str(err):
raise
time.sleep(0)
return None
if hasattr(nodes_obj, "__iter__") and hasattr(nodes_obj, "__getitem__"):
for _ in range(max(1, retries)):
try:
keys = list(nodes_obj)
return [(k, nodes_obj[k]) for k in keys]
except RuntimeError as err:
if "dictionary changed size during iteration" not in str(err):
raise
time.sleep(0)
return None
return []
def main():
    """Run the mesh synchronisation daemon until SIGINT/SIGTERM.

    Subscribes to the PubSub receive topic, opens the serial interface,
    and periodically pushes node snapshots to the configured web API.
    """
    # PubSub subscription is the reliable receive path in current meshtastic.
    pub.subscribe(on_receive, "meshtastic.receive")
    iface = SerialInterface(devPath=PORT)
    stop = threading.Event()

    def handle_sig(*_):
        """Request a clean shutdown of the snapshot loop."""
        stop.set()

    signal.signal(signal.SIGINT, handle_sig)
    signal.signal(signal.SIGTERM, handle_sig)

    target = INSTANCE or "(no POTATOMESH_INSTANCE)"
    print(
        f"Mesh daemon: nodes+messages → {target} | port={PORT} | channel={CHANNEL_INDEX}"
    )

    while not stop.is_set():
        try:
            node_items = _node_items_snapshot(getattr(iface, "nodes", {}) or {})
            if node_items is None:
                # Container kept mutating; skip this cycle quietly.
                if DEBUG:
                    print(
                        "[debug] skipping node snapshot; nodes changed during iteration"
                    )
            else:
                for node_id, node in node_items:
                    try:
                        upsert_node(node_id, node)
                    except Exception as exc:
                        print(
                            f"[warn] failed to update node snapshot for {node_id}: {exc}"
                        )
                        if DEBUG:
                            print(f"[debug] node object: {node!r}")
        except Exception as exc:
            print(f"[warn] failed to update node snapshot: {exc}")
        stop.wait(SNAPSHOT_SECS)

    try:
        iface.close()
    except Exception:
        pass  # best-effort close on shutdown
"""Backward-compatible entry point for the mesh ingestor daemon."""
from __future__ import annotations
import importlib
import sys
from pathlib import Path
try:
from . import mesh_ingestor as _mesh_ingestor
except ImportError:
if __package__ in {None, ""}:
package_dir = Path(__file__).resolve().parent
project_root = str(package_dir.parent)
if project_root not in sys.path:
sys.path.insert(0, project_root)
_mesh_ingestor = importlib.import_module("data.mesh_ingestor")
else:
raise
# Expose the refactored mesh ingestor module under the legacy name so existing
# imports (``import data.mesh as mesh``) continue to work. Attribute access and
# monkeypatching operate directly on the shared module instance.
sys.modules[__name__] = _mesh_ingestor
if __name__ == "__main__":
main()
_mesh_ingestor.main()
+1 -2
View File
@@ -1,6 +1,5 @@
#!/usr/bin/env bash
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+129
View File
@@ -0,0 +1,129 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""High-level API for the potato-mesh ingestor."""
from __future__ import annotations
import signal as signal # re-exported for compatibility
import threading as threading # re-exported for compatibility
import sys
import types
from . import channels, config, daemon, handlers, interfaces, queue, serialization
__all__: list[str] = []
def _reexport(module) -> None:
    """Copy ``module.__all__`` names into this namespace and extend ``__all__``."""
    exported = getattr(module, "__all__", [])
    for exported_name in exported:
        globals()[exported_name] = getattr(module, exported_name)
    __all__.extend(exported)
def _export_constants() -> None:
    """Publish legacy module aliases (``json``/``urllib``/``glob``) and record them."""
    aliases = {"json": queue.json, "urllib": queue.urllib, "glob": interfaces.glob}
    globals().update(aliases)
    __all__.extend([*aliases, "threading", "signal"])
for _module in (channels, daemon, handlers, interfaces, queue, serialization):
_reexport(_module)
_export_constants()
_CONFIG_ATTRS = {
"CONNECTION",
"SNAPSHOT_SECS",
"CHANNEL_INDEX",
"DEBUG",
"INSTANCE",
"API_TOKEN",
"LORA_FREQ",
"MODEM_PRESET",
"_RECONNECT_INITIAL_DELAY_SECS",
"_RECONNECT_MAX_DELAY_SECS",
"_CLOSE_TIMEOUT_SECS",
"_debug_log",
}
# Legacy export maintained for backwards compatibility.
_CONFIG_ATTRS.add("PORT")
_INTERFACE_ATTRS = {"BLEInterface", "SerialInterface", "TCPInterface"}
_QUEUE_ATTRS = set(queue.__all__)
_HANDLER_ATTRS = set(handlers.__all__)
_DAEMON_ATTRS = set(daemon.__all__)
_SERIALIZATION_ATTRS = set(serialization.__all__)
_INTERFACE_EXPORTS = set(interfaces.__all__)
__all__.extend(sorted(_CONFIG_ATTRS))
__all__.extend(sorted(_INTERFACE_ATTRS))
class _MeshIngestorModule(types.ModuleType):
    """Module proxy that keeps config and interface state in sync.

    Attribute reads fall through to the ``config`` and ``interfaces``
    submodules; writes are mirrored into every submodule that exports the
    name so legacy monkeypatching continues to take effect.
    """

    def __getattr__(self, name: str):  # type: ignore[override]
        """Delegate unknown attribute reads to the owning submodule."""
        if name in _CONFIG_ATTRS:
            return getattr(config, name)
        if name in _INTERFACE_ATTRS or name in _INTERFACE_EXPORTS:
            return getattr(interfaces, name)
        raise AttributeError(name)

    def __setattr__(self, name: str, value):  # type: ignore[override]
        """Mirror attribute writes into the appropriate submodule(s)."""
        # Config and interface-class names short-circuit: the raw value is
        # stored both on the submodule and on this proxy module.
        if name in _CONFIG_ATTRS:
            setattr(config, name, value)
            super().__setattr__(name, value)
            return
        if name in _INTERFACE_ATTRS:
            setattr(interfaces, name, value)
            super().__setattr__(name, value)
            return
        # Remaining exports may belong to several submodules at once; write
        # to each match and cache whatever the submodule now exposes.
        fanout = (
            (_INTERFACE_EXPORTS, interfaces),
            (_QUEUE_ATTRS, queue),
            (_HANDLER_ATTRS, handlers),
            (_DAEMON_ATTRS, daemon),
            (_SERIALIZATION_ATTRS, serialization),
        )
        matched = False
        for attr_names, submodule in fanout:
            if name in attr_names:
                setattr(submodule, name, value)
                super().__setattr__(name, getattr(submodule, name, value))
                matched = True
        if not matched:
            super().__setattr__(name, value)
sys.modules[__name__].__class__ = _MeshIngestorModule
+238
View File
@@ -0,0 +1,238 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for capturing and exposing mesh channel metadata."""
from __future__ import annotations
import os
from typing import Any, Iterable, Iterator
from . import config
try: # pragma: no cover - optional dependency for enum introspection
from meshtastic.protobuf import channel_pb2
except Exception: # pragma: no cover - exercised in environments without protobufs
channel_pb2 = None # type: ignore[assignment]
_ROLE_PRIMARY = 1
_ROLE_SECONDARY = 2
if channel_pb2 is not None: # pragma: no branch - evaluated once at import time
try:
_ROLE_PRIMARY = int(channel_pb2.Channel.Role.PRIMARY)
_ROLE_SECONDARY = int(channel_pb2.Channel.Role.SECONDARY)
except Exception: # pragma: no cover - defensive, version specific
_ROLE_PRIMARY = 1
_ROLE_SECONDARY = 2
_CHANNEL_MAPPINGS: tuple[tuple[int, str], ...] = ()
_CHANNEL_LOOKUP: dict[int, str] = {}
def _iter_channel_objects(channels_obj: Any) -> Iterator[Any]:
"""Yield channel descriptors from ``channels_obj``.
The real Meshtastic API exposes channels via protobuf containers that are
list-like. This helper converts the container into a deterministic iterator
while avoiding runtime errors if an unexpected type is supplied.
"""
if channels_obj is None:
return iter(())
if isinstance(channels_obj, dict):
return iter(channels_obj.values())
if isinstance(channels_obj, Iterable):
return iter(list(channels_obj))
length_fn = getattr(channels_obj, "__len__", None)
getitem = getattr(channels_obj, "__getitem__", None)
if callable(length_fn) and callable(getitem):
try:
length = int(length_fn())
except Exception: # pragma: no cover - defensive only
length = None
if length is not None and length >= 0:
snapshot = []
for index in range(length):
try:
snapshot.append(getitem(index))
except Exception: # pragma: no cover - best effort copy
break
return iter(snapshot)
return iter(())
def _primary_channel_name() -> str | None:
    """Return the fallback name to use for the primary channel when needed."""
    # Prefer the modem preset captured from the radio, then the CHANNEL env var.
    preset = getattr(config, "MODEM_PRESET", None)
    if isinstance(preset, str):
        stripped = preset.strip()
        if stripped:
            return stripped
    env_name = os.environ.get("CHANNEL", "").strip()
    return env_name or None
def _extract_channel_name(settings_obj: Any) -> str | None:
"""Normalise the configured channel name extracted from ``settings_obj``."""
if settings_obj is None:
return None
if isinstance(settings_obj, dict):
candidate = settings_obj.get("name")
else:
candidate = getattr(settings_obj, "name", None)
if isinstance(candidate, str):
candidate = candidate.strip()
if candidate:
return candidate
return None
def _normalize_role(role: Any) -> int | None:
"""Convert a channel role descriptor into an integer value."""
if isinstance(role, int):
return role
if isinstance(role, str):
value = role.strip().upper()
if value == "PRIMARY":
return _ROLE_PRIMARY
if value == "SECONDARY":
return _ROLE_SECONDARY
try:
return int(value)
except ValueError:
return None
name_attr = getattr(role, "name", None)
if isinstance(name_attr, str):
return _normalize_role(name_attr)
value_attr = getattr(role, "value", None)
if isinstance(value_attr, int):
return value_attr
try:
return int(role) # type: ignore[arg-type]
except Exception:
return None
def _channel_tuple(channel_obj: Any) -> tuple[int, str] | None:
    """Return ``(index, name)`` for ``channel_obj`` when resolvable.

    Primary channels are pinned to index 0 (falling back to the configured
    primary name); secondary channels take their own ``index`` attribute.
    Non-primary/secondary roles, or unresolvable index/name, yield ``None``.
    """
    role = _normalize_role(getattr(channel_obj, "role", None))
    if role == _ROLE_PRIMARY:
        # The primary channel always occupies slot 0.
        index: int | None = 0
        name = _extract_channel_name(getattr(channel_obj, "settings", None))
        if name is None:
            name = _primary_channel_name()
    elif role == _ROLE_SECONDARY:
        try:
            index = int(getattr(channel_obj, "index", None))
        except Exception:
            index = None
        name = _extract_channel_name(getattr(channel_obj, "settings", None))
    else:
        return None
    if not isinstance(index, int):
        return None
    if not isinstance(name, str) or not name:
        return None
    return index, name
def capture_from_interface(iface: Any) -> None:
    """Populate the channel cache by inspecting ``iface`` when possible.

    Runs at most once: a populated cache (or a missing interface) makes this
    a no-op. Waits for the radio config when the interface supports it, then
    records the first occurrence of each channel index.
    """
    global _CHANNEL_MAPPINGS, _CHANNEL_LOOKUP
    if iface is None or _CHANNEL_MAPPINGS:
        return
    try:
        waiter = getattr(iface, "waitForConfig", None)
        if callable(waiter):
            waiter()
    except Exception:  # pragma: no cover - hardware dependent safeguard
        pass
    local_node = getattr(iface, "localNode", None)
    channels_obj = getattr(local_node, "channels", None) if local_node else None
    entries: list[tuple[int, str]] = []
    known_indices: set[int] = set()
    for descriptor in _iter_channel_objects(channels_obj):
        pair = _channel_tuple(descriptor)
        if pair is None:
            continue
        index, name = pair
        if index not in known_indices:
            known_indices.add(index)
            entries.append((index, name))
    if not entries:
        return
    _CHANNEL_MAPPINGS = tuple(entries)
    _CHANNEL_LOOKUP = dict(_CHANNEL_MAPPINGS)
    config._debug_log(
        "Captured channel metadata",
        context="channels.capture",
        severity="info",
        always=True,
        channels=_CHANNEL_MAPPINGS,
    )
def channel_mappings() -> tuple[tuple[int, str], ...]:
    """Return the cached ``(index, name)`` channel tuples.

    Empty until :func:`capture_from_interface` has populated the cache.
    """
    return _CHANNEL_MAPPINGS
def channel_name(channel_index: int | None) -> str | None:
"""Return the channel name for ``channel_index`` when known."""
if channel_index is None:
return None
return _CHANNEL_LOOKUP.get(int(channel_index))
def _reset_channel_cache() -> None:
"""Clear cached channel data. Intended for use in tests only."""
global _CHANNEL_MAPPINGS, _CHANNEL_LOOKUP
_CHANNEL_MAPPINGS = ()
_CHANNEL_LOOKUP = {}
# Public API of the channels helper; the reset hook is exported for tests.
__all__ = [
    "capture_from_interface",
    "channel_mappings",
    "channel_name",
    "_reset_channel_cache",
]
+153
View File
@@ -0,0 +1,153 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration helpers for the potato-mesh ingestor."""
from __future__ import annotations
import os
import sys
from datetime import datetime, timezone
from types import ModuleType
from typing import Any
DEFAULT_SNAPSHOT_SECS = 60
"""Default interval, in seconds, between state snapshot uploads."""
DEFAULT_CHANNEL_INDEX = 0
"""Default LoRa channel index used when none is specified."""
DEFAULT_RECONNECT_INITIAL_DELAY_SECS = 5.0
"""Initial reconnection delay applied after connection loss."""
DEFAULT_RECONNECT_MAX_DELAY_SECS = 60.0
"""Maximum reconnection backoff delay applied by the ingestor."""
DEFAULT_CLOSE_TIMEOUT_SECS = 5.0
"""Grace period for interface shutdown routines to complete."""
DEFAULT_INACTIVITY_RECONNECT_SECS = float(60 * 60)
"""Interval before forcing a reconnect when no packets are observed."""
DEFAULT_ENERGY_ONLINE_DURATION_SECS = 300.0
"""Duration to stay online before entering a low-power sleep cycle."""
DEFAULT_ENERGY_SLEEP_SECS = float(6 * 60 * 60)
"""Sleep duration used when energy saving mode is active."""
CONNECTION = os.environ.get("CONNECTION") or os.environ.get("MESH_SERIAL")
"""Optional connection target for the mesh interface.
When unset, platform-specific defaults will be inferred by the interface
implementations. The legacy :envvar:`MESH_SERIAL` environment variable is still
accepted for backwards compatibility.
"""
SNAPSHOT_SECS = DEFAULT_SNAPSHOT_SECS
"""Interval, in seconds, between state snapshot uploads."""
CHANNEL_INDEX = int(os.environ.get("CHANNEL_INDEX", str(DEFAULT_CHANNEL_INDEX)))
"""Index of the LoRa channel to select when connecting."""
DEBUG = os.environ.get("DEBUG") == "1"
INSTANCE = os.environ.get("POTATOMESH_INSTANCE", "").rstrip("/")
API_TOKEN = os.environ.get("API_TOKEN", "")
ENERGY_SAVING = os.environ.get("ENERGY_SAVING") == "1"
"""When ``True``, enables the ingestor's energy saving mode."""
LORA_FREQ: int | None = None
"""Frequency of the local node's configured LoRa region in MHz."""
MODEM_PRESET: str | None = None
"""CamelCase modem preset name reported by the local node."""
_RECONNECT_INITIAL_DELAY_SECS = DEFAULT_RECONNECT_INITIAL_DELAY_SECS
_RECONNECT_MAX_DELAY_SECS = DEFAULT_RECONNECT_MAX_DELAY_SECS
_CLOSE_TIMEOUT_SECS = DEFAULT_CLOSE_TIMEOUT_SECS
_INACTIVITY_RECONNECT_SECS = DEFAULT_INACTIVITY_RECONNECT_SECS
_ENERGY_ONLINE_DURATION_SECS = DEFAULT_ENERGY_ONLINE_DURATION_SECS
_ENERGY_SLEEP_SECS = DEFAULT_ENERGY_SLEEP_SECS
# Backwards compatibility shim for legacy imports.
PORT = CONNECTION
def _debug_log(
    message: str,
    *,
    context: str | None = None,
    severity: str = "debug",
    always: bool = False,
    **metadata: Any,
) -> None:
    """Print ``message`` with a UTC timestamp when ``DEBUG`` is enabled.

    Parameters:
        message: Text to display when debug logging is active.
        context: Optional logical component emitting the message.
        severity: Log level label to embed in the formatted output.
        always: When ``True``, bypasses the :data:`DEBUG` guard.
        **metadata: Additional structured log metadata.
    """
    level = severity.lower()
    # Only plain debug-level messages are suppressed; higher severities and
    # always=True calls are emitted regardless of the DEBUG flag.
    if level == "debug" and not always and not DEBUG:
        return
    stamp = datetime.now(timezone.utc).isoformat(timespec="milliseconds")
    stamp = stamp.replace("+00:00", "Z")
    pieces = [f"[{stamp}]", "[potato-mesh]", f"[{level}]"]
    if context:
        pieces.append(f"context={context}")
    # Sort metadata so log lines are deterministic and diff-friendly.
    pieces.extend(f"{key}={value!r}" for key, value in sorted(metadata.items()))
    pieces.append(message)
    print(" ".join(pieces))
# Public configuration surface; private tunables are exported for tests.
__all__ = [
    "CONNECTION",
    "SNAPSHOT_SECS",
    "CHANNEL_INDEX",
    "DEBUG",
    "INSTANCE",
    "API_TOKEN",
    "ENERGY_SAVING",
    "LORA_FREQ",
    "MODEM_PRESET",
    "_RECONNECT_INITIAL_DELAY_SECS",
    "_RECONNECT_MAX_DELAY_SECS",
    "_CLOSE_TIMEOUT_SECS",
    "_INACTIVITY_RECONNECT_SECS",
    "_ENERGY_ONLINE_DURATION_SECS",
    "_ENERGY_SLEEP_SECS",
    "_debug_log",
]
class _ConfigModule(ModuleType):
"""Module proxy that keeps connection aliases synchronised."""
def __setattr__(self, name: str, value: Any) -> None: # type: ignore[override]
"""Propagate CONNECTION/PORT assignments to both attributes."""
if name in {"CONNECTION", "PORT"}:
super().__setattr__("CONNECTION", value)
super().__setattr__("PORT", value)
return
super().__setattr__(name, value)
# Swap this module's class so CONNECTION/PORT writes stay synchronised.
sys.modules[__name__].__class__ = _ConfigModule
+521
View File
@@ -0,0 +1,521 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runtime entry point for the mesh ingestor."""
from __future__ import annotations
import inspect
import signal
import threading
import time
from pubsub import pub
from . import config, handlers, interfaces
# Pubsub topics the packet handler subscribes to: the generic receive topic
# plus the per-payload-type topics published by the Meshtastic library.
_RECEIVE_TOPICS = (
    "meshtastic.receive",
    "meshtastic.receive.text",
    "meshtastic.receive.position",
    "meshtastic.receive.user",
    "meshtastic.receive.POSITION_APP",
    "meshtastic.receive.NODEINFO_APP",
    "meshtastic.receive.NEIGHBORINFO_APP",
    "meshtastic.receive.TEXT_MESSAGE_APP",
    "meshtastic.receive.REACTION_APP",
    "meshtastic.receive.TELEMETRY_APP",
)
def _event_wait_allows_default_timeout() -> bool:
"""Return ``True`` when :meth:`threading.Event.wait` accepts ``timeout``.
The behaviour changed between Python versions; this helper shields the
daemon from ``TypeError`` when the default timeout parameter is absent.
"""
try:
wait_signature = inspect.signature(threading.Event.wait)
except (TypeError, ValueError): # pragma: no cover
return True
parameters = list(wait_signature.parameters.values())
if len(parameters) <= 1:
return True
timeout_parameter = parameters[1]
if timeout_parameter.kind in (
inspect.Parameter.VAR_POSITIONAL,
inspect.Parameter.VAR_KEYWORD,
):
return True
return timeout_parameter.default is not inspect._empty
def _subscribe_receive_topics() -> list[str]:
    """Subscribe the packet handler to all receive-related pubsub topics.

    Returns the list of topics that were successfully subscribed; failures
    are logged and skipped rather than aborting startup.
    """
    subscribed: list[str] = []
    for topic in _RECEIVE_TOPICS:
        try:
            pub.subscribe(handlers.on_receive, topic)
        except Exception as exc:  # pragma: no cover
            config._debug_log(f"failed to subscribe to {topic!r}: {exc}")
        else:
            subscribed.append(topic)
    return subscribed
def _node_items_snapshot(
nodes_obj, retries: int = 3
) -> list[tuple[str, object]] | None:
"""Snapshot ``nodes_obj`` to avoid iteration errors during updates.
Parameters:
nodes_obj: Meshtastic nodes mapping or iterable.
retries: Number of attempts when encountering "dictionary changed"
runtime errors.
Returns:
A list of ``(node_id, node)`` tuples, ``None`` when retries are
exhausted, or an empty list when no nodes exist.
"""
if not nodes_obj:
return []
items_callable = getattr(nodes_obj, "items", None)
if callable(items_callable):
for _ in range(max(1, retries)):
try:
return list(items_callable())
except RuntimeError as err:
if "dictionary changed size during iteration" not in str(err):
raise
time.sleep(0)
return None
if hasattr(nodes_obj, "__iter__") and hasattr(nodes_obj, "__getitem__"):
for _ in range(max(1, retries)):
try:
keys = list(nodes_obj)
return [(key, nodes_obj[key]) for key in keys]
except RuntimeError as err:
if "dictionary changed size during iteration" not in str(err):
raise
time.sleep(0)
return None
return []
def _close_interface(iface_obj) -> None:
    """Close ``iface_obj`` while respecting configured timeouts.

    Closes inline when no positive timeout is configured (or Event.wait lacks
    a default timeout); otherwise closes on a daemon thread and logs when the
    close does not finish within the grace period.
    """
    if iface_obj is None:
        return

    def _do_close() -> None:
        try:
            iface_obj.close()
        except Exception as exc:  # pragma: no cover
            if config.DEBUG:
                config._debug_log(
                    "Error closing mesh interface",
                    context="daemon.close",
                    severity="warn",
                    error_class=exc.__class__.__name__,
                    error_message=str(exc),
                )

    inline = (
        config._CLOSE_TIMEOUT_SECS <= 0
        or not _event_wait_allows_default_timeout()
    )
    if inline:
        _do_close()
        return
    worker = threading.Thread(target=_do_close, name="mesh-close", daemon=True)
    worker.start()
    worker.join(config._CLOSE_TIMEOUT_SECS)
    if worker.is_alive():
        config._debug_log(
            "Mesh interface close timed out",
            context="daemon.close",
            severity="warn",
            timeout_seconds=config._CLOSE_TIMEOUT_SECS,
        )
def _is_ble_interface(iface_obj) -> bool:
"""Return ``True`` when ``iface_obj`` appears to be a BLE interface."""
if iface_obj is None:
return False
iface_cls = getattr(iface_obj, "__class__", None)
if iface_cls is None:
return False
module_name = getattr(iface_cls, "__module__", "") or ""
return "ble_interface" in module_name
def _connected_state(candidate) -> bool | None:
"""Return the connection state advertised by ``candidate``.
Parameters:
candidate: Attribute returned from ``iface.isConnected`` on a
Meshtastic interface. The value may be a boolean, a callable that
yields a boolean, or a :class:`threading.Event` instance.
Returns:
``True`` when the interface is believed to be connected, ``False``
when it appears disconnected, and ``None`` when the state cannot be
determined from the provided attribute.
"""
if candidate is None:
return None
if isinstance(candidate, threading.Event):
return candidate.is_set()
is_set_method = getattr(candidate, "is_set", None)
if callable(is_set_method):
try:
return bool(is_set_method())
except Exception:
return None
if callable(candidate):
try:
return bool(candidate())
except Exception:
return None
try:
return bool(candidate)
except Exception: # pragma: no cover - defensive guard
return None
def main(existing_interface=None) -> None:
    """Run the mesh ingestion daemon until interrupted.

    Parameters:
        existing_interface: Optional pre-built mesh interface; when ``None``
            the daemon creates (and re-creates) interfaces itself.

    The loop connects, pushes an initial node snapshot, then sleeps in
    ``SNAPSHOT_SECS`` intervals while watching for inactivity, disconnects,
    and (when enabled) energy-saving session deadlines.
    """
    subscribed = _subscribe_receive_topics()
    if subscribed:
        config._debug_log(
            "Subscribed to receive topics",
            context="daemon.subscribe",
            severity="info",
            topics=subscribed,
        )
    iface = existing_interface
    resolved_target = None
    retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
    stop = threading.Event()
    initial_snapshot_sent = False
    energy_session_deadline = None
    iface_connected_at: float | None = None
    last_seen_packet_monotonic = handlers.last_packet_monotonic()
    last_inactivity_reconnect: float | None = None
    inactivity_reconnect_secs = max(
        0.0, getattr(config, "_INACTIVITY_RECONNECT_SECS", 0.0)
    )
    energy_saving_enabled = config.ENERGY_SAVING
    energy_online_secs = max(0.0, config._ENERGY_ONLINE_DURATION_SECS)
    energy_sleep_secs = max(0.0, config._ENERGY_SLEEP_SECS)

    def _energy_sleep(reason: str) -> None:
        # Interruptible sleep: wakes early if ``stop`` is set by a signal.
        if not energy_saving_enabled or energy_sleep_secs <= 0:
            return
        if config.DEBUG:
            config._debug_log(
                f"energy saving: {reason}; sleeping for {energy_sleep_secs:g}s"
            )
        stop.wait(energy_sleep_secs)

    def handle_sigterm(*_args) -> None:
        stop.set()

    def handle_sigint(signum, frame) -> None:
        # A second Ctrl-C falls through to Python's default handler.
        if stop.is_set():
            signal.default_int_handler(signum, frame)
            return
        stop.set()

    # Signal handlers may only be installed from the main thread.
    if threading.current_thread() == threading.main_thread():
        signal.signal(signal.SIGINT, handle_sigint)
        signal.signal(signal.SIGTERM, handle_sigterm)
    target = config.INSTANCE or "(no POTATOMESH_INSTANCE)"
    configured_port = config.CONNECTION
    active_candidate = configured_port
    announced_target = False
    config._debug_log(
        "Mesh daemon starting",
        context="daemon.main",
        severity="info",
        target=target,
        port=configured_port or "auto",
        channel=config.CHANNEL_INDEX,
    )
    try:
        while not stop.is_set():
            # --- (Re)connect when no interface is active. ---
            if iface is None:
                try:
                    if active_candidate:
                        iface, resolved_target = interfaces._create_serial_interface(
                            active_candidate
                        )
                    else:
                        iface, resolved_target = interfaces._create_default_interface()
                        active_candidate = resolved_target
                    interfaces._ensure_radio_metadata(iface)
                    interfaces._ensure_channel_metadata(iface)
                    retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
                    initial_snapshot_sent = False
                    if not announced_target and resolved_target:
                        config._debug_log(
                            "Using mesh interface",
                            context="daemon.interface",
                            severity="info",
                            target=resolved_target,
                        )
                        announced_target = True
                    if energy_saving_enabled and energy_online_secs > 0:
                        energy_session_deadline = time.monotonic() + energy_online_secs
                    else:
                        energy_session_deadline = None
                    iface_connected_at = time.monotonic()
                    # Seed the inactivity tracking from the connection time so a
                    # reconnect is given a full inactivity window even when the
                    # handler still reports the previous packet timestamp.
                    last_seen_packet_monotonic = iface_connected_at
                    last_inactivity_reconnect = None
                except interfaces.NoAvailableMeshInterface as exc:
                    # Unrecoverable: no candidate interface exists at all.
                    config._debug_log(
                        "No mesh interface available",
                        context="daemon.interface",
                        severity="error",
                        error_message=str(exc),
                    )
                    _close_interface(iface)
                    raise SystemExit(1) from exc
                except Exception as exc:
                    candidate_desc = active_candidate or "auto"
                    config._debug_log(
                        "Failed to create mesh interface",
                        context="daemon.interface",
                        severity="warn",
                        candidate=candidate_desc,
                        error_class=exc.__class__.__name__,
                        error_message=str(exc),
                    )
                    # Without an explicit port, fall back to auto-discovery.
                    if configured_port is None:
                        active_candidate = None
                        announced_target = False
                    # Exponential backoff capped at the configured maximum.
                    stop.wait(retry_delay)
                    if config._RECONNECT_MAX_DELAY_SECS > 0:
                        retry_delay = min(
                            (
                                retry_delay * 2
                                if retry_delay
                                else config._RECONNECT_INITIAL_DELAY_SECS
                            ),
                            config._RECONNECT_MAX_DELAY_SECS,
                        )
                    continue
            # --- Energy-saving session management. ---
            if energy_saving_enabled and iface is not None:
                if (
                    energy_session_deadline is not None
                    and time.monotonic() >= energy_session_deadline
                ):
                    config._debug_log(
                        "Energy saving disconnect",
                        context="daemon.energy",
                        severity="info",
                    )
                    _close_interface(iface)
                    iface = None
                    announced_target = False
                    initial_snapshot_sent = False
                    energy_session_deadline = None
                    _energy_sleep("disconnected after session")
                    continue
                if (
                    _is_ble_interface(iface)
                    and getattr(iface, "client", object()) is None
                ):
                    config._debug_log(
                        "Energy saving BLE disconnect",
                        context="daemon.energy",
                        severity="info",
                    )
                    _close_interface(iface)
                    iface = None
                    announced_target = False
                    initial_snapshot_sent = False
                    energy_session_deadline = None
                    _energy_sleep("BLE client disconnected")
                    continue
            # --- Push the initial node snapshot after each (re)connect. ---
            if not initial_snapshot_sent:
                try:
                    nodes = getattr(iface, "nodes", {}) or {}
                    node_items = _node_items_snapshot(nodes)
                    if node_items is None:
                        config._debug_log(
                            "Skipping node snapshot due to concurrent modification",
                            context="daemon.snapshot",
                        )
                    else:
                        processed_snapshot_item = False
                        for node_id, node in node_items:
                            processed_snapshot_item = True
                            try:
                                handlers.upsert_node(node_id, node)
                            except Exception as exc:
                                config._debug_log(
                                    "Failed to update node snapshot",
                                    context="daemon.snapshot",
                                    severity="warn",
                                    node_id=node_id,
                                    error_class=exc.__class__.__name__,
                                    error_message=str(exc),
                                )
                            if config.DEBUG:
                                config._debug_log(
                                    "Snapshot node payload",
                                    context="daemon.snapshot",
                                    node=node,
                                )
                        # Only mark done once at least one node was processed.
                        if processed_snapshot_item:
                            initial_snapshot_sent = True
                except Exception as exc:
                    config._debug_log(
                        "Snapshot refresh failed",
                        context="daemon.snapshot",
                        severity="warn",
                        error_class=exc.__class__.__name__,
                        error_message=str(exc),
                    )
                    _close_interface(iface)
                    iface = None
                    stop.wait(retry_delay)
                    if config._RECONNECT_MAX_DELAY_SECS > 0:
                        retry_delay = min(
                            (
                                retry_delay * 2
                                if retry_delay
                                else config._RECONNECT_INITIAL_DELAY_SECS
                            ),
                            config._RECONNECT_MAX_DELAY_SECS,
                        )
                    continue
            # --- Inactivity / disconnect watchdog. ---
            if iface is not None and inactivity_reconnect_secs > 0:
                now_monotonic = time.monotonic()
                iface_activity = handlers.last_packet_monotonic()
                # Clamp stale pre-connection activity to the connect time.
                if (
                    iface_activity is not None
                    and iface_connected_at is not None
                    and iface_activity < iface_connected_at
                ):
                    iface_activity = iface_connected_at
                if iface_activity is not None and (
                    last_seen_packet_monotonic is None
                    or iface_activity > last_seen_packet_monotonic
                ):
                    last_seen_packet_monotonic = iface_activity
                    last_inactivity_reconnect = None
                latest_activity = iface_activity
                if latest_activity is None and iface_connected_at is not None:
                    latest_activity = iface_connected_at
                if latest_activity is None:
                    latest_activity = now_monotonic
                inactivity_elapsed = now_monotonic - latest_activity
                connected_attr = getattr(iface, "isConnected", None)
                believed_disconnected = False
                connected_state = _connected_state(connected_attr)
                if connected_state is None:
                    # State indeterminate: fall back to probing the attribute
                    # directly, defaulting to "connected" on any failure.
                    if callable(connected_attr):
                        try:
                            believed_disconnected = not bool(connected_attr())
                        except Exception:
                            believed_disconnected = False
                    elif connected_attr is not None:
                        try:
                            believed_disconnected = not bool(connected_attr)
                        except Exception:  # pragma: no cover - defensive guard
                            believed_disconnected = False
                else:
                    believed_disconnected = not connected_state
                should_reconnect = believed_disconnected or (
                    inactivity_elapsed >= inactivity_reconnect_secs
                )
                if should_reconnect:
                    # Rate-limit forced reconnects to one per inactivity window.
                    if (
                        last_inactivity_reconnect is None
                        or now_monotonic - last_inactivity_reconnect
                        >= inactivity_reconnect_secs
                    ):
                        reason = (
                            "disconnected"
                            if believed_disconnected
                            else f"no data for {inactivity_elapsed:.0f}s"
                        )
                        config._debug_log(
                            "Mesh interface inactivity detected",
                            context="daemon.interface",
                            severity="warn",
                            reason=reason,
                        )
                        last_inactivity_reconnect = now_monotonic
                        _close_interface(iface)
                        iface = None
                        announced_target = False
                        initial_snapshot_sent = False
                        energy_session_deadline = None
                        iface_connected_at = None
                        continue
            retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
            stop.wait(config.SNAPSHOT_SECS)
    except KeyboardInterrupt:  # pragma: no cover - interactive only
        config._debug_log(
            "Received KeyboardInterrupt; shutting down",
            context="daemon.main",
            severity="info",
        )
        stop.set()
    finally:
        _close_interface(iface)
# Names exported for tests and for the package entry point.
__all__ = [
    "_RECEIVE_TOPICS",
    "_event_wait_allows_default_timeout",
    "_node_items_snapshot",
    "_subscribe_receive_topics",
    "_is_ble_interface",
    "_connected_state",
    "main",
]
File diff suppressed because it is too large Load Diff
+785
View File
@@ -0,0 +1,785 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mesh interface discovery helpers for interacting with Meshtastic hardware."""
from __future__ import annotations
import contextlib
import glob
import importlib
import ipaddress
import re
import sys
import urllib.parse
from collections.abc import Mapping
from typing import TYPE_CHECKING, Any
try: # pragma: no cover - dependency optional in tests
import meshtastic # type: ignore
except Exception: # pragma: no cover - dependency optional in tests
meshtastic = None # type: ignore[assignment]
from . import channels, config, serialization
def _ensure_mapping(value) -> Mapping | None:
"""Return ``value`` as a mapping when conversion is possible."""
if isinstance(value, Mapping):
return value
if hasattr(value, "__dict__") and isinstance(value.__dict__, Mapping):
return value.__dict__
with contextlib.suppress(Exception):
converted = serialization._node_to_dict(value)
if isinstance(converted, Mapping):
return converted
return None
def _candidate_node_id(mapping: Mapping | None) -> str | None:
    """Extract a canonical node identifier from ``mapping`` when present.

    Searches, in priority order: well-known top-level keys, the nested
    ``user`` section, the ``decoded``/``payload`` sections (recursively),
    the ``packet``/``meta``/``info`` wrappers, and finally every remaining
    value (descending into lists/tuples) as a last resort. Returns ``None``
    when no candidate canonicalises to a node id.
    """
    if mapping is None:
        return None
    # Most specific identifier keys first; the order defines precedence.
    primary_keys = (
        "id",
        "userId",
        "user_id",
        "fromId",
        "from_id",
        "from",
        "nodeId",
        "node_id",
        "nodeNum",
        "node_num",
        "num",
    )
    for key in primary_keys:
        with contextlib.suppress(Exception):
            node_id = serialization._canonical_node_id(mapping.get(key))
            if node_id:
                return node_id
    user_section = _ensure_mapping(mapping.get("user"))
    if user_section is not None:
        for key in ("id", "userId", "user_id", "num", "nodeNum", "node_num"):
            with contextlib.suppress(Exception):
                node_id = serialization._canonical_node_id(user_section.get(key))
                if node_id:
                    return node_id
    decoded_section = _ensure_mapping(mapping.get("decoded"))
    if decoded_section is not None:
        node_id = _candidate_node_id(decoded_section)
        if node_id:
            return node_id
    payload_section = _ensure_mapping(mapping.get("payload"))
    if payload_section is not None:
        node_id = _candidate_node_id(payload_section)
        if node_id:
            return node_id
    for key in ("packet", "meta", "info"):
        node_id = _candidate_node_id(_ensure_mapping(mapping.get(key)))
        if node_id:
            return node_id
    # Exhaustive fallback: scan every value, descending into sequences.
    for value in mapping.values():
        if isinstance(value, (list, tuple)):
            for item in value:
                node_id = _candidate_node_id(_ensure_mapping(item))
                if node_id:
                    return node_id
        else:
            node_id = _candidate_node_id(_ensure_mapping(value))
            if node_id:
                return node_id
    return None
def _normalise_nodeinfo_packet(packet) -> dict | None:
    """Return a dictionary view of ``packet`` with a guaranteed ``id`` when known.

    Returns ``None`` when ``packet`` cannot be coerced into a dictionary.
    """
    mapping = _ensure_mapping(packet)
    if mapping is None:
        return None
    try:
        normalised: dict = dict(mapping)
    except Exception:
        # Some mapping-like objects reject dict(); copy key-by-key instead.
        try:
            normalised = {key: mapping[key] for key in mapping}
        except Exception:
            return None
    node_id = _candidate_node_id(normalised)
    if node_id and normalised.get("id") != node_id:
        normalised["id"] = node_id
    return normalised
if TYPE_CHECKING: # pragma: no cover - import only used for type checking
from meshtastic.ble_interface import BLEInterface as _BLEInterface
BLEInterface = None
def _patch_meshtastic_nodeinfo_handler() -> None:
    """Ensure Meshtastic nodeinfo packets always include an ``id`` field.

    Wraps both the module-level ``_onNodeInfoReceive`` callback and the
    ``NodeInfoHandler`` class so upstream code never fails with
    ``KeyError('id')``. Silently does nothing when meshtastic is missing.
    """
    module = sys.modules.get("meshtastic", meshtastic)
    if module is None:
        with contextlib.suppress(Exception):
            module = importlib.import_module("meshtastic")
    if module is None:
        return
    # Keep this file's module-level alias in sync with the live module.
    globals()["meshtastic"] = module
    original = getattr(module, "_onNodeInfoReceive", None)
    if not callable(original):
        return
    mesh_interface_module = getattr(module, "mesh_interface", None)
    if mesh_interface_module is None:
        with contextlib.suppress(Exception):
            mesh_interface_module = importlib.import_module("meshtastic.mesh_interface")
    # Guard flag prevents double-wrapping on repeated patch calls.
    if not getattr(original, "_potato_mesh_safe_wrapper", False):
        module._onNodeInfoReceive = _build_safe_nodeinfo_callback(original)
    _patch_nodeinfo_handler_class(mesh_interface_module, module)
def _build_safe_nodeinfo_callback(original):
"""Return a wrapper that injects a missing ``id`` before dispatching."""
def _safe_on_node_info_receive(iface, packet): # type: ignore[override]
normalised = _normalise_nodeinfo_packet(packet)
if normalised is not None:
packet = normalised
try:
return original(iface, packet)
except KeyError as exc: # pragma: no cover - defensive only
if exc.args and exc.args[0] == "id":
return None
raise
_safe_on_node_info_receive._potato_mesh_safe_wrapper = True # type: ignore[attr-defined]
return _safe_on_node_info_receive
def _update_nodeinfo_handler_aliases(original, replacement) -> None:
"""Ensure Meshtastic modules reference the patched ``NodeInfoHandler``."""
for module_name, module in list(sys.modules.items()):
if not module_name.startswith("meshtastic"):
continue
existing = getattr(module, "NodeInfoHandler", None)
if existing is original:
setattr(module, "NodeInfoHandler", replacement)
def _patch_nodeinfo_handler_class(
    mesh_interface_module, meshtastic_module=None
) -> None:
    """Wrap ``NodeInfoHandler.onReceive`` to normalise packets before callbacks.

    Parameters:
        mesh_interface_module: The ``meshtastic.mesh_interface`` module, or
            ``None`` (in which case nothing is patched).
        meshtastic_module: Optional top-level ``meshtastic`` module whose
            ``NodeInfoHandler`` alias should also be kept in sync.
    """
    if mesh_interface_module is None:
        return
    handler_class = getattr(mesh_interface_module, "NodeInfoHandler", None)
    if handler_class is None:
        return
    # Idempotence guard: the subclass below carries this marker.
    if getattr(handler_class, "_potato_mesh_safe_wrapper", False):
        return
    original_on_receive = getattr(handler_class, "onReceive", None)
    if not callable(original_on_receive):
        return

    class _SafeNodeInfoHandler(handler_class):  # type: ignore[misc]
        """Subclass that guards against missing node identifiers."""

        def onReceive(self, iface, packet):  # type: ignore[override]
            normalised = _normalise_nodeinfo_packet(packet)
            if normalised is not None:
                packet = normalised
            try:
                return super().onReceive(iface, packet)
            except KeyError as exc:  # pragma: no cover - defensive only
                if exc.args and exc.args[0] == "id":
                    return None
                raise

    # Masquerade as the original class so reprs/logging stay familiar.
    _SafeNodeInfoHandler.__name__ = handler_class.__name__
    _SafeNodeInfoHandler.__qualname__ = getattr(
        handler_class, "__qualname__", handler_class.__name__
    )
    _SafeNodeInfoHandler.__module__ = getattr(
        handler_class, "__module__", mesh_interface_module.__name__
    )
    _SafeNodeInfoHandler.__doc__ = getattr(
        handler_class, "__doc__", _SafeNodeInfoHandler.__doc__
    )
    _SafeNodeInfoHandler._potato_mesh_safe_wrapper = True  # type: ignore[attr-defined]
    setattr(mesh_interface_module, "NodeInfoHandler", _SafeNodeInfoHandler)
    if meshtastic_module is None:
        meshtastic_module = globals().get("meshtastic")
    if meshtastic_module is not None:
        existing_top = getattr(meshtastic_module, "NodeInfoHandler", None)
        if existing_top is handler_class:
            setattr(meshtastic_module, "NodeInfoHandler", _SafeNodeInfoHandler)
    _update_nodeinfo_handler_aliases(handler_class, _SafeNodeInfoHandler)
_patch_meshtastic_nodeinfo_handler()
try: # pragma: no cover - optional dependency may be unavailable
from meshtastic.serial_interface import SerialInterface # type: ignore
except Exception: # pragma: no cover - optional dependency may be unavailable
SerialInterface = None # type: ignore[assignment]
try: # pragma: no cover - optional dependency may be unavailable
from meshtastic.tcp_interface import TCPInterface # type: ignore
except Exception: # pragma: no cover - optional dependency may be unavailable
TCPInterface = None # type: ignore[assignment]
def _patch_meshtastic_ble_receive_loop() -> None:
    """Prevent ``UnboundLocalError`` crashes in Meshtastic's BLE reader.

    Replaces ``BLEInterface._receiveFromRadioImpl`` with a defensive copy
    that always initialises ``payload`` before use and shuts the loop down
    cleanly on disconnects. No-op when the BLE module or its collaborators
    are unavailable.
    """
    try:
        from meshtastic import ble_interface as _ble_interface_module  # type: ignore
    except Exception:  # pragma: no cover - dependency optional in tests
        return
    ble_class = getattr(_ble_interface_module, "BLEInterface", None)
    if ble_class is None:
        return
    original = getattr(ble_class, "_receiveFromRadioImpl", None)
    if not callable(original):
        return
    # Idempotence guard: never wrap an already-patched implementation.
    if getattr(original, "_potato_mesh_safe_wrapper", False):
        return
    # Borrow the module's own collaborators so behaviour matches upstream.
    FROMRADIO_UUID = getattr(_ble_interface_module, "FROMRADIO_UUID", None)
    BleakDBusError = getattr(_ble_interface_module, "BleakDBusError", ())
    BleakError = getattr(_ble_interface_module, "BleakError", ())
    logger = getattr(_ble_interface_module, "logger", None)
    time = getattr(_ble_interface_module, "time", None)
    if not FROMRADIO_UUID or logger is None or time is None:
        return

    def _safe_receive_from_radio(self):  # type: ignore[override]
        while self._want_receive:
            if self.should_read:
                self.should_read = False
                retries: int = 0
                while self._want_receive:
                    if self.client is None:
                        logger.debug("BLE client is None, shutting down")
                        self._want_receive = False
                        continue
                    # Always start from an empty payload so a failed read can
                    # never leave the local unbound.
                    payload: bytes = b""
                    try:
                        payload = bytes(self.client.read_gatt_char(FROMRADIO_UUID))
                    except BleakDBusError as exc:
                        logger.debug("Device disconnected, shutting down %s", exc)
                        self._want_receive = False
                        payload = b""
                    except BleakError as exc:
                        if "Not connected" in str(exc):
                            logger.debug("Device disconnected, shutting down %s", exc)
                            self._want_receive = False
                            payload = b""
                        else:
                            raise ble_class.BLEError("Error reading BLE") from exc
                    if not payload:
                        if not self._want_receive:
                            break
                        # Empty reads get a few short retries before giving up
                        # on this read cycle.
                        if retries < 5:
                            time.sleep(0.1)
                            retries += 1
                            continue
                        break
                    logger.debug("FROMRADIO read: %s", payload.hex())
                    self._handleFromRadio(payload)
            else:
                time.sleep(0.01)

    _safe_receive_from_radio._potato_mesh_safe_wrapper = True  # type: ignore[attr-defined]
    ble_class._receiveFromRadioImpl = _safe_receive_from_radio
_patch_meshtastic_ble_receive_loop()
def _has_field(message: Any, field_name: str) -> bool:
"""Return ``True`` when ``message`` advertises ``field_name`` via ``HasField``."""
if message is None:
return False
has_field = getattr(message, "HasField", None)
if callable(has_field):
try:
return bool(has_field(field_name))
except Exception: # pragma: no cover - defensive guard
return False
return hasattr(message, field_name)
def _enum_name_from_field(message: Any, field_name: str, value: Any) -> str | None:
"""Return the enum name for ``value`` using ``message`` descriptors."""
descriptor = getattr(message, "DESCRIPTOR", None)
if descriptor is None:
return None
fields_by_name = getattr(descriptor, "fields_by_name", {})
field_desc = fields_by_name.get(field_name)
if field_desc is None:
return None
enum_type = getattr(field_desc, "enum_type", None)
if enum_type is None:
return None
enum_values = getattr(enum_type, "values_by_number", {})
enum_value = enum_values.get(value)
if enum_value is None:
return None
return getattr(enum_value, "name", None)
def _resolve_lora_message(local_config: Any) -> Any | None:
    """Locate the LoRa sub-config on ``local_config``.

    Checks, in order: a ``lora`` protobuf field on the config itself, a
    nested ``radio.lora`` section, and finally a plain ``lora`` attribute.
    Returns ``None`` when nothing matches.
    """
    if local_config is None:
        return None
    if _has_field(local_config, "lora"):
        lora = getattr(local_config, "lora", None)
        if lora is not None:
            return lora
    radio = getattr(local_config, "radio", None)
    if radio is not None and (_has_field(radio, "lora") or hasattr(radio, "lora")):
        return getattr(radio, "lora", None)
    if hasattr(local_config, "lora"):
        return local_config.lora
    return None
def _region_frequency(lora_message: Any) -> int | None:
    """Derive the LoRa region frequency in MHz from ``lora_message``.

    Numeric tokens are pulled out of the region enum name (for example
    ``EU_868`` yields ``868``); the first token >= 100 wins, otherwise the
    last token found is used. As a final fallback, raw integer region
    values >= 100 are returned directly.
    """
    if lora_message is None:
        return None
    region = getattr(lora_message, "region", None)
    if region is None:
        return None
    name = _enum_name_from_field(lora_message, "region", region)
    if name:
        numbers: list[int] = []
        for token in re.findall(r"\d+", name):
            try:
                numbers.append(int(token))
            except ValueError:  # pragma: no cover - regex guarantees digits
                continue
        for freq in numbers:
            if freq >= 100:
                return freq
        if numbers:
            return numbers[-1]
    if isinstance(region, int) and region >= 100:
        return region
    return None
def _camelcase_enum_name(name: str | None) -> str | None:
"""Convert ``name`` from ``SCREAMING_SNAKE`` to ``CamelCase``."""
if not name:
return None
parts = re.split(r"[^0-9A-Za-z]+", name.strip())
camel_parts = [part.capitalize() for part in parts if part]
if not camel_parts:
return None
return "".join(camel_parts)
def _modem_preset(lora_message: Any) -> str | None:
    """Return the CamelCase modem preset configured on ``lora_message``.

    Prefers descriptor-declared fields (``modem_preset`` then ``preset``)
    and falls back to plain attributes of the same names, in that order.
    """
    if lora_message is None:
        return None
    descriptor = getattr(lora_message, "DESCRIPTOR", None)
    declared = getattr(descriptor, "fields_by_name", {}) if descriptor else {}
    field = None
    for candidate in ("modem_preset", "preset"):
        if candidate in declared:
            field = candidate
            break
    if field is None:
        for candidate in ("modem_preset", "preset"):
            if hasattr(lora_message, candidate):
                field = candidate
                break
    if field is None:
        return None
    value = getattr(lora_message, field, None)
    if value is None:
        return None
    enum_name = _enum_name_from_field(lora_message, field, value)
    if isinstance(enum_name, str) and enum_name:
        return _camelcase_enum_name(enum_name)
    if isinstance(value, str) and value:
        return _camelcase_enum_name(value)
    return None
def _ensure_radio_metadata(iface: Any) -> None:
    """Populate cached LoRa metadata by inspecting ``iface`` when available.

    Parameters:
        iface: Connected Meshtastic interface, or ``None`` to no-op.

    Side effects:
        Sets ``config.LORA_FREQ`` and/or ``config.MODEM_PRESET`` when values
        can be derived and are not already set (existing values are never
        overwritten), then emits one debug log entry if anything changed.
    """
    if iface is None:
        return
    try:
        # Wait for the radio to deliver its config before inspecting it;
        # hardware/firmware quirks can raise here, in which case we proceed
        # with whatever state the interface already holds.
        wait_for_config = getattr(iface, "waitForConfig", None)
        if callable(wait_for_config):
            wait_for_config()
    except Exception:  # pragma: no cover - hardware dependent guard
        pass
    local_node = getattr(iface, "localNode", None)
    local_config = getattr(local_node, "localConfig", None) if local_node else None
    lora_message = _resolve_lora_message(local_config)
    if lora_message is None:
        return
    frequency = _region_frequency(lora_message)
    preset = _modem_preset(lora_message)
    updated = False
    # Only fill values that are still unset so previously captured or
    # user-provided configuration wins.
    if frequency is not None and getattr(config, "LORA_FREQ", None) is None:
        config.LORA_FREQ = frequency
        updated = True
    if preset is not None and getattr(config, "MODEM_PRESET", None) is None:
        config.MODEM_PRESET = preset
        updated = True
    if updated:
        config._debug_log(
            "Captured LoRa radio metadata",
            context="interfaces.ensure_radio_metadata",
            severity="info",
            always=True,
            lora_freq=frequency,
            modem_preset=preset,
        )
def _ensure_channel_metadata(iface: Any) -> None:
    """Capture channel metadata from ``iface``, logging (not raising) failures."""
    if iface is None:
        return
    try:
        channels.capture_from_interface(iface)
    except Exception as exc:  # pragma: no cover - defensive instrumentation
        config._debug_log(
            "Failed to capture channel metadata",
            context="interfaces.ensure_channel_metadata",
            severity="warn",
            error_class=type(exc).__name__,
            error_message=str(exc),
        )
# Default TCP port used by Meshtastic network-attached radios.
_DEFAULT_TCP_PORT = 4403
# Fallback TCP target probed when auto-discovery finds no usable serial device.
_DEFAULT_TCP_TARGET = "http://127.0.0.1"
# Glob patterns covering common Linux and macOS serial device names.
_DEFAULT_SERIAL_PATTERNS = (
    "/dev/ttyACM*",
    "/dev/ttyUSB*",
    "/dev/tty.usbmodem*",
    "/dev/tty.usbserial*",
    "/dev/cu.usbmodem*",
    "/dev/cu.usbserial*",
)
# Matches colon-separated six-octet BLE MAC addresses, e.g. "AA:BB:CC:DD:EE:FF".
_BLE_ADDRESS_RE = re.compile(r"^(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$")
class _DummySerialInterface:
"""In-memory replacement for ``meshtastic.serial_interface.SerialInterface``."""
def __init__(self) -> None:
self.nodes: dict = {}
def close(self) -> None: # pragma: no cover - nothing to close
pass
def _parse_ble_target(value: str) -> str | None:
    """Return an uppercase BLE MAC address when ``value`` matches the format.

    Parameters:
        value: User-provided target string.

    Returns:
        The normalised MAC address or ``None`` when validation fails.
    """
    candidate = value.strip() if value else ""
    if not candidate:
        return None
    if _BLE_ADDRESS_RE.fullmatch(candidate) is None:
        return None
    return candidate.upper()
def _parse_network_target(value: str) -> tuple[str, int] | None:
    """Return ``(host, port)`` when ``value`` is a numeric IP address string.

    Only literal IPv4 or IPv6 addresses are accepted, optionally paired with a
    port or scheme. Callers that start from hostnames should resolve them to an
    address before invoking this helper.

    Parameters:
        value: Numeric IP literal or URL describing the TCP interface.

    Returns:
        A ``(host, port)`` tuple or ``None`` when parsing fails. The port
        defaults to ``_DEFAULT_TCP_PORT`` when absent.
    """
    if not value:
        return None
    value = value.strip()
    if not value:
        return None
    def _validated_result(host: str | None, port: int | None) -> tuple[str, int] | None:
        # Accept only literal IP addresses; hostnames are rejected here.
        if not host:
            return None
        try:
            ipaddress.ip_address(host)
        except ValueError:
            return None
        return host, port or _DEFAULT_TCP_PORT
    parsed_values = []
    if "://" in value:
        parsed_values.append(urllib.parse.urlparse(value, scheme="tcp"))
    # Re-parse with a netloc marker so bare "host:port" strings resolve too.
    parsed_values.append(urllib.parse.urlparse(f"//{value}", scheme="tcp"))
    for parsed in parsed_values:
        try:
            port = parsed.port
        except ValueError:
            # ``.port`` raises for out-of-range or non-numeric port text.
            port = None
        result = _validated_result(parsed.hostname, port)
        if result:
            return result
    # Manual "host:port" split for inputs urlparse mangled; bracketed IPv6
    # literals are excluded (they are handled by the urlparse paths above).
    if value.count(":") == 1 and not value.startswith("["):
        host, _, port_text = value.partition(":")
        try:
            port = int(port_text) if port_text else None
        except ValueError:
            port = None
        result = _validated_result(host, port)
        if result:
            return result
    # Final attempt: a bare address with no port at all.
    return _validated_result(value, None)
def _load_ble_interface():
    """Return :class:`meshtastic.ble_interface.BLEInterface` when available.

    The class is imported lazily and cached in the module-level
    ``BLEInterface`` global; already-imported mesh modules are then patched
    so they observe the resolved class as well.

    Returns:
        The resolved BLE interface class.

    Raises:
        RuntimeError: If the BLE dependencies are not installed.
    """
    global BLEInterface
    if BLEInterface is not None:
        # Already resolved by a previous call.
        return BLEInterface
    try:
        from meshtastic.ble_interface import BLEInterface as _resolved_interface
    except ImportError as exc:  # pragma: no cover - exercised in non-BLE envs
        raise RuntimeError(
            "BLE interface requested but the Meshtastic BLE dependencies are not installed. "
            "Install the 'meshtastic[ble]' extra to enable BLE support."
        ) from exc
    BLEInterface = _resolved_interface
    try:
        import sys
        # Keep previously-imported ingestor modules in sync so their
        # module-level ``BLEInterface`` name is no longer ``None``.
        for module_name in ("data.mesh_ingestor", "data.mesh"):
            mesh_module = sys.modules.get(module_name)
            if mesh_module is not None:
                setattr(mesh_module, "BLEInterface", BLEInterface)
    except Exception:  # pragma: no cover - defensive only
        pass
    return _resolved_interface
def _create_serial_interface(port: str) -> tuple[object, str]:
    """Return an appropriate mesh interface for ``port``.

    Parameters:
        port: User-supplied target which may denote a serial device, a BLE
            MAC address, a TCP endpoint, or a sentinel disabling hardware.

    Returns:
        ``(interface, resolved_target)`` describing the created interface.
    """
    target = (port or "").strip()
    # Sentinel values disable hardware access entirely.
    if target.lower() in {"", "mock", "none", "null", "disabled"}:
        config._debug_log(
            "Using dummy serial interface",
            context="interfaces.serial",
            port=target,
        )
        return _DummySerialInterface(), "mock"
    mac = _parse_ble_target(target)
    if mac:
        config._debug_log(
            "Using BLE interface",
            context="interfaces.ble",
            address=mac,
        )
        return _load_ble_interface()(address=mac), mac
    endpoint = _parse_network_target(target)
    if endpoint:
        host, tcp_port = endpoint
        config._debug_log(
            "Using TCP interface",
            context="interfaces.tcp",
            host=host,
            port=tcp_port,
        )
        return (
            TCPInterface(hostname=host, portNumber=tcp_port),
            f"tcp://{host}:{tcp_port}",
        )
    config._debug_log(
        "Using serial interface",
        context="interfaces.serial",
        port=target,
    )
    return SerialInterface(devPath=target), target
class NoAvailableMeshInterface(RuntimeError):
    """Raised when no default mesh interface can be created.

    When auto-discovery exhausted all serial and TCP candidates, the last
    underlying connection error is chained as ``__cause__``.
    """
def _default_serial_targets() -> list[str]:
    """Return candidate serial device paths for auto-discovery.

    Expands the known glob patterns in order, de-duplicating while keeping
    first-seen order, and always ensures ``/dev/ttyACM0`` is included.
    """
    # A dict preserves insertion order and gives O(1) de-duplication.
    ordered: dict[str, None] = {}
    for pattern in _DEFAULT_SERIAL_PATTERNS:
        for device in sorted(glob.glob(pattern)):
            ordered.setdefault(device, None)
    ordered.setdefault("/dev/ttyACM0", None)
    return list(ordered)
def _create_default_interface() -> tuple[object, str]:
    """Attempt to create the default mesh interface, raising on failure.

    Tries every discovered serial candidate first, then falls back to the
    default TCP target; each failure is logged and collected so the final
    error message summarises everything that was attempted.

    Returns:
        ``(interface, resolved_target)`` for the discovered connection.

    Raises:
        NoAvailableMeshInterface: When no usable connection can be created.
            The last underlying error is chained as ``__cause__``.
    """
    errors: list[tuple[str, Exception]] = []
    for candidate in _default_serial_targets():
        try:
            return _create_serial_interface(candidate)
        except Exception as exc:  # pragma: no cover - hardware dependent
            errors.append((candidate, exc))
            config._debug_log(
                "Failed to open serial candidate",
                context="interfaces.auto_discovery",
                target=candidate,
                error_class=exc.__class__.__name__,
                error_message=str(exc),
            )
    try:
        return _create_serial_interface(_DEFAULT_TCP_TARGET)
    except Exception as exc:  # pragma: no cover - network dependent
        errors.append((_DEFAULT_TCP_TARGET, exc))
        config._debug_log(
            "Failed to open TCP fallback",
            context="interfaces.auto_discovery",
            target=_DEFAULT_TCP_TARGET,
            error_class=exc.__class__.__name__,
            error_message=str(exc),
        )
    if errors:
        # Summarise every attempted target so the operator can see the full
        # discovery trail in one message.
        summary = "; ".join(f"{target}: {error}" for target, error in errors)
        raise NoAvailableMeshInterface(
            f"no mesh interface available ({summary})"
        ) from errors[-1][1]
    raise NoAvailableMeshInterface("no mesh interface available")
# Public surface of this module: interface factories, discovery helpers,
# connection constants and the error type raised on discovery failure.
__all__ = [
    "BLEInterface",
    "NoAvailableMeshInterface",
    "_ensure_channel_metadata",
    "_ensure_radio_metadata",
    "_DummySerialInterface",
    "_DEFAULT_TCP_PORT",
    "_DEFAULT_TCP_TARGET",
    "_create_default_interface",
    "_create_serial_interface",
    "_default_serial_targets",
    "_load_ble_interface",
    "_parse_ble_target",
    "_parse_network_target",
    "SerialInterface",
    "TCPInterface",
]
+270
View File
@@ -0,0 +1,270 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Priority queue for POST operations."""
from __future__ import annotations
import heapq
import itertools
import json
import threading
import urllib.request
from dataclasses import dataclass, field
from typing import Callable, Iterable, Mapping, Tuple
from . import config
def _stringify_payload_value(value: object) -> str:
"""Return a stable string representation for ``value``."""
if isinstance(value, Mapping):
try:
return json.dumps(
{
str(key): value[key]
for key in sorted(value, key=lambda item: str(item))
},
sort_keys=True,
ensure_ascii=False,
default=str,
)
except Exception: # pragma: no cover - defensive guard
return str(value)
if isinstance(value, (list, tuple)):
try:
return json.dumps(list(value), ensure_ascii=False, default=str)
except Exception: # pragma: no cover - defensive guard
return str(value)
if isinstance(value, set):
try:
return json.dumps(sorted(value, key=str), ensure_ascii=False, default=str)
except Exception: # pragma: no cover - defensive guard
return str(value)
if isinstance(value, bytes):
return json.dumps(value.decode("utf-8", "replace"), ensure_ascii=False)
if isinstance(value, str):
return json.dumps(value, ensure_ascii=False)
return str(value)
def _payload_key_value_pairs(payload: Mapping[str, object]) -> str:
    """Serialise ``payload`` into space-separated ``key=value`` debug pairs."""
    rendered: list[str] = []
    for key in sorted(payload):
        value = payload[key]
        try:
            text = _stringify_payload_value(value)
        except Exception:  # pragma: no cover - defensive guard
            text = str(value)
        rendered.append(f"{key}={text}")
    return " ".join(rendered)
# Relative scheduling priorities for queued POSTs. Lower values drain first,
# so chat messages preempt neighbor/position/telemetry updates, and bulk node
# upserts run last before the catch-all default.
_MESSAGE_POST_PRIORITY = 10
_NEIGHBOR_POST_PRIORITY = 20
_POSITION_POST_PRIORITY = 30
_TELEMETRY_POST_PRIORITY = 40
_NODE_POST_PRIORITY = 50
_DEFAULT_POST_PRIORITY = 90
@dataclass
class QueueState:
    """Mutable state for the HTTP POST priority queue.

    Attributes:
        lock: Guards all access to ``queue``, ``counter`` and ``active``.
        queue: Min-heap of ``(priority, sequence, path, payload)`` tuples.
        counter: Monotonic sequence (an ``itertools.count``) used as a heap
            tie-breaker so equal priorities drain in FIFO order.
        active: ``True`` while a drain loop is currently running.
    """

    lock: threading.Lock = field(default_factory=threading.Lock)
    queue: list[tuple[int, int, str, dict]] = field(default_factory=list)
    counter: Iterable[int] = field(default_factory=itertools.count)
    active: bool = False
# Process-wide queue state shared by every producer in this module; tests can
# inject their own QueueState through the ``state`` keyword parameters.
STATE = QueueState()
def _post_json(
    path: str,
    payload: dict,
    *,
    instance: str | None = None,
    api_token: str | None = None,
) -> None:
    """Send a JSON payload to the configured web API.

    Parameters:
        path: API path relative to the configured instance root.
        payload: JSON-serialisable body to transmit.
        instance: Optional override for :data:`config.INSTANCE`.
        api_token: Optional override for :data:`config.API_TOKEN`.

    Failures are logged, never raised, so the drain loop keeps going.
    """
    target_instance = config.INSTANCE if instance is None else instance
    token = config.API_TOKEN if api_token is None else api_token
    if not target_instance:
        # No instance configured: forwarding is disabled.
        return
    url = f"{target_instance}{path}"
    body = json.dumps(payload).encode("utf-8")
    # Add full headers to avoid Cloudflare blocks on instances behind cloudflare proxy
    headers = {
        "Content-Type": "application/json",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
        "Accept": "application/json",
        "Accept-Language": "en-US,en;q=0.9",
        "Origin": f"{target_instance}",
        "Referer": f"{target_instance}",
    }
    if token:
        headers["Authorization"] = f"Bearer {token}"
    request = urllib.request.Request(url, data=body, headers=headers)
    try:
        with urllib.request.urlopen(request, timeout=10) as response:
            response.read()
    except Exception as exc:  # pragma: no cover - exercised in production
        config._debug_log(
            "POST request failed",
            context="queue.post_json",
            severity="warn",
            url=url,
            error_class=type(exc).__name__,
            error_message=str(exc),
        )
def _enqueue_post_json(
    path: str,
    payload: dict,
    priority: int,
    *,
    state: QueueState = STATE,
) -> None:
    """Store a POST request in the priority queue.

    Parameters:
        path: API path for the queued request.
        payload: JSON-serialisable body.
        priority: Lower values execute first; equal priorities drain FIFO.
        state: Shared queue state, injectable for testing.
    """
    with state.lock:
        # The monotonically increasing sequence number keeps heap ordering
        # stable for entries with the same priority.
        sequence = next(state.counter)
        heapq.heappush(state.queue, (priority, sequence, path, payload))
def _drain_post_queue(
    state: QueueState = STATE, send: Callable[[str, dict], None] | None = None
) -> None:
    """Process queued POST requests in priority order.

    Parameters:
        state: Queue container holding pending items.
        send: Optional callable used to transmit requests; defaults to
            :func:`_post_json`.

    The lock is held only while inspecting/popping the heap — never during
    ``send`` — so producers can enqueue while a request is in flight. The
    ``finally`` clause guarantees ``active`` is reset even when ``send``
    raises, allowing a later caller to restart the drain.
    """
    if send is None:
        send = _post_json
    try:
        while True:
            with state.lock:
                if not state.queue:
                    # Mark idle before releasing the lock so a concurrent
                    # producer knows it must start a new drain.
                    state.active = False
                    return
                _priority, _idx, path, payload = heapq.heappop(state.queue)
            send(path, payload)
    finally:
        with state.lock:
            state.active = False
def _queue_post_json(
    path: str,
    payload: dict,
    *,
    priority: int = _DEFAULT_POST_PRIORITY,
    state: QueueState = STATE,
    send: Callable[[str, dict], None] | None = None,
) -> None:
    """Queue a POST request and start processing if idle.

    Parameters:
        path: API path for the request.
        payload: JSON payload to send.
        priority: Scheduling priority where lower values run first.
        state: Queue container used to store pending requests.
        send: Optional transport override, primarily for tests.

    Note:
        Draining runs synchronously on the calling thread when no other
        drain is active, so this call may block on network I/O.
    """
    if send is None:
        send = _post_json
    if config.DEBUG:
        # Render the payload only when debugging to avoid serialisation
        # cost on the hot path.
        formatted_payload = (
            _payload_key_value_pairs(payload)
            if isinstance(payload, Mapping)
            else str(payload)
        )
        config._debug_log(
            f"Forwarding payload to API: {formatted_payload}",
            context="queue.queue_post_json",
            path=path,
            priority=priority,
        )
    _enqueue_post_json(path, payload, priority, state=state)
    with state.lock:
        if state.active:
            # An in-flight drain will pick up the item we just enqueued.
            return
        state.active = True
    _drain_post_queue(state, send=send)
def _clear_post_queue(state: QueueState = STATE) -> None:
    """Clear the pending POST queue and mark it idle.

    Parameters:
        state: Queue state to reset. Defaults to the global queue.
    """
    with state.lock:
        state.queue.clear()
        state.active = False
# Export list: the shared queue state, the priority constants consumed by the
# mesh ingestor, and the queue/transport primitives.
__all__ = [
    "STATE",
    "QueueState",
    "_DEFAULT_POST_PRIORITY",
    "_MESSAGE_POST_PRIORITY",
    "_NEIGHBOR_POST_PRIORITY",
    "_NODE_POST_PRIORITY",
    "_POSITION_POST_PRIORITY",
    "_TELEMETRY_POST_PRIORITY",
    "_clear_post_queue",
    "_drain_post_queue",
    "_enqueue_post_json",
    "_post_json",
    "_queue_post_json",
]
+776
View File
@@ -0,0 +1,776 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for converting Meshtastic structures into JSON-friendly forms.
The helpers normalise loosely structured Meshtastic packets so they can be
forwarded to the web application using predictable field names and types.
"""
from __future__ import annotations
import base64
import dataclasses
import enum
import importlib
import json
import math
import time
from collections.abc import Mapping
from google.protobuf.json_format import MessageToDict
from google.protobuf.message import DecodeError
from google.protobuf.message import Message as ProtoMessage
# Candidate import paths probed in order by _load_cli_role_lookup; several
# are listed because the CLI role enum may live at different paths.
_CLI_ROLE_MODULE_NAMES: tuple[str, ...] = (
    "meshtastic.cli.common",
    "meshtastic.cli.roles",
    "meshtastic.cli.enums",
    "meshtastic_cli.common",
    "meshtastic_cli.roles",
)
"""Possible module paths that may expose the Meshtastic CLI role enum."""
# Populated lazily; None means "not yet attempted" (an empty dict means the
# lookup was attempted and nothing was found).
_CLI_ROLE_LOOKUP: dict[int, str] | None = None
"""Cached mapping of CLI role identifiers to their textual names."""
def _get(obj, key, default=None):
"""Return ``obj[key]`` or ``getattr(obj, key)`` when available.
Parameters:
obj: Mapping or object supplying attributes.
key: Name of the attribute or mapping key to retrieve.
default: Fallback value when ``key`` is not present.
Returns:
The resolved value or ``default`` if the lookup fails.
"""
if isinstance(obj, dict):
return obj.get(key, default)
return getattr(obj, key, default)
def _reset_cli_role_cache() -> None:
"""Clear the cached CLI role lookup mapping.
The helper is primarily used by tests to ensure deterministic behaviour
when substituting stub CLI modules.
Returns:
``None``. The next lookup will trigger a fresh import attempt.
"""
global _CLI_ROLE_LOOKUP
_CLI_ROLE_LOOKUP = None
def _load_cli_role_lookup() -> dict[int, str]:
    """Return a mapping of role identifiers from the Meshtastic CLI.

    The Meshtastic CLI exposes extended role enums that may include entries
    absent from the protobuf definition shipped with the firmware. This
    helper lazily imports the CLI module when present and extracts the
    available role names so that numeric values received from the firmware can
    be normalised into human-friendly strings.

    Returns:
        Mapping of integer role identifiers to their canonical string names.
        The result is cached in ``_CLI_ROLE_LOOKUP``; an empty mapping is
        cached when no CLI module can be imported.
    """
    global _CLI_ROLE_LOOKUP
    if _CLI_ROLE_LOOKUP is not None:
        # Cached from a previous call (possibly an empty mapping).
        return _CLI_ROLE_LOOKUP
    lookup: dict[int, str] = {}
    def _from_candidate(candidate) -> dict[int, str]:
        # Accepts three shapes: a real Enum class, an enum-like object with
        # ``__members__``, or a plain mapping of id -> name.
        mapping: dict[int, str] = {}
        if isinstance(candidate, enum.EnumMeta):
            for member in candidate:  # pragma: no branch - Enum iteration deterministic
                try:
                    mapping[int(member.value)] = str(member.name)
                except Exception:  # pragma: no cover - defensive guard
                    continue
            return mapping
        members = getattr(candidate, "__members__", None)
        if isinstance(members, Mapping):
            for name, member in members.items():
                value = getattr(member, "value", None)
                if isinstance(value, (int, enum.IntEnum)):
                    try:
                        mapping[int(value)] = str(name)
                    except Exception:  # pragma: no cover - defensive
                        continue
            if mapping:
                return mapping
        if isinstance(candidate, Mapping):
            for key, value in candidate.items():
                try:
                    key_int = int(key)
                except Exception:  # pragma: no cover - defensive
                    continue
                mapping[key_int] = str(value)
        return mapping
    for module_name in _CLI_ROLE_MODULE_NAMES:
        try:
            module = importlib.import_module(module_name)
        except Exception:  # pragma: no cover - optional dependency
            continue
        candidates = []
        # Probe the attribute names under which the role enum may be exposed.
        for attr_name in ("Role", "Roles", "ClientRole", "ClientRoles"):
            candidate = getattr(module, attr_name, None)
            if candidate is not None:
                candidates.append(candidate)
        for candidate in candidates:
            mapping = _from_candidate(candidate)
            if not mapping:
                continue
            lookup.update(mapping)
        if lookup:
            # The first module that yields any entries wins.
            break
    # Normalise names to upper-case and drop blank entries before caching.
    _CLI_ROLE_LOOKUP = {
        key: value.strip().upper()
        for key, value in lookup.items()
        if isinstance(value, str) and value.strip()
    }
    return _CLI_ROLE_LOOKUP
def _node_to_dict(n) -> dict:
    """Convert ``n`` into a JSON-serialisable mapping.

    Parameters:
        n: Arbitrary data structure, commonly a protobuf message, dataclass or
            nested containers produced by Meshtastic.

    Returns:
        A plain dictionary containing recursively converted values (scalars
        pass through unchanged when ``n`` itself is a scalar).
    """
    def _convert(value):
        # Containers recurse; tuples and sets collapse to lists for JSON.
        if isinstance(value, dict):
            return {k: _convert(v) for k, v in value.items()}
        if isinstance(value, (list, tuple, set)):
            return [_convert(v) for v in value]
        if dataclasses.is_dataclass(value):
            return {k: _convert(getattr(value, k)) for k in value.__dataclass_fields__}
        if isinstance(value, ProtoMessage):
            # Preferred path: protobuf's own JSON conversion, keeping the
            # original snake_case field names and symbolic enum values.
            try:
                return MessageToDict(
                    value,
                    preserving_proto_field_name=True,
                    use_integers_for_enums=False,
                )
            except Exception:
                # Some message-like objects expose a to_dict() fallback.
                if hasattr(value, "to_dict"):
                    try:
                        return value.to_dict()
                    except Exception:
                        pass
                # Last resort: JSON round-trip with str() coercion.
                try:
                    return json.loads(json.dumps(value, default=str))
                except Exception:
                    return str(value)
        if isinstance(value, bytes):
            # Bytes become text when valid UTF-8, hex otherwise.
            try:
                return value.decode()
            except Exception:
                return value.hex()
        if isinstance(value, (str, int, float, bool)) or value is None:
            return value
        # Unknown objects: JSON round-trip with str() coercion, else str().
        try:
            return json.loads(json.dumps(value, default=str))
        except Exception:
            return str(value)
    return _convert(n)
def _normalize_user_role(value) -> str | None:
    """Return a canonical upper-case role string for ``value`` when possible.

    Strings are trimmed and upper-cased. Numeric values are resolved via the
    CLI role lookup, then the ``User.Role`` and ``Config.DeviceConfig.Role``
    protobuf enums, finally falling back to the bare number as text.
    """
    if value is None:
        return None
    if isinstance(value, str):
        cleaned = value.strip()
        return cleaned.upper() if cleaned else None
    numeric = _coerce_int(value)
    if numeric is None:
        return None
    role_name = _load_cli_role_lookup().get(numeric)
    if not role_name:
        try:  # pragma: no branch - minimal control flow
            from meshtastic.protobuf import mesh_pb2

            role_name = mesh_pb2.User.Role.Name(numeric)
        except Exception:  # pragma: no cover - depends on protobuf version
            role_name = None
    if not role_name:
        try:
            from meshtastic.protobuf import config_pb2

            role_name = config_pb2.Config.DeviceConfig.Role.Name(numeric)
        except Exception:  # pragma: no cover - depends on protobuf version
            role_name = None
    if role_name:
        return role_name.strip().upper()
    return str(numeric)
def upsert_payload(node_id, node) -> dict:
    """Return the payload expected by ``/api/nodes`` upsert requests.

    Parameters:
        node_id: Canonical node identifier.
        node: Node representation to convert with :func:`_node_to_dict`.

    Returns:
        A single-entry mapping of ``node_id`` to the converted node.
    """
    return {node_id: _node_to_dict(node)}
def _iso(ts: int | float) -> str:
"""Convert ``ts`` into an ISO-8601 timestamp in UTC."""
import datetime
return (
datetime.datetime.fromtimestamp(int(ts), datetime.timezone.utc)
.isoformat()
.replace("+00:00", "Z")
)
def _first(d, *names, default=None):
"""Return the first matching attribute or key from ``d``.
Parameters:
d: Mapping or object providing nested attributes.
*names: Candidate names, optionally using ``dot.separated`` notation
for nested lookups.
default: Value returned when no candidates succeed.
Returns:
The first non-empty value encountered or ``default``.
"""
def _mapping_get(obj, key):
if isinstance(obj, Mapping) and key in obj:
return True, obj[key]
if hasattr(obj, "__getitem__"):
try:
return True, obj[key]
except Exception:
pass
if hasattr(obj, key):
return True, getattr(obj, key)
return False, None
for name in names:
cur = d
ok = True
for part in name.split("."):
ok, cur = _mapping_get(cur, part)
if not ok:
break
if ok:
if cur is None:
continue
if isinstance(cur, str) and cur == "":
continue
return cur
return default
def _coerce_int(value):
"""Best-effort conversion of ``value`` to an integer.
Parameters:
value: Any type supported by Meshtastic payloads.
Returns:
An integer or ``None`` when conversion is not possible.
"""
if value is None:
return None
if isinstance(value, bool):
return int(value)
if isinstance(value, int):
return value
if isinstance(value, float):
return int(value) if math.isfinite(value) else None
if isinstance(value, (str, bytes, bytearray)):
text = value.decode() if isinstance(value, (bytes, bytearray)) else value
stripped = text.strip()
if not stripped:
return None
try:
if stripped.lower().startswith("0x"):
return int(stripped, 16)
return int(stripped, 10)
except ValueError:
try:
return int(float(stripped))
except ValueError:
return None
try:
return int(value)
except (TypeError, ValueError):
return None
def _coerce_float(value):
"""Best-effort conversion of ``value`` to a float.
Parameters:
value: Any type supported by Meshtastic payloads.
Returns:
A float or ``None`` when conversion fails or results in ``NaN``.
"""
if value is None:
return None
if isinstance(value, bool):
return float(value)
if isinstance(value, (int, float)):
result = float(value)
return result if math.isfinite(result) else None
if isinstance(value, (str, bytes, bytearray)):
text = value.decode() if isinstance(value, (bytes, bytearray)) else value
stripped = text.strip()
if not stripped:
return None
try:
result = float(stripped)
except ValueError:
return None
return result if math.isfinite(result) else None
try:
result = float(value)
except (TypeError, ValueError):
return None
return result if math.isfinite(result) else None
def _pkt_to_dict(packet) -> dict:
"""Normalise a packet into a plain dictionary.
Parameters:
packet: Packet object or mapping emitted by Meshtastic.
Returns:
A dictionary representation suitable for downstream processing.
"""
if isinstance(packet, dict):
return packet
if isinstance(packet, ProtoMessage):
try:
return MessageToDict(
packet, preserving_proto_field_name=True, use_integers_for_enums=False
)
except Exception:
if hasattr(packet, "to_dict"):
try:
return packet.to_dict()
except Exception:
pass
try:
return json.loads(json.dumps(packet, default=lambda o: str(o)))
except Exception:
return {"_unparsed": str(packet)}
def _canonical_node_id(value) -> str | None:
"""Convert node identifiers into the canonical ``!xxxxxxxx`` format.
Parameters:
value: Input identifier which may be an int, float or string.
Returns:
The canonical identifier or ``None`` if conversion fails.
"""
if value is None:
return None
if isinstance(value, (int, float)):
try:
num = int(value)
except (TypeError, ValueError):
return None
if num < 0:
return None
return f"!{num & 0xFFFFFFFF:08x}"
if not isinstance(value, str):
return None
trimmed = value.strip()
if not trimmed:
return None
if trimmed.startswith("^"):
return trimmed
if trimmed.startswith("!"):
body = trimmed[1:]
elif trimmed.lower().startswith("0x"):
body = trimmed[2:]
elif trimmed.isdigit():
try:
return f"!{int(trimmed, 10) & 0xFFFFFFFF:08x}"
except ValueError:
return None
else:
body = trimmed
if not body:
return None
try:
return f"!{int(body, 16) & 0xFFFFFFFF:08x}"
except ValueError:
return None
def _node_num_from_id(node_id) -> int | None:
"""Extract the numeric node ID from a canonical identifier.
Parameters:
node_id: Identifier value accepted by :func:`_canonical_node_id`.
Returns:
The numeric node ID or ``None`` when parsing fails.
"""
if node_id is None:
return None
if isinstance(node_id, (int, float)):
try:
num = int(node_id)
except (TypeError, ValueError):
return None
return num if num >= 0 else None
if not isinstance(node_id, str):
return None
trimmed = node_id.strip()
if not trimmed:
return None
if trimmed.startswith("!"):
trimmed = trimmed[1:]
if trimmed.lower().startswith("0x"):
trimmed = trimmed[2:]
try:
return int(trimmed, 16)
except ValueError:
try:
return int(trimmed, 10)
except ValueError:
return None
def _merge_mappings(base, extra):
    """Merge two mapping-like objects recursively.

    Parameters:
        base: Existing mapping or mapping-like structure.
        extra: Mapping or compatible object whose entries should overlay
            ``base``.

    Returns:
        A new dictionary containing the merged values; neither input is
        mutated.
    """
    base_dict: dict
    if isinstance(base, Mapping):
        base_dict = dict(base)
    elif base:
        # Non-mapping truthy bases are converted first; anything that does
        # not become a mapping is discarded and an empty base is used.
        converted_base = _node_to_dict(base)
        base_dict = dict(converted_base) if isinstance(converted_base, Mapping) else {}
    else:
        base_dict = {}
    if not isinstance(extra, Mapping):
        converted_extra = _node_to_dict(extra)
        if not isinstance(converted_extra, Mapping):
            # ``extra`` contributes nothing when it cannot become a mapping.
            return base_dict
        extra = converted_extra
    for key, value in extra.items():
        if isinstance(value, Mapping):
            # Nested mappings merge recursively instead of overwriting.
            existing = base_dict.get(key)
            base_dict[key] = _merge_mappings(existing, value)
        else:
            base_dict[key] = _node_to_dict(value)
    return base_dict
def _extract_payload_bytes(decoded_section: Mapping) -> bytes | None:
"""Return raw payload bytes from ``decoded_section`` when available.
Parameters:
decoded_section: Mapping that may include a ``payload`` entry.
Returns:
Raw payload bytes or ``None`` when the payload is missing or invalid.
"""
if not isinstance(decoded_section, Mapping):
return None
payload = decoded_section.get("payload")
if isinstance(payload, Mapping):
data = payload.get("__bytes_b64__") or payload.get("bytes")
if isinstance(data, str):
try:
return base64.b64decode(data)
except Exception:
return None
if isinstance(payload, (bytes, bytearray)):
return bytes(payload)
if isinstance(payload, str):
try:
return base64.b64decode(payload)
except Exception:
return None
return None
def _decode_nodeinfo_payload(payload_bytes):
    """Decode ``NodeInfo`` protobuf payloads from raw bytes.

    Parameters:
        payload_bytes: Serialized protobuf data from a NODEINFO packet.

    Returns:
        A :class:`meshtastic.protobuf.mesh_pb2.NodeInfo` instance, or ``None``
        when the bytes are empty, the protobuf modules are unavailable, or
        parsing fails entirely.
    """
    if not payload_bytes:
        return None
    try:
        from meshtastic.protobuf import mesh_pb2
    except Exception:
        return None
    node_info = mesh_pb2.NodeInfo()
    try:
        node_info.ParseFromString(payload_bytes)
        return node_info
    except DecodeError:
        # Fallback: retry the payload as a bare User message and wrap it
        # into a fresh NodeInfo so callers always see the same shape.
        try:
            user_msg = mesh_pb2.User()
            user_msg.ParseFromString(payload_bytes)
        except DecodeError:
            return None
        node_info = mesh_pb2.NodeInfo()
        node_info.user.CopyFrom(user_msg)
        return node_info
def _nodeinfo_metrics_dict(node_info) -> dict | None:
"""Extract device metric fields from a NodeInfo message.
Parameters:
node_info: Parsed NodeInfo protobuf message.
Returns:
A dictionary containing selected metric fields, or ``None`` when no
metrics are present.
"""
if not node_info:
return None
metrics_field_names = {f[0].name for f in node_info.ListFields()}
if "device_metrics" not in metrics_field_names:
return None
metrics = {}
for field_desc, value in node_info.device_metrics.ListFields():
name = field_desc.name
if name == "battery_level":
metrics["batteryLevel"] = float(value)
elif name == "voltage":
metrics["voltage"] = float(value)
elif name == "channel_utilization":
metrics["channelUtilization"] = float(value)
elif name == "air_util_tx":
metrics["airUtilTx"] = float(value)
elif name == "uptime_seconds":
metrics["uptimeSeconds"] = int(value)
elif name == "humidity":
metrics["humidity"] = float(value)
elif name == "temperature":
metrics["temperature"] = float(value)
elif name == "barometric_pressure":
metrics["barometricPressure"] = float(value)
return metrics or None
def _nodeinfo_position_dict(node_info) -> dict | None:
"""Return a dictionary view of positional data from NodeInfo.
Parameters:
node_info: Parsed NodeInfo protobuf message.
Returns:
A dictionary of positional fields or ``None`` if no data exists.
"""
if not node_info:
return None
fields = {f[0].name for f in node_info.ListFields()}
if "position" not in fields:
return None
result = {}
latitude_i = None
longitude_i = None
for field_desc, value in node_info.position.ListFields():
name = field_desc.name
if name == "latitude_i":
latitude_i = int(value)
result["latitudeI"] = latitude_i
elif name == "longitude_i":
longitude_i = int(value)
result["longitudeI"] = longitude_i
elif name == "latitude":
result["latitude"] = float(value)
elif name == "longitude":
result["longitude"] = float(value)
elif name == "altitude":
result["altitude"] = int(value)
elif name == "time":
result["time"] = int(value)
elif name == "ground_speed":
result["groundSpeed"] = float(value)
elif name == "ground_track":
result["groundTrack"] = float(value)
elif name == "precision_bits":
result["precisionBits"] = int(value)
elif name == "location_source":
# Preserve the raw enum value to allow downstream formatting.
result["locationSource"] = int(value)
if "latitude" not in result and latitude_i is not None:
result["latitude"] = latitude_i / 1e7
if "longitude" not in result and longitude_i is not None:
result["longitude"] = longitude_i / 1e7
return result or None
def _nodeinfo_user_dict(node_info, decoded_user):
    """Combine protobuf and decoded user information into a mapping.

    Parameters:
        node_info: Parsed NodeInfo protobuf message that may contain a ``user``
            field.
        decoded_user: Mapping or protobuf message representing decoded user
            data from the packet payload.

    Returns:
        A merged mapping of user information or ``None`` when no data exists.
    """

    def _proto_user_to_dict(message):
        # Render a protobuf User message with camelCase keys and enum names.
        return MessageToDict(
            message,
            preserving_proto_field_name=False,
            use_integers_for_enums=False,
        )

    user_dict = None
    if node_info:
        populated = {descriptor.name for descriptor, _ in node_info.ListFields()}
        if "user" in populated:
            try:
                user_dict = _proto_user_to_dict(node_info.user)
            except Exception:
                user_dict = None
    # Normalise the decoded user into a mapping before merging.
    if isinstance(decoded_user, ProtoMessage):
        try:
            decoded_user = _proto_user_to_dict(decoded_user)
        except Exception:
            decoded_user = _node_to_dict(decoded_user)
    if isinstance(decoded_user, Mapping):
        # Decoded packet data takes precedence over the protobuf view.
        user_dict = _merge_mappings(user_dict, decoded_user)
    if isinstance(user_dict, Mapping):
        canonical = _canonical_node_id(user_dict.get("id"))
        if canonical:
            user_dict = {**user_dict, "id": canonical}
        role = user_dict.get("role")
        normalized_role = _normalize_user_role(role)
        if normalized_role and normalized_role != role:
            user_dict = {**user_dict, "role": normalized_role}
    return user_dict
# Explicit export list for ``from <module> import *``.  Private helpers are
# re-exported deliberately so the test suite and sibling modules can import
# them by name; the protobuf aliases (``DecodeError``, ``MessageToDict``,
# ``ProtoMessage``) are exposed for callers that patch or reuse them.
__all__ = [
    "_canonical_node_id",
    "_coerce_float",
    "_coerce_int",
    "_load_cli_role_lookup",
    "_normalize_user_role",
    "_decode_nodeinfo_payload",
    "_extract_payload_bytes",
    "_first",
    "_get",
    "_iso",
    "_merge_mappings",
    "_node_num_from_id",
    "_node_to_dict",
    "_nodeinfo_metrics_dict",
    "_nodeinfo_position_dict",
    "_nodeinfo_user_dict",
    "_pkt_to_dict",
    "_reset_cli_role_cache",
    "DecodeError",
    "MessageToDict",
    "ProtoMessage",
    "upsert_payload",
]
+8 -2
View File
@@ -1,4 +1,4 @@
-- Copyright (C) 2025 l5yth
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@@ -21,10 +21,15 @@ CREATE TABLE IF NOT EXISTS messages (
channel INTEGER,
portnum TEXT,
text TEXT,
encrypted TEXT,
snr REAL,
rssi INTEGER,
hop_limit INTEGER,
raw_json TEXT
lora_freq INTEGER,
modem_preset TEXT,
channel_name TEXT,
reply_id INTEGER,
emoji TEXT
);
CREATE INDEX IF NOT EXISTS idx_messages_rx_time ON messages(rx_time);
@@ -32,3 +37,4 @@ CREATE INDEX IF NOT EXISTS idx_messages_from_id ON messages(from_id);
CREATE INDEX IF NOT EXISTS idx_messages_to_id ON messages(to_id);
CREATE INDEX IF NOT EXISTS idx_messages_channel ON messages(channel);
CREATE INDEX IF NOT EXISTS idx_messages_portnum ON messages(portnum);
CREATE INDEX IF NOT EXISTS idx_messages_reply_id ON messages(reply_id);
@@ -0,0 +1,18 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- Add support for encrypted messages to the existing schema.
BEGIN;
ALTER TABLE messages ADD COLUMN encrypted TEXT;
COMMIT;
@@ -0,0 +1,23 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- Extend the nodes and messages tables with LoRa metadata columns.
BEGIN;
ALTER TABLE nodes ADD COLUMN lora_freq INTEGER;
ALTER TABLE nodes ADD COLUMN modem_preset TEXT;
ALTER TABLE messages ADD COLUMN lora_freq INTEGER;
ALTER TABLE messages ADD COLUMN modem_preset TEXT;
ALTER TABLE messages ADD COLUMN channel_name TEXT;
COMMIT;
@@ -0,0 +1,36 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- Extend the telemetry table with additional environment metrics.
BEGIN;
ALTER TABLE telemetry ADD COLUMN gas_resistance REAL;
ALTER TABLE telemetry ADD COLUMN current REAL;
ALTER TABLE telemetry ADD COLUMN iaq INTEGER;
ALTER TABLE telemetry ADD COLUMN distance REAL;
ALTER TABLE telemetry ADD COLUMN lux REAL;
ALTER TABLE telemetry ADD COLUMN white_lux REAL;
ALTER TABLE telemetry ADD COLUMN ir_lux REAL;
ALTER TABLE telemetry ADD COLUMN uv_lux REAL;
ALTER TABLE telemetry ADD COLUMN wind_direction INTEGER;
ALTER TABLE telemetry ADD COLUMN wind_speed REAL;
ALTER TABLE telemetry ADD COLUMN weight REAL;
ALTER TABLE telemetry ADD COLUMN wind_gust REAL;
ALTER TABLE telemetry ADD COLUMN wind_lull REAL;
ALTER TABLE telemetry ADD COLUMN radiation REAL;
ALTER TABLE telemetry ADD COLUMN rainfall_1h REAL;
ALTER TABLE telemetry ADD COLUMN rainfall_24h REAL;
ALTER TABLE telemetry ADD COLUMN soil_moisture INTEGER;
ALTER TABLE telemetry ADD COLUMN soil_temperature REAL;
COMMIT;
@@ -0,0 +1,21 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
-- Extend the messages table to capture reply relationships and emoji reactions.
BEGIN;
ALTER TABLE messages ADD COLUMN reply_id INTEGER;
ALTER TABLE messages ADD COLUMN emoji TEXT;
CREATE INDEX IF NOT EXISTS idx_messages_reply_id ON messages(reply_id);
COMMIT;
+26
View File
@@ -0,0 +1,26 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
CREATE TABLE IF NOT EXISTS neighbors (
node_id TEXT NOT NULL,
neighbor_id TEXT NOT NULL,
snr REAL,
rx_time INTEGER NOT NULL,
PRIMARY KEY (node_id, neighbor_id),
FOREIGN KEY (node_id) REFERENCES nodes(node_id) ON DELETE CASCADE,
FOREIGN KEY (neighbor_id) REFERENCES nodes(node_id) ON DELETE CASCADE
);
CREATE INDEX IF NOT EXISTS idx_neighbors_rx_time ON neighbors(rx_time);
CREATE INDEX IF NOT EXISTS idx_neighbors_neighbor_id ON neighbors(neighbor_id);
+5 -2
View File
@@ -1,4 +1,4 @@
-- Copyright (C) 2025 l5yth
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
@@ -36,9 +36,12 @@ CREATE TABLE IF NOT EXISTS nodes (
uptime_seconds INTEGER,
position_time INTEGER,
location_source TEXT,
precision_bits INTEGER,
latitude REAL,
longitude REAL,
altitude REAL
altitude REAL,
lora_freq INTEGER,
modem_preset TEXT
);
CREATE INDEX IF NOT EXISTS idx_nodes_last_heard ON nodes(last_heard);
+40
View File
@@ -0,0 +1,40 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
CREATE TABLE IF NOT EXISTS positions (
id INTEGER PRIMARY KEY,
node_id TEXT,
node_num INTEGER,
rx_time INTEGER NOT NULL,
rx_iso TEXT NOT NULL,
position_time INTEGER,
to_id TEXT,
latitude REAL,
longitude REAL,
altitude REAL,
location_source TEXT,
precision_bits INTEGER,
sats_in_view INTEGER,
pdop REAL,
ground_speed REAL,
ground_track REAL,
snr REAL,
rssi INTEGER,
hop_limit INTEGER,
bitfield INTEGER,
payload_b64 TEXT
);
CREATE INDEX IF NOT EXISTS idx_positions_rx_time ON positions(rx_time);
CREATE INDEX IF NOT EXISTS idx_positions_node_id ON positions(node_id);
+5 -4
View File
@@ -1,7 +1,8 @@
# Production dependencies
meshtastic>=2.0.0
protobuf>=4.21.12
meshtastic>=2.5.0
protobuf>=5.27.2
# Development dependencies (optional)
black>=23.0.0
pytest>=7.0.0
black>=24.8.0
pytest>=8.3.0
pytest-cov>=5.0.0
+61
View File
@@ -0,0 +1,61 @@
-- Copyright © 2025-26 l5yth & contributors
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
CREATE TABLE IF NOT EXISTS telemetry (
id INTEGER PRIMARY KEY,
node_id TEXT,
node_num INTEGER,
from_id TEXT,
to_id TEXT,
rx_time INTEGER NOT NULL,
rx_iso TEXT NOT NULL,
telemetry_time INTEGER,
channel INTEGER,
portnum TEXT,
hop_limit INTEGER,
snr REAL,
rssi INTEGER,
bitfield INTEGER,
payload_b64 TEXT,
battery_level REAL,
voltage REAL,
channel_utilization REAL,
air_util_tx REAL,
uptime_seconds INTEGER,
temperature REAL,
relative_humidity REAL,
barometric_pressure REAL,
gas_resistance REAL,
current REAL,
iaq INTEGER,
distance REAL,
lux REAL,
white_lux REAL,
ir_lux REAL,
uv_lux REAL,
wind_direction INTEGER,
wind_speed REAL,
weight REAL,
wind_gust REAL,
wind_lull REAL,
radiation REAL,
rainfall_1h REAL,
rainfall_24h REAL,
soil_moisture INTEGER,
soil_temperature REAL
);
CREATE INDEX IF NOT EXISTS idx_telemetry_rx_time ON telemetry(rx_time);
CREATE INDEX IF NOT EXISTS idx_telemetry_node_id ON telemetry(node_id);
CREATE INDEX IF NOT EXISTS idx_telemetry_time ON telemetry(telemetry_time);
+42 -7
View File
@@ -1,21 +1,56 @@
version: '3.8'
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Development overrides for docker-compose.yml
services:
web:
environment:
- DEBUG=1
DEBUG: 1
volumes:
- ./web:/app
- ./data:/data # Mount data directory for SQL files
- /app/vendor/bundle # Exclude vendor directory from volume mount
- ./data:/app/.local/share/potato-mesh
- ./.config/potato-mesh:/app/.config/potato-mesh
- /app/vendor/bundle
web-bridge:
environment:
DEBUG: 1
volumes:
- ./web:/app
- ./data:/app/.local/share/potato-mesh
- ./.config/potato-mesh:/app/.config/potato-mesh
- /app/vendor/bundle
ports:
- "41447:41447"
- "9292:9292" # Additional port for development tools
- "9292:9292"
ingestor:
environment:
- DEBUG=1
DEBUG: 1
volumes:
- ./data:/app
- /app/.local # Exclude Python packages from volume mount
- ./data:/app/.local/share/potato-mesh
- ./.config/potato-mesh:/app/.config/potato-mesh
- /app/.local
- /dev:/dev
ingestor-bridge:
environment:
DEBUG: 1
volumes:
- ./data:/app
- ./data:/app/.local/share/potato-mesh
- ./.config/potato-mesh:/app/.config/potato-mesh
- /app/.local
- /dev:/dev
+37 -19
View File
@@ -1,33 +1,51 @@
version: '3.8'
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Production overrides for docker-compose.yml
services:
web:
build:
context: .
dockerfile: web/Dockerfile
target: production
environment:
- DEBUG=0
DEBUG: 0
restart: always
web-bridge:
build:
context: .
dockerfile: web/Dockerfile
target: production
environment:
DEBUG: 0
restart: always
deploy:
resources:
limits:
memory: 512M
cpus: '0.5'
reservations:
memory: 256M
cpus: '0.25'
ingestor:
build:
context: .
dockerfile: data/Dockerfile
target: production
environment:
- DEBUG=0
DEBUG: 0
restart: always
ingestor-bridge:
build:
context: .
dockerfile: data/Dockerfile
target: production
environment:
DEBUG: 0
restart: always
deploy:
resources:
limits:
memory: 256M
cpus: '0.25'
reservations:
memory: 128M
cpus: '0.1'
+106 -56
View File
@@ -1,71 +1,121 @@
version: '3.8'
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
x-web-base: &web-base
image: ghcr.io/l5yth/potato-mesh-web-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:${POTATOMESH_IMAGE_TAG:-latest}
environment:
APP_ENV: ${APP_ENV:-production}
RACK_ENV: ${RACK_ENV:-production}
SITE_NAME: ${SITE_NAME:-PotatoMesh Demo}
CHANNEL: ${CHANNEL:-#LongFast}
FREQUENCY: ${FREQUENCY:-915MHz}
MAP_CENTER: ${MAP_CENTER:-38.761944,-27.090833}
MAP_ZOOM: ${MAP_ZOOM:-""}
MAX_DISTANCE: ${MAX_DISTANCE:-42}
CONTACT_LINK: ${CONTACT_LINK:-#potatomesh:dod.ngo}
FEDERATION: ${FEDERATION:-1}
PRIVATE: ${PRIVATE:-0}
API_TOKEN: ${API_TOKEN}
INSTANCE_DOMAIN: ${INSTANCE_DOMAIN}
DEBUG: ${DEBUG:-0}
command: ["ruby", "app.rb", "-p", "41447", "-o", "0.0.0.0"]
volumes:
- potatomesh_data:/app/.local/share/potato-mesh
- potatomesh_config:/app/.config/potato-mesh
- potatomesh_logs:/app/logs
restart: unless-stopped
deploy:
resources:
limits:
memory: 512M
cpus: '0.5'
reservations:
memory: 256M
cpus: '0.25'
x-ingestor-base: &ingestor-base
image: ghcr.io/l5yth/potato-mesh-ingestor-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:${POTATOMESH_IMAGE_TAG:-latest}
environment:
CONNECTION: ${CONNECTION:-/dev/ttyACM0}
CHANNEL_INDEX: ${CHANNEL_INDEX:-0}
POTATOMESH_INSTANCE: ${POTATOMESH_INSTANCE:-http://web:41447}
API_TOKEN: ${API_TOKEN}
INSTANCE_DOMAIN: ${INSTANCE_DOMAIN}
DEBUG: ${DEBUG:-0}
FEDERATION: ${FEDERATION:-1}
PRIVATE: ${PRIVATE:-0}
volumes:
- potatomesh_data:/app/.local/share/potato-mesh
- potatomesh_config:/app/.config/potato-mesh
- potatomesh_logs:/app/logs
- /dev:/dev
device_cgroup_rules:
- 'c 166:* rwm' # ttyACM devices
- 'c 188:* rwm' # ttyUSB devices
- 'c 4:* rwm' # ttyS devices
privileged: false
restart: unless-stopped
deploy:
resources:
limits:
memory: 256M
cpus: '0.25'
reservations:
memory: 128M
cpus: '0.1'
services:
web:
image: ghcr.io/l5yth/potato-mesh-web-linux-amd64:latest
container_name: potatomesh-web
ports:
- "41447:41447"
environment:
- SITE_NAME=${SITE_NAME:-My Meshtastic Network}
- DEFAULT_CHANNEL=${DEFAULT_CHANNEL:-#MediumFast}
- DEFAULT_FREQUENCY=${DEFAULT_FREQUENCY:-868MHz}
- MAP_CENTER_LAT=${MAP_CENTER_LAT:-52.502889}
- MAP_CENTER_LON=${MAP_CENTER_LON:-13.404194}
- MAX_NODE_DISTANCE_KM=${MAX_NODE_DISTANCE_KM:-50}
- MATRIX_ROOM=${MATRIX_ROOM:-}
- API_TOKEN=${API_TOKEN}
- DEBUG=${DEBUG:-0}
volumes:
- potatomesh_data:/app/data
- potatomesh_logs:/app/logs
networks:
- potatomesh-network
restart: unless-stopped
deploy:
resources:
limits:
memory: 512M
cpus: '0.5'
reservations:
memory: 256M
cpus: '0.25'
<<: *web-base
network_mode: host
ingestor:
image: ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:latest
container_name: potatomesh-ingestor
environment:
- MESH_SERIAL=${MESH_SERIAL:-/dev/ttyACM0}
- MESH_SNAPSHOT_SECS=${MESH_SNAPSHOT_SECS:-60}
- MESH_CHANNEL_INDEX=${MESH_CHANNEL_INDEX:-0}
- POTATOMESH_INSTANCE=${POTATOMESH_INSTANCE:-http://web:41447}
- API_TOKEN=${API_TOKEN}
- DEBUG=${DEBUG:-0}
volumes:
- potatomesh_data:/app/data
- potatomesh_logs:/app/logs
devices:
# Map Meshtastic serial device from host to container
# Common paths: /dev/ttyACM0, /dev/ttyUSB0, /dev/cu.usbserial-*
- ${MESH_SERIAL:-/dev/ttyACM0}:${MESH_SERIAL:-/dev/ttyACM0}
privileged: false
<<: *ingestor-base
network_mode: host
depends_on:
- web
extra_hosts:
- "web:127.0.0.1"
web-bridge:
<<: *web-base
container_name: potatomesh-web-bridge
networks:
- potatomesh-network
ports:
- "41447:41447"
profiles:
- bridge
ingestor-bridge:
<<: *ingestor-base
container_name: potatomesh-ingestor-bridge
networks:
- potatomesh-network
depends_on:
- web
restart: unless-stopped
deploy:
resources:
limits:
memory: 256M
cpus: '0.25'
reservations:
memory: 128M
cpus: '0.1'
- web-bridge
profiles:
- bridge
volumes:
potatomesh_data:
driver: local
potatomesh_config:
driver: local
potatomesh_logs:
driver: local
networks:
potatomesh-network:
driver: bridge
BIN
View File
Binary file not shown.

After

Width:  |  Height:  |  Size: 952 KiB

BIN
View File
Binary file not shown.

After

Width:  |  Height:  |  Size: 907 KiB

+31 -4
View File
@@ -1,6 +1,5 @@
#!/usr/bin/env python3
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,6 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interactive debugging helpers for live Meshtastic sessions."""
import time, json, base64, threading
from pubsub import pub # comes with meshtastic
from meshtastic.serial_interface import SerialInterface
@@ -28,7 +29,14 @@ stop = threading.Event()
def to_jsonable(obj):
"""Recursively convert protobuf/bytes/etc. into JSON-serializable structures."""
"""Recursively convert complex objects into JSON-serialisable structures.
Parameters:
obj: Any Meshtastic-related payload or protobuf message.
Returns:
A structure composed of standard Python types.
"""
if obj is None:
return None
if isinstance(obj, ProtoMessage):
@@ -49,7 +57,14 @@ def to_jsonable(obj):
def extract_text(d):
"""Best-effort pull of decoded text from a dict produced by to_jsonable()."""
"""Best-effort pull of decoded text from :func:`to_jsonable` output.
Parameters:
d: Mapping derived from :func:`to_jsonable`.
Returns:
The decoded text when available, otherwise ``None``.
"""
dec = d.get("decoded") or {}
# Text packets usually at decoded.payload.text
payload = dec.get("payload") or {}
@@ -62,6 +77,12 @@ def extract_text(d):
def on_receive(packet, interface):
"""Display human-readable output for each received packet.
Parameters:
packet: Packet instance supplied by Meshtastic.
interface: Interface that produced the packet.
"""
global packet_count, last_rx_ts
packet_count += 1
last_rx_ts = time.time()
@@ -86,14 +107,20 @@ def on_receive(packet, interface):
def on_connected(interface, *args, **kwargs):
"""Log when a connection is established."""
print("[info] connection established")
def on_disconnected(interface, *args, **kwargs):
"""Log when the interface disconnects."""
print("[info] disconnected")
def main():
"""Run the interactive debugging loop."""
print(f"Opening Meshtastic on {PORT}")
# Use PubSub topics (reliable in current meshtastic)
+127
View File
@@ -0,0 +1,127 @@
#!/usr/bin/env python3
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility script to dump Meshtastic traffic for offline analysis."""
from __future__ import annotations
import json
import os
import signal
import sys
import time
from datetime import datetime, timezone
from meshtastic.mesh_interface import MeshInterface
from meshtastic.serial_interface import SerialInterface
from pubsub import pub
CONNECTION = os.environ.get("CONNECTION") or os.environ.get(
"MESH_SERIAL", "/dev/ttyACM0"
)
"""Connection target opened to capture Meshtastic traffic."""
OUT = os.environ.get("MESH_DUMP_FILE", "meshtastic-dump.ndjson")
# line-buffered append so you can tail -f safely
f = open(OUT, "a", buffering=1, encoding="utf-8")
def now() -> str:
"""Return the current UTC timestamp in ISO 8601 format."""
return datetime.now(timezone.utc).isoformat()
def write(kind: str, payload: dict) -> None:
"""Append a JSON record to the dump file.
Parameters:
kind: Logical record type such as ``"packet"`` or ``"node"``.
payload: Serializable payload containing the record body.
"""
rec = {"ts": now(), "kind": kind, **payload}
f.write(json.dumps(rec, ensure_ascii=False, default=str) + "\n")
# Connect to the node
iface: MeshInterface = SerialInterface(CONNECTION)
# Packet callback: every RF/Mesh packet the node receives/decodes lands here
def on_packet(packet, iface):
"""Write packet metadata whenever the radio receives a frame.
Parameters:
packet: Meshtastic packet object or dictionary.
iface: Interface instance delivering the packet.
"""
# 'packet' already includes decoded fields when available (portnum, payload, position, telemetry, etc.)
write("packet", {"packet": packet})
# Node callback: topology/metadata updates (nodeinfo, hops, lastHeard, etc.)
def on_node(node, iface):
"""Write node metadata updates produced by Meshtastic.
Parameters:
node: Meshtastic node object or mapping.
iface: Interface instance emitting the update.
"""
write("node", {"node": node})
iface.onReceive = on_packet
pub.subscribe(on_node, "meshtastic.node")
# Write a little header so you know what you captured
try:
my = getattr(iface, "myInfo", None)
write(
"meta",
{
"event": "started",
"port": CONNECTION,
"my_node_num": getattr(my, "my_node_num", None) if my else None,
},
)
except Exception as e:
write("meta", {"event": "started", "port": CONNECTION, "error": str(e)})
# Keep the process alive until Ctrl-C
def _stop(signum, frame):
"""Handle termination signals by flushing buffers and exiting."""
write("meta", {"event": "stopping"})
try:
try:
pub.unsubscribe(on_node, "meshtastic.node")
except Exception:
pass
iface.close()
finally:
f.close()
sys.exit(0)
signal.signal(signal.SIGINT, _stop)
signal.signal(signal.SIGTERM, _stop)
# Simple sleep loop; avoids busy-wait
while True:
time.sleep(1)
+240
View File
@@ -0,0 +1,240 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Minimal Meshtastic protobuf stubs for isolated unit testing."""
from __future__ import annotations
import json
import types
from typing import Any, Callable, Dict, Tuple
def _enum_value(name: str, mapping: Dict[str, int]) -> int:
normalized = name.upper()
if normalized not in mapping:
raise KeyError(f"Unknown enum value: {name}")
return mapping[normalized]
def build(message_base, decode_error) -> Tuple[types.ModuleType, types.ModuleType]:
"""Return ``(config_pb2, mesh_pb2)`` stubs built from protobuf shims."""
class _ProtoMessage(message_base):
"""Base class implementing JSON round-tripping for protobuf stubs."""
_FIELD_ALIASES: Dict[str, str] = {}
_FIELD_FACTORIES: Dict[str, Callable[[], "_ProtoMessage"]] = {}
def __init__(self) -> None:
super().__init__()
object.__setattr__(self, "_fields", {})
def __setattr__(
self, name: str, value: Any
) -> None: # noqa: D401 - behaviour documented on base class
object.__setattr__(self, name, value)
if not name.startswith("_"):
self._fields[name] = value
def __getattr__(self, name: str) -> Any:
factories = getattr(self, "_FIELD_FACTORIES", {})
if name in factories:
value = factories[name]()
self.__setattr__(name, value)
return value
raise AttributeError(name)
def _alias_for(self, name: str) -> str:
return self._FIELD_ALIASES.get(name, name)
def _name_for(self, alias: str) -> str:
reverse = getattr(self, "_FIELD_ALIASES", {})
for key, candidate in reverse.items():
if candidate == alias:
return key
return alias
def _to_dict(self) -> Dict[str, Any]:
result: Dict[str, Any] = {}
for name, value in self._fields.items():
alias = self._alias_for(name)
if isinstance(value, _ProtoMessage):
result[alias] = value._to_dict()
elif isinstance(value, list):
result[alias] = [
item._to_dict() if isinstance(item, _ProtoMessage) else item
for item in value
]
else:
result[alias] = value
return result
def SerializeToString(self) -> bytes:
"""Encode the message contents as a JSON byte string."""
return json.dumps(self._to_dict(), sort_keys=True).encode("utf-8")
def ParseFromString(self, payload: bytes) -> None:
"""Populate the message from a JSON byte string."""
try:
data = json.loads(payload.decode("utf-8"))
except Exception as exc: # pragma: no cover - defensive guard
raise decode_error(str(exc)) from exc
self._load_from_dict(data)
def _load_from_dict(self, data: Dict[str, Any]) -> None:
factories = getattr(self, "_FIELD_FACTORIES", {})
for alias, value in data.items():
name = self._name_for(alias)
if name in factories and isinstance(value, dict):
nested = getattr(self, name, None)
if not isinstance(nested, _ProtoMessage):
nested = factories[name]()
object.__setattr__(self, name, nested)
nested._load_from_dict(value)
self._fields[name] = nested
else:
setattr(self, name, value)
def to_dict(self) -> Dict[str, Any]:
"""Return a JSON-compatible representation of the message."""
return self._to_dict()
def ListFields(self):
"""Mimic protobuf ``ListFields`` for the subset of tests used."""
from types import SimpleNamespace
entries = []
for name, value in self._fields.items():
descriptor = SimpleNamespace(name=name)
entries.append((descriptor, value))
return entries
def CopyFrom(self, other: "_ProtoMessage") -> None:
"""Populate this message with values from ``other``."""
if not isinstance(other, _ProtoMessage):
raise TypeError("CopyFrom expects another protobuf message")
self._fields.clear()
for name, value in other._fields.items():
if isinstance(value, _ProtoMessage):
copied = type(value)()
copied.CopyFrom(value)
setattr(self, name, copied)
elif isinstance(value, list):
converted = []
for item in value:
if isinstance(item, _ProtoMessage):
nested = type(item)()
nested.CopyFrom(item)
converted.append(nested)
else:
converted.append(item)
setattr(self, name, converted)
else:
setattr(self, name, value)
class _DeviceMetrics(_ProtoMessage):
_FIELD_ALIASES = {
"battery_level": "batteryLevel",
"voltage": "voltage",
"channel_utilization": "channelUtilization",
"air_util_tx": "airUtilTx",
"uptime_seconds": "uptimeSeconds",
}
class _Position(_ProtoMessage):
    """Stub of ``mesh_pb2.Position`` exposing aliases and ``LocSource``."""

    # snake_case attribute name -> camelCase JSON alias.
    _FIELD_ALIASES = {
        "latitude_i": "latitudeI",
        "longitude_i": "longitudeI",
        "location_source": "locationSource",
    }

    class LocSource:
        """Enum stub mirroring ``Position.LocSource``."""

        _VALUES = {
            "LOC_UNSET": 0,
            "LOC_INTERNAL": 1,
            "LOC_EXTERNAL": 2,
        }

        @classmethod
        def Value(cls, name: str) -> int:
            # Delegates to the shared enum lookup helper defined
            # elsewhere in this module.
            return _enum_value(name, cls._VALUES)
class _User(_ProtoMessage):
    """Stub of ``mesh_pb2.User`` used by the tests."""

    # snake_case attribute name -> camelCase JSON alias.
    _FIELD_ALIASES = {
        "short_name": "shortName",
        "long_name": "longName",
        "hw_model": "hwModel",
    }
class _NodeInfo(_ProtoMessage):
    """Stub of ``mesh_pb2.NodeInfo`` used by the tests.

    Fields listed in ``_FIELD_FACTORIES`` are materialised as nested
    stub messages by the ``_ProtoMessage`` loading machinery when a
    ``dict`` value is supplied for them.
    """

    # snake_case attribute name -> camelCase JSON alias.
    _FIELD_ALIASES = {
        "last_heard": "lastHeard",
        "is_favorite": "isFavorite",
        "hops_away": "hopsAway",
    }

    # Factories for nested message fields.
    _FIELD_FACTORIES = {
        "user": _User,
        "device_metrics": _DeviceMetrics,
        "position": _Position,
    }

    # NOTE: a previous explicit ``__init__`` only delegated to
    # ``super().__init__()`` and was removed as redundant; construction
    # behavior is unchanged.
class _HardwareModel:
    """Enum stub mirroring ``mesh_pb2.HardwareModel``."""

    _VALUES = {
        "UNKNOWN": 0,
        "TBEAM": 1,
        "HELTEC": 2,
    }

    @classmethod
    def Value(cls, name: str) -> int:
        # Delegates to the shared enum lookup helper defined elsewhere
        # in this module.
        return _enum_value(name, cls._VALUES)
# Assemble a stand-in ``mesh_pb2`` module exposing the stub classes
# under their real protobuf names.
mesh_pb2 = types.ModuleType("mesh_pb2")
mesh_pb2.NodeInfo = _NodeInfo
mesh_pb2.User = _User
mesh_pb2.Position = _Position
mesh_pb2.DeviceMetrics = _DeviceMetrics
mesh_pb2.HardwareModel = _HardwareModel
class _RoleEnum:
    """Enum stub mirroring ``Config.DeviceConfig.Role``."""

    _VALUES = {
        "UNKNOWN": 0,
        "CLIENT": 1,
        "REPEATER": 2,
        "ROUTER": 3,
    }

    @classmethod
    def Value(cls, name: str) -> int:
        # Delegates to the shared enum lookup helper defined elsewhere
        # in this module.
        return _enum_value(name, cls._VALUES)
class _DeviceConfig:
    """Stub of ``Config.DeviceConfig`` exposing only the ``Role`` enum."""

    Role = _RoleEnum


class _Config:
    """Stub of ``config_pb2.Config`` exposing only ``DeviceConfig``."""

    DeviceConfig = _DeviceConfig
# Assemble the stand-in ``config_pb2`` module and hand both fake modules
# back to the caller (presumably for injection into ``sys.modules`` in
# place of the real Meshtastic protobufs — confirm at the call site).
config_pb2 = types.ModuleType("config_pb2")
config_pb2.Config = _Config
return config_pb2, mesh_pb2
+6 -100
View File
@@ -11,9 +11,11 @@
"rssi": -121,
"hop_limit": 1,
"snr": -13.25,
"lora_freq": 915,
"modem_preset": "LONG_FAST",
"channel_name": "SpecChannel",
"node": {
"snr": -13.25,
"raw_json": null,
"node_id": "!bba83318",
"num": 3148362520,
"short_name": "BerF",
@@ -51,9 +53,11 @@
"rssi": -117,
"hop_limit": 3,
"snr": -12.0,
"lora_freq": 868,
"modem_preset": "MEDIUM_SLOW",
"channel_name": "SpecChannel",
"node": {
"snr": -12.0,
"raw_json": null,
"node_id": "!43b6e530",
"num": 1136059696,
"short_name": "FFSR",
@@ -93,7 +97,6 @@
"snr": -13.5,
"node": {
"snr": 11.0,
"raw_json": null,
"node_id": "!d42e18e8",
"num": 3559790824,
"short_name": "RRun",
@@ -133,7 +136,6 @@
"snr": -13.0,
"node": {
"snr": 11.0,
"raw_json": null,
"node_id": "!d42e18e8",
"num": 3559790824,
"short_name": "RRun",
@@ -173,7 +175,6 @@
"snr": 11.0,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!194a7351",
"num": 424309585,
"short_name": "l5y7",
@@ -213,7 +214,6 @@
"snr": 11.25,
"node": {
"snr": 11.25,
"raw_json": null,
"node_id": "!4ed36bd0",
"num": 1322478544,
"short_name": "RDM",
@@ -253,7 +253,6 @@
"snr": 11.0,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!194a7351",
"num": 424309585,
"short_name": "l5y7",
@@ -293,7 +292,6 @@
"snr": 10.75,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!194a7351",
"num": 424309585,
"short_name": "l5y7",
@@ -333,7 +331,6 @@
"snr": 12.0,
"node": {
"snr": 12.0,
"raw_json": null,
"node_id": "!b03c97a4",
"num": 2956760996,
"short_name": "BLN1",
@@ -373,7 +370,6 @@
"snr": -15.0,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!9eeb25ec",
"num": 2666210796,
"short_name": "25ec",
@@ -413,7 +409,6 @@
"snr": 11.25,
"node": {
"snr": 11.25,
"raw_json": null,
"node_id": "!f9b0938c",
"num": 4189098892,
"short_name": "Ed-1",
@@ -453,7 +448,6 @@
"snr": 11.25,
"node": {
"snr": 10.5,
"raw_json": null,
"node_id": "!6c73bf84",
"num": 1819524996,
"short_name": "ts1",
@@ -493,7 +487,6 @@
"snr": 11.25,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -533,7 +526,6 @@
"snr": 11.0,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!194a7351",
"num": 424309585,
"short_name": "l5y7",
@@ -573,7 +565,6 @@
"snr": 11.0,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!6cf821fb",
"num": 1828200955,
"short_name": "OKP1",
@@ -613,7 +604,6 @@
"snr": 10.75,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!6cf821fb",
"num": 1828200955,
"short_name": "OKP1",
@@ -653,7 +643,6 @@
"snr": 10.5,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -693,7 +682,6 @@
"snr": 10.25,
"node": {
"snr": 10.25,
"raw_json": null,
"node_id": "!db2b23f4",
"num": 3677037556,
"short_name": "Eagl",
@@ -733,7 +721,6 @@
"snr": 11.25,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!6cf821fb",
"num": 1828200955,
"short_name": "OKP1",
@@ -773,7 +760,6 @@
"snr": 11.0,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -813,7 +799,6 @@
"snr": -11.75,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!177cfa26",
"num": 394066470,
"short_name": "lun1",
@@ -853,7 +838,6 @@
"snr": 11.25,
"node": {
"snr": 10.5,
"raw_json": null,
"node_id": "!9ea0c780",
"num": 2661336960,
"short_name": "nguE",
@@ -893,7 +877,6 @@
"snr": 10.75,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -933,7 +916,6 @@
"snr": 11.5,
"node": {
"snr": 11.0,
"raw_json": null,
"node_id": "!e80cda12",
"num": 3893156370,
"short_name": "mowW",
@@ -973,7 +955,6 @@
"snr": 11.0,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!da635e24",
"num": 3663945252,
"short_name": "LAN",
@@ -1013,7 +994,6 @@
"snr": 11.5,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -1053,7 +1033,6 @@
"snr": 11.5,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!da635e24",
"num": 3663945252,
"short_name": "LAN",
@@ -1093,7 +1072,6 @@
"snr": -11.75,
"node": {
"snr": -9.75,
"raw_json": null,
"node_id": "!a0cb1608",
"num": 2697664008,
"short_name": "KBV5",
@@ -1133,7 +1111,6 @@
"snr": 10.75,
"node": {
"snr": 10.25,
"raw_json": null,
"node_id": "!bcf10936",
"num": 3169913142,
"short_name": "0936",
@@ -1173,7 +1150,6 @@
"snr": 11.75,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!194a7351",
"num": 424309585,
"short_name": "l5y7",
@@ -1213,7 +1189,6 @@
"snr": -13.25,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!a0cc6904",
"num": 2697750788,
"short_name": "Kdû",
@@ -1253,7 +1228,6 @@
"snr": 10.5,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!da635e24",
"num": 3663945252,
"short_name": "LAN",
@@ -1293,7 +1267,6 @@
"snr": 11.0,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!9eeb25ec",
"num": 2666210796,
"short_name": "25ec",
@@ -1333,7 +1306,6 @@
"snr": -14.0,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!a0cc6904",
"num": 2697750788,
"short_name": "Kdû",
@@ -1373,7 +1345,6 @@
"snr": 11.25,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!9eeb25ec",
"num": 2666210796,
"short_name": "25ec",
@@ -1413,7 +1384,6 @@
"snr": 11.5,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!9eeb25ec",
"num": 2666210796,
"short_name": "25ec",
@@ -1453,7 +1423,6 @@
"snr": 11.75,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!9eeb25ec",
"num": 2666210796,
"short_name": "25ec",
@@ -1493,7 +1462,6 @@
"snr": 11.75,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!194a7351",
"num": 424309585,
"short_name": "l5y7",
@@ -1533,7 +1501,6 @@
"snr": 10.75,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!03b9ca11",
"num": 62507537,
"short_name": "ca11",
@@ -1573,7 +1540,6 @@
"snr": 7.5,
"node": {
"snr": 10.25,
"raw_json": null,
"node_id": "!db2b23f4",
"num": 3677037556,
"short_name": "Eagl",
@@ -1613,7 +1579,6 @@
"snr": 10.75,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!194a7351",
"num": 424309585,
"short_name": "l5y7",
@@ -1653,7 +1618,6 @@
"snr": 10.75,
"node": {
"snr": 10.25,
"raw_json": null,
"node_id": "!db2b23f4",
"num": 3677037556,
"short_name": "Eagl",
@@ -1693,7 +1657,6 @@
"snr": 10.75,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -1733,7 +1696,6 @@
"snr": 10.0,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!da635e24",
"num": 3663945252,
"short_name": "LAN",
@@ -1773,7 +1735,6 @@
"snr": 10.5,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -1813,7 +1774,6 @@
"snr": 11.0,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!a0cc6904",
"num": 2697750788,
"short_name": "Kdû",
@@ -1853,7 +1813,6 @@
"snr": -12.25,
"node": {
"snr": -12.25,
"raw_json": null,
"node_id": "!2f945044",
"num": 798249028,
"short_name": "BND",
@@ -1893,7 +1852,6 @@
"snr": 11.0,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -1933,7 +1891,6 @@
"snr": 10.5,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!9ee71c38",
"num": 2665946168,
"short_name": "1c38",
@@ -1973,7 +1930,6 @@
"snr": 10.75,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -2013,7 +1969,6 @@
"snr": 11.0,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!194a7351",
"num": 424309585,
"short_name": "l5y7",
@@ -2053,7 +2008,6 @@
"snr": 10.5,
"node": {
"snr": -6.25,
"raw_json": null,
"node_id": "!7c5b0920",
"num": 2086340896,
"short_name": "FFTB",
@@ -2093,7 +2047,6 @@
"snr": 10.25,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!194a7351",
"num": 424309585,
"short_name": "l5y7",
@@ -2133,7 +2086,6 @@
"snr": 11.25,
"node": {
"snr": 10.5,
"raw_json": null,
"node_id": "!9ea0c780",
"num": 2661336960,
"short_name": "nguE",
@@ -2173,7 +2125,6 @@
"snr": 10.75,
"node": {
"snr": -12.75,
"raw_json": null,
"node_id": "!0910c922",
"num": 152095010,
"short_name": "c922",
@@ -2213,7 +2164,6 @@
"snr": 11.0,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -2253,7 +2203,6 @@
"snr": 11.0,
"node": {
"snr": 11.0,
"raw_json": null,
"node_id": "!9ee71430",
"num": 2665944112,
"short_name": "FiSp",
@@ -2293,7 +2242,6 @@
"snr": 11.5,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!194a7351",
"num": 424309585,
"short_name": "l5y7",
@@ -2333,7 +2281,6 @@
"snr": 10.75,
"node": {
"snr": 10.25,
"raw_json": null,
"node_id": "!bcf10936",
"num": 3169913142,
"short_name": "0936",
@@ -2373,7 +2320,6 @@
"snr": 11.0,
"node": {
"snr": 11.25,
"raw_json": null,
"node_id": "!16ced364",
"num": 382653284,
"short_name": "Pat",
@@ -2413,7 +2359,6 @@
"snr": 11.25,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!9ee71c38",
"num": 2665946168,
"short_name": "1c38",
@@ -2453,7 +2398,6 @@
"snr": 10.5,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!9ee71c38",
"num": 2665946168,
"short_name": "1c38",
@@ -2493,7 +2437,6 @@
"snr": 10.25,
"node": {
"snr": 10.0,
"raw_json": null,
"node_id": "!a3deea53",
"num": 2749295187,
"short_name": "🐸",
@@ -2533,7 +2476,6 @@
"snr": 9.0,
"node": {
"snr": 10.5,
"raw_json": null,
"node_id": "!9ea0c780",
"num": 2661336960,
"short_name": "nguE",
@@ -2573,7 +2515,6 @@
"snr": 11.5,
"node": {
"snr": -13.25,
"raw_json": null,
"node_id": "!bba83318",
"num": 3148362520,
"short_name": "BerF",
@@ -2613,7 +2554,6 @@
"snr": 9.25,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!9ee71c38",
"num": 2665946168,
"short_name": "1c38",
@@ -2653,7 +2593,6 @@
"snr": 10.25,
"node": {
"snr": 11.0,
"raw_json": null,
"node_id": "!e80cda12",
"num": 3893156370,
"short_name": "mowW",
@@ -2693,7 +2632,6 @@
"snr": -5.0,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!a0cc6904",
"num": 2697750788,
"short_name": "Kdû",
@@ -2733,7 +2671,6 @@
"snr": 11.0,
"node": {
"snr": 11.0,
"raw_json": null,
"node_id": "!e80cda12",
"num": 3893156370,
"short_name": "mowW",
@@ -2773,7 +2710,6 @@
"snr": 0.75,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!da635e24",
"num": 3663945252,
"short_name": "LAN",
@@ -2813,7 +2749,6 @@
"snr": 11.25,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -2853,7 +2788,6 @@
"snr": 11.5,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -2893,7 +2827,6 @@
"snr": 10.0,
"node": {
"snr": 11.25,
"raw_json": null,
"node_id": "!16ced364",
"num": 382653284,
"short_name": "Pat",
@@ -2933,7 +2866,6 @@
"snr": 11.0,
"node": {
"snr": -9.75,
"raw_json": null,
"node_id": "!a0cb1608",
"num": 2697664008,
"short_name": "KBV5",
@@ -2973,7 +2905,6 @@
"snr": 9.5,
"node": {
"snr": -9.75,
"raw_json": null,
"node_id": "!a0cb1608",
"num": 2697664008,
"short_name": "KBV5",
@@ -3013,7 +2944,6 @@
"snr": 10.75,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!da635e24",
"num": 3663945252,
"short_name": "LAN",
@@ -3053,7 +2983,6 @@
"snr": 11.0,
"node": {
"snr": -12.0,
"raw_json": null,
"node_id": "!43b6e530",
"num": 1136059696,
"short_name": "FFSR",
@@ -3093,7 +3022,6 @@
"snr": 11.0,
"node": {
"snr": 11.0,
"raw_json": null,
"node_id": "!e80cda12",
"num": 3893156370,
"short_name": "mowW",
@@ -3133,7 +3061,6 @@
"snr": 11.0,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!da635e24",
"num": 3663945252,
"short_name": "LAN",
@@ -3173,7 +3100,6 @@
"snr": 10.25,
"node": {
"snr": 11.25,
"raw_json": null,
"node_id": "!16ced364",
"num": 382653284,
"short_name": "Pat",
@@ -3213,7 +3139,6 @@
"snr": 10.5,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!da635e24",
"num": 3663945252,
"short_name": "LAN",
@@ -3253,7 +3178,6 @@
"snr": 10.75,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!194a7351",
"num": 424309585,
"short_name": "l5y7",
@@ -3293,7 +3217,6 @@
"snr": 11.0,
"node": {
"snr": 11.0,
"raw_json": null,
"node_id": "!abbdf3f7",
"num": 2881352695,
"short_name": "f3f7",
@@ -3333,7 +3256,6 @@
"snr": 10.5,
"node": {
"snr": 10.5,
"raw_json": null,
"node_id": "!c0c32348",
"num": 3234014024,
"short_name": "CooP",
@@ -3373,7 +3295,6 @@
"snr": 11.0,
"node": {
"snr": 11.25,
"raw_json": null,
"node_id": "!16ced364",
"num": 382653284,
"short_name": "Pat",
@@ -3413,7 +3334,6 @@
"snr": 10.5,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -3453,7 +3373,6 @@
"snr": -12.5,
"node": {
"snr": -9.75,
"raw_json": null,
"node_id": "!a0cb1608",
"num": 2697664008,
"short_name": "KBV5",
@@ -3493,7 +3412,6 @@
"snr": 11.0,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!da635e24",
"num": 3663945252,
"short_name": "LAN",
@@ -3533,7 +3451,6 @@
"snr": -8.75,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -3573,7 +3490,6 @@
"snr": 10.25,
"node": {
"snr": 10.5,
"raw_json": null,
"node_id": "!5d823fb1",
"num": 1568817073,
"short_name": "3fb1",
@@ -3613,7 +3529,6 @@
"snr": 11.25,
"node": {
"snr": -12.0,
"raw_json": null,
"node_id": "!43b6e530",
"num": 1136059696,
"short_name": "FFSR",
@@ -3653,7 +3568,6 @@
"snr": 11.0,
"node": {
"snr": 10.5,
"raw_json": null,
"node_id": "!849a8ba4",
"num": 2224720804,
"short_name": "MGN1",
@@ -3693,7 +3607,6 @@
"snr": -13.25,
"node": {
"snr": 10.5,
"raw_json": null,
"node_id": "!849a8ba4",
"num": 2224720804,
"short_name": "MGN1",
@@ -3733,7 +3646,6 @@
"snr": 10.75,
"node": {
"snr": 10.5,
"raw_json": null,
"node_id": "!9c93a2df",
"num": 2626921183,
"short_name": "xaRa",
@@ -3773,7 +3685,6 @@
"snr": 11.25,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!9ee71c38",
"num": 2665946168,
"short_name": "1c38",
@@ -3813,7 +3724,6 @@
"snr": 11.0,
"node": {
"snr": 11.5,
"raw_json": null,
"node_id": "!9ee71c38",
"num": 2665946168,
"short_name": "1c38",
@@ -3853,7 +3763,6 @@
"snr": 11.0,
"node": {
"snr": 10.5,
"raw_json": null,
"node_id": "!5d823fb1",
"num": 1568817073,
"short_name": "3fb1",
@@ -3893,7 +3802,6 @@
"snr": 11.0,
"node": {
"snr": 10.5,
"raw_json": null,
"node_id": "!6c73bf84",
"num": 1819524996,
"short_name": "ts1",
@@ -3933,7 +3841,6 @@
"snr": 11.25,
"node": {
"snr": null,
"raw_json": null,
"node_id": null,
"num": null,
"short_name": null,
@@ -3973,7 +3880,6 @@
"snr": 11.25,
"node": {
"snr": 10.75,
"raw_json": null,
"node_id": "!194a7351",
"num": 424309585,
"short_name": "l5y7",
+20
View File
@@ -0,0 +1,20 @@
[
{
"node_id": "!7c5b0920",
"rx_time": 1758884186,
"node_broadcast_interval_secs": 1800,
"last_sent_by": "!9e99f8c0",
"neighbors": [
{ "node_id": "!2b22accc", "snr": -6.5, "rx_time": 1758884106 },
{ "node_id": "!43ba26d0", "snr": -5.0, "rx_time": 1758884120 },
{ "node_id": "!69ba6f71", "snr": -13.0, "rx_time": 1758884135 },
{ "node_id": "!fa848384", "snr": -14.75, "rx_time": 1758884150 },
{ "node_id": "!da6a35b4", "snr": -6.5, "rx_time": 1758884165 }
]
},
{
"node_id": "!cafebabe",
"rx_time": 1758883200,
"neighbors": []
}
]
+567 -207
View File
File diff suppressed because it is too large Load Diff
+135
View File
@@ -0,0 +1,135 @@
[
{
"id": 1256091342,
"node_id": "!9e95cf60",
"from_id": "!9e95cf60",
"to_id": "^all",
"rx_time": 1758024300,
"rx_iso": "2025-09-16T12:05:00Z",
"telemetry_time": 1758024300,
"channel": 0,
"portnum": "TELEMETRY_APP",
"battery_level": 101,
"bitfield": 1,
"payload_b64": "DTVr0mgSFQhlFQIrh0AdJb8YPyXYFSA9KJTPEg==",
"current": 0.0715,
"gas_resistance": 1456.0,
"iaq": 83,
"distance": 12.5,
"lux": 100.25,
"white_lux": 64.5,
"ir_lux": 12.75,
"uv_lux": 1.6,
"wind_direction": 270,
"wind_speed": 5.9,
"wind_gust": 7.4,
"wind_lull": 4.8,
"weight": 32.7,
"radiation": 0.45,
"rainfall_1h": 0.18,
"rainfall_24h": 1.42,
"soil_moisture": 3100,
"soil_temperature": 18.9,
"device_metrics": {
"batteryLevel": 101,
"voltage": 4.224,
"channelUtilization": 0.59666663,
"airUtilTx": 0.03908333,
"uptimeSeconds": 305044,
"current": 0.0715
},
"raw": {
"device_metrics": {
"battery_level": 101,
"voltage": 4.224,
"channel_utilization": 0.59666663,
"air_util_tx": 0.03908333,
"uptime_seconds": 305044
}
}
},
{
"id": 2817720548,
"node_id": "!2a2a2a2a",
"from_id": "!2a2a2a2a",
"to_id": "^all",
"rx_time": 1758024400,
"rx_iso": "2025-09-16T12:06:40Z",
"telemetry_time": 1758024390,
"channel": 0,
"portnum": "TELEMETRY_APP",
"bitfield": 1,
"environment_metrics": {
"temperature": 21.98,
"relativeHumidity": 39.475586,
"barometricPressure": 1017.8353,
"gasResistance": 1456.0,
"iaq": 83,
"distance": 12.5,
"lux": 100.25,
"whiteLux": 64.5,
"irLux": 12.75,
"uvLux": 1.6,
"windDirection": 270,
"windSpeed": 5.9,
"windGust": 7.4,
"windLull": 4.8,
"weight": 32.7,
"radiation": 0.45,
"rainfall1h": 0.18,
"rainfall24h": 1.42,
"soilMoisture": 3100,
"soilTemperature": 18.9
},
"raw": {
"environment_metrics": {
"temperature": 21.98,
"relative_humidity": 39.475586,
"barometric_pressure": 1017.8353
}
}
},
{
"id": 345678901,
"node_id": "!1234abcd",
"from_id": "!1234abcd",
"node_num": 305441741,
"to_id": "^all",
"rx_time": 1758024500,
"rx_iso": "2025-09-16T12:08:20Z",
"telemetry_time": 1758024450,
"channel": 1,
"portnum": "TELEMETRY_APP",
"payload_b64": "AAEC",
"device_metrics": {
"battery_level": 58.5,
"voltage": 3.92,
"channel_utilization": 0.284,
"air_util_tx": 0.051,
"uptime_seconds": 86400,
"current": 0.033
},
"environment_metrics": {
"temperature": 19.5,
"relative_humidity": 48.2,
"barometric_pressure": 1013.1,
"distance": 7.25,
"lux": 75.5,
"whiteLux": 40.0,
"windDirection": 180,
"windSpeed": 4.3,
"weight": 28.4,
"rainfall24h": 0.75,
"soilMoisture": 2850,
"soilTemperature": 17.1
},
"local_stats": {
"numPacketsTx": 1280,
"numPacketsRx": 1425,
"numClients": 6,
"numNodes": 18,
"freeHeap": 21344,
"heapLowWater": 19876
}
}
]
+216
View File
@@ -0,0 +1,216 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional tests that exercise defensive helpers and interfaces."""
import importlib
import sys
import types
from pathlib import Path
from types import SimpleNamespace
import pytest
REPO_ROOT = Path(__file__).resolve().parents[1]
if str(REPO_ROOT) not in sys.path:
sys.path.insert(0, str(REPO_ROOT))
from data.mesh_ingestor import channels, config, interfaces, queue, serialization
@pytest.fixture(autouse=True)
def reset_state(monkeypatch):
"""Ensure mutable singletons are cleaned up between tests."""
repo_root = Path(__file__).resolve().parents[1]
monkeypatch.syspath_prepend(str(repo_root))
channels._reset_channel_cache()
yield
channels._reset_channel_cache()
importlib.reload(config)
def test_config_module_port_aliases(monkeypatch):
"""Ensure the config module keeps CONNECTION and PORT in sync."""
reloaded = importlib.reload(config)
monkeypatch.setattr(reloaded, "CONNECTION", "dev-tty", raising=False)
reloaded.PORT = "new-port"
assert reloaded.CONNECTION == "new-port"
assert reloaded.PORT == "new-port"
def test_queue_stringification_and_ordering():
"""Exercise queue payload formatting and priority ordering."""
mapping_payload = {"b": 1, "a": 2}
assert queue._stringify_payload_value(mapping_payload).startswith('{"a"')
assert queue._stringify_payload_value([1, 2, 3]).startswith("[1")
assert queue._stringify_payload_value({1, 2}).replace(" ", "") in ("[1,2]", "[2,1]")
assert queue._stringify_payload_value(b"bytes") == '"bytes"'
assert queue._stringify_payload_value("text") == '"text"'
pairs = queue._payload_key_value_pairs(mapping_payload)
assert pairs.split(" ") == ["a=2", "b=1"]
state = queue.QueueState()
order = []
queue._enqueue_post_json("/low", {"x": 1}, priority=90, state=state)
queue._enqueue_post_json("/high", {"x": 2}, priority=10, state=state)
state.active = True
queue._drain_post_queue(
state=state, send=lambda path, payload: order.append((path, payload["x"]))
)
assert order == [("/high", 2), ("/low", 1)]
assert state.active is False
assert state.queue == []
def test_channels_iterator_and_capture(monkeypatch):
"""Verify channel helpers normalise roles and cache primary/secondary entries."""
channels._reset_channel_cache()
class StubSettings:
def __init__(self, name):
self.name = name
class PrimaryChannel:
def __init__(self):
self.role = "PRIMARY"
self.settings = StubSettings("Alpha")
class SecondaryChannel:
def __init__(self, index, name):
self.role = "SECONDARY"
self.index = index
self.settings = StubSettings(name)
class Container:
def __len__(self):
return 2
def __getitem__(self, idx):
if idx == 0:
return PrimaryChannel()
if idx == 1:
return SecondaryChannel(5, "Bravo")
raise IndexError
class StubLocalNode:
def __init__(self):
self.channels = Container()
class StubIface:
def __init__(self):
self.localNode = StubLocalNode()
def waitForConfig(self):
return True
channels.capture_from_interface(StubIface())
assert channels.channel_mappings() == ((0, "Alpha"), (5, "Bravo"))
assert channels.channel_name(5) == "Bravo"
assert list(channels._iter_channel_objects({"0": "zero"})) == ["zero"]
def test_candidate_node_id_and_normaliser():
"""Ensure node identifiers are found inside nested payloads."""
nested = {
"payload": {"meta": {"user": {"id": "0x42"}}},
"decoded": {"from": "!0000002a"},
}
node_id = interfaces._candidate_node_id(nested)
assert node_id == "!0000002a"
packet = {"user": {"id": "!0000002a"}, "userId": None}
normalised = interfaces._normalise_nodeinfo_packet(packet)
assert normalised["id"] == "!0000002a"
assert normalised["user"]["id"] == "!0000002a"
def test_safe_nodeinfo_wrapper_handles_missing_id():
"""Cover the KeyError guard and wrapper marker."""
called = {}
def original(_iface, _packet):
called["ran"] = True
raise KeyError("id")
wrapper = interfaces._build_safe_nodeinfo_callback(original)
result = wrapper(SimpleNamespace(), {"anything": 1})
assert called["ran"] is True
assert result is None
assert getattr(wrapper, "_potato_mesh_safe_wrapper")
def test_patch_nodeinfo_handler_class(monkeypatch):
"""Ensure NodeInfoHandler subclasses normalise packets with missing ids."""
class DummyHandler:
def __init__(self):
self.calls = []
def onReceive(self, iface, packet):
self.calls.append(packet)
return packet.get("id")
mesh_interface = types.SimpleNamespace(
NodeInfoHandler=DummyHandler, __name__="meshtastic.mesh_interface"
)
interfaces._patch_nodeinfo_handler_class(mesh_interface)
handler_cls = mesh_interface.NodeInfoHandler
handler = handler_cls()
iface = SimpleNamespace()
packet = {"user": {"id": "abcd"}}
result = handler.onReceive(iface, packet)
assert result == serialization._canonical_node_id("abcd")
assert handler.calls[0]["id"] == serialization._canonical_node_id("abcd")
def test_region_frequency_and_resolution_helpers():
"""Cover enum name parsing for LoRa region frequency."""
class EnumValue:
def __init__(self, name):
self.name = name
class EnumType:
def __init__(self):
self.values_by_number = {1: EnumValue("REGION_915")}
class FieldDesc:
def __init__(self):
self.enum_type = EnumType()
class Descriptor:
def __init__(self):
self.fields_by_name = {"region": FieldDesc()}
class LoraMessage:
def __init__(self, region):
self.region = region
self.DESCRIPTOR = Descriptor()
freq = interfaces._region_frequency(LoraMessage(1))
assert freq == 915
class LocalConfig:
def __init__(self, lora):
self.lora = lora
lora_msg = LoraMessage(1)
resolved = interfaces._resolve_lora_message(LocalConfig(lora_msg))
assert resolved is lora_msg
+2243 -38
View File
File diff suppressed because it is too large Load Diff
+69
View File
@@ -0,0 +1,69 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensure version identifiers stay synchronised across all packages."""
from __future__ import annotations
import json
import re
import sys
from pathlib import Path
REPO_ROOT = Path(__file__).resolve().parents[1]
if str(REPO_ROOT) not in sys.path:
sys.path.insert(0, str(REPO_ROOT))
import data
def _ruby_fallback_version() -> str:
config_path = REPO_ROOT / "web" / "lib" / "potato_mesh" / "config.rb"
contents = config_path.read_text(encoding="utf-8")
inside = False
for line in contents.splitlines():
stripped = line.strip()
if stripped.startswith("def version_fallback"):
inside = True
continue
if inside and stripped == "end":
break
if inside:
literal = re.search(r"['\"](?P<version>[^'\"]+)['\"]", stripped)
if literal:
return literal.group("version")
raise AssertionError("Unable to locate version_fallback definition in config.rb")
def _javascript_package_version() -> str:
package_path = REPO_ROOT / "web" / "package.json"
data = json.loads(package_path.read_text(encoding="utf-8"))
version = data.get("version")
if isinstance(version, str):
return version
raise AssertionError("package.json does not expose a string version")
def test_version_identifiers_match_across_languages() -> None:
"""Guard against version drift between Python, Ruby, and JavaScript."""
python_version = getattr(data, "__version__", None)
assert (
isinstance(python_version, str) and python_version
), "data.__version__ missing"
ruby_version = _ruby_fallback_version()
javascript_version = _javascript_package_version()
assert python_version == ruby_version == javascript_version
+1 -2
View File
@@ -1,6 +1,5 @@
#!/usr/bin/env bash
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+46 -20
View File
@@ -1,6 +1,25 @@
# syntax=docker/dockerfile:1.6
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Main application builder stage
FROM ruby:3.3-alpine AS builder
# Ensure native extensions are built against musl libc rather than
# using glibc precompiled binaries (which fail on Alpine).
ENV BUNDLE_FORCE_RUBY_PLATFORM=true
# Install build dependencies and SQLite3
RUN apk add --no-cache \
build-base \
@@ -15,7 +34,8 @@ WORKDIR /app
COPY web/Gemfile web/Gemfile.lock* ./
# Install gems with SQLite3 support
RUN bundle config set --local without 'development test' && \
RUN bundle config set --local force_ruby_platform true && \
bundle config set --local without 'development test' && \
bundle install --jobs=4 --retry=3
# Production stage
@@ -37,16 +57,24 @@ WORKDIR /app
# Copy installed gems from builder stage
COPY --from=builder /usr/local/bundle /usr/local/bundle
# Copy application code (exclude Dockerfile from web directory)
COPY --chown=potatomesh:potatomesh web/app.rb web/app.sh web/Gemfile web/Gemfile.lock* web/public/ web/spec/ ./
COPY --chown=potatomesh:potatomesh web/views/ ./views/
# Copy application code (excluding the Dockerfile which is not required at runtime)
COPY --chown=potatomesh:potatomesh web/app.rb ./
COPY --chown=potatomesh:potatomesh web/app.sh ./
COPY --chown=potatomesh:potatomesh web/Gemfile ./
COPY --chown=potatomesh:potatomesh web/Gemfile.lock* ./
COPY --chown=potatomesh:potatomesh web/lib ./lib
COPY --chown=potatomesh:potatomesh web/spec ./spec
COPY --chown=potatomesh:potatomesh web/public ./public
COPY --chown=potatomesh:potatomesh web/views ./views
COPY --chown=potatomesh:potatomesh web/scripts ./scripts
# Copy SQL schema files from data directory
COPY --chown=potatomesh:potatomesh data/*.sql /data/
# Create data directory for SQLite database
RUN mkdir -p /app/data && \
chown -R potatomesh:potatomesh /app/data
# Create data and configuration directories with correct ownership
RUN mkdir -p /app/.local/share/potato-mesh \
&& mkdir -p /app/.config/potato-mesh/well-known \
&& chown -R potatomesh:potatomesh /app/.local/share /app/.config
# Switch to non-root user
USER potatomesh
@@ -55,19 +83,17 @@ USER potatomesh
EXPOSE 41447
# Default environment variables (can be overridden by host)
ENV APP_ENV=production \
MESH_DB=/app/data/mesh.db \
DB_BUSY_TIMEOUT_MS=5000 \
DB_BUSY_MAX_RETRIES=5 \
DB_BUSY_RETRY_DELAY=0.05 \
MAX_JSON_BODY_BYTES=1048576 \
SITE_NAME="Berlin Mesh Network" \
DEFAULT_CHANNEL="#MediumFast" \
DEFAULT_FREQUENCY="868MHz" \
MAP_CENTER_LAT=52.502889 \
MAP_CENTER_LON=13.404194 \
MAX_NODE_DISTANCE_KM=50 \
MATRIX_ROOM="" \
ENV RACK_ENV=production \
APP_ENV=production \
XDG_DATA_HOME=/app/.local/share \
XDG_CONFIG_HOME=/app/.config \
SITE_NAME="PotatoMesh Demo" \
CHANNEL="#LongFast" \
FREQUENCY="915MHz" \
MAP_CENTER="38.761944,-27.090833" \
MAP_ZOOM="" \
MAX_DISTANCE=42 \
CONTACT_LINK="#potatomesh:dod.ngo" \
DEBUG=0
# Start the application
+3 -1
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,9 +15,11 @@
source "https://rubygems.org"
gem "sinatra", "~> 4.0"
gem "erb", "~> 4.0"
gem "sqlite3", "~> 1.7"
gem "rackup", "~> 2.2"
gem "puma", "~> 7.0"
gem "prometheus-client"
group :test do
gem "rspec", "~> 3.12"
+3 -565
View File
@@ -1,4 +1,4 @@
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,568 +14,6 @@
# frozen_string_literal: true
# Main Sinatra application exposing the Meshtastic node and message archive.
# The daemon in +data/mesh.py+ pushes updates into the SQLite database that
# this web process reads from, providing JSON APIs and a rendered HTML index
# page for human visitors.
require "sinatra"
require "json"
require "sqlite3"
require "fileutils"
require "logger"
require "rack/utils"
require "open3"
require_relative "lib/potato_mesh/application"
DB_PATH = ENV.fetch("MESH_DB", File.join(__dir__, "../data/mesh.db"))
DB_BUSY_TIMEOUT_MS = ENV.fetch("DB_BUSY_TIMEOUT_MS", "5000").to_i
DB_BUSY_MAX_RETRIES = ENV.fetch("DB_BUSY_MAX_RETRIES", "5").to_i
DB_BUSY_RETRY_DELAY = ENV.fetch("DB_BUSY_RETRY_DELAY", "0.05").to_f
WEEK_SECONDS = 7 * 24 * 60 * 60
DEFAULT_MAX_JSON_BODY_BYTES = 1_048_576
MAX_JSON_BODY_BYTES = begin
raw = ENV.fetch("MAX_JSON_BODY_BYTES", DEFAULT_MAX_JSON_BODY_BYTES.to_s)
value = Integer(raw, 10)
value.positive? ? value : DEFAULT_MAX_JSON_BODY_BYTES
rescue ArgumentError
DEFAULT_MAX_JSON_BODY_BYTES
end
VERSION_FALLBACK = "v0.2.1"
# Derive the application version from `git describe` metadata.
#
# Falls back to VERSION_FALLBACK whenever the checkout lacks a .git
# directory, the git invocation fails, or its output is unusable, so the
# application stays bootable in packaged deployments.
#
# @return [String] version such as "v1.2.3" (exactly on a tag) or
#   "v1.2.3+4-abc1234" when commits exist past the latest tag.
def determine_app_version
  repo_root = File.expand_path("..", __dir__)
  git_dir = File.join(repo_root, ".git")
  return VERSION_FALLBACK unless File.directory?(git_dir)
  stdout, status = Open3.capture2("git", "-C", repo_root, "describe", "--tags", "--long", "--abbrev=7")
  return VERSION_FALLBACK unless status.success?
  raw = stdout.strip
  return VERSION_FALLBACK if raw.empty?
  # --long output is "<tag>-<commits-since-tag>-g<short-hash>".
  match = /\A(?<tag>.+)-(?<count>\d+)-g(?<hash>[0-9a-f]+)\z/.match(raw)
  return raw unless match
  tag = match[:tag]
  count = match[:count].to_i
  hash = match[:hash]
  # Exactly on a tag: report the bare tag name.
  return tag if count.zero?
  "#{tag}+#{count}-#{hash}"
rescue StandardError
  # Any unexpected failure (missing git binary, permissions) falls back.
  VERSION_FALLBACK
end
# Resolved once at boot; reused by templates and API metadata.
APP_VERSION = determine_app_version
set :public_folder, File.join(__dir__, "public")
set :views, File.join(__dir__, "views")
# Site presentation defaults, all overridable via the environment.
SITE_NAME = ENV.fetch("SITE_NAME", "Meshtastic Berlin")
DEFAULT_CHANNEL = ENV.fetch("DEFAULT_CHANNEL", "#MediumFast")
DEFAULT_FREQUENCY = ENV.fetch("DEFAULT_FREQUENCY", "868MHz")
MAP_CENTER_LAT = ENV.fetch("MAP_CENTER_LAT", "52.502889").to_f
MAP_CENTER_LON = ENV.fetch("MAP_CENTER_LON", "13.404194").to_f
MAX_NODE_DISTANCE_KM = ENV.fetch("MAX_NODE_DISTANCE_KM", "137").to_f
MATRIX_ROOM = ENV.fetch("MATRIX_ROOM", "#meshtastic-berlin:matrix.org")
# Verbose logging is enabled only when DEBUG is exactly "1".
DEBUG = ENV["DEBUG"] == "1"
class << Sinatra::Application
  # Align the configured logger's severity with the DEBUG flag.
  #
  # @return [void] no-op when no logger has been configured yet.
  def apply_logger_level!
    logger = settings.logger
    return unless logger
    logger.level = DEBUG ? Logger::DEBUG : Logger::WARN
  end
end
# Install one stdout logger for both the app and Rack's request log, then
# apply the severity implied by the DEBUG flag.
Sinatra::Application.configure do
  app_logger = Logger.new($stdout)
  set :logger, app_logger
  use Rack::CommonLogger, app_logger
  Sinatra::Application.apply_logger_level!
end
# Open the SQLite database with a configured busy timeout.
#
# @param readonly [Boolean] whether to open the database in read-only mode.
# @return [SQLite3::Database] handle the caller is responsible for closing.
def open_database(readonly: false)
  SQLite3::Database.new(DB_PATH, readonly: readonly).tap do |db|
    # Wait up to DB_BUSY_TIMEOUT_MS for locks held by the data daemon.
    db.busy_timeout = DB_BUSY_TIMEOUT_MS
  end
end
# Run the supplied block, retrying with linear back-off whenever SQLite
# signals that the database is momentarily locked by another writer.
#
# @param max_retries [Integer] retries allowed after the first attempt.
# @param base_delay [Float] seconds multiplied by the attempt number to
#   produce the pause before each retry.
# @yieldreturn [Object] value of the block once it completes.
# @raise [SQLite3::BusyException] when the lock persists past the budget.
def with_busy_retry(max_retries: DB_BUSY_MAX_RETRIES, base_delay: DB_BUSY_RETRY_DELAY)
  failures = 0
  begin
    yield
  rescue SQLite3::BusyException
    failures += 1
    raise unless failures <= max_retries
    sleep(base_delay * failures)
    retry
  end
end
# Checks whether the SQLite database already contains the required tables.
#
# @return [Boolean] true when both +nodes+ and +messages+ tables exist;
#   false when the file is missing or any SQLite error occurs.
def db_schema_present?
  return false unless File.exist?(DB_PATH)
  db = open_database(readonly: true)
  tables = db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages')").flatten
  tables.include?("nodes") && tables.include?("messages")
rescue SQLite3::Exception
  # An unreadable database is treated as "schema absent".
  false
ensure
  db&.close
end
# Create the SQLite database and seed it with the node and message schemas.
#
# @return [void]
def init_db
  FileUtils.mkdir_p(File.dirname(DB_PATH))
  db = open_database
  # Schema files live next to the data daemon (data/nodes.sql etc.).
  %w[nodes messages].each do |schema|
    sql_file = File.expand_path("../data/#{schema}.sql", __dir__)
    db.execute_batch(File.read(sql_file))
  end
ensure
  db&.close
end
# Provision the schema on first boot only.
init_db unless db_schema_present?
# Retrieve recently heard nodes ordered by their last contact time.
#
# Only nodes heard within the past week are returned. Timestamps lying in
# the future (device clock skew) are clamped to now or discarded.
#
# @param limit [Integer] maximum number of rows returned.
# @return [Array<Hash>] collection of node records formatted for the API.
def query_nodes(limit)
  db = open_database(readonly: true)
  db.results_as_hash = true
  now = Time.now.to_i
  min_last_heard = now - WEEK_SECONDS
  rows = db.execute <<~SQL, [min_last_heard, limit]
    SELECT node_id, short_name, long_name, hw_model, role, snr,
           battery_level, voltage, last_heard, first_heard,
           uptime_seconds, channel_utilization, air_util_tx,
           position_time, latitude, longitude, altitude
    FROM nodes
    WHERE last_heard >= ?
    ORDER BY last_heard DESC
    LIMIT ?
  SQL
  rows.each do |r|
    r["role"] ||= "CLIENT"
    lh = r["last_heard"]&.to_i
    pt = r["position_time"]&.to_i
    # Clamp future "last heard" to now; drop future position fixes.
    lh = now if lh && lh > now
    pt = nil if pt && pt > now
    r["last_heard"] = lh
    r["position_time"] = pt
    # ISO-8601 companions for clients that prefer formatted timestamps.
    r["last_seen_iso"] = Time.at(lh).utc.iso8601 if lh
    r["pos_time_iso"] = Time.at(pt).utc.iso8601 if pt
  end
  rows
ensure
  db&.close
end
# GET /api/nodes
#
# Returns a JSON array of the most recently heard nodes.
#
# The optional +limit+ query parameter is clamped to 1..1000: the previous
# `[limit, 1000].min` let a negative value through, and SQLite treats a
# negative LIMIT as "no limit", bypassing the cap entirely; a zero or
# non-numeric value would have returned an empty result.
get "/api/nodes" do
  content_type :json
  requested = params["limit"]&.to_i || 200
  limit = requested.clamp(1, 1000)
  query_nodes(limit).to_json
end
# Retrieve recent text messages joined with related node information.
#
# Each message is joined against the sender's node record either by the
# canonical node ID or, for purely numeric sender references, by the
# node's numeric alias. Joined node columns are folded into a nested
# "node" hash so the API payload keeps message and sender data separate.
#
# @param limit [Integer] maximum number of rows returned.
# @return [Array<Hash>] collection of message rows suitable for serialisation.
def query_messages(limit)
  db = open_database(readonly: true)
  db.results_as_hash = true
  rows = db.execute <<~SQL, [limit]
    SELECT m.*, n.*, m.snr AS msg_snr
    FROM messages m
    LEFT JOIN nodes n ON (
      m.from_id = n.node_id OR (
        CAST(m.from_id AS TEXT) <> '' AND
        CAST(m.from_id AS TEXT) GLOB '[0-9]*' AND
        CAST(m.from_id AS INTEGER) = n.num
      )
    )
    ORDER BY m.rx_time DESC
    LIMIT ?
  SQL
  # Columns owned by the message itself; every other key in the joined
  # row came from the nodes table and is moved into the nested hash.
  msg_fields = %w[id rx_time rx_iso from_id to_id channel portnum text msg_snr rssi hop_limit]
  rows.each do |r|
    if DEBUG && (r["from_id"].nil? || r["from_id"].to_s.empty?)
      raw = db.execute("SELECT * FROM messages WHERE id = ?", [r["id"]]).first
      Kernel.warn "[debug] messages row before join: #{raw.inspect}"
      Kernel.warn "[debug] row after join: #{r.inspect}"
    end
    node = {}
    r.keys.each do |k|
      next if msg_fields.include?(k)
      node[k] = r.delete(k)
    end
    # Restore the message SNR under its public name; the join aliased it
    # so the node's own "snr" column would not clobber it.
    r["snr"] = r.delete("msg_snr")
    if r["from_id"] && (node["node_id"].nil? || node["node_id"].to_s.empty?)
      # The join missed: retry the lookup with every plausible sender
      # reference (canonical ID, raw string, numeric alias).
      lookup_keys = []
      canonical = normalize_node_id(db, r["from_id"])
      lookup_keys << canonical if canonical
      raw_ref = r["from_id"].to_s.strip
      lookup_keys << raw_ref unless raw_ref.empty?
      lookup_keys << raw_ref.to_i if raw_ref.match?(/\A[0-9]+\z/)
      fallback = nil
      lookup_keys.uniq.each do |ref|
        sql = ref.is_a?(Integer) ? "SELECT * FROM nodes WHERE num = ?" : "SELECT * FROM nodes WHERE node_id = ?"
        fallback = db.get_first_row(sql, [ref])
        break if fallback
      end
      if fallback
        fallback.each do |key, value|
          next unless key.is_a?(String)
          next if msg_fields.include?(key)
          # Only fill gaps; values from the original join win.
          node[key] = value if node[key].nil?
        end
      end
    end
    node["role"] = "CLIENT" if node.key?("role") && (node["role"].nil? || node["role"].to_s.empty?)
    r["node"] = node
    if DEBUG && (r["from_id"].nil? || r["from_id"].to_s.empty?)
      Kernel.warn "[debug] row after processing: #{r.inspect}"
    end
  end
  rows
ensure
  db&.close
end
# GET /api/messages
#
# Returns a JSON array of stored text messages including node metadata.
#
# The optional +limit+ query parameter is clamped to 1..1000: the previous
# `[limit, 1000].min` let a negative value through, and SQLite treats a
# negative LIMIT as "no limit", bypassing the cap entirely; a zero or
# non-numeric value would have returned an empty result.
get "/api/messages" do
  content_type :json
  requested = params["limit"]&.to_i || 200
  limit = requested.clamp(1, 1000)
  query_messages(limit).to_json
end
# Work out the numeric alias for a node from its payload and canonical ID.
#
# The Meshtastic protobuf encodes the node ID as a hexadecimal string
# prefixed with an exclamation mark (e.g. ``!4ed36bd0``). Payloads often
# carry a decimal ``num`` alias as well, but some integrations omit it;
# when absent or ambiguous, the number is reconstructed from the
# canonical identifier so joins on ``nodes.num`` keep working.
#
# @param node_id [String, nil] canonical node identifier (e.g. ``!4ed36bd0``).
# @param payload [Hash] raw node payload provided by the data daemon.
# @return [Integer, nil] numeric node reference if it can be determined.
def resolve_node_num(node_id, payload)
  raw = payload["num"]
  if raw.is_a?(Integer)
    return raw
  elsif raw.is_a?(Numeric)
    return raw.to_i
  elsif raw.is_a?(String)
    alias_text = raw.strip
    return nil if alias_text.empty?
    return Integer(alias_text, 10) if alias_text.match?(/\A[0-9]+\z/)
    if alias_text.match?(/\A0[xX][0-9A-Fa-f]+\z/)
      return Integer(alias_text.delete_prefix("0x").delete_prefix("0X"), 16)
    end
    if alias_text.match?(/\A[0-9A-Fa-f]+\z/)
      # Bare hex digits are only trusted when the canonical ID itself
      # looks hexadecimal; otherwise fall through to the ID below.
      canonical = node_id.is_a?(String) ? node_id.strip : ""
      return Integer(alias_text, 16) if canonical.match?(/\A!?[0-9A-Fa-f]+\z/)
    end
  end
  return nil unless node_id.is_a?(String)
  digits = node_id.strip
  return nil if digits.empty?
  digits = digits.delete_prefix("!")
  return nil unless digits.match?(/\A[0-9A-Fa-f]+\z/)
  Integer(digits, 16)
rescue ArgumentError
  nil
end
# Insert or update a node row with the most recent metrics.
#
# The UPSERT's WHERE clause only lets reports that are at least as fresh
# as the stored row overwrite it, so out-of-order daemon snapshots cannot
# regress the data. first_heard is written on insert but never updated.
#
# @param db [SQLite3::Database] open database handle.
# @param node_id [String] primary identifier for the node.
# @param n [Hash] node payload provided by the data daemon.
# @return [void]
def upsert_node(db, node_id, n)
  user = n["user"] || {}
  met = n["deviceMetrics"] || {}
  pos = n["position"] || {}
  role = user["role"] || "CLIENT"
  lh = n["lastHeard"]
  pt = pos["time"]
  now = Time.now.to_i
  # Discard future position fixes; clamp future last-heard to now.
  pt = nil if pt && pt > now
  lh = now if lh && lh > now
  # A position fix implies the node was heard at least that recently.
  lh = pt if pt && (!lh || lh < pt)
  # SQLite has no boolean type; persist true/false as 1/0.
  bool = ->(v) {
    case v
    when true then 1
    when false then 0
    else v
    end
  }
  node_num = resolve_node_num(node_id, n)
  # Order must match the INSERT column list below; lh appears twice to
  # seed both last_heard and first_heard on fresh rows.
  row = [
    node_id,
    node_num,
    user["shortName"],
    user["longName"],
    user["macaddr"],
    user["hwModel"] || n["hwModel"],
    role,
    user["publicKey"],
    bool.call(user["isUnmessagable"]),
    bool.call(n["isFavorite"]),
    n["hopsAway"],
    n["snr"],
    lh,
    lh,
    met["batteryLevel"],
    met["voltage"],
    met["channelUtilization"],
    met["airUtilTx"],
    met["uptimeSeconds"],
    pt,
    pos["locationSource"],
    pos["latitude"],
    pos["longitude"],
    pos["altitude"],
  ]
  with_busy_retry do
    db.execute <<~SQL, row
      INSERT INTO nodes(node_id,num,short_name,long_name,macaddr,hw_model,role,public_key,is_unmessagable,is_favorite,
                        hops_away,snr,last_heard,first_heard,battery_level,voltage,channel_utilization,air_util_tx,uptime_seconds,
                        position_time,location_source,latitude,longitude,altitude)
      VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
      ON CONFLICT(node_id) DO UPDATE SET
        num=excluded.num, short_name=excluded.short_name, long_name=excluded.long_name, macaddr=excluded.macaddr,
        hw_model=excluded.hw_model, role=excluded.role, public_key=excluded.public_key, is_unmessagable=excluded.is_unmessagable,
        is_favorite=excluded.is_favorite, hops_away=excluded.hops_away, snr=excluded.snr, last_heard=excluded.last_heard,
        battery_level=excluded.battery_level, voltage=excluded.voltage, channel_utilization=excluded.channel_utilization,
        air_util_tx=excluded.air_util_tx, uptime_seconds=excluded.uptime_seconds, position_time=excluded.position_time,
        location_source=excluded.location_source, latitude=excluded.latitude, longitude=excluded.longitude,
        altitude=excluded.altitude
      WHERE COALESCE(excluded.last_heard,0) >= COALESCE(nodes.last_heard,0)
    SQL
  end
end
# Ensure the request includes the expected bearer token.
#
# Compares the Authorization header (minus any "Bearer " prefix) against
# the API_TOKEN environment variable in constant time. Authentication is
# always refused when API_TOKEN is unset or empty.
#
# @return [void]
# @raise [Sinatra::Halt] 403 response when authentication fails.
def require_token!
  token = ENV["API_TOKEN"]
  provided = request.env["HTTP_AUTHORIZATION"].to_s.sub(/^Bearer\s+/i, "")
  halt 403, { error: "Forbidden" }.to_json unless token && !token.empty? && secure_token_match?(token, provided)
end
# Constant-time string comparison that tolerates bad input.
#
# Non-string arguments and byte-length mismatches short-circuit to
# false; otherwise the check is delegated to Rack::Utils.secure_compare
# to avoid timing side channels.
#
# @param expected [String] the secret held by the server.
# @param provided [String] the candidate supplied by the client.
# @return [Boolean] true only when both byte sequences match exactly.
def secure_token_match?(expected, provided)
  return false unless expected.is_a?(String)
  return false unless provided.is_a?(String)
  lhs = expected.b
  rhs = provided.b
  lhs.bytesize == rhs.bytesize && Rack::Utils.secure_compare(lhs, rhs)
rescue Rack::Utils::SecurityError
  false
end
# Read the request body enforcing a maximum allowed size.
#
# Reads at most one byte past the limit so oversized payloads can be
# detected without buffering arbitrarily large bodies.
#
# @param limit [Integer, nil] optional override for the number of bytes;
#   nil or non-positive overrides fall back to MAX_JSON_BODY_BYTES.
# @return [String] raw request body ("" when the body is empty).
# @raise [Sinatra::Halt] 413 response when the body exceeds the limit.
def read_json_body(limit: nil)
  max_bytes = limit || MAX_JSON_BODY_BYTES
  max_bytes = max_bytes.to_i
  max_bytes = MAX_JSON_BODY_BYTES if max_bytes <= 0
  body = request.body.read(max_bytes + 1)
  body = "" if body.nil?
  halt 413, { error: "payload too large" }.to_json if body.bytesize > max_bytes
  body
ensure
  # Leave the body readable for any downstream consumer.
  request.body.rewind if request.body.respond_to?(:rewind)
end
# Decide whether the canonical node ID should replace the payload's own
# sender reference for a message.
#
# Payloads that identify a packet via "packet_id" without a message "id"
# are the only ones whose sender may be rewritten.
#
# @param message [Object] raw request payload element.
# @return [Boolean]
def prefer_canonical_sender?(message)
  return false unless message.is_a?(Hash)
  message.key?("packet_id") && !message.key?("id")
end
# Insert a text message if it does not already exist.
#
# Messages are identified by "id" (falling back to "packet_id"). For an
# existing row only the sender reference is refreshed; new rows are
# inserted wholesale. The sender is resolved to the canonical node ID
# when the payload lacks a usable from_id or is daemon-originated
# (packet_id without id).
#
# @param db [SQLite3::Database] open database handle.
# @param m [Hash] message payload provided by the data daemon.
# @return [void]
def insert_message(db, m)
  msg_id = m["id"] || m["packet_id"]
  return unless msg_id
  rx_time = m["rx_time"]&.to_i || Time.now.to_i
  rx_iso = m["rx_iso"] || Time.at(rx_time).utc.iso8601
  raw_from_id = m["from_id"]
  # Fall back to the bare "from" field when "from_id" is blank.
  if raw_from_id.nil? || raw_from_id.to_s.strip.empty?
    alt_from = m["from"]
    raw_from_id = alt_from unless alt_from.nil? || alt_from.to_s.strip.empty?
  end
  trimmed_from_id = raw_from_id.nil? ? nil : raw_from_id.to_s.strip
  trimmed_from_id = nil if trimmed_from_id&.empty?
  canonical_from_id = normalize_node_id(db, raw_from_id)
  use_canonical = canonical_from_id && (trimmed_from_id.nil? || prefer_canonical_sender?(m))
  from_id = if use_canonical
    canonical_from_id.to_s.strip
  else
    trimmed_from_id
  end
  from_id = nil if from_id&.empty?
  # Order must match the INSERT column list below.
  row = [
    msg_id,
    rx_time,
    rx_iso,
    from_id,
    m["to_id"],
    m["channel"],
    m["portnum"],
    m["text"],
    m["snr"],
    m["rssi"],
    m["hop_limit"],
  ]
  with_busy_retry do
    existing = db.get_first_row("SELECT from_id FROM messages WHERE id = ?", [msg_id])
    if existing
      if from_id
        existing_from = existing.is_a?(Hash) ? existing["from_id"] : existing[0]
        existing_from_str = existing_from&.to_s
        # Refresh the sender when the stored one is blank or differs.
        should_update = existing_from_str.nil? || existing_from_str.strip.empty?
        should_update ||= existing_from != from_id
        db.execute("UPDATE messages SET from_id = ? WHERE id = ?", [from_id, msg_id]) if should_update
      end
    else
      begin
        db.execute <<~SQL, row
          INSERT INTO messages(id,rx_time,rx_iso,from_id,to_id,channel,portnum,text,snr,rssi,hop_limit)
          VALUES (?,?,?,?,?,?,?,?,?,?,?)
        SQL
      rescue SQLite3::ConstraintException
        # Lost an insert race with another writer; converge by only
        # refreshing the sender reference.
        db.execute("UPDATE messages SET from_id = ? WHERE id = ?", [from_id, msg_id]) if from_id
      end
    end
  end
end
# Resolve a node reference to the canonical node ID when possible.
#
# Tries a direct node_id match first, then interprets purely decimal
# references as the node's numeric alias (nodes.num).
#
# @param db [SQLite3::Database] open database handle.
# @param node_ref [Object] raw node identifier or numeric reference.
# @return [String, nil] canonical node ID or nil if it cannot be resolved.
def normalize_node_id(db, node_ref)
  return nil if node_ref.nil?
  ref_str = node_ref.to_s.strip
  return nil if ref_str.empty?
  node_id = db.get_first_value("SELECT node_id FROM nodes WHERE node_id = ?", [ref_str])
  return node_id if node_id
  begin
    ref_num = Integer(ref_str, 10)
  rescue ArgumentError
    # Not a decimal reference; nothing further to try.
    return nil
  end
  db.get_first_value("SELECT node_id FROM nodes WHERE num = ?", [ref_num])
end
# POST /api/nodes
#
# Upserts one or more nodes provided as a JSON object keyed by node ID.
#
# Non-object JSON (e.g. an array) is now rejected with 400: iterating any
# other type with |node_id, node| handed malformed arguments to
# upsert_node and surfaced as a 500, and the size cap only guarded Hashes.
post "/api/nodes" do
  require_token!
  content_type :json
  begin
    data = JSON.parse(read_json_body)
  rescue JSON::ParserError
    halt 400, { error: "invalid JSON" }.to_json
  end
  halt 400, { error: "invalid JSON" }.to_json unless data.is_a?(Hash)
  halt 400, { error: "too many nodes" }.to_json if data.size > 1000
  db = open_database
  data.each do |node_id, node|
    upsert_node(db, node_id, node)
  end
  { status: "ok" }.to_json
ensure
  db&.close
end
# POST /api/messages
#
# Accepts an array or object describing text messages and stores each
# entry. A single object is treated as a one-element batch.
#
# NOTE(review): non-hash batch entries (e.g. a bare number) would reach
# insert_message unchecked — confirm upstream daemon never sends those.
post "/api/messages" do
  require_token!
  content_type :json
  begin
    data = JSON.parse(read_json_body)
  rescue JSON::ParserError
    halt 400, { error: "invalid JSON" }.to_json
  end
  messages = data.is_a?(Array) ? data : [data]
  halt 400, { error: "too many messages" }.to_json if messages.size > 1000
  db = open_database
  messages.each do |msg|
    insert_message(db, msg)
  end
  { status: "ok" }.to_json
ensure
  db&.close
end
# GET /
#
# Renders the main site with configuration-driven defaults for the template.
get "/" do
  erb :index, locals: {
    site_name: SITE_NAME,
    default_channel: DEFAULT_CHANNEL,
    default_frequency: DEFAULT_FREQUENCY,
    map_center_lat: MAP_CENTER_LAT,
    map_center_lon: MAP_CENTER_LON,
    max_node_distance_km: MAX_NODE_DISTANCE_KM,
    matrix_room: MATRIX_ROOM,
    version: APP_VERSION,
  }
end
# Boot the modular application only when this file is executed directly.
PotatoMesh::Application.run! if $PROGRAM_NAME == __FILE__
+3 -3
View File
@@ -1,6 +1,5 @@
#!/usr/bin/env bash
# Copyright (C) 2025 l5yth
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,4 +16,5 @@
set -euo pipefail
bundle install
exec ruby app.rb -p 41447 -o 127.0.0.1
exec bundle exec ruby app.rb -p 41447 -o 0.0.0.0
+206
View File
@@ -0,0 +1,206 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
require "sinatra/base"
require "json"
require "sqlite3"
require "fileutils"
require "logger"
require "rack/utils"
require "open3"
require "resolv"
require "socket"
require "time"
require "openssl"
require "base64"
require "prometheus/client"
require "prometheus/client/formats/text"
require "prometheus/middleware/collector"
require "prometheus/middleware/exporter"
require "net/http"
require "uri"
require "ipaddr"
require "set"
require "digest"
require_relative "config"
require_relative "sanitizer"
require_relative "meta"
require_relative "logging"
require_relative "application/helpers"
require_relative "application/errors"
require_relative "application/database"
require_relative "application/networking"
require_relative "application/identity"
require_relative "application/worker_pool"
require_relative "application/federation"
require_relative "application/prometheus"
require_relative "application/queries"
require_relative "application/data_processing"
require_relative "application/filesystem"
require_relative "application/instances"
require_relative "application/routes/api"
require_relative "application/routes/ingest"
require_relative "application/routes/root"
module PotatoMesh
  # Modular Sinatra application assembled from the App::* mixins. The
  # mixins are both extended (so configure-time boot code can call them in
  # class context) and included/registered (so request handlers can).
  class Application < Sinatra::Base
    extend App::Helpers
    extend App::Database
    extend App::Networking
    extend App::Identity
    extend App::Federation
    extend App::Instances
    extend App::Prometheus
    extend App::Queries
    extend App::DataProcessing
    extend App::Filesystem
    helpers App::Helpers
    include App::Database
    include App::Networking
    include App::Identity
    include App::Federation
    include App::Instances
    include App::Prometheus
    include App::Queries
    include App::DataProcessing
    include App::Filesystem
    register App::Routes::Api
    register App::Routes::Ingest
    register App::Routes::Root
    # Defaults used when PORT is absent/invalid and for the bind address.
    DEFAULT_PORT = 41_447
    DEFAULT_BIND_ADDRESS = "0.0.0.0"
    APP_VERSION = determine_app_version
    # Federation identity: a persistent keypair loaded (or generated) at
    # boot; the instance ID is the SHA-256 digest of the exported public key.
    INSTANCE_PRIVATE_KEY, INSTANCE_KEY_GENERATED = load_or_generate_instance_private_key
    INSTANCE_PUBLIC_KEY_PEM = INSTANCE_PRIVATE_KEY.public_key.export
    SELF_INSTANCE_ID = Digest::SHA256.hexdigest(INSTANCE_PUBLIC_KEY_PEM)
    INSTANCE_DOMAIN, INSTANCE_DOMAIN_SOURCE = determine_instance_domain
    # Adjust the runtime logger severity to match the DEBUG flag.
    #
    # @return [void]
    def self.apply_logger_level!
      logger = settings.logger
      return unless logger
      logger.level = PotatoMesh::Config.debug? ? Logger::DEBUG : Logger::WARN
    end
    # Determine the port the application should listen on by honouring the
    # conventional +PORT+ environment variable used by hosting platforms. Any
    # non-numeric or out-of-range values fall back to the provided default to
    # keep the application bootable in misconfigured environments.
    #
    # @param default_port [Integer] fallback port when +ENV['PORT']+ is absent or invalid.
    # @return [Integer] port number for the HTTP server.
    def self.resolve_port(default_port: DEFAULT_PORT)
      raw_port = ENV["PORT"]
      return default_port if raw_port.nil?
      trimmed = raw_port.to_s.strip
      return default_port if trimmed.empty?
      begin
        port = Integer(trimmed, 10)
      rescue ArgumentError
        return default_port
      end
      return default_port unless port.positive?
      # NOTE(review): presumably rejects out-of-range values — confirm in
      # PotatoMesh::Sanitizer.valid_port?.
      return default_port unless PotatoMesh::Sanitizer.valid_port?(trimmed)
      port
    end
    configure do
      set :public_folder, File.expand_path("../../public", __dir__)
      set :views, File.expand_path("../../views", __dir__)
      set :federation_thread, nil
      set :federation_worker_pool, nil
      set :port, resolve_port
      set :bind, DEFAULT_BIND_ADDRESS
      app_logger = PotatoMesh::Logging.build_logger($stdout)
      set :logger, app_logger
      use Rack::CommonLogger, app_logger
      use Rack::Deflater
      use ::Prometheus::Middleware::Collector
      use ::Prometheus::Middleware::Exporter
      apply_logger_level!
      # Storage migrations and schema provisioning must complete before
      # any request touches the database.
      perform_initial_filesystem_setup!
      cleanup_legacy_well_known_artifacts
      init_db unless db_schema_present?
      ensure_schema_upgrades
      log_instance_domain_resolution
      log_instance_public_key
      refresh_well_known_document_if_stale
      ensure_self_instance_record!
      update_all_prometheus_metrics_from_nodes
      # The worker pool only runs when federation is enabled; otherwise
      # any leftover pool is torn down.
      if federation_enabled?
        ensure_federation_worker_pool!
      else
        shutdown_federation_worker_pool!
      end
      if federation_announcements_active?
        start_initial_federation_announcement!
        start_federation_announcer!
      elsif federation_enabled?
        debug_log(
          "Federation announcements disabled",
          context: "federation",
          reason: "test environment",
        )
      else
        debug_log(
          "Federation announcements disabled",
          context: "federation",
          reason: "configuration",
        )
      end
    end
  end
end
# Re-point the classic Sinatra::Application constant at the modular app so
# legacy references resolve to it.
if defined?(Sinatra::Application) && Sinatra::Application != PotatoMesh::Application
  Sinatra.send(:remove_const, :Application)
end
Sinatra::Application = PotatoMesh::Application unless defined?(Sinatra::Application)
# Top-level aliases kept for code written against the pre-modular,
# single-file application.
APP_VERSION = PotatoMesh::Application::APP_VERSION unless defined?(APP_VERSION)
SELF_INSTANCE_ID = PotatoMesh::Application::SELF_INSTANCE_ID unless defined?(SELF_INSTANCE_ID)
# Expose the helper modules as top-level methods for the same reason.
[
  PotatoMesh::App::Helpers,
  PotatoMesh::App::Database,
  PotatoMesh::App::Networking,
  PotatoMesh::App::Identity,
  PotatoMesh::App::Federation,
  PotatoMesh::App::Instances,
  PotatoMesh::App::Prometheus,
  PotatoMesh::App::Queries,
  PotatoMesh::App::DataProcessing,
].each do |mod|
  Object.include(mod) unless Object < mod
end
File diff suppressed because it is too large Load Diff
+193
View File
@@ -0,0 +1,193 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
  module App
    # Database access helpers: connection handling, busy retries, schema
    # provisioning, and additive in-place migrations for older installs.
    module Database
      # Column definitions required for environment telemetry support. Each
      # entry pairs the column name with the SQL type used when backfilling
      # legacy databases that pre-date the extended telemetry schema.
      TELEMETRY_COLUMN_DEFINITIONS = [
        ["gas_resistance", "REAL"],
        ["current", "REAL"],
        ["iaq", "INTEGER"],
        ["distance", "REAL"],
        ["lux", "REAL"],
        ["white_lux", "REAL"],
        ["ir_lux", "REAL"],
        ["uv_lux", "REAL"],
        ["wind_direction", "INTEGER"],
        ["wind_speed", "REAL"],
        ["weight", "REAL"],
        ["wind_gust", "REAL"],
        ["wind_lull", "REAL"],
        ["radiation", "REAL"],
        ["rainfall_1h", "REAL"],
        ["rainfall_24h", "REAL"],
        ["soil_moisture", "INTEGER"],
        ["soil_temperature", "REAL"],
      ].freeze

      # Open a connection to the application database applying common pragmas.
      #
      # @param readonly [Boolean] whether to open the database in read-only mode.
      # @return [SQLite3::Database] handle the caller is responsible for closing.
      def open_database(readonly: false)
        SQLite3::Database.new(PotatoMesh::Config.db_path, readonly: readonly).tap do |db|
          db.busy_timeout = PotatoMesh::Config.db_busy_timeout_ms
          # Foreign keys are off by default in SQLite; enforce per connection.
          db.execute("PRAGMA foreign_keys = ON")
        end
      end

      # Execute the provided block and retry when SQLite reports a busy error.
      #
      # @param max_retries [Integer] maximum number of retries when locked.
      # @param base_delay [Float] incremental back-off delay between retries.
      # @yield Executes the database operation.
      # @return [Object] result of the block.
      # @raise [SQLite3::BusyException] when the lock outlives the retry budget.
      def with_busy_retry(
        max_retries: PotatoMesh::Config.db_busy_max_retries,
        base_delay: PotatoMesh::Config.db_busy_retry_delay
      )
        attempts = 0
        begin
          yield
        rescue SQLite3::BusyException
          attempts += 1
          raise if attempts > max_retries
          # Linear back-off: the pause grows with each failed attempt.
          sleep(base_delay * attempts)
          retry
        end
      end

      # Determine whether the database schema has already been provisioned.
      #
      # @return [Boolean] true when all required tables exist; false when the
      #   file is missing or any SQLite error occurs.
      def db_schema_present?
        return false unless File.exist?(PotatoMesh::Config.db_path)
        db = open_database(readonly: true)
        required = %w[nodes messages positions telemetry neighbors instances]
        tables =
          db.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages','positions','telemetry','neighbors','instances')",
          ).flatten
        (required - tables).empty?
      rescue SQLite3::Exception
        # Treat unreadable databases as unprovisioned.
        false
      ensure
        db&.close
      end

      # Create the database schema using the bundled SQL files.
      #
      # @return [void]
      def init_db
        FileUtils.mkdir_p(File.dirname(PotatoMesh::Config.db_path))
        db = open_database
        %w[nodes messages positions telemetry neighbors instances].each do |schema|
          sql_file = File.expand_path("../../../../data/#{schema}.sql", __dir__)
          db.execute_batch(File.read(sql_file))
        end
      ensure
        db&.close
      end

      # Apply any schema migrations required for older installations.
      #
      # Every step is additive (ALTER TABLE ADD COLUMN, CREATE INDEX,
      # CREATE TABLE), so re-running on an up-to-date database is a no-op.
      # Failures are logged rather than raised to keep the app bootable.
      #
      # @return [void]
      def ensure_schema_upgrades
        db = open_database
        # PRAGMA table_info rows: [cid, name, type, ...]; index 1 is the name.
        node_columns = db.execute("PRAGMA table_info(nodes)").map { |row| row[1] }
        unless node_columns.include?("precision_bits")
          db.execute("ALTER TABLE nodes ADD COLUMN precision_bits INTEGER")
          node_columns << "precision_bits"
        end
        unless node_columns.include?("lora_freq")
          db.execute("ALTER TABLE nodes ADD COLUMN lora_freq INTEGER")
        end
        unless node_columns.include?("modem_preset")
          db.execute("ALTER TABLE nodes ADD COLUMN modem_preset TEXT")
        end
        message_columns = db.execute("PRAGMA table_info(messages)").map { |row| row[1] }
        unless message_columns.include?("lora_freq")
          db.execute("ALTER TABLE messages ADD COLUMN lora_freq INTEGER")
        end
        unless message_columns.include?("modem_preset")
          db.execute("ALTER TABLE messages ADD COLUMN modem_preset TEXT")
        end
        unless message_columns.include?("channel_name")
          db.execute("ALTER TABLE messages ADD COLUMN channel_name TEXT")
        end
        unless message_columns.include?("reply_id")
          db.execute("ALTER TABLE messages ADD COLUMN reply_id INTEGER")
          message_columns << "reply_id"
        end
        unless message_columns.include?("emoji")
          db.execute("ALTER TABLE messages ADD COLUMN emoji TEXT")
          message_columns << "emoji"
        end
        reply_index_exists =
          db.get_first_value(
            "SELECT COUNT(*) FROM sqlite_master WHERE type='index' AND name='idx_messages_reply_id'",
          ).to_i > 0
        unless reply_index_exists
          db.execute("CREATE INDEX IF NOT EXISTS idx_messages_reply_id ON messages(reply_id)")
        end
        # Backfill whole tables that post-date the original schema.
        tables = db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='instances'").flatten
        if tables.empty?
          sql_file = File.expand_path("../../../../data/instances.sql", __dir__)
          db.execute_batch(File.read(sql_file))
        end
        telemetry_tables =
          db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='telemetry'").flatten
        if telemetry_tables.empty?
          telemetry_schema = File.expand_path("../../../../data/telemetry.sql", __dir__)
          db.execute_batch(File.read(telemetry_schema))
        end
        telemetry_columns = db.execute("PRAGMA table_info(telemetry)").map { |row| row[1] }
        TELEMETRY_COLUMN_DEFINITIONS.each do |name, type|
          next if telemetry_columns.include?(name)
          db.execute("ALTER TABLE telemetry ADD COLUMN #{name} #{type}")
          telemetry_columns << name
        end
      rescue SQLite3::SQLException, Errno::ENOENT => e
        # Migrations are best-effort: log and continue booting.
        warn_log(
          "Failed to apply schema upgrade",
          context: "database.schema",
          error_class: e.class.name,
          error_message: e.message,
        )
      ensure
        db&.close
      end
    end
  end
end
+22
View File
@@ -0,0 +1,22 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
  module App
    # Raised when a remote instance fails to provide valid federation data.
    class InstanceFetchError < StandardError
    end
  end
end
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,123 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
require "fileutils"
module PotatoMesh
  module App
    # Filesystem helpers responsible for migrating legacy assets to XDG compliant
    # directories and preparing runtime storage locations.
    module Filesystem
      # Execute all filesystem migrations required before the application boots.
      #
      # @return [void]
      def perform_initial_filesystem_setup!
        migrate_legacy_database!
        migrate_legacy_keyfile!
        migrate_legacy_well_known_assets!
      end

      private

      # Copy the legacy database file into the configured XDG data directory.
      # Skipped entirely when the operator overrode the database path.
      #
      # @return [void]
      def migrate_legacy_database!
        return unless default_database_destination?
        migrate_legacy_file(
          PotatoMesh::Config.legacy_db_path,
          PotatoMesh::Config.db_path,
          chmod: 0o600,
          context: "filesystem.db",
        )
      end

      # Copy the legacy keyfile into the configured XDG configuration directory.
      # Only the first existing candidate wins: once the destination exists,
      # migrate_legacy_file skips the remaining candidates.
      #
      # @return [void]
      def migrate_legacy_keyfile!
        PotatoMesh::Config.legacy_keyfile_candidates.each do |candidate|
          migrate_legacy_file(
            candidate,
            PotatoMesh::Config.keyfile_path,
            chmod: 0o600,
            context: "filesystem.keys",
          )
        end
      end

      # Copy the legacy well-known document into the configured XDG directory.
      #
      # @return [void]
      def migrate_legacy_well_known_assets!
        destination = File.join(
          PotatoMesh::Config.well_known_storage_root,
          File.basename(PotatoMesh::Config.well_known_relative_path),
        )
        PotatoMesh::Config.legacy_well_known_candidates.each do |candidate|
          migrate_legacy_file(
            candidate,
            destination,
            chmod: 0o644,
            context: "filesystem.well_known",
          )
        end
      end

      # Migrate a legacy file if it exists and the destination has not been created yet.
      # The source is copied, never removed, so a rollback keeps working.
      #
      # @param source_path [String] absolute path to the legacy file.
      # @param destination_path [String] absolute path to the new file location.
      # @param chmod [Integer, nil] optional permission bits applied to the destination file.
      # @param context [String] logging context describing the migration target.
      # @return [void]
      def migrate_legacy_file(source_path, destination_path, chmod:, context:)
        return if source_path == destination_path
        return unless File.exist?(source_path)
        return if File.exist?(destination_path)
        FileUtils.mkdir_p(File.dirname(destination_path))
        FileUtils.cp(source_path, destination_path)
        File.chmod(chmod, destination_path) if chmod
        debug_log(
          "Migrated legacy file to XDG directory",
          context: context,
          source: source_path,
          destination: destination_path,
        )
      rescue SystemCallError => e
        # Migration is best-effort: a failed copy is logged, not fatal.
        warn_log(
          "Failed to migrate legacy file",
          context: context,
          source: source_path,
          destination: destination_path,
          error_class: e.class.name,
          error_message: e.message,
        )
      end

      # Determine whether the database destination matches the configured default.
      #
      # @return [Boolean] true when the destination should receive migrated data.
      def default_database_destination?
        PotatoMesh::Config.db_path == PotatoMesh::Config.default_db_path
      end
    end
  end
end
+426
View File
@@ -0,0 +1,426 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
module App
# Shared view and controller helper methods. Each helper is documented with
# its intended consumers to ensure consistent behaviour across the Sinatra
# application.
module Helpers
# -- Thin delegation helpers ---------------------------------------------
# The helpers below forward to PotatoMesh::Application, PotatoMesh::Config
# or PotatoMesh::Sanitizer so templates and routes never reach into those
# modules directly.
# Fetch an application level constant exposed by {PotatoMesh::Application}.
#
# @param name [Symbol] constant identifier to retrieve.
# @return [Object] constant value stored on the application class.
# @raise [NameError] when the constant is not defined (standard const_get behaviour).
def app_constant(name)
PotatoMesh::Application.const_get(name)
end
# Retrieve the configured Prometheus report identifiers as an array.
#
# @return [Array<String>] list of report IDs used on the metrics page.
def prom_report_ids
PotatoMesh::Config.prom_report_id_list
end
# Read a text configuration value with a fallback.
#
# @param key [String] environment variable key.
# @param default [String] fallback value when unset.
# @return [String] sanitised configuration string.
def fetch_config_string(key, default)
PotatoMesh::Config.fetch_string(key, default)
end
# Proxy for {PotatoMesh::Sanitizer.string_or_nil}.
#
# @param value [Object] value to sanitise.
# @return [String, nil] cleaned string or nil.
def string_or_nil(value)
PotatoMesh::Sanitizer.string_or_nil(value)
end
# Proxy for {PotatoMesh::Sanitizer.sanitize_instance_domain}.
#
# @param value [Object] candidate domain string.
# @param downcase [Boolean] whether to force lowercase normalisation.
# @return [String, nil] canonical domain or nil.
def sanitize_instance_domain(value, downcase: true)
PotatoMesh::Sanitizer.sanitize_instance_domain(value, downcase: downcase)
end
# Proxy for {PotatoMesh::Sanitizer.instance_domain_host}.
#
# @param domain [String] domain literal.
# @return [String, nil] host portion of the domain.
def instance_domain_host(domain)
PotatoMesh::Sanitizer.instance_domain_host(domain)
end
# Proxy for {PotatoMesh::Sanitizer.ip_from_domain}.
#
# @param domain [String] domain literal.
# @return [IPAddr, nil] parsed address object.
def ip_from_domain(domain)
PotatoMesh::Sanitizer.ip_from_domain(domain)
end
# Proxy for {PotatoMesh::Sanitizer.sanitized_string}.
#
# @param value [Object] arbitrary input.
# @return [String] trimmed string representation.
def sanitized_string(value)
PotatoMesh::Sanitizer.sanitized_string(value)
end
# Retrieve the site name presented to users.
#
# @return [String] sanitised site label.
def sanitized_site_name
PotatoMesh::Sanitizer.sanitized_site_name
end
# Retrieve the configured channel.
#
# @return [String] sanitised channel identifier.
def sanitized_channel
PotatoMesh::Sanitizer.sanitized_channel
end
# Retrieve the configured frequency descriptor.
#
# @return [String] sanitised frequency text.
def sanitized_frequency
PotatoMesh::Sanitizer.sanitized_frequency
end
# Build the configuration hash exposed to the frontend application.
#
# Keys use camelCase because the hash is serialised to JSON and consumed
# directly by the browser bundle.
#
# @return [Hash] JSON serialisable configuration payload.
def frontend_app_config
{
refreshIntervalSeconds: PotatoMesh::Config.refresh_interval_seconds,
# Same interval pre-multiplied for JavaScript timer APIs.
refreshMs: PotatoMesh::Config.refresh_interval_seconds * 1000,
# Chat is disabled whenever the instance runs in private mode.
chatEnabled: !private_mode?,
channel: sanitized_channel,
frequency: sanitized_frequency,
contactLink: sanitized_contact_link,
contactLinkUrl: sanitized_contact_link_url,
mapCenter: {
lat: PotatoMesh::Config.map_center_lat,
lon: PotatoMesh::Config.map_center_lon,
},
mapZoom: PotatoMesh::Config.map_zoom,
maxDistanceKm: PotatoMesh::Config.max_distance_km,
tileFilters: PotatoMesh::Config.tile_filters,
instanceDomain: app_constant(:INSTANCE_DOMAIN),
# The instances feature needs federation on AND private mode off.
instancesFeatureEnabled: federation_enabled? && !private_mode?,
}
end
# Retrieve the configured contact link or nil when unset.
#
# Delegates to {PotatoMesh::Sanitizer.sanitized_contact_link}.
#
# @return [String, nil] contact link identifier.
def sanitized_contact_link
PotatoMesh::Sanitizer.sanitized_contact_link
end
# Retrieve the hyperlink derived from the configured contact link.
#
# Delegates to {PotatoMesh::Sanitizer.sanitized_contact_link_url}.
#
# @return [String, nil] hyperlink pointing to the community chat.
def sanitized_contact_link_url
PotatoMesh::Sanitizer.sanitized_contact_link_url
end
# Retrieve the configured maximum node distance in kilometres.
#
# @return [Numeric, nil] maximum distance or nil if disabled.
def sanitized_max_distance_km
PotatoMesh::Sanitizer.sanitized_max_distance_km
end
# Format a kilometre value for human readable output.
#
# Delegates to {PotatoMesh::Meta.formatted_distance_km}.
#
# @param distance [Numeric] distance in kilometres.
# @return [String] formatted distance value.
def formatted_distance_km(distance)
PotatoMesh::Meta.formatted_distance_km(distance)
end
# Build the canonical node detail path for the supplied identifier.
#
# @param identifier [String, nil] node identifier in ``!xxxx`` notation.
# @return [String, nil] detail path including the canonical ``!`` prefix.
def node_detail_path(identifier)
  cleaned = string_or_nil(identifier)&.strip
  return nil if cleaned.nil? || cleaned.empty?
  # Drop at most one leading "!"; the canonical prefix is re-added below.
  body = cleaned.delete_prefix("!")
  return nil if body.empty?
  "/nodes/!#{Rack::Utils.escape_path(body)}"
end
# Present a version string with a leading ``v`` when missing to keep
# UI labels consistent across tagged and fallback builds.
#
# @param version [String, nil] raw application version string.
# @return [String, nil] version string prefixed with ``v`` when needed.
def display_version(version)
  text = version.to_s.strip unless version.nil?
  return nil if text.nil? || text.empty?
  text.start_with?("v") ? text : "v#{text}"
end
# Render a linked long name pointing to the node detail page.
#
# @param long_name [String] display name for the node.
# @param identifier [String, nil] canonical node identifier.
# @param css_class [String, nil] optional CSS class applied to the anchor.
# @return [String] escaped HTML snippet.
def node_long_name_link(long_name, identifier, css_class: "node-long-link")
  display_text = string_or_nil(long_name)
  return "" unless display_text
  escaped_text = Rack::Utils.escape_html(display_text)
  href = node_detail_path(identifier)
  # Fall back to plain escaped text when no detail path can be built.
  return escaped_text unless href
  attributes = +""
  attributes << %( class="#{css_class}") if css_class
  attributes << %( href="#{href}" data-node-detail-link="true")
  canonical = canonical_node_identifier(identifier)
  attributes << %( data-node-id="#{Rack::Utils.escape_html(canonical)}") if canonical
  %(<a#{attributes}>#{escaped_text}</a>)
end
# Normalise a node identifier by ensuring the canonical ``!`` prefix.
#
# @param identifier [String, nil] raw identifier string.
# @return [String, nil] canonical identifier or ``nil`` when unavailable.
def canonical_node_identifier(identifier)
  cleaned = string_or_nil(identifier)&.strip
  return nil if cleaned.nil? || cleaned.empty?
  cleaned.start_with?("!") ? cleaned : "!#{cleaned}"
end
# Generate the meta description used in SEO tags.
#
# Delegates to {PotatoMesh::Meta.description}; the wording differs when the
# instance runs in private mode.
#
# @return [String] combined descriptive sentence.
def meta_description
PotatoMesh::Meta.description(private_mode: private_mode?)
end
# Generate the structured meta configuration for the UI.
#
# Delegates to {PotatoMesh::Meta.configuration}.
#
# @return [Hash] frozen configuration metadata.
def meta_configuration
PotatoMesh::Meta.configuration(private_mode: private_mode?)
end
# Coerce an arbitrary value into an integer when possible.
#
# Accepts Integers as-is, truncates finite Floats/Numerics, and parses
# decimal, ``0x``-prefixed hexadecimal, and float-formatted strings.
#
# @param value [Object] user supplied value.
# @return [Integer, nil] parsed integer or nil when invalid.
def coerce_integer(value)
  return value if value.is_a?(Integer)
  if value.is_a?(Float)
    return value.finite? ? value.to_i : nil
  end
  return value.to_i if value.is_a?(Numeric)
  return nil unless value.is_a?(String)
  text = value.strip
  return nil if text.empty?
  return text.to_i(16) if text.match?(/\A0[xX][0-9A-Fa-f]+\z/)
  return text.to_i(10) if text.match?(/\A-?\d+\z/)
  begin
    parsed = Float(text)
  rescue ArgumentError
    return nil
  end
  parsed.finite? ? parsed.to_i : nil
end
# Coerce an arbitrary value into a floating point number when possible.
#
# Finite Floats pass through, other Numerics are converted, and strings
# are parsed with Kernel#Float. Non-finite results yield nil.
#
# @param value [Object] user supplied value.
# @return [Float, nil] parsed float or nil when invalid.
def coerce_float(value)
  if value.is_a?(Float)
    return value.finite? ? value : nil
  end
  # Integers and other Numerics share the same conversion path.
  return value.to_f if value.is_a?(Numeric)
  return nil unless value.is_a?(String)
  text = value.strip
  return nil if text.empty?
  begin
    parsed = Float(text)
  rescue ArgumentError
    return nil
  end
  parsed.finite? ? parsed : nil
end
# Coerce an arbitrary value into a boolean according to common truthy
# conventions.
#
# Strings match "true"/"1"/"yes"/"y" and "false"/"0"/"no"/"n"
# case-insensitively; Numerics are true unless their integer part is zero.
#
# @param value [Object] user supplied value.
# @return [Boolean, nil] boolean interpretation or nil when unknown.
def coerce_boolean(value)
  return value if true.equal?(value) || false.equal?(value)
  if value.is_a?(String)
    normalized = value.strip.downcase
    return true if %w[true 1 yes y].include?(normalized)
    return false if %w[false 0 no n].include?(normalized)
    return nil
  end
  return !value.to_i.zero? if value.is_a?(Numeric)
  nil
end
# Normalise PEM encoded public key content into LF line endings.
#
# @param value [String, #to_s, nil] raw PEM content.
# @return [String, nil] cleaned PEM string or nil when blank.
def sanitize_public_key_pem(value)
  return nil if value.nil?
  # CRLF and bare CR both collapse to LF.
  normalized = value.to_s.gsub(/\r\n?/, "\n")
  normalized.strip.empty? ? nil : normalized
end
# Recursively coerce hash keys to strings and normalise nested arrays.
#
# @param value [Object] JSON compatible value.
# @return [Object] structure with canonical string keys.
def normalize_json_value(value)
  if value.is_a?(Hash)
    normalized = {}
    value.each { |key, val| normalized[key.to_s] = normalize_json_value(val) }
    normalized
  elsif value.is_a?(Array)
    value.map { |element| normalize_json_value(element) }
  else
    # Scalars (including nil) pass through untouched.
    value
  end
end
# Parse JSON payloads or hashes into normalised hashes with string keys.
#
# @param value [Hash, String, nil] raw JSON object or string representation.
# @return [Hash, nil] canonicalised hash or nil when parsing fails.
def normalize_json_object(value)
  return normalize_json_value(value) if value.is_a?(Hash)
  return nil unless value.is_a?(String)
  trimmed = value.strip
  return nil if trimmed.empty?
  parsed =
    begin
      JSON.parse(trimmed)
    rescue JSON::ParserError
      return nil
    end
  # Only top-level JSON objects are accepted; arrays and scalars are rejected.
  parsed.is_a?(Hash) ? normalize_json_value(parsed) : nil
end
# Emit a structured debug log entry tagged with the calling context.
#
# The logger instance is resolved per-object via PotatoMesh::Logging.
#
# @param message [String] text to emit.
# @param context [String] logical source of the message.
# @param metadata [Hash] additional structured key/value data.
# @return [void]
def debug_log(message, context: "app", **metadata)
logger = PotatoMesh::Logging.logger_for(self)
PotatoMesh::Logging.log(logger, :debug, message, context: context, **metadata)
end
# Emit a structured warning log entry tagged with the calling context.
#
# @param message [String] text to emit.
# @param context [String] logical source of the message.
# @param metadata [Hash] additional structured key/value data.
# @return [void]
def warn_log(message, context: "app", **metadata)
logger = PotatoMesh::Logging.logger_for(self)
PotatoMesh::Logging.log(logger, :warn, message, context: context, **metadata)
end
# Indicate whether private mode has been requested.
#
# @return [Boolean] true when PRIVATE=1.
def private_mode?
PotatoMesh::Config.private_mode_enabled?
end
# Identify whether the Rack environment corresponds to the test suite.
#
# @return [Boolean] true when RACK_ENV is "test".
def test_environment?
ENV["RACK_ENV"] == "test"
end
# Determine whether the application is running in a production environment.
#
# Either APP_ENV or RACK_ENV set to "production" (case-insensitive) counts.
#
# @return [Boolean] true when APP_ENV or RACK_ENV resolves to "production".
def production_environment?
app_env = string_or_nil(ENV["APP_ENV"])&.downcase
rack_env = string_or_nil(ENV["RACK_ENV"])&.downcase
app_env == "production" || rack_env == "production"
end
# Determine whether federation features should be active.
#
# @return [Boolean] true when federation configuration allows it.
def federation_enabled?
PotatoMesh::Config.federation_enabled?
end
# Determine whether federation announcements should run asynchronously.
#
# Announcements are suppressed while running under the test suite.
#
# @return [Boolean] true when announcements are enabled.
def federation_announcements_active?
federation_enabled? && !test_environment?
end
end
end
end
+290
View File
@@ -0,0 +1,290 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
module App
module Identity
# Resolve the current application version string using git metadata when available.
#
# Falls back to PotatoMesh::Config.version_fallback when no repository is
# found, `git describe` fails, or any unexpected error occurs.
#
# @return [String] semantic version compatible identifier.
def determine_app_version
  repo_root = locate_git_repo_root(File.expand_path("../../..", __dir__))
  return PotatoMesh::Config.version_fallback unless repo_root
  stdout, status = Open3.capture2("git", "-C", repo_root, "describe", "--tags", "--long", "--abbrev=7")
  return PotatoMesh::Config.version_fallback unless status.success?
  described = stdout.strip
  return PotatoMesh::Config.version_fallback if described.empty?
  # git describe --long yields "<tag>-<commits-since-tag>-g<short-hash>".
  parts = /\A(?<tag>.+)-(?<count>\d+)-g(?<hash>[0-9a-f]+)\z/.match(described)
  return described unless parts
  commits_since_tag = parts[:count].to_i
  return parts[:tag] if commits_since_tag.zero?
  "#{parts[:tag]}+#{commits_since_tag}-#{parts[:hash]}"
rescue StandardError
  PotatoMesh::Config.version_fallback
end
# Discover the root directory of the git repository containing the
# application by traversing parent directories until a ``.git`` entry is
# located. This supports both traditional repositories where ``.git`` is a
# directory and worktree checkouts where it is a plain file.
#
# @param start_dir [String] absolute path where the search should begin.
# @return [String, nil] absolute path to the repository root when found,
#   otherwise ``nil``.
def locate_git_repo_root(start_dir)
  dir = File.expand_path(start_dir)
  until File.exist?(File.join(dir, ".git"))
    parent = File.dirname(dir)
    # Reaching the filesystem root means no repository exists above start_dir.
    return nil if parent == dir
    dir = parent
  end
  dir
end
# Load the persisted instance private key or generate a new one when absent.
#
# The key is read from PotatoMesh::Config.keyfile_path. Unreadable or
# corrupt key material is logged and replaced with a freshly generated key
# so startup can proceed.
#
# @return [Array<OpenSSL::PKey::RSA, Boolean>] tuple of key and generation flag.
def load_or_generate_instance_private_key
  keyfile_path = PotatoMesh::Config.keyfile_path
  migrate_legacy_keyfile_for_identity!(keyfile_path)
  FileUtils.mkdir_p(File.dirname(keyfile_path))
  if File.exist?(keyfile_path)
    contents = File.binread(keyfile_path)
    return [OpenSSL::PKey.read(contents), false]
  end
  [generate_and_persist_instance_key(keyfile_path), true]
rescue OpenSSL::PKey::PKeyError, ArgumentError => e
  # Corrupt or unparsable key material: warn, then overwrite it with a new key.
  warn_log(
    "Failed to load instance private key",
    context: "identity.keys",
    error_class: e.class.name,
    error_message: e.message,
  )
  [generate_and_persist_instance_key(keyfile_path), true]
end

# Generate a fresh 2048-bit RSA key and persist it with owner-only permissions.
#
# Shared by the main path and the corrupt-key recovery path above so the
# generation/persist logic exists in exactly one place.
#
# @param keyfile_path [String] destination path for the PEM encoded key.
# @return [OpenSSL::PKey::RSA] the newly generated key.
def generate_and_persist_instance_key(keyfile_path)
  key = OpenSSL::PKey::RSA.new(2048)
  File.open(keyfile_path, File::WRONLY | File::CREAT | File::TRUNC, 0o600) do |file|
    file.write(key.export)
  end
  key
end
# Migrate an existing legacy keyfile into the configured destination.
#
# Candidates are tried in the order supplied by
# PotatoMesh::Config.legacy_keyfile_candidates; the first successful copy
# wins and the search stops.
#
# @param destination_path [String] absolute path where the keyfile should reside.
# @return [void]
def migrate_legacy_keyfile_for_identity!(destination_path)
# Never overwrite a key that already exists at the destination.
return if File.exist?(destination_path)
PotatoMesh::Config.legacy_keyfile_candidates.each do |candidate|
next unless File.exist?(candidate)
next if candidate == destination_path
begin
FileUtils.mkdir_p(File.dirname(destination_path))
FileUtils.cp(candidate, destination_path)
File.chmod(0o600, destination_path)
debug_log(
"Migrated legacy keyfile to XDG directory",
context: "identity.keys",
source: candidate,
destination: destination_path,
)
rescue SystemCallError => e
# Best-effort: a failed copy is logged and the next candidate is tried.
warn_log(
"Failed to migrate legacy keyfile",
context: "identity.keys",
source: candidate,
destination: destination_path,
error_class: e.class.name,
error_message: e.message,
)
next
end
# Stop after the first successful migration.
break
end
end
private :migrate_legacy_keyfile_for_identity!, :locate_git_repo_root
# Return the directory used to store well-known documents.
#
# Mirrors {PotatoMesh::Config.well_known_storage_root}; combined with the
# document basename by #well_known_file_path.
#
# @return [String] absolute path to the staging directory.
def well_known_directory
PotatoMesh::Config.well_known_storage_root
end
# Determine the absolute path to the well-known document file.
#
# @return [String] filesystem path for the JSON document.
def well_known_file_path
  document_name = File.basename(PotatoMesh::Config.well_known_relative_path)
  File.join(well_known_directory, document_name)
end
# Remove legacy well-known artifacts from previous releases.
#
# @return [void]
def cleanup_legacy_well_known_artifacts
  legacy_file = PotatoMesh::Config.legacy_public_well_known_path
  FileUtils.rm_f(legacy_file)
  parent_dir = File.dirname(legacy_file)
  FileUtils.rmdir(parent_dir) if Dir.exist?(parent_dir) && Dir.empty?(parent_dir)
rescue SystemCallError
  # Best-effort cleanup: a missing or in-use legacy file/directory is fine.
end
# Construct the JSON body and detached signature for the well-known document.
#
# The payload is serialised, signed with the instance RSA key (SHA-256),
# and the signature plus the base64 signed payload are embedded alongside
# the original fields.
#
# @return [Array(String, String)] pair of JSON output and base64 signature.
def build_well_known_document
last_update = latest_node_update_timestamp
domain_value = sanitize_instance_domain(app_constant(:INSTANCE_DOMAIN))
payload = {
publicKey: app_constant(:INSTANCE_PUBLIC_KEY_PEM),
name: sanitized_site_name,
version: app_constant(:APP_VERSION),
domain: domain_value,
lastUpdate: last_update,
}
# NOTE(review): Ruby's stdlib JSON.generate does not document a :sort_keys
# option, so key ordering here presumably follows hash insertion order —
# verify remote verifiers use the embedded signedPayload rather than
# re-serialising this hash themselves.
signed_payload = JSON.generate(payload, sort_keys: true)
signature = Base64.strict_encode64(
app_constant(:INSTANCE_PRIVATE_KEY).sign(OpenSSL::Digest::SHA256.new, signed_payload),
)
# The published document carries the payload, the signature metadata, and
# the exact signed bytes (base64) so verifiers need not re-serialise.
document = payload.merge(
signature: signature,
signatureAlgorithm: PotatoMesh::Config.instance_signature_algorithm,
signedPayload: Base64.strict_encode64(signed_payload),
)
json_output = JSON.pretty_generate(document)
[json_output, signature]
end
# Regenerate the well-known document when it is stale or when the existing
# content no longer matches the current instance configuration.
#
# The file is rewritten when its content differs from the freshly built
# document OR when it is older than the configured refresh interval.
#
# @return [void]
def refresh_well_known_document_if_stale
FileUtils.mkdir_p(well_known_directory)
path = well_known_file_path
now = Time.now
json_output, signature = build_well_known_document
# Normalise to a trailing newline so byte comparison below is stable.
expected_contents = json_output.end_with?("\n") ? json_output : "#{json_output}\n"
needs_update = true
if File.exist?(path)
current_contents = File.binread(path)
mtime = File.mtime(path)
# Skip the rewrite only when the bytes match AND the file is recent.
if current_contents == expected_contents &&
(now - mtime) < PotatoMesh::Config.well_known_refresh_interval
needs_update = false
end
end
return unless needs_update
# 0o644: the document is world-readable by design (public metadata).
File.open(path, File::WRONLY | File::CREAT | File::TRUNC, 0o644) do |file|
file.write(expected_contents)
end
debug_log(
"Refreshed well-known document content",
context: "identity.well_known",
path: PotatoMesh::Config.well_known_relative_path,
bytes: json_output.bytesize,
document: json_output,
)
debug_log(
"Refreshed well-known document signature",
context: "identity.well_known",
path: PotatoMesh::Config.well_known_relative_path,
algorithm: PotatoMesh::Config.instance_signature_algorithm,
signature: signature,
)
end
# Retrieve the latest node update timestamp from the database.
#
# Returns nil when the database file is missing, the query fails, or no
# node rows exist yet.
#
# @return [Integer, nil] Unix timestamp or nil when unavailable.
def latest_node_update_timestamp
  return nil unless File.exist?(PotatoMesh::Config.db_path)
  db = open_database(readonly: true)
  latest = db.get_first_value("SELECT MAX(last_heard) FROM nodes")
  latest.nil? ? nil : latest.to_i
rescue SQLite3::Exception
  nil
ensure
  db&.close
end
# Emit a debug entry describing the active instance key material.
#
# A second entry is emitted only when the key was freshly generated during
# this boot (INSTANCE_KEY_GENERATED).
#
# @return [void]
def log_instance_public_key
  debug_log(
    "Loaded instance public key",
    context: "identity.keys",
    public_key_pem: app_constant(:INSTANCE_PUBLIC_KEY_PEM),
  )
  return unless app_constant(:INSTANCE_KEY_GENERATED)
  debug_log(
    "Generated new instance private key",
    context: "identity.keys",
    path: PotatoMesh::Config.keyfile_path,
  )
end
# Emit a debug entry describing how the instance domain was derived.
#
# @return [void]
def log_instance_domain_resolution
  domain_source = app_constant(:INSTANCE_DOMAIN_SOURCE) || :unknown
  debug_log(
    "Resolved instance domain",
    context: "identity.domain",
    source: domain_source,
    domain: app_constant(:INSTANCE_DOMAIN),
  )
end
end
end
end
@@ -0,0 +1,210 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
module App
# Helper methods for maintaining and presenting instance records.
module Instances
# Remove duplicate instance records grouped by their canonical domain name
# while favouring the most recent entry.
#
# For each canonical domain the newest row (by last_update_time, ties
# broken by rowid) is kept; older duplicates are deleted and the keeper's
# stored domain is rewritten to canonical form when it differs.
#
# @return [void]
def clean_duplicate_instances!
db = open_database
rows = with_busy_retry do
db.execute(
<<~SQL
SELECT rowid, domain, last_update_time
FROM instances
WHERE domain IS NOT NULL AND TRIM(domain) != ''
SQL
)
end
# Rows whose domain cannot be sanitised fall into the nil bucket and are
# skipped entirely below.
grouped = rows.group_by do |row|
sanitize_instance_domain(row[1])&.downcase
rescue StandardError
nil
end
deletions = []
updates = {}
grouped.each do |canonical_domain, entries|
next if canonical_domain.nil?
next if entries.size <= 1
# Sort ascending so the last element is the newest entry.
sorted_entries = entries.sort_by do |entry|
timestamp = coerce_integer(entry[2]) || -1
[timestamp, entry[0].to_i]
end
keeper = sorted_entries.last
next unless keeper
deletions.concat(sorted_entries[0...-1].map { |entry| entry[0].to_i })
# keeper originates from entries, so this lookup resolves the keeper's
# stored (possibly non-canonical) domain string.
current_domain = entries.find { |entry| entry[0] == keeper[0] }&.[](1)
if canonical_domain && current_domain != canonical_domain
updates[keeper[0].to_i] = canonical_domain
end
removed_count = sorted_entries.length - 1
warn_log(
"Removed duplicate instance records",
context: "instances.cleanup",
domain: canonical_domain,
removed: removed_count,
) if removed_count.positive?
end
unless deletions.empty?
placeholders = Array.new(deletions.size, "?").join(",")
with_busy_retry do
db.execute("DELETE FROM instances WHERE rowid IN (#{placeholders})", deletions)
end
end
updates.each do |rowid, canonical_domain|
with_busy_retry do
db.execute("UPDATE instances SET domain = ? WHERE rowid = ?", [canonical_domain, rowid])
end
end
rescue SQLite3::Exception => e
# Cleanup is opportunistic; database failures are logged and swallowed.
warn_log(
"Failed to clean duplicate instances",
context: "instances.cleanup",
error_class: e.class.name,
error_message: e.message,
)
ensure
db&.close
end
# Normalise and validate an instance database row for API presentation.
#
# Rows missing the required id/domain/pubkey triplet are discarded with a
# warning; optional fields are coerced and nil-valued keys are dropped.
#
# @param row [Hash] raw database row with string keys.
# @return [Hash, nil] cleaned hash or +nil+ when the row is discarded.
def normalize_instance_row(row)
  unless row.is_a?(Hash)
    warn_log(
      "Discarded malformed instance row",
      context: "instances.normalize",
      reason: "row not hash",
    )
    return nil
  end
  instance_id = string_or_nil(row["id"])
  canonical_domain = sanitize_instance_domain(row["domain"])&.downcase
  public_key = sanitize_public_key_pem(row["pubkey"])
  signature = string_or_nil(row["signature"])
  last_update_time = coerce_integer(row["last_update_time"])
  raw_private = row["is_private"]
  # Accept booleans, boolean-like strings, or raw integer flags; default false.
  is_private = coerce_boolean(raw_private)
  if is_private.nil?
    numeric_private = coerce_integer(raw_private)
    is_private = !numeric_private.to_i.zero? if numeric_private
  end
  is_private = false if is_private.nil?
  if instance_id.nil? || canonical_domain.nil? || public_key.nil?
    warn_log(
      "Discarded malformed instance row",
      context: "instances.normalize",
      instance_id: row["id"],
      domain: row["domain"],
      reason: "missing required fields",
    )
    return nil
  end
  {
    "id" => instance_id,
    "domain" => canonical_domain,
    "pubkey" => public_key,
    "name" => string_or_nil(row["name"]),
    "version" => string_or_nil(row["version"]),
    "channel" => string_or_nil(row["channel"]),
    "frequency" => string_or_nil(row["frequency"]),
    "latitude" => coerce_float(row["latitude"]),
    "longitude" => coerce_float(row["longitude"]),
    "lastUpdateTime" => last_update_time,
    "isPrivate" => is_private,
    "signature" => signature,
  }.compact
rescue StandardError => e
  warn_log(
    "Failed to normalise instance row",
    context: "instances.normalize",
    instance_id: row.respond_to?(:[]) ? row["id"] : nil,
    domain: row.respond_to?(:[]) ? row["domain"] : nil,
    error_class: e.class.name,
    error_message: e.message,
  )
  nil
end
# Fetch all instance rows ready to be served by the API while handling
# malformed rows gracefully. The dataset is restricted to records updated
# within the rolling window defined by PotatoMesh::Config.week_seconds.
#
# @return [Array<Hash>] list of cleaned instance payloads.
def load_instances_for_api
# Collapse duplicates first so the API never serves the same domain twice.
clean_duplicate_instances!
db = open_database(readonly: true)
db.results_as_hash = true
now = Time.now.to_i
min_last_update_time = now - PotatoMesh::Config.week_seconds
sql = <<~SQL
SELECT id, domain, pubkey, name, version, channel, frequency,
latitude, longitude, last_update_time, is_private, signature
FROM instances
WHERE domain IS NOT NULL AND TRIM(domain) != ''
AND pubkey IS NOT NULL AND TRIM(pubkey) != ''
AND last_update_time IS NOT NULL AND last_update_time >= ?
ORDER BY LOWER(domain)
SQL
rows = with_busy_retry do
db.execute(sql, min_last_update_time)
end
rows.each_with_object([]) do |row, memo|
normalized = normalize_instance_row(row)
next unless normalized
# Re-check the window after coercion: rows whose timestamp failed
# integer coercion (and were dropped from the payload) are excluded.
last_update_time = normalized["lastUpdateTime"]
next unless last_update_time.is_a?(Integer) && last_update_time >= min_last_update_time
memo << normalized
end
rescue SQLite3::Exception => e
# Serve an empty list rather than erroring the API on database failures.
warn_log(
"Failed to load instance records",
context: "instances.load",
error_class: e.class.name,
error_message: e.message,
)
[]
ensure
db&.close
end
end
end
end
@@ -0,0 +1,357 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
module App
module Networking
# Normalise the configured instance domain by stripping schemes and verifying structure.
#
# Accepts either a bare hostname (optionally with port) or a URL whose only
# components are scheme, host, and port; anything else raises a descriptive
# RuntimeError so misconfiguration fails fast at boot.
#
# @param raw [String, nil] environment supplied domain or URL.
# @return [String, nil] canonicalised hostname with optional port.
def canonicalize_configured_instance_domain(raw)
return nil if raw.nil?
trimmed = raw.to_s.strip
return nil if trimmed.empty?
candidate = trimmed
# URL form: parse and reject credentials, paths, queries, and fragments.
if candidate.include?("://")
begin
uri = URI.parse(candidate)
rescue URI::InvalidURIError => e
raise "INSTANCE_DOMAIN must be a valid hostname or URL, but parsing #{candidate.inspect} failed: #{e.message}"
end
unless uri.host
raise "INSTANCE_DOMAIN URL must include a hostname: #{candidate.inspect}"
end
if uri.userinfo
raise "INSTANCE_DOMAIN URL must not include credentials: #{candidate.inspect}"
end
if uri.path && !uri.path.empty? && uri.path != "/"
raise "INSTANCE_DOMAIN URL must not include a path component: #{candidate.inspect}"
end
if uri.query || uri.fragment
raise "INSTANCE_DOMAIN URL must not include query or fragment data: #{candidate.inspect}"
end
hostname = uri.hostname
unless hostname
raise "INSTANCE_DOMAIN URL must include a hostname: #{candidate.inspect}"
end
# IPv6 literals are re-bracketed so a port suffix parses unambiguously.
ip_host = ipv6_literal?(hostname)
candidate_host = ip_host ? "[#{ip_host}]" : hostname
candidate = candidate_host
port = uri.port
# port_required? (defined elsewhere in this module) decides whether the
# explicit port from the original string must be retained.
candidate = "#{candidate_host}:#{port}" if port_required?(uri, trimmed)
end
# Bare form: distinguish "host:port" from an unbracketed IPv6 literal,
# whose colons would otherwise be misread as a port separator.
ipv6_with_port = candidate.match(/\A(?<address>.+):(?<port>\d+)\z/)
if ipv6_with_port
address = ipv6_with_port[:address]
port = ipv6_with_port[:port]
literal = ipv6_literal?(address)
if literal && PotatoMesh::Sanitizer.valid_port?(port)
candidate = "[#{literal}]:#{port}"
else
ipv6_literal = ipv6_literal?(candidate)
candidate = "[#{ipv6_literal}]" if ipv6_literal
end
else
ipv6_literal = ipv6_literal?(candidate)
candidate = "[#{ipv6_literal}]" if ipv6_literal
end
sanitized = sanitize_instance_domain(candidate)
unless sanitized
raise "INSTANCE_DOMAIN must be a bare hostname (optionally with a port) without schemes or paths: #{raw.inspect}"
end
ensure_ipv6_instance_domain(sanitized).downcase
end
# Resolve the best domain for the running instance using configuration and network discovery.
#
# Resolution order: INSTANCE_DOMAIN env var, reverse DNS, public IP,
# protected (private, non-loopback) IP, and finally the local IP.
#
# @return [Array(String, Symbol)] tuple containing the domain and the discovery source.
def determine_instance_domain
  configured = ENV["INSTANCE_DOMAIN"]
  if configured
    canonical = canonicalize_configured_instance_domain(configured)
    return [canonical, :environment] if canonical
  end
  reverse_name = sanitize_instance_domain(reverse_dns_domain)
  return [reverse_name, :reverse_dns] if reverse_name
  if (public_ip = discover_public_ip_address)
    return [public_ip, :public_ip]
  end
  if (protected_ip = discover_protected_ip_address)
    return [protected_ip, :protected_ip]
  end
  [discover_local_ip_address, :local_ip]
end
# Attempt to determine the reverse DNS hostname for the local machine.
#
# Loopback and IPv6 link-local addresses are skipped; the first address
# that resolves to a non-empty hostname wins.
#
# @return [String, nil] resolved hostname or nil when unavailable.
def reverse_dns_domain
  Socket.ip_address_list.each do |address|
    next unless address.respond_to?(:ip?) && address.ip?
    next if (address.respond_to?(:ipv4_loopback?) && address.ipv4_loopback?) ||
            (address.respond_to?(:ipv6_loopback?) && address.ipv6_loopback?)
    next if address.respond_to?(:ipv6_linklocal?) && address.ipv6_linklocal?
    ip = address.ip_address
    next if ip.nil? || ip.empty?
    begin
      resolved = Resolv.getname(ip)&.strip
    rescue Resolv::ResolvError, Resolv::ResolvTimeout, SocketError
      # Unresolvable address — try the next candidate.
      next
    end
    return resolved unless resolved.nil? || resolved.empty?
  end
  nil
end
# Identify the first public IP address of the current host.
#
# @return [String, nil] public IP address string or nil.
def discover_public_ip_address
  ip_address_candidates
    .find { |candidate| public_ip_address?(candidate) }
    &.ip_address
end
# Identify a private yet non-loopback IP address suitable for protected networks.
#
# @return [String, nil] protected network address or nil.
def discover_protected_ip_address
  ip_address_candidates
    .find { |candidate| protected_ip_address?(candidate) }
    &.ip_address
end
# Collect viable socket addresses for evaluation.
#
# @return [Array<#ip?>] list of socket addresses supporting IP queries.
def ip_address_candidates
  Socket.ip_address_list.select do |address|
    address.respond_to?(:ip?) && address.ip?
  end
end
# Determine whether a socket address represents a public IP.
#
# Public means: parseable, and neither loopback, link-local, private, nor
# unspecified (0.0.0.0 / ::).
#
# @param addr [Addrinfo] candidate socket address.
# @return [Boolean] true when the address is publicly routable.
def public_ip_address?(addr)
  ip = ipaddr_from(addr)
  return false if ip.nil?
  !(loopback_address?(addr, ip) ||
    link_local_address?(addr, ip) ||
    private_address?(addr, ip) ||
    unspecified_address?(ip))
end
# Determine whether a socket address resides on a protected private network.
#
# @param addr [Addrinfo] candidate socket address.
# @return [Boolean] true when the address is private but not loopback/link-local.
# Determine whether a socket address resides on a protected private network.
#
# @param addr [Addrinfo] candidate socket address.
# @return [Boolean] true when the address is private but not loopback/link-local.
def protected_ip_address?(addr)
  ip = ipaddr_from(addr)
  return false if ip.nil?
  return false if loopback_address?(addr, ip) || link_local_address?(addr, ip)
  private_address?(addr, ip)
end
# Parse an IP address from the provided socket address.
#
# @param addr [Addrinfo] socket address to examine.
# @return [IPAddr, nil] parsed IP or nil when invalid.
# Parse an IP address from the provided socket address.
#
# @param addr [Addrinfo] socket address to examine.
# @return [IPAddr, nil] parsed IP or nil when blank/invalid.
def ipaddr_from(addr)
  raw = addr.ip_address
  return nil if raw.nil? || raw.empty?
  IPAddr.new(raw)
rescue IPAddr::InvalidAddressError
  nil
end
# Determine whether a socket address is loopback.
#
# @param addr [Addrinfo] socket address to inspect.
# @param ip [IPAddr] parsed IP representation of the address.
# @return [Boolean] true when the address is loopback.
# Determine whether a socket address is loopback.
#
# @param addr [Addrinfo] socket address to inspect.
# @param ip [IPAddr] parsed IP representation of the address.
# @return [Boolean] true when the address is loopback.
def loopback_address?(addr, ip)
  return true if addr.respond_to?(:ipv4_loopback?) && addr.ipv4_loopback?
  return true if addr.respond_to?(:ipv6_loopback?) && addr.ipv6_loopback?
  ip.loopback?
end
# Determine whether a socket address is link-local.
#
# @param addr [Addrinfo] socket address to inspect.
# @param ip [IPAddr] parsed IP representation of the address.
# @return [Boolean] true when the address is link-local.
# Determine whether a socket address is link-local.
#
# @param addr [Addrinfo] socket address to inspect.
# @param ip [IPAddr] parsed IP representation of the address.
# @return [Boolean] true when the address is link-local.
def link_local_address?(addr, ip)
  return true if addr.respond_to?(:ipv6_linklocal?) && addr.ipv6_linklocal?
  # IPAddr#link_local? is guarded because older ipaddr releases lack it.
  ip.respond_to?(:link_local?) && ip.link_local?
end
# Determine whether a socket address is private.
#
# @param addr [Addrinfo] socket address to inspect.
# @param ip [IPAddr] parsed IP representation of the address.
# @return [Boolean] true when the address is private.
# Determine whether a socket address is private.
#
# @param addr [Addrinfo] socket address to inspect.
# @param ip [IPAddr] parsed IP representation of the address.
# @return [Boolean] true when the address is private.
def private_address?(addr, ip)
  # Prefer Addrinfo's own IPv4 predicate when available; fall back to IPAddr.
  use_addrinfo =
    addr.respond_to?(:ipv4?) && addr.ipv4? && addr.respond_to?(:ipv4_private?)
  use_addrinfo ? addr.ipv4_private? : ip.private?
end
# Identify unspecified IP addresses.
#
# @param ip [IPAddr] parsed IP.
# @return [Boolean] true for unspecified addresses (0.0.0.0 / ::).
# Identify unspecified IP addresses.
#
# @param ip [IPAddr] parsed IP.
# @return [Boolean] true for unspecified addresses (0.0.0.0 / ::).
def unspecified_address?(ip)
  return false unless ip.ipv4? || ip.ipv6?
  ip.to_i.zero?
end
# Choose the most appropriate local IP address for the instance domain.
#
# @return [String] selected IP address string.
# Choose the most appropriate local IP address for the instance domain.
#
# Preference order: non-loopback IPv4, then any non-loopback address,
# then a loopback address, finally the literal "127.0.0.1".
#
# @return [String] selected IP address string.
def discover_local_ip_address
  candidates = ip_address_candidates
  v4_loopback = ->(addr) { addr.respond_to?(:ipv4_loopback?) && addr.ipv4_loopback? }
  v6_loopback = ->(addr) { addr.respond_to?(:ipv6_loopback?) && addr.ipv6_loopback? }

  preferred = candidates.find do |addr|
    addr.respond_to?(:ipv4?) && addr.ipv4? && !v4_loopback.call(addr)
  end
  return preferred.ip_address if preferred

  routable = candidates.find do |addr|
    !v4_loopback.call(addr) && !v6_loopback.call(addr)
  end
  return routable.ip_address if routable

  fallback = candidates.find do |addr|
    v4_loopback.call(addr) || v6_loopback.call(addr)
  end
  fallback ? fallback.ip_address : "127.0.0.1"
end
# Determine whether an IP should be restricted from exposure.
#
# @param ip [IPAddr] candidate IP address.
# @return [Boolean] true when the IP should not be exposed.
# Determine whether an IP should be restricted from exposure.
#
# Restricted addresses are loopback, private, link-local, or unspecified.
#
# @param ip [IPAddr] candidate IP address.
# @return [Boolean] true when the IP should not be exposed.
def restricted_ip_address?(ip)
  ip.loopback? || ip.private? || ip.link_local? || ip.to_i.zero?
end
# Normalize IPv6 instance domains so that they remain bracketed and URI-compatible.
#
# @param domain [String] sanitized hostname optionally including a port suffix.
# @return [String] domain with IPv6 literals wrapped in brackets when necessary.
# Normalize IPv6 instance domains so that they remain bracketed and URI-compatible.
#
# Handles three shapes: an already-bracketed literal (re-canonicalized),
# a bare "host:port" whose host turns out to be IPv6, and a bare IPv6
# literal with no port. Anything else is returned unchanged.
#
# @param domain [String] sanitized hostname optionally including a port suffix.
# @return [String] domain with IPv6 literals wrapped in brackets when necessary.
def ensure_ipv6_instance_domain(domain)
  # Case 1: already bracketed, e.g. "[::1]" or "[::1]:8080".
  bracketed_match = domain.match(/\A\[(?<host>[^\]]+)\](?::(?<port>\d+))?\z/)
  if bracketed_match
    host = bracketed_match[:host]
    port = bracketed_match[:port]
    ipv6 = ipv6_literal?(host)
    if ipv6
      # Re-emit with the canonical literal; keep the port only if present.
      return "[#{ipv6}]#{port ? ":#{port}" : ""}"
    end
    # Bracketed but not valid IPv6 — leave the input untouched.
    return domain
  end
  # Case 2: try to split a trailing ":port". rpartition finds the LAST
  # colon so "::1:443" splits as host "::1" / port "443"; the
  # end_with?(":") guard rejects splits that would truncate an IPv6 host.
  host_candidate = domain
  port_candidate = nil
  split_host, separator, split_port = domain.rpartition(":")
  if !separator.empty? && split_port.match?(/\A\d+\z/) && !split_host.empty? && !split_host.end_with?(":")
    host_candidate = split_host
    port_candidate = split_port
  end
  if port_candidate
    ipv6_host = ipv6_literal?(host_candidate)
    return "[#{ipv6_host}]:#{port_candidate}" if ipv6_host
    # The split host was not IPv6; fall back to treating the whole
    # string as the host (the "port" may have been part of the literal).
    host_candidate = domain
    port_candidate = nil
  end
  # Case 3: bare IPv6 literal without a port.
  ipv6 = ipv6_literal?(host_candidate)
  return "[#{ipv6}]" if ipv6
  domain
end
# Parse an IPv6 literal and return its canonical representation when valid.
#
# @param candidate [String] potential IPv6 literal.
# @return [String, nil] normalized IPv6 literal or nil when the candidate is not IPv6.
# Parse an IPv6 literal and return its canonical representation when valid.
#
# @param candidate [String] potential IPv6 literal.
# @return [String, nil] normalized IPv6 literal or nil when the candidate is not IPv6.
def ipv6_literal?(candidate)
  parsed = IPAddr.new(candidate)
  parsed.ipv6? ? parsed.to_s : nil
rescue IPAddr::InvalidAddressError
  nil
end
# Determine whether a URI's port should be included in the canonicalized domain.
#
# @param uri [URI::Generic] parsed URI for the instance domain.
# @param raw [String] original sanitized input string.
# @return [Boolean] true when the port must be preserved.
# Determine whether a URI's port should be included in the canonicalized domain.
#
# Non-default ports are always kept; a default port is kept only when the
# caller spelled it out explicitly in the raw input.
#
# @param uri [URI::Generic] parsed URI for the instance domain.
# @param raw [String] original sanitized input string.
# @return [Boolean] true when the port must be preserved.
def port_required?(uri, raw)
  port = uri.port
  return false unless port
  default = uri.respond_to?(:default_port) ? uri.default_port : nil
  return true if default.nil? || !default || port != default
  raw.strip.end_with?(":#{port}")
end
end
end
end
@@ -0,0 +1,198 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
module App
module Prometheus
# Counter for every ingested message.
MESSAGES_TOTAL = ::Prometheus::Client::Counter.new(
  :meshtastic_messages_total,
  docstring: "Total number of messages received",
)
# Gauge for the total number of tracked nodes.
NODES_GAUGE = ::Prometheus::Client::Gauge.new(
  :meshtastic_nodes,
  docstring: "Number of nodes tracked",
)
# Presence gauge (value 1) carrying descriptive labels per node.
NODE_GAUGE = ::Prometheus::Client::Gauge.new(
  :meshtastic_node,
  docstring: "Presence of a Meshtastic node",
  labels: %i[node short_name long_name hw_model role],
)
# Per-node device-metric gauges, all labelled by node id only.
NODE_BATTERY_LEVEL = ::Prometheus::Client::Gauge.new(
  :meshtastic_node_battery_level,
  docstring: "Battery level of a Meshtastic node",
  labels: [:node],
)
NODE_VOLTAGE = ::Prometheus::Client::Gauge.new(
  :meshtastic_node_voltage,
  docstring: "Battery voltage of a Meshtastic node",
  labels: [:node],
)
NODE_UPTIME = ::Prometheus::Client::Gauge.new(
  :meshtastic_node_uptime_seconds,
  docstring: "Uptime reported by a Meshtastic node",
  labels: [:node],
)
NODE_CHANNEL_UTIL = ::Prometheus::Client::Gauge.new(
  :meshtastic_node_channel_utilization,
  docstring: "Channel utilization reported by a Meshtastic node",
  labels: [:node],
)
NODE_AIR_UTIL_TX = ::Prometheus::Client::Gauge.new(
  :meshtastic_node_transmit_air_utilization,
  docstring: "Transmit air utilization reported by a Meshtastic node",
  labels: [:node],
)
# Per-node position gauges, also labelled by node id only.
NODE_LATITUDE = ::Prometheus::Client::Gauge.new(
  :meshtastic_node_latitude,
  docstring: "Latitude of a Meshtastic node",
  labels: [:node],
)
NODE_LONGITUDE = ::Prometheus::Client::Gauge.new(
  :meshtastic_node_longitude,
  docstring: "Longitude of a Meshtastic node",
  labels: [:node],
)
NODE_ALTITUDE = ::Prometheus::Client::Gauge.new(
  :meshtastic_node_altitude,
  docstring: "Altitude of a Meshtastic node",
  labels: [:node],
)
# Every metric defined above, used for bulk registration below.
METRICS = [
  MESSAGES_TOTAL,
  NODES_GAUGE,
  NODE_GAUGE,
  NODE_BATTERY_LEVEL,
  NODE_VOLTAGE,
  NODE_UPTIME,
  NODE_CHANNEL_UTIL,
  NODE_AIR_UTIL_TX,
  NODE_LATITUDE,
  NODE_LONGITUDE,
  NODE_ALTITUDE,
].freeze
# Register each metric with the default registry at load time.
METRICS.each do |metric|
  ::Prometheus::Client.registry.register(metric)
rescue ::Prometheus::Client::Registry::AlreadyRegisteredError
  # Ignore duplicate registrations when the code is reloaded.
end
# Export per-node gauge samples to the Prometheus registry.
#
# Only nodes allow-listed via +prom_report_ids+ are exported; a leading
# "*" entry exports every node. Missing payload fields leave their
# gauges untouched.
#
# @param node_id [String, nil] node identifier used as the +node+ label.
# @param user [Hash, nil] node info hash ("shortName", "longName", "hwModel").
# @param role [String] node role; blank roles skip the presence gauge.
# @param met [Hash, nil] device metrics keyed by camelCase field name.
# @param pos [Hash, nil] position hash ("latitude", "longitude", "altitude").
# @return [void]
def update_prometheus_metrics(node_id, user = nil, role = "", met = nil, pos = nil)
  allowed = prom_report_ids
  return if allowed.empty? || !node_id
  return unless allowed[0] == "*" || allowed.include?(node_id)
  node_labels = { node: node_id }
  if user.is_a?(Hash) && role && role != ""
    NODE_GAUGE.set(
      1,
      labels: node_labels.merge(
        short_name: user["shortName"],
        long_name: user["longName"],
        hw_model: user["hwModel"],
        role: role,
      ),
    )
  end
  if met.is_a?(Hash)
    {
      "batteryLevel" => NODE_BATTERY_LEVEL,
      "voltage" => NODE_VOLTAGE,
      "uptimeSeconds" => NODE_UPTIME,
      "channelUtilization" => NODE_CHANNEL_UTIL,
      "airUtilTx" => NODE_AIR_UTIL_TX,
    }.each do |field, gauge|
      sample = met[field]
      gauge.set(sample, labels: node_labels) if sample
    end
  end
  if pos.is_a?(Hash)
    {
      "latitude" => NODE_LATITUDE,
      "longitude" => NODE_LONGITUDE,
      "altitude" => NODE_ALTITUDE,
    }.each do |field, gauge|
      sample = pos[field]
      gauge.set(sample, labels: node_labels) if sample
    end
  end
end
# Refresh the node-count gauge and re-export per-node metrics from the
# database snapshot.
#
# @return [void]
def update_all_prometheus_metrics_from_nodes
  rows = query_nodes(1000)
  NODES_GAUGE.set(rows.size)
  allowed = prom_report_ids
  return if allowed.empty?
  wildcard = allowed[0] == "*"
  rows.each do |row|
    id = row["node_id"]
    next unless wildcard || allowed.include?(id)
    user = {
      "shortName" => row["short_name"] || "",
      "longName" => row["long_name"] || "",
      "hwModel" => row["hw_model"] || "",
    }
    metrics = {
      "batteryLevel" => row["battery_level"],
      "voltage" => row["voltage"],
      "uptimeSeconds" => row["uptime_seconds"],
      "channelUtilization" => row["channel_utilization"],
      "airUtilTx" => row["air_util_tx"],
    }
    position = {
      "latitude" => row["latitude"],
      "longitude" => row["longitude"],
      "altitude" => row["altitude"],
    }
    update_prometheus_metrics(id, user, row["role"] || "", metrics, position)
  end
end
end
end
end
+469
View File
@@ -0,0 +1,469 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
module App
module Queries
MAX_QUERY_LIMIT = 1000
# Remove nil or empty values from an API response hash to reduce payload size
# while preserving legitimate zero-valued measurements.
# Integer keys emitted by SQLite are ignored because the JSON representation
# only exposes symbolic keys. Strings containing only whitespace are treated
# as empty to mirror sanitisation elsewhere in the application, and any other
# objects responding to `empty?` are dropped when they contain no data.
#
# @param row [Hash] raw database row to compact.
# @return [Hash] cleaned hash without blank values.
# Remove nil or blank values from an API response hash to reduce payload
# size while preserving legitimate zero-valued measurements.
#
# Integer keys emitted by SQLite are dropped (the JSON form only uses
# symbolic keys). Whitespace-only strings count as empty, and any other
# object answering +empty?+ with true is discarded.
#
# @param row [Hash] raw database row to compact.
# @return [Hash] cleaned hash without blank values.
def compact_api_row(row)
  return {} unless row.is_a?(Hash)
  cleaned = {}
  row.each do |key, value|
    next if key.is_a?(Integer) || value.nil?
    if value.is_a?(String)
      cleaned[key] = value unless value.strip.empty?
    elsif !(value.respond_to?(:empty?) && value.empty?)
      cleaned[key] = value
    end
  end
  cleaned
end
# Normalise a caller-provided limit to a sane, positive integer.
#
# @param limit [Object] value coerced to an integer.
# @param default [Integer] fallback used when coercion fails.
# @return [Integer] limit clamped between 1 and MAX_QUERY_LIMIT.
# Normalise a caller-provided limit to a sane, positive integer.
#
# @param limit [Object] value coerced to an integer (strings parsed base 10).
# @param default [Integer] fallback used when coercion fails or is non-positive.
# @return [Integer] limit capped at MAX_QUERY_LIMIT.
def coerce_query_limit(limit, default: 200)
  value = limit
  unless value.is_a?(Integer)
    value = begin
        Integer(value, 10)
      rescue ArgumentError, TypeError
        nil
      end
  end
  value = default unless value&.positive?
  value > MAX_QUERY_LIMIT ? MAX_QUERY_LIMIT : value
end
# Expand a caller-supplied node reference into lookup tokens.
#
# Combines string/number forms derived from the raw reference with the
# canonical identifier pair produced by +canonical_node_parts+ (assumed
# to yield [canonical_id, numeric_id, ...] — confirm against its
# definition elsewhere in this file). Order matters: raw forms come
# first, canonical forms after, and duplicates are removed.
#
# @param node_ref [Integer, Numeric, String, nil, Object] raw node reference.
# @return [Hash] :string_values (Array<String>) and :numeric_values
#   (Array<Integer>) suitable for SQL IN-clause parameters.
def node_reference_tokens(node_ref)
  parts = canonical_node_parts(node_ref)
  canonical_id, numeric_id = parts ? parts[0, 2] : [nil, nil]
  string_values = []
  numeric_values = []
  case node_ref
  when Integer
    numeric_values << node_ref
    string_values << node_ref.to_s
  when Numeric
    # Non-integer numerics are truncated toward zero.
    coerced = node_ref.to_i
    numeric_values << coerced
    string_values << coerced.to_s
  when String
    trimmed = node_ref.strip
    unless trimmed.empty?
      string_values << trimmed
      # Purely numeric strings also match numeric columns.
      numeric_values << trimmed.to_i if trimmed.match?(/\A-?\d+\z/)
    end
  when nil
    # no-op
  else
    # Unknown types are stringified on a best-effort basis.
    coerced = node_ref.to_s.strip
    string_values << coerced unless coerced.empty?
  end
  if canonical_id
    # Include both cases so case-sensitive columns still match.
    string_values << canonical_id
    string_values << canonical_id.upcase
  end
  if numeric_id
    numeric_values << numeric_id
    string_values << numeric_id.to_s
  end
  cleaned_strings = string_values.compact.map(&:to_s).map(&:strip).reject(&:empty?).uniq
  cleaned_numbers = numeric_values.compact.map do |value|
    begin
      Integer(value, 10)
    rescue ArgumentError, TypeError
      nil
    end
  end.compact.uniq
  {
    string_values: cleaned_strings,
    numeric_values: cleaned_numbers,
  }
end
# Build a parameterized SQL predicate matching a node reference against
# the given columns.
#
# @param node_ref [Object] raw node reference expanded via node_reference_tokens.
# @param string_columns [Array<String>] columns compared against string tokens.
# @param numeric_columns [Array<String>] columns compared against numeric tokens.
# @return [Array(String, Array), nil] "(col IN (?, ...) OR ...)" clause plus
#   bind parameters, or nil when no token matched any column.
def node_lookup_clause(node_ref, string_columns:, numeric_columns: [])
  tokens = node_reference_tokens(node_ref)
  clauses = []
  params = []
  append = lambda do |columns, values|
    next if columns.empty? || values.empty?
    marks = Array.new(values.length, "?").join(", ")
    columns.each do |column|
      clauses << "#{column} IN (#{marks})"
      params.concat(values)
    end
  end
  append.call(string_columns, tokens[:string_values])
  append.call(numeric_columns, tokens[:numeric_values])
  return nil if clauses.empty?
  ["(#{clauses.join(" OR ")})", params]
end
# Fetch recently heard nodes, optionally scoped to a single node reference.
#
# Without +node_ref+ only nodes heard within the configured week window
# are returned; with it, the window filter is applied afterwards in Ruby
# against the freshest of last_heard/position_time/first_heard.
#
# @param limit [Object] caller-provided row limit (normalised internally).
# @param node_ref [Object, nil] node identifier filter.
# @return [Array<Hash>] compacted node rows ordered by last_heard DESC.
def query_nodes(limit, node_ref: nil)
  limit = coerce_query_limit(limit)
  db = open_database(readonly: true)
  db.results_as_hash = true
  now = Time.now.to_i
  min_last_heard = now - PotatoMesh::Config.week_seconds
  params = []
  where_clauses = []
  if node_ref
    clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["num"])
    # An unparseable reference can never match anything.
    return [] unless clause
    where_clauses << clause.first
    params.concat(clause.last)
  else
    where_clauses << "last_heard >= ?"
    params << min_last_heard
  end
  if private_mode?
    # Hidden clients are excluded from public listings in private mode.
    where_clauses << "(role IS NULL OR role <> 'CLIENT_HIDDEN')"
  end
  sql = <<~SQL
    SELECT node_id, short_name, long_name, hw_model, role, snr,
           battery_level, voltage, last_heard, first_heard,
           uptime_seconds, channel_utilization, air_util_tx,
           position_time, location_source, precision_bits,
           latitude, longitude, altitude, lora_freq, modem_preset
    FROM nodes
  SQL
  sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
  sql += <<~SQL
    ORDER BY last_heard DESC
    LIMIT ?
  SQL
  params << limit
  rows = db.execute(sql, params)
  # Re-check recency in Ruby: any of the three timestamps may prove the
  # node was active within the window even when last_heard alone is stale.
  rows = rows.select do |r|
    last_candidate = [r["last_heard"], r["position_time"], r["first_heard"]]
      .map { |value| coerce_integer(value) }
      .compact
      .max
    last_candidate && last_candidate >= min_last_heard
  end
  rows.each do |r|
    r["role"] ||= "CLIENT"
    lh = r["last_heard"]&.to_i
    pt = r["position_time"]&.to_i
    # Clamp future-dated timestamps: last_heard is capped at "now",
    # future position times are discarded entirely.
    lh = now if lh && lh > now
    pt = nil if pt && pt > now
    r["last_heard"] = lh
    r["position_time"] = pt
    r["last_seen_iso"] = Time.at(lh).utc.iso8601 if lh
    r["pos_time_iso"] = Time.at(pt).utc.iso8601 if pt
    pb = r["precision_bits"]
    r["precision_bits"] = pb.to_i if pb
  end
  rows.map { |row| compact_api_row(row) }
ensure
  db&.close
end
# Fetch recent messages, optionally scoped to one node and optionally
# including encrypted payloads.
#
# Rows with no text, no encrypted body, no reply reference, and no emoji
# are filtered out at the SQL level. Sender ids are normalised to the
# canonical "!hex" form where possible.
#
# @param limit [Object] caller-provided row limit (normalised internally).
# @param node_ref [Object, nil] matches either sender or recipient.
# @param include_encrypted [Boolean] keep rows whose body is encrypted.
# @return [Array<Hash>] compacted message rows ordered by rx_time DESC.
def query_messages(limit, node_ref: nil, include_encrypted: false)
  limit = coerce_query_limit(limit)
  db = open_database(readonly: true)
  db.results_as_hash = true
  params = []
  where_clauses = [
    "(COALESCE(TRIM(m.text), '') != '' OR COALESCE(TRIM(m.encrypted), '') != '' OR m.reply_id IS NOT NULL OR COALESCE(TRIM(m.emoji), '') != '')",
  ]
  # Force a strict boolean so the flag composes safely below.
  include_encrypted = !!include_encrypted
  now = Time.now.to_i
  min_rx_time = now - PotatoMesh::Config.week_seconds
  where_clauses << "m.rx_time >= ?"
  params << min_rx_time
  unless include_encrypted
    where_clauses << "COALESCE(TRIM(m.encrypted), '') = ''"
  end
  if node_ref
    clause = node_lookup_clause(node_ref, string_columns: ["m.from_id", "m.to_id"])
    # An unparseable reference can never match anything.
    return [] unless clause
    where_clauses << clause.first
    params.concat(clause.last)
  end
  sql = <<~SQL
    SELECT m.id, m.rx_time, m.rx_iso, m.from_id, m.to_id, m.channel,
           m.portnum, m.text, m.encrypted, m.rssi, m.hop_limit,
           m.lora_freq, m.modem_preset, m.channel_name, m.snr,
           m.reply_id, m.emoji
    FROM messages m
  SQL
  sql += " WHERE #{where_clauses.join(" AND ")}\n"
  sql += <<~SQL
    ORDER BY m.rx_time DESC
    LIMIT ?
  SQL
  params << limit
  rows = db.execute(sql, params)
  rows.each do |r|
    # Drop SQLite's positional (integer-keyed) duplicates.
    r.delete_if { |key, _| key.is_a?(Integer) }
    r["reply_id"] = coerce_integer(r["reply_id"]) if r.key?("reply_id")
    r["emoji"] = string_or_nil(r["emoji"]) if r.key?("emoji")
    # Debug tap: log the raw row when a message arrives without a sender.
    if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.strip.empty?)
      raw = db.execute("SELECT * FROM messages WHERE id = ?", [r["id"]]).first
      debug_log(
        "Message query produced empty sender",
        context: "queries.messages",
        stage: "raw_row",
        row: raw,
      )
    end
    canonical_from_id = string_or_nil(normalize_node_id(db, r["from_id"]))
    node_id = canonical_from_id || string_or_nil(r["from_id"])
    if canonical_from_id
      raw_from_id = string_or_nil(r["from_id"])
      # Replace the stored sender when it is missing, purely numeric, or
      # a "!hex" id that differs from the canonical form only by case.
      if raw_from_id.nil? || raw_from_id.match?(/\A[0-9]+\z/)
        r["from_id"] = canonical_from_id
      elsif raw_from_id.start_with?("!") && raw_from_id.casecmp(canonical_from_id) != 0
        r["from_id"] = canonical_from_id
      end
    end
    r["node_id"] = node_id if node_id
    # Second debug tap: sender still empty after normalization.
    if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.strip.empty?)
      debug_log(
        "Message query produced empty sender",
        context: "queries.messages",
        stage: "after_normalization",
        row: r,
      )
    end
  end
  rows.map { |row| compact_api_row(row) }
ensure
  db&.close
end
# Fetch recent position reports, optionally scoped to one node.
#
# @param limit [Object] caller-provided row limit (normalised internally).
# @param node_ref [Object, nil] node identifier filter.
# @return [Array<Hash>] compacted position rows ordered by rx_time DESC.
def query_positions(limit, node_ref: nil)
  limit = coerce_query_limit(limit)
  db = open_database(readonly: true)
  db.results_as_hash = true
  params = []
  where_clauses = []
  now = Time.now.to_i
  min_rx_time = now - PotatoMesh::Config.week_seconds
  # Fall back to position_time when rx_time is missing.
  where_clauses << "COALESCE(rx_time, position_time, 0) >= ?"
  params << min_rx_time
  if node_ref
    clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
    # An unparseable reference can never match anything.
    return [] unless clause
    where_clauses << clause.first
    params.concat(clause.last)
  end
  sql = <<~SQL
    SELECT * FROM positions
  SQL
  sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
  sql += <<~SQL
    ORDER BY rx_time DESC
    LIMIT ?
  SQL
  params << limit
  rows = db.execute(sql, params)
  rows.each do |r|
    rx_time = coerce_integer(r["rx_time"])
    r["rx_time"] = rx_time if rx_time
    # Backfill rx_iso only when the stored value is blank.
    r["rx_iso"] = Time.at(rx_time).utc.iso8601 if rx_time && string_or_nil(r["rx_iso"]).nil?
    node_num = coerce_integer(r["node_num"])
    r["node_num"] = node_num if node_num
    position_time = coerce_integer(r["position_time"])
    # Future-dated position timestamps are discarded.
    position_time = nil if position_time && position_time > now
    r["position_time"] = position_time
    r["position_time_iso"] = Time.at(position_time).utc.iso8601 if position_time
    r["precision_bits"] = coerce_integer(r["precision_bits"])
    r["sats_in_view"] = coerce_integer(r["sats_in_view"])
    r["pdop"] = coerce_float(r["pdop"])
    r["snr"] = coerce_float(r["snr"])
  end
  rows.map { |row| compact_api_row(row) }
ensure
  db&.close
end
# Fetch recent neighbor observations, optionally scoped to one node.
#
# @param limit [Object] caller-provided row limit (normalised internally).
# @param node_ref [Object, nil] matches either side of the neighbor pair.
# @return [Array<Hash>] compacted neighbor rows ordered by rx_time DESC.
def query_neighbors(limit, node_ref: nil)
  limit = coerce_query_limit(limit)
  db = open_database(readonly: true)
  db.results_as_hash = true
  now = Time.now.to_i
  where_clauses = ["COALESCE(rx_time, 0) >= ?"]
  params = [now - PotatoMesh::Config.week_seconds]
  if node_ref
    clause = node_lookup_clause(node_ref, string_columns: ["node_id", "neighbor_id"])
    # An unparseable reference can never match anything.
    return [] unless clause
    where_clauses << clause.first
    params.concat(clause.last)
  end
  sql = <<~SQL
    SELECT * FROM neighbors
  SQL
  sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
  sql += <<~SQL
    ORDER BY rx_time DESC
    LIMIT ?
  SQL
  params << limit
  db.execute(sql, params).map do |row|
    ts = coerce_integer(row["rx_time"])
    # Cap future-dated observations at "now".
    ts = now if ts && ts > now
    if ts
      row["rx_time"] = ts
      row["rx_iso"] = Time.at(ts).utc.iso8601
    end
    row["snr"] = coerce_float(row["snr"])
    compact_api_row(row)
  end
ensure
  db&.close
end
# Fetch recent telemetry rows, optionally scoped to one node, coercing
# every numeric column to its proper Ruby type.
#
# @param limit [Object] caller-provided row limit (normalised internally).
# @param node_ref [Object, nil] node identifier filter.
# @return [Array<Hash>] compacted telemetry rows ordered by rx_time DESC.
def query_telemetry(limit, node_ref: nil)
  limit = coerce_query_limit(limit)
  db = open_database(readonly: true)
  db.results_as_hash = true
  params = []
  where_clauses = []
  now = Time.now.to_i
  min_rx_time = now - PotatoMesh::Config.week_seconds
  # Fall back to telemetry_time when rx_time is missing.
  where_clauses << "COALESCE(rx_time, telemetry_time, 0) >= ?"
  params << min_rx_time
  if node_ref
    clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
    # An unparseable reference can never match anything.
    return [] unless clause
    where_clauses << clause.first
    params.concat(clause.last)
  end
  sql = <<~SQL
    SELECT * FROM telemetry
  SQL
  sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
  sql += <<~SQL
    ORDER BY rx_time DESC
    LIMIT ?
  SQL
  params << limit
  rows = db.execute(sql, params)
  rows.each do |r|
    rx_time = coerce_integer(r["rx_time"])
    r["rx_time"] = rx_time if rx_time
    # Backfill rx_iso only when the stored value is blank.
    r["rx_iso"] = Time.at(rx_time).utc.iso8601 if rx_time && string_or_nil(r["rx_iso"]).nil?
    node_num = coerce_integer(r["node_num"])
    r["node_num"] = node_num if node_num
    telemetry_time = coerce_integer(r["telemetry_time"])
    # Future-dated telemetry timestamps are discarded.
    telemetry_time = nil if telemetry_time && telemetry_time > now
    r["telemetry_time"] = telemetry_time
    r["telemetry_time_iso"] = Time.at(telemetry_time).utc.iso8601 if telemetry_time
    # Radio/transport fields.
    r["channel"] = coerce_integer(r["channel"])
    r["hop_limit"] = coerce_integer(r["hop_limit"])
    r["rssi"] = coerce_integer(r["rssi"])
    r["bitfield"] = coerce_integer(r["bitfield"])
    r["snr"] = coerce_float(r["snr"])
    # Device metrics.
    r["battery_level"] = coerce_float(r["battery_level"])
    r["voltage"] = coerce_float(r["voltage"])
    r["channel_utilization"] = coerce_float(r["channel_utilization"])
    r["air_util_tx"] = coerce_float(r["air_util_tx"])
    r["uptime_seconds"] = coerce_integer(r["uptime_seconds"])
    # Environment sensor metrics.
    r["temperature"] = coerce_float(r["temperature"])
    r["relative_humidity"] = coerce_float(r["relative_humidity"])
    r["barometric_pressure"] = coerce_float(r["barometric_pressure"])
    r["gas_resistance"] = coerce_float(r["gas_resistance"])
    r["current"] = coerce_float(r["current"])
    r["iaq"] = coerce_integer(r["iaq"])
    r["distance"] = coerce_float(r["distance"])
    r["lux"] = coerce_float(r["lux"])
    r["white_lux"] = coerce_float(r["white_lux"])
    r["ir_lux"] = coerce_float(r["ir_lux"])
    r["uv_lux"] = coerce_float(r["uv_lux"])
    r["wind_direction"] = coerce_integer(r["wind_direction"])
    r["wind_speed"] = coerce_float(r["wind_speed"])
    r["weight"] = coerce_float(r["weight"])
    r["wind_gust"] = coerce_float(r["wind_gust"])
    r["wind_lull"] = coerce_float(r["wind_lull"])
    r["radiation"] = coerce_float(r["radiation"])
    r["rainfall_1h"] = coerce_float(r["rainfall_1h"])
    r["rainfall_24h"] = coerce_float(r["rainfall_24h"])
    r["soil_moisture"] = coerce_integer(r["soil_moisture"])
    r["soil_temperature"] = coerce_float(r["soil_temperature"])
  end
  rows.map { |row| compact_api_row(row) }
ensure
  db&.close
end
end
end
end
@@ -0,0 +1,151 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
module App
module Routes
module Api
# Register read-only API endpoints that expose cached mesh data and
# instance metadata. Invoked by Sinatra during extension registration.
#
# @param app [Sinatra::Base] application instance receiving the routes.
# @return [void]
# Register read-only API endpoints that expose cached mesh data and
# instance metadata. Invoked by Sinatra during extension registration.
#
# Each collection route caps the client-supplied limit at 1000 before
# delegating to the matching query_* helper; detail routes additionally
# 400 on a missing id and (for nodes) 404 on no match.
#
# @param app [Sinatra::Base] application instance receiving the routes.
# @return [void]
def self.registered(app)
  # Message endpoints are entirely hidden in private mode.
  app.before "/api/messages*" do
    halt 404 if private_mode?
  end
  # Version and public configuration snapshot for the frontend.
  app.get "/version" do
    content_type :json
    last_update = latest_node_update_timestamp
    payload = {
      name: sanitized_site_name,
      version: app_constant(:APP_VERSION),
      lastNodeUpdate: last_update,
      config: {
        siteName: sanitized_site_name,
        channel: sanitized_channel,
        frequency: sanitized_frequency,
        contactLink: sanitized_contact_link,
        contactLinkUrl: sanitized_contact_link_url,
        refreshIntervalSeconds: PotatoMesh::Config.refresh_interval_seconds,
        mapCenter: {
          lat: PotatoMesh::Config.map_center_lat,
          lon: PotatoMesh::Config.map_center_lon,
        },
        maxDistanceKm: PotatoMesh::Config.max_distance_km,
        instanceDomain: app_constant(:INSTANCE_DOMAIN),
        privateMode: private_mode?,
      },
    }
    payload.to_json
  end
  # Federation discovery document, served from a cached file on disk.
  app.get "/.well-known/potato-mesh" do
    refresh_well_known_document_if_stale
    cache_control :public, max_age: PotatoMesh::Config.well_known_refresh_interval
    content_type :json
    send_file well_known_file_path
  end
  app.get "/api/nodes" do
    content_type :json
    limit = [params["limit"]&.to_i || 200, 1000].min
    query_nodes(limit).to_json
  end
  app.get "/api/nodes/:id" do
    content_type :json
    node_ref = string_or_nil(params["id"])
    halt 400, { error: "missing node id" }.to_json unless node_ref
    limit = [params["limit"]&.to_i || 200, 1000].min
    rows = query_nodes(limit, node_ref: node_ref)
    halt 404, { error: "not found" }.to_json if rows.empty?
    rows.first.to_json
  end
  app.get "/api/messages" do
    content_type :json
    limit = [params["limit"]&.to_i || 200, 1000].min
    include_encrypted = coerce_boolean(params["encrypted"]) || false
    query_messages(limit, include_encrypted: include_encrypted).to_json
  end
  app.get "/api/messages/:id" do
    content_type :json
    node_ref = string_or_nil(params["id"])
    halt 400, { error: "missing node id" }.to_json unless node_ref
    limit = [params["limit"]&.to_i || 200, 1000].min
    include_encrypted = coerce_boolean(params["encrypted"]) || false
    query_messages(limit, node_ref: node_ref, include_encrypted: include_encrypted).to_json
  end
  app.get "/api/positions" do
    content_type :json
    limit = [params["limit"]&.to_i || 200, 1000].min
    query_positions(limit).to_json
  end
  app.get "/api/positions/:id" do
    content_type :json
    node_ref = string_or_nil(params["id"])
    halt 400, { error: "missing node id" }.to_json unless node_ref
    limit = [params["limit"]&.to_i || 200, 1000].min
    query_positions(limit, node_ref: node_ref).to_json
  end
  app.get "/api/neighbors" do
    content_type :json
    limit = [params["limit"]&.to_i || 200, 1000].min
    query_neighbors(limit).to_json
  end
  app.get "/api/neighbors/:id" do
    content_type :json
    node_ref = string_or_nil(params["id"])
    halt 400, { error: "missing node id" }.to_json unless node_ref
    limit = [params["limit"]&.to_i || 200, 1000].min
    query_neighbors(limit, node_ref: node_ref).to_json
  end
  app.get "/api/telemetry" do
    content_type :json
    limit = [params["limit"]&.to_i || 200, 1000].min
    query_telemetry(limit).to_json
  end
  app.get "/api/telemetry/:id" do
    content_type :json
    node_ref = string_or_nil(params["id"])
    halt 400, { error: "missing node id" }.to_json unless node_ref
    limit = [params["limit"]&.to_i || 200, 1000].min
    query_telemetry(limit, node_ref: node_ref).to_json
  end
  app.get "/api/instances" do
    # Prevent the federation catalog from being exposed when federation is disabled.
    halt 404 unless federation_enabled?
    content_type :json
    ensure_self_instance_record!
    payload = load_instances_for_api
    JSON.generate(payload)
  end
end
end
end
end
end
@@ -0,0 +1,328 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
module App
module Routes
module Ingest
# Register ingest endpoints used by the Python collector to persist
# nodes, messages, and federation announcements.
#
# @param app [Sinatra::Base] application instance receiving the routes.
# @return [void]
def self.registered(app)
app.post "/api/nodes" do
require_token!
content_type :json
begin
data = JSON.parse(read_json_body)
rescue JSON::ParserError
halt 400, { error: "invalid JSON" }.to_json
end
unless data.is_a?(Hash)
halt 400, { error: "invalid payload" }.to_json
end
halt 400, { error: "too many nodes" }.to_json if data.size > 1000
db = open_database
data.each do |node_id, node|
upsert_node(db, node_id, node)
end
PotatoMesh::App::Prometheus::NODES_GAUGE.set(query_nodes(1000).length)
{ status: "ok" }.to_json
ensure
db&.close
end
app.post "/api/messages" do
require_token!
content_type :json
begin
data = JSON.parse(read_json_body)
rescue JSON::ParserError
halt 400, { error: "invalid JSON" }.to_json
end
messages = data.is_a?(Array) ? data : [data]
halt 400, { error: "too many messages" }.to_json if messages.size > 1000
db = open_database
messages.each do |msg|
insert_message(db, msg)
end
{ status: "ok" }.to_json
ensure
db&.close
end
app.post "/api/instances" do
content_type :json
begin
payload = JSON.parse(read_json_body)
rescue JSON::ParserError => e
warn_log(
"Instance registration rejected",
context: "ingest.register",
reason: "invalid JSON",
error_class: e.class.name,
error_message: e.message,
)
halt 400, { error: "invalid JSON" }.to_json
end
unless payload.is_a?(Hash)
warn_log(
"Instance registration rejected",
context: "ingest.register",
reason: "payload is not an object",
)
halt 400, { error: "invalid payload" }.to_json
end
id = string_or_nil(payload["id"]) || string_or_nil(payload["instanceId"])
raw_domain_input = payload["domain"]
raw_domain = sanitize_instance_domain(raw_domain_input, downcase: false)
normalized_domain = raw_domain && sanitize_instance_domain(raw_domain)
unless raw_domain && normalized_domain
warn_log(
"Instance registration rejected",
context: "ingest.register",
domain: string_or_nil(raw_domain_input),
reason: "invalid domain",
)
halt 400, { error: "invalid domain" }.to_json
end
pubkey = sanitize_public_key_pem(payload["pubkey"])
name = string_or_nil(payload["name"])
version = string_or_nil(payload["version"])
channel = string_or_nil(payload["channel"])
frequency = string_or_nil(payload["frequency"])
latitude = coerce_float(payload["latitude"])
longitude = coerce_float(payload["longitude"])
last_update_time = coerce_integer(payload["last_update_time"] || payload["lastUpdateTime"])
raw_private = payload.key?("isPrivate") ? payload["isPrivate"] : payload["is_private"]
is_private = coerce_boolean(raw_private)
signature = string_or_nil(payload["signature"])
attributes = {
id: id,
domain: normalized_domain,
pubkey: pubkey,
name: name,
version: version,
channel: channel,
frequency: frequency,
latitude: latitude,
longitude: longitude,
last_update_time: last_update_time,
is_private: is_private,
}
if [attributes[:id], attributes[:domain], attributes[:pubkey], signature, attributes[:last_update_time]].any?(&:nil?)
warn_log(
"Instance registration rejected",
context: "ingest.register",
reason: "missing required fields",
)
halt 400, { error: "missing required fields" }.to_json
end
signature_valid = verify_instance_signature(attributes, signature, attributes[:pubkey])
# Some remote peers sign payloads using a canonicalised lowercase
# domain while still sending a mixed-case domain. Retry signature
# verification with the original casing when the first attempt
# fails to maximise interoperability.
if !signature_valid && raw_domain && normalized_domain && raw_domain.casecmp?(normalized_domain) && raw_domain != normalized_domain
alternate_attributes = attributes.merge(domain: raw_domain)
signature_valid = verify_instance_signature(alternate_attributes, signature, attributes[:pubkey])
end
unless signature_valid
warn_log(
"Instance registration rejected",
context: "ingest.register",
domain: raw_domain || attributes[:domain],
reason: "invalid signature",
)
halt 400, { error: "invalid signature" }.to_json
end
if attributes[:is_private]
warn_log(
"Instance registration rejected",
context: "ingest.register",
domain: attributes[:domain],
reason: "instance marked private",
)
halt 403, { error: "instance marked private" }.to_json
end
ip = ip_from_domain(attributes[:domain])
if ip && restricted_ip_address?(ip)
warn_log(
"Instance registration rejected",
context: "ingest.register",
domain: attributes[:domain],
reason: "restricted IP address",
resolved_ip: ip,
)
halt 400, { error: "restricted domain" }.to_json
end
begin
resolve_remote_ip_addresses(URI.parse("https://#{attributes[:domain]}"))
rescue ArgumentError => e
warn_log(
"Instance registration rejected",
context: "ingest.register",
domain: attributes[:domain],
reason: "restricted domain",
error_message: e.message,
)
halt 400, { error: "restricted domain" }.to_json
rescue SocketError
# DNS lookups that fail to resolve are handled later when the
# registration flow attempts to contact the remote instance.
end
well_known, well_known_meta = fetch_instance_json(attributes[:domain], "/.well-known/potato-mesh")
unless well_known
details_list = Array(well_known_meta).map(&:to_s)
details = details_list.empty? ? "no response" : details_list.join("; ")
warn_log(
"Instance registration rejected",
context: "ingest.register",
domain: attributes[:domain],
reason: "failed to fetch well-known document",
details: details,
)
halt 400, { error: "failed to verify well-known document" }.to_json
end
valid, reason = validate_well_known_document(well_known, attributes[:domain], attributes[:pubkey])
unless valid
warn_log(
"Instance registration rejected",
context: "ingest.register",
domain: attributes[:domain],
reason: reason || "invalid well-known document",
)
halt 400, { error: reason || "invalid well-known document" }.to_json
end
remote_nodes, node_source = fetch_instance_json(attributes[:domain], "/api/nodes")
unless remote_nodes
details_list = Array(node_source).map(&:to_s)
details = details_list.empty? ? "no response" : details_list.join("; ")
warn_log(
"Instance registration rejected",
context: "ingest.register",
domain: attributes[:domain],
reason: "failed to fetch nodes",
details: details,
)
halt 400, { error: "failed to fetch nodes" }.to_json
end
fresh, freshness_reason = validate_remote_nodes(remote_nodes)
unless fresh
warn_log(
"Instance registration rejected",
context: "ingest.register",
domain: attributes[:domain],
reason: freshness_reason || "stale node data",
)
halt 400, { error: freshness_reason || "stale node data" }.to_json
end
db = open_database
upsert_instance_record(db, attributes, signature)
enqueued = enqueue_federation_crawl(
attributes[:domain],
per_response_limit: PotatoMesh::Config.federation_max_instances_per_response,
overall_limit: PotatoMesh::Config.federation_max_domains_per_crawl,
)
debug_log(
"Registered remote instance",
context: "ingest.register",
domain: attributes[:domain],
instance_id: attributes[:id],
crawl_enqueued: enqueued,
)
status 201
{ status: "registered" }.to_json
ensure
db&.close
end
app.post "/api/positions" do
require_token!
content_type :json
begin
data = JSON.parse(read_json_body)
rescue JSON::ParserError
halt 400, { error: "invalid JSON" }.to_json
end
positions = data.is_a?(Array) ? data : [data]
halt 400, { error: "too many positions" }.to_json if positions.size > 1000
db = open_database
positions.each do |pos|
insert_position(db, pos)
end
{ status: "ok" }.to_json
ensure
db&.close
end
app.post "/api/neighbors" do
require_token!
content_type :json
begin
data = JSON.parse(read_json_body)
rescue JSON::ParserError
halt 400, { error: "invalid JSON" }.to_json
end
neighbor_payloads = data.is_a?(Array) ? data : [data]
halt 400, { error: "too many neighbor packets" }.to_json if neighbor_payloads.size > 1000
db = open_database
neighbor_payloads.each do |packet|
insert_neighbors(db, packet)
end
{ status: "ok" }.to_json
ensure
db&.close
end
app.post "/api/telemetry" do
require_token!
content_type :json
begin
data = JSON.parse(read_json_body)
rescue JSON::ParserError
halt 400, { error: "invalid JSON" }.to_json
end
telemetry_packets = data.is_a?(Array) ? data : [data]
halt 400, { error: "too many telemetry packets" }.to_json if telemetry_packets.size > 1000
db = open_database
telemetry_packets.each do |packet|
insert_telemetry(db, packet)
end
{ status: "ok" }.to_json
ensure
db&.close
end
end
end
end
end
end
@@ -0,0 +1,225 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
  module App
    module Routes
      module Root
        module Helpers
          # Determine the initial theme from the request cookie and persist
          # sanitised values back to the client to avoid invalid states.
          #
          # @return [String] normalised theme value ('dark' or 'light').
          def resolve_initial_theme
            raw_theme = request.cookies["theme"]
            # Unknown or missing cookie values fall back to the dark theme.
            theme = %w[dark light].include?(raw_theme) ? raw_theme : "dark"
            if raw_theme != theme
              # Re-issue the cookie (one-week lifetime) so later requests
              # carry a valid value.
              response.set_cookie(
                "theme",
                value: theme,
                path: "/",
                max_age: 60 * 60 * 24 * 7,
                same_site: :lax,
              )
            end
            theme
          end

          # Render a dashboard-oriented ERB template within the shared layout.
          #
          # @param template [Symbol] identifier for the ERB template.
          # @param view_mode [Symbol, String] logical view identifier for CSS hooks.
          # @param extra_locals [Hash] additional locals merged into the rendering context.
          # @return [String] rendered ERB output.
          def render_root_view(template, view_mode: :dashboard, extra_locals: {})
            meta = meta_configuration
            config = frontend_app_config
            theme = resolve_initial_theme
            view_mode_sym = view_mode.respond_to?(:to_sym) ? view_mode.to_sym : view_mode
            base_locals = {
              site_name: meta[:name],
              meta_title: meta[:title],
              meta_name: meta[:name],
              meta_description: meta[:description],
              channel: sanitized_channel,
              frequency: sanitized_frequency,
              map_center_lat: PotatoMesh::Config.map_center_lat,
              map_center_lon: PotatoMesh::Config.map_center_lon,
              max_distance_km: PotatoMesh::Config.max_distance_km,
              contact_link: sanitized_contact_link,
              contact_link_url: sanitized_contact_link_url,
              version: display_version(app_constant(:APP_VERSION)),
              private_mode: private_mode?,
              federation_enabled: federation_enabled?,
              refresh_interval_seconds: PotatoMesh::Config.refresh_interval_seconds,
              app_config_json: JSON.generate(config),
              initial_theme: theme,
              current_view_mode: view_mode_sym,
              map_zoom: PotatoMesh::Config.map_zoom,
            }
            # Caller-supplied locals win over the defaults on key collisions.
            sanitized_locals = extra_locals.is_a?(Hash) ? extra_locals : {}
            merged_locals = base_locals.merge(sanitized_locals)
            erb template, layout: :"layouts/app", locals: merged_locals
          end

          # Remove keys with +nil+ values from the provided hash, returning a
          # shallow copy. Hash#compact is only available in newer Ruby
          # versions; this helper keeps behaviour consistent across supported
          # releases.
          #
          # @param value [Hash, nil] collection subject to filtering.
          # @return [Hash] hash excluding +nil+ values.
          def reject_nil_values(value)
            return {} unless value.is_a?(Hash)
            value.each_with_object({}) do |(key, entry), memo|
              memo[key] = entry unless entry.nil?
            end
          end

          # Assemble the payload embedded into the node detail view. The
          # payload provides a canonical identifier alongside any cached node,
          # telemetry, or position rows that may already exist in the
          # database. When no persisted data is available the method returns
          # +nil+ so the caller can surface a 404 error.
          #
          # @param node_ref [Object] raw node identifier from the request.
          # @return [Hash, nil] structured node reference payload or nil when
          #   the node cannot be located.
          def build_node_detail_reference(node_ref)
            # tokens appears to be [canonical_id, numeric_id, short_id] when
            # canonicalisation succeeds — see canonical_node_parts; verify.
            tokens = canonical_node_parts(node_ref)
            search_ref = tokens ? tokens.first : node_ref
            node_row = query_nodes(1, node_ref: search_ref).first
            telemetry_row = query_telemetry(1, node_ref: search_ref).first
            position_row = query_positions(1, node_ref: search_ref).first
            candidates = [node_row, telemetry_row, position_row].compact
            return nil if candidates.empty?
            # Prefer identifiers from persisted rows (node, then telemetry,
            # then position), falling back to tokens from the raw reference.
            canonical_id = string_or_nil(node_row&.fetch("node_id", nil))
            canonical_id ||= string_or_nil(telemetry_row&.fetch("node_id", nil))
            canonical_id ||= string_or_nil(position_row&.fetch("node_id", nil))
            canonical_id ||= string_or_nil(tokens&.fetch(0, nil))
            if canonical_id
              # Normalise to the "!"-prefixed form.
              canonical_id = canonical_id.start_with?("!") ? canonical_id : "!#{canonical_id}"
            end
            return nil unless canonical_id
            numeric_id = coerce_integer(node_row&.fetch("num", nil))
            numeric_id ||= coerce_integer(telemetry_row&.fetch("node_num", nil))
            numeric_id ||= coerce_integer(position_row&.fetch("node_num", nil))
            numeric_id ||= tokens&.fetch(1, nil)
            short_id = string_or_nil(node_row&.fetch("short_name", nil))
            short_id ||= string_or_nil(telemetry_row&.fetch("short_name", nil))
            short_id ||= string_or_nil(position_row&.fetch("short_name", nil))
            short_id ||= tokens&.fetch(2, nil)
            fallback_row = node_row || telemetry_row || position_row
            fallback = fallback_row ? compact_api_row(fallback_row) : nil
            telemetry = telemetry_row ? compact_api_row(telemetry_row) : nil
            position = position_row ? compact_api_row(position_row) : nil
            {
              "nodeId" => canonical_id,
              "nodeNum" => numeric_id,
              "shortId" => short_id,
              "fallback" => fallback,
              "telemetry" => telemetry,
              "position" => position,
            }
          end
        end

        # Register the HTML-serving routes plus favicon, logo, and Prometheus
        # metrics endpoints.
        #
        # @param app [Sinatra::Base] application instance receiving the routes.
        # @return [void]
        def self.registered(app)
          app.helpers Helpers

          app.get "/favicon.ico" do
            cache_control :public, max_age: PotatoMesh::Config.week_seconds
            ico_path = File.join(settings.public_folder, "favicon.ico")
            if File.file?(ico_path)
              send_file ico_path, type: "image/x-icon"
            else
              # Fall back to the SVG logo when no ICO file is shipped.
              send_file File.join(settings.public_folder, "potatomesh-logo.svg"), type: "image/svg+xml"
            end
          end

          app.get "/potatomesh-logo.svg" do
            path = File.expand_path("potatomesh-logo.svg", settings.public_folder)
            # Diagnostic logging for deployments where the asset goes missing.
            settings.logger&.info("logo_path=#{path} exist=#{File.exist?(path)} file=#{File.file?(path)}")
            halt 404, "Not Found" unless File.exist?(path) && File.readable?(path)
            content_type "image/svg+xml"
            last_modified File.mtime(path)
            cache_control :public, max_age: 3600
            send_file path
          end

          app.get "/" do
            render_root_view(:index, view_mode: :dashboard)
          end

          # The regex routes below accept an optional trailing slash.
          app.get %r{/map/?} do
            render_root_view(:map, view_mode: :map)
          end

          app.get %r{/chat/?} do
            render_root_view(:chat, view_mode: :chat)
          end

          app.get %r{/charts/?} do
            render_root_view(:charts, view_mode: :charts)
          end

          # Registered before %r{/nodes/?} so "/nodes/<id>" resolves to the
          # detail page rather than the node list.
          app.get "/nodes/:id" do
            node_ref = params.fetch("id", nil)
            reference_payload = build_node_detail_reference(node_ref)
            halt 404, "Not Found" unless reference_payload
            fallback = reference_payload["fallback"] || {}
            short_name = string_or_nil(fallback["short_name"]) || reference_payload["shortId"]
            long_name = string_or_nil(fallback["long_name"])
            role = string_or_nil(fallback["role"])
            canonical_id = string_or_nil(reference_payload["nodeId"])
            render_root_view(
              :node_detail,
              view_mode: :node_detail,
              extra_locals: {
                node_reference_json: JSON.generate(reject_nil_values(reference_payload)),
                node_page_short_name: short_name,
                node_page_long_name: long_name,
                node_page_role: role,
                node_page_identifier: canonical_id,
              },
            )
          end

          app.get %r{/nodes/?} do
            render_root_view(:nodes, view_mode: :nodes)
          end

          app.get "/metrics" do
            # Prometheus text exposition format for the default registry.
            content_type ::Prometheus::Client::Formats::Text::CONTENT_TYPE
            ::Prometheus::Client::Formats::Text.marshal(::Prometheus::Client.registry)
          end
        end
      end
    end
  end
end
@@ -0,0 +1,214 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
  module App
    # WorkerPool executes submitted blocks using a bounded set of Ruby threads.
    #
    # The pool enforces an upper bound on queued tasks, surfaces errors raised
    # by jobs, and supports graceful shutdown during application teardown.
    class WorkerPool
      # Raised when the worker pool queue has reached its configured capacity.
      class QueueFullError < StandardError; end

      # Raised when a task fails to complete before the requested timeout.
      class TaskTimeoutError < StandardError; end

      # Raised when scheduling occurs after the pool has been shut down.
      class ShutdownError < StandardError; end

      # Internal structure responsible for coordinating task completion.
      class Task
        # @return [Object, nil] value produced by the task block when available.
        attr_reader :value

        # @return [StandardError, nil] error raised by the task block when set.
        attr_reader :error

        def initialize
          @mutex = Mutex.new
          @condition = ConditionVariable.new
          @complete = false
          @value = nil
          @error = nil
        end

        # Mark the task as completed successfully.
        #
        # @param result [Object] value produced by the job.
        # @return [void]
        def fulfill(result)
          @mutex.synchronize do
            return if @complete
            @complete = true
            @value = result
            @condition.broadcast
          end
        end

        # Mark the task as failed with the provided error.
        #
        # @param failure [StandardError] exception raised while executing the job.
        # @return [void]
        def reject(failure)
          @mutex.synchronize do
            return if @complete
            @complete = true
            @error = failure
            @condition.broadcast
          end
        end

        # Wait for the task to complete, raising any stored failure.
        #
        # @param timeout [Numeric, nil] optional timeout in seconds.
        # @return [Object] the value produced by the job when successful.
        # @raise [TaskTimeoutError] when the timeout elapses prior to completion.
        # @raise [StandardError] when the job raised an exception.
        def wait(timeout: nil)
          deadline = timeout && monotonic_now + timeout
          @mutex.synchronize do
            until @complete
              if deadline
                remaining = deadline - monotonic_now
                raise TaskTimeoutError, "task deadline exceeded" if remaining <= 0
                @condition.wait(@mutex, remaining)
              else
                @condition.wait(@mutex)
              end
            end
            raise @error if @error
            @value
          end
        end

        # Check whether the task has finished executing.
        #
        # @return [Boolean] true when the task is complete.
        def complete?
          @mutex.synchronize { @complete }
        end

        private

        # Monotonic clock read used for timeout arithmetic; immune to
        # wall-clock adjustments.
        def monotonic_now
          Process.clock_gettime(Process::CLOCK_MONOTONIC)
        end
      end

      # Sentinel pushed once per worker to request an orderly exit.
      STOP_SIGNAL = Object.new

      # @return [Array<Thread>] threads created to service the pool.
      attr_reader :threads

      # Initialize a worker pool using the supplied configuration.
      #
      # @param size [Integer] number of worker threads to spawn.
      # @param max_queue [Integer, nil] optional upper bound on queued jobs.
      # @param name [String] prefix assigned to worker thread names.
      def initialize(size:, max_queue: nil, name: "worker-pool")
        raise ArgumentError, "size must be positive" unless size.is_a?(Integer) && size.positive?
        @name = name
        @queue = max_queue ? SizedQueue.new(max_queue) : Queue.new
        @threads = []
        @stopped = false
        @mutex = Mutex.new
        spawn_workers(size)
      end

      # Determine whether the worker pool is still accepting work.
      #
      # @return [Boolean] true when the pool remains active.
      def alive?
        @mutex.synchronize { !@stopped }
      end

      # Submit a block of work for asynchronous execution.
      #
      # @yieldreturn [Object] result produced by the job block.
      # @return [Task] task tracking the asynchronous execution.
      # @raise [QueueFullError] when the queue cannot accept additional work.
      # @raise [ShutdownError] when the pool is no longer active.
      def schedule(&block)
        raise ArgumentError, "block required" unless block
        task = Task.new
        @mutex.synchronize do
          raise ShutdownError, "worker pool has been shut down" if @stopped
          begin
            # BUG FIX: SizedQueue#push accepts a non_block flag so a full
            # queue raises ThreadError instead of blocking, but
            # Thread::Queue#push is unary — passing the flag to an unbounded
            # queue raised ArgumentError on every schedule call. Only pass
            # the flag to the bounded variant.
            if @queue.is_a?(SizedQueue)
              @queue.push([task, block], true)
            else
              @queue.push([task, block])
            end
          rescue ThreadError => e
            raise QueueFullError, e.message
          end
        end
        task
      end

      # Stop accepting work and wait for the worker threads to finish.
      #
      # @param timeout [Numeric, nil] seconds to wait for each worker to exit.
      # @return [void]
      def shutdown(timeout: nil)
        threads = nil
        @mutex.synchronize do
          return if @stopped
          @stopped = true
          threads = @threads.dup
        end
        # One sentinel per worker guarantees every thread sees a stop request.
        threads.each { @queue << STOP_SIGNAL }
        threads.each { |thread| thread.join(timeout) }
      end

      private

      # Spawn +size+ worker threads that loop popping [task, block] pairs
      # until they receive the STOP_SIGNAL sentinel.
      def spawn_workers(size)
        size.times do |index|
          worker = Thread.new do
            Thread.current.name = "#{@name}-#{index}" if Thread.current.respond_to?(:name=)
            Thread.current.report_on_exception = false if Thread.current.respond_to?(:report_on_exception=)
            loop do
              task, block = @queue.pop
              break if task.equal?(STOP_SIGNAL)
              begin
                result = block.call
                task.fulfill(result)
              rescue StandardError => e
                task.reject(e)
              end
            end
          end
          @threads << worker
        end
      end
    end
  end
end
+611
View File
@@ -0,0 +1,611 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
module PotatoMesh
# Configuration wrapper responsible for exposing ENV backed settings used by
# the web and data ingestion services.
module Config
module_function

# SQLite busy-handling defaults.
DEFAULT_DB_BUSY_TIMEOUT_MS = 5_000
DEFAULT_DB_BUSY_MAX_RETRIES = 5
DEFAULT_DB_BUSY_RETRY_DELAY = 0.05
# HTTP request-body ceiling in bytes (1 MiB).
DEFAULT_MAX_JSON_BODY_BYTES = 1_048_576
# Frontend polling cadence in seconds.
DEFAULT_REFRESH_INTERVAL_SECONDS = 60
# CSS filters applied to map tiles per theme.
DEFAULT_TILE_FILTER_LIGHT = "grayscale(1) saturate(0) brightness(0.92) contrast(1.05)"
DEFAULT_TILE_FILTER_DARK = "grayscale(1) invert(1) brightness(0.9) contrast(1.08)"
# Default map centre coordinates and the derived "lat,lon" string.
DEFAULT_MAP_CENTER_LAT = 38.761944
DEFAULT_MAP_CENTER_LON = -27.090833
DEFAULT_MAP_CENTER = "#{DEFAULT_MAP_CENTER_LAT},#{DEFAULT_MAP_CENTER_LON}"
# Radio/channel presentation defaults.
DEFAULT_CHANNEL = "#LongFast"
DEFAULT_FREQUENCY = "915MHz"
DEFAULT_CONTACT_LINK = "#potatomesh:dod.ngo"
DEFAULT_MAX_DISTANCE_KM = 42.0
# Federation HTTP and worker-pool defaults.
DEFAULT_REMOTE_INSTANCE_CONNECT_TIMEOUT = 15
DEFAULT_REMOTE_INSTANCE_READ_TIMEOUT = 60
DEFAULT_FEDERATION_MAX_INSTANCES_PER_RESPONSE = 64
DEFAULT_FEDERATION_MAX_DOMAINS_PER_CRAWL = 256
DEFAULT_FEDERATION_WORKER_POOL_SIZE = 4
DEFAULT_FEDERATION_WORKER_QUEUE_CAPACITY = 128
DEFAULT_FEDERATION_TASK_TIMEOUT_SECONDS = 120
DEFAULT_INITIAL_FEDERATION_DELAY_SECONDS = 2
# API token required for authenticated ingest requests.
#
# @return [String, nil] configured token, or nil when unset.
def api_token
  fetch_string("API_TOKEN", nil)
end

# Explicit instance domain override supplied via the environment.
#
# @return [String, nil] hostname or host:port pair, or nil when unset.
def instance_domain
  fetch_string("INSTANCE_DOMAIN", nil)
end

# Whether the deployment runs in private mode.
#
# @return [Boolean] true when the PRIVATE variable is exactly "1".
def private_mode_enabled?
  ENV.fetch("PRIVATE", "0").to_s.strip == "1"
end

# Whether federation features are permitted. Private deployments never
# federate, regardless of the FEDERATION variable, so they neither
# announce themselves nor crawl peers.
#
# @return [Boolean] true when federation should remain active.
def federation_enabled?
  return false if private_mode_enabled?
  ENV.fetch("FEDERATION", "1").to_s.strip != "0"
end
# Absolute path of the web application root (two directories above this
# file), memoised after the first call.
#
# @return [String] filesystem path of the web folder.
def web_root
  @web_root ||= File.expand_path("../..", __dir__)
end

# Git repository root, one level above the web folder; memoised.
#
# @return [String] path to the repository root.
def repo_root
  @repo_root ||= File.expand_path("..", web_root)
end

# PotatoMesh data directory resolved under the XDG data home.
#
# @return [String] absolute data directory path.
def data_directory
  File.join(resolve_xdg_home("XDG_DATA_HOME", %w[.local share]), "potato-mesh")
end

# PotatoMesh configuration directory resolved under the XDG config home.
#
# @return [String] absolute configuration directory path.
def config_directory
  File.join(resolve_xdg_home("XDG_CONFIG_HOME", %w[.config]), "potato-mesh")
end

# Default SQLite database location inside the data directory.
#
# @return [String] absolute path to the managed mesh.db file.
def default_db_path
  File.join(data_directory, "mesh.db")
end

# Database path bundled alongside the repository in older releases.
#
# @return [String] absolute path of the repository-managed database file.
def legacy_db_path
  File.expand_path("../data/mesh.db", web_root)
end

# Active database location; currently always the managed default path.
#
# @return [String] absolute path to the database file.
def db_path
  default_db_path
end
# SQLite busy timeout applied to database connections.
#
# @return [Integer] timeout in milliseconds.
def db_busy_timeout_ms
  DEFAULT_DB_BUSY_TIMEOUT_MS
end

# Upper bound on retries after SQLITE_BUSY errors.
#
# @return [Integer] maximum retry attempts.
def db_busy_max_retries
  DEFAULT_DB_BUSY_MAX_RETRIES
end

# Pause inserted between successive busy retries.
#
# @return [Float] delay in seconds.
def db_busy_retry_delay
  DEFAULT_DB_BUSY_RETRY_DELAY
end

# Number of seconds in one week.
#
# @return [Integer] seconds in seven days.
def week_seconds
  60 * 60 * 24 * 7
end

# Default ceiling for accepted JSON request bodies.
#
# @return [Integer] byte limit for HTTP request bodies.
def default_max_json_body_bytes
  DEFAULT_MAX_JSON_BODY_BYTES
end

# Effective JSON body limit; currently always the default.
#
# @return [Integer] configured byte limit.
def max_json_body_bytes
  default_max_json_body_bytes
end

# Version string reported when git metadata is unavailable.
#
# @return [String] semantic version identifier.
def version_fallback
  "0.5.5"
end

# Default cadence for frontend polling routines.
#
# @return [Integer] refresh period in seconds.
def default_refresh_interval_seconds
  DEFAULT_REFRESH_INTERVAL_SECONDS
end

# Effective polling cadence; currently always the default.
#
# @return [Integer] seconds between refreshes.
def refresh_interval_seconds
  default_refresh_interval_seconds
end
# CSS filter applied to map tiles in the light theme.
#
# @return [String] CSS filter string.
def map_tile_filter_light
  DEFAULT_TILE_FILTER_LIGHT
end

# CSS filter applied to map tiles in the dark theme.
#
# @return [String] CSS filter string.
def map_tile_filter_dark
  DEFAULT_TILE_FILTER_DARK
end

# Frozen theme-to-filter mapping handed to templates.
#
# @return [Hash{Symbol=>String}] filters keyed by :light and :dark.
def tile_filters
  filters = { light: map_tile_filter_light, dark: map_tile_filter_dark }
  filters.freeze
end

# Raw PROM_REPORT_IDS environment value.
#
# @return [String] comma separated report identifiers, possibly empty.
def prom_report_ids
  fetch_string("PROM_REPORT_IDS", "")
end

# Prometheus report identifiers with surrounding whitespace stripped and
# blank entries removed (duplicates are kept as supplied).
#
# @return [Array<String>] cleaned identifier list.
def prom_report_id_list
  prom_report_ids.split(",").each_with_object([]) do |part, acc|
    trimmed = part.strip
    acc << trimmed unless trimmed.empty?
  end
end
# Location of the PEM private key used for instance signing.
#
# @return [String] absolute keyfile path.
def keyfile_path
  File.join(config_directory, "keyfile")
end

# Relative path under which the well-known document is exposed.
#
# @return [String] path within the public directory.
def well_known_relative_path
  File.join(".well-known", "potato-mesh")
end

# Staging directory for /.well-known artifacts.
#
# @return [String] absolute storage path.
def well_known_storage_root
  File.join(config_directory, "well-known")
end

# Repository-bundled configuration directory from older releases.
#
# @return [String] absolute legacy configuration path.
def legacy_config_directory
  File.join(web_root, ".config")
end

# First legacy keyfile that exists on disk, falling back to the primary
# candidate so migration code always has a concrete path to inspect.
#
# @return [String] absolute legacy keyfile path.
def legacy_keyfile_path
  candidates = legacy_keyfile_candidates
  candidates.find { |path| File.exist?(path) } || candidates.first
end

# All keyfile locations used before the switch to XDG directories.
#
# @return [Array<String>] ordered, de-duplicated absolute paths.
def legacy_keyfile_candidates
  %w[.config config].flat_map do |base|
    [nil, "potato-mesh"].map do |sub|
      File.expand_path(File.join(*[web_root, base, sub, "keyfile"].compact))
    end
  end.uniq
end

# Legacy location for well-known assets inside the public folder.
#
# @return [String] absolute legacy output directory.
def legacy_public_well_known_path
  File.join(web_root, "public", well_known_relative_path)
end

# All well-known document locations used before the XDG migration.
#
# @return [Array<String>] ordered, de-duplicated absolute paths.
def legacy_well_known_candidates
  filename = File.basename(well_known_relative_path)
  %w[.config config].flat_map do |base|
    [nil, "potato-mesh"].flat_map do |sub|
      %w[well-known .well-known].map do |dir|
        File.expand_path(File.join(*[web_root, base, sub, dir, filename].compact))
      end
    end
  end.uniq
end

# How often well-known documents are re-read from disk.
#
# @return [Integer] refresh interval in seconds (one day).
def well_known_refresh_interval
  24 * 60 * 60
end
# Cryptographic algorithm identifier for HTTP signatures.
#
# @return [String] RFC-compliant algorithm label.
def instance_signature_algorithm
"rsa-sha256"
end
# Connection timeout used when establishing federation HTTP sockets.
#
# The timeout can be customised with the REMOTE_INSTANCE_CONNECT_TIMEOUT
# environment variable to accommodate slower or distant federation peers.
#
# @return [Integer] connect timeout in seconds.
def remote_instance_http_timeout
fetch_positive_integer(
"REMOTE_INSTANCE_CONNECT_TIMEOUT",
DEFAULT_REMOTE_INSTANCE_CONNECT_TIMEOUT,
)
end
# Read timeout used when streaming federation HTTP responses.
#
# The timeout can be customised with the REMOTE_INSTANCE_READ_TIMEOUT
# environment variable to accommodate slower or distant federation peers.
#
# @return [Integer] read timeout in seconds.
def remote_instance_read_timeout
fetch_positive_integer(
"REMOTE_INSTANCE_READ_TIMEOUT",
DEFAULT_REMOTE_INSTANCE_READ_TIMEOUT,
)
end
# Limit the number of remote instances processed from a single response.
#
# @return [Integer] maximum entries processed per /api/instances payload.
def federation_max_instances_per_response
fetch_positive_integer(
"FEDERATION_MAX_INSTANCES_PER_RESPONSE",
DEFAULT_FEDERATION_MAX_INSTANCES_PER_RESPONSE,
)
end
# Limit the total number of distinct domains crawled during one ingestion.
#
# @return [Integer] maximum unique domains visited per crawl.
def federation_max_domains_per_crawl
fetch_positive_integer(
"FEDERATION_MAX_DOMAINS_PER_CRAWL",
DEFAULT_FEDERATION_MAX_DOMAINS_PER_CRAWL,
)
end
# Determine the worker pool size used for federation tasks.
#
# @return [Integer] number of worker threads dedicated to federation jobs.
def federation_worker_pool_size
fetch_positive_integer(
"FEDERATION_WORKERS",
DEFAULT_FEDERATION_WORKER_POOL_SIZE,
)
end
# Determine the queue capacity for pending federation jobs.
#
# @return [Integer] maximum number of queued tasks before rejecting work.
def federation_worker_queue_capacity
fetch_positive_integer(
"FEDERATION_WORK_QUEUE",
DEFAULT_FEDERATION_WORKER_QUEUE_CAPACITY,
) # queue bound beyond which federation work is rejected
end
# Determine the timeout applied when awaiting federation worker tasks.
#
# @return [Integer] seconds to wait for asynchronous jobs to complete.
def federation_task_timeout_seconds
fetch_positive_integer(
"FEDERATION_TASK_TIMEOUT",
DEFAULT_FEDERATION_TASK_TIMEOUT_SECONDS,
) # default applies when ENV is unset, blank, non-numeric, or non-positive
end
# Maximum acceptable age for remote node data.
#
# @return [Integer] seconds before remote nodes are considered stale.
def remote_instance_max_node_age
86_400 # 24 hours in seconds; fixed, not configurable via ENV
end
# Minimum node count expected from a remote instance before storing.
#
# @return [Integer] node threshold for remote ingestion.
def remote_instance_min_node_count
10 # minimum node count before a remote instance is worth storing
end
# Domains used to seed the federation discovery process.
#
# @return [Array<String>] list of default seed domains.
def federation_seed_domains
["potatomesh.net"].freeze # frozen so callers cannot mutate the seed list
end
# Determine how often we broadcast federation announcements.
#
# @return [Integer] number of seconds between announcement cycles.
def federation_announcement_interval
8 * 60 * 60 # 8 hours in seconds; fixed, not configurable via ENV
end
# Determine the grace period before sending the initial federation announcement.
#
# @return [Integer] seconds to wait before the first broadcast cycle.
def initial_federation_delay_seconds
fetch_positive_integer(
"INITIAL_FEDERATION_DELAY_SECONDS",
DEFAULT_INITIAL_FEDERATION_DELAY_SECONDS,
) # default applies when ENV is unset, blank, non-numeric, or non-positive
end
# Retrieve the configured site name for presentation.
#
# @return [String] human friendly site label.
def site_name
fetch_string("SITE_NAME", "PotatoMesh Demo") # blank or unset ENV falls back to the demo label
end
# Retrieve the default radio channel label.
#
# @return [String] channel name from configuration.
def channel
fetch_string("CHANNEL", DEFAULT_CHANNEL) # blank or unset ENV falls back to the default
end
# Retrieve the default radio frequency description.
#
# @return [String] frequency identifier.
def frequency
fetch_string("FREQUENCY", DEFAULT_FREQUENCY) # blank or unset ENV falls back to the default
end
# Parse the configured map centre coordinates.
#
# @return [Hash{Symbol=>Float}] latitude and longitude in decimal degrees.
def map_center
  configured = fetch_string("MAP_CENTER", DEFAULT_MAP_CENTER)
  # Expect "lat,lon"; each half is trimmed before parsing.
  pieces = configured.split(",", 2).map { |piece| piece&.strip }.compact
  # Float(..., exception: false) yields nil for malformed or missing input,
  # so each axis falls back to its default independently.
  latitude = Float(pieces[0], exception: false) || DEFAULT_MAP_CENTER_LAT
  longitude = Float(pieces[1], exception: false) || DEFAULT_MAP_CENTER_LON
  { lat: latitude, lon: longitude }
end
# Map display latitude centre for the frontend map widget.
#
# @return [Float] latitude in decimal degrees.
def map_center_lat
map_center[:lat] # re-parses MAP_CENTER on each call
end
# Map display longitude centre for the frontend map widget.
#
# @return [Float] longitude in decimal degrees.
def map_center_lon
map_center[:lon] # re-parses MAP_CENTER on each call
end
# Retrieve an explicit map zoom override when provided.
#
# @return [Float, nil] positive zoom value or +nil+ when unset.
def map_zoom
  configured = fetch_string("MAP_ZOOM", nil)
  return nil if configured.nil?
  parsed = Float(configured, exception: false)
  # Only strictly positive, parseable values count as an override.
  parsed if parsed&.positive?
end
# Maximum straight-line distance between nodes before relationships are
# hidden.
#
# @return [Float] distance in kilometres.
def max_distance_km
  configured = fetch_string("MAX_DISTANCE", nil)
  candidate = configured && Float(configured, exception: false)
  # Non-positive or unparseable overrides fall back to the default.
  candidate&.positive? ? candidate : DEFAULT_MAX_DISTANCE_KM
end
# Contact link for community discussion.
#
# @return [String] contact URI or identifier.
def contact_link
fetch_string("CONTACT_LINK", DEFAULT_CONTACT_LINK) # blank or unset ENV falls back to the default
end
# Retrieve the configured connection target for the ingestor service.
#
# @return [String] serial device, TCP endpoint, or Bluetooth target.
def connection_target
fetch_string("CONNECTION", "/dev/ttyACM0") # defaults to the first USB ACM serial device
end
# Determine the best URL to represent the configured contact link.
#
# @return [String, nil] absolute URL when derivable, otherwise nil.
def contact_link_url
  candidate = contact_link.to_s.strip
  return nil if candidate.empty?
  # Matrix aliases become matrix.to permalinks; absolute http(s) URLs pass
  # through unchanged; anything else has no derivable URL.
  return "https://matrix.to/#/#{candidate}" if matrix_alias?(candidate)
  candidate if candidate.match?(%r{\Ahttps?://}i)
end
# Check whether a contact link is a Matrix room alias.
#
# @param link [String] candidate link string.
# @return [Boolean] true when the link resembles a Matrix alias.
def matrix_alias?(link)
  # Matrix aliases/room IDs start with '#' or '!' and contain a homeserver
  # part after a colon, with no whitespace anywhere.
  /\A[#!][^\s:]+:[^\s]+\z/.match?(link)
end
# Check whether verbose debugging is enabled for the runtime.
#
# @return [Boolean] true when DEBUG=1.
def debug?
  # Only the exact string "1" enables verbose debugging.
  ENV.fetch("DEBUG", nil) == "1"
end
# Fetch and sanitise string based configuration values.
#
# @param key [String] environment variable to read.
# @param default [String] fallback value when unset or blank.
# @return [String] cleaned configuration string.
def fetch_string(key, default)
  # Treat unset and whitespace-only values identically: both yield the default.
  candidate = ENV[key]&.strip
  candidate.nil? || candidate.empty? ? default : candidate
end
# Fetch and validate integer based configuration flags.
#
# @param key [String] environment variable to read.
# @param default [Integer] fallback value when unset or invalid.
# @return [Integer] positive integer sourced from configuration.
def fetch_positive_integer(key, default)
  candidate = ENV[key]&.strip
  return default if candidate.nil? || candidate.empty?
  # Base-10 only; exception: false returns nil for malformed input instead
  # of raising, which routes to the default below.
  parsed = Integer(candidate, 10, exception: false)
  parsed&.positive? ? parsed : default
end
# Resolve the effective XDG directory honoring environment overrides.
#
# @param env_key [String] name of the environment variable to inspect.
# @param fallback_segments [Array<String>] path segments appended to the user home directory.
# @return [String] absolute base directory referenced by the XDG variable.
def resolve_xdg_home(env_key, fallback_segments)
  override = fetch_string(env_key, nil)
  # fetch_string already collapses blank values to nil; the emptiness check
  # is kept for defensive parity with the original behavior.
  return File.expand_path(override) if override && !override.empty?
  File.expand_path(File.join(safe_home_directory, *fallback_segments))
end
# Retrieve the current user's home directory handling runtime failures.
#
# @return [String] absolute path to the user home or web root fallback.
def safe_home_directory
  # Dir.home raises ArgumentError/RuntimeError when no home can be resolved;
  # in that case (or when it is blank) fall back to the web root.
  home = Dir.home
  home.nil? || home.empty? ? web_root : home
rescue ArgumentError, RuntimeError
  web_root
end
end
end
+101
View File
@@ -0,0 +1,101 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
require "logger"
require "time"
module PotatoMesh
  # Logging utilities shared across the web application.
  module Logging
    # Program name stamped on every log line.
    LOGGER_NAME = "potato-mesh" # :nodoc:

    module_function

    # Build a logger configured with the potato-mesh formatter.
    #
    # @param io [#write] destination for log output.
    # @return [Logger] configured logger instance.
    def build_logger(io = $stdout)
      Logger.new(io).tap do |instance|
        instance.progname = LOGGER_NAME
        instance.formatter = method(:formatter)
      end
    end

    # Format log entries with a consistent structure understood by the UI.
    #
    # @param severity [String] Ruby logger severity constant (e.g., "DEBUG").
    # @param time [Time] timestamp when the log entry was created.
    # @param progname [String, nil] optional application name emitting the log.
    # @param message [String] body of the log message.
    # @return [String] formatted log entry.
    def formatter(severity, time, progname, message)
      stamp = time.utc.iso8601(3)
      # Non-string payloads are rendered via #inspect so structure survives.
      text = message.is_a?(String) ? message : message.inspect
      "[#{stamp}] [#{progname || LOGGER_NAME}] [#{severity.downcase}] #{text}\n"
    end

    # Emit a structured log entry to the provided logger instance.
    #
    # @param logger [Logger, nil] logger to emit against.
    # @param severity [Symbol] target severity (e.g., :debug, :info).
    # @param message [String] primary message text.
    # @param context [String, nil] logical component generating the entry.
    # @param metadata [Hash] supplemental structured data for the log.
    # @return [void]
    def log(logger, severity, message, context: nil, **metadata)
      return unless logger
      segments = context ? ["context=#{context}"] : []
      metadata.each_pair do |key, value|
        segments << format_metadata_pair(key, value)
      end
      segments << message
      logger.public_send(severity, segments.join(" "))
    end

    # Retrieve the canonical logger for the web application.
    #
    # @param target [Object, nil] object with optional +settings.logger+ accessor.
    # @return [Logger, nil] logger instance when available.
    def logger_for(target = nil)
      if target.respond_to?(:settings) && target.settings.respond_to?(:logger)
        return target.settings.logger
      end
      # Fall back to the application-wide logger when it is defined.
      app_logger_available = defined?(PotatoMesh::Application) &&
        PotatoMesh::Application.respond_to?(:settings) &&
        PotatoMesh::Application.settings.respond_to?(:logger)
      app_logger_available ? PotatoMesh::Application.settings.logger : nil
    end

    # Format metadata key/value pairs for structured logging output.
    #
    # @param key [Symbol, String]
    # @param value [Object]
    # @return [String]
    def format_metadata_pair(key, value)
      "#{key}=#{value.inspect}"
    end
    private_class_method :format_metadata_pair
  end
end
+82
View File
@@ -0,0 +1,82 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
require_relative "config"
require_relative "sanitizer"
module PotatoMesh
  # Helper functions used to generate SEO metadata and formatted values.
  module Meta
    module_function

    # Format a distance in kilometres without trailing decimal precision when unnecessary.
    #
    # @param distance [Numeric] distance in kilometres.
    # @return [String] formatted kilometre value.
    def formatted_distance_km(distance)
      # One decimal place, then drop an exact ".0" suffix (e.g. "12.0" -> "12").
      rounded = format("%.1f", distance)
      rounded.sub(/\.0\z/, "")
    end

    # Construct the meta description string displayed to search engines and social previews.
    #
    # @param private_mode [Boolean] whether private mode is enabled.
    # @return [String] generated description text.
    def description(private_mode:)
      site = Sanitizer.sanitized_site_name
      channel = Sanitizer.sanitized_channel
      frequency = Sanitizer.sanitized_frequency
      contact = Sanitizer.sanitized_contact_link
      # Pick the tuning suffix based on which of channel/frequency are known.
      tuning =
        if channel.empty? && frequency.empty?
          "."
        elsif channel.empty?
          " tuned to #{frequency}."
        elsif frequency.empty?
          " on #{channel}."
        else
          " on #{channel} (#{frequency})."
        end
      sentences = ["Live Meshtastic mesh map for #{site}#{tuning}"]
      # Private mode omits the mention of messages.
      sentences <<
        if private_mode
          "Track nodes and coverage in real time."
        else
          "Track nodes, messages, and coverage in real time."
        end
      distance = Sanitizer.sanitized_max_distance_km
      if distance
        sentences << "Shows nodes within roughly #{formatted_distance_km(distance)} km of the map center."
      end
      sentences << "Join the community in #{contact} via chat." if contact
      sentences.join(" ")
    end

    # Build a hash of meta configuration values used by templating layers.
    #
    # @param private_mode [Boolean] whether private mode is enabled.
    # @return [Hash] structured metadata for templates.
    def configuration(private_mode:)
      label = Sanitizer.sanitized_site_name
      {
        title: label,
        name: label,
        description: description(private_mode: private_mode),
      }.freeze
    end
  end
end
+242
View File
@@ -0,0 +1,242 @@
# Copyright © 2025-26 l5yth & contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# frozen_string_literal: true
require "ipaddr"
require_relative "config"
module PotatoMesh
  # Utility module responsible for coercing and sanitising user provided
  # configuration strings. Each helper is exposed as a module function so it
  # can be consumed both by the web layer and background jobs without
  # instantiation overhead.
  module Sanitizer
    module_function

    # Coerce an arbitrary value into a trimmed string unless the content is
    # empty.
    #
    # @param value [Object, nil] arbitrary input that should be converted.
    # @return [String, nil] trimmed string representation or +nil+ when blank.
    def string_or_nil(value)
      return nil if value.nil?
      str = value.is_a?(String) ? value : value.to_s
      trimmed = str.strip
      trimmed.empty? ? nil : trimmed
    end

    # Ensure a value is a valid instance domain according to RFC 1035/3986
    # rules. Hostnames must include at least one dot-separated label and a
    # top-level domain containing an alphabetic character. Literal IP
    # addresses must be provided in standard dotted decimal form or enclosed in
    # brackets when IPv6 notation is used. Optional ports must fall within the
    # valid TCP/UDP range. Any opaque identifiers, URIs, or malformed hosts are
    # rejected.
    #
    # @param value [String, Object, nil] candidate domain name.
    # @param downcase [Boolean] whether to force the result to lowercase.
    # @return [String, nil] canonical domain value or +nil+ when invalid.
    def sanitize_instance_domain(value, downcase: true)
      host = string_or_nil(value)
      return nil unless host
      trimmed = host.strip
      # Drop any number of trailing dots (FQDN root notation).
      trimmed = trimmed.delete_suffix(".") while trimmed.end_with?(".")
      return nil if trimmed.empty?
      # Whitespace, path separators, and userinfo markers are never valid.
      return nil if trimmed.match?(%r{[\s/\\@]})
      if trimmed.start_with?("[")
        # Bracketed IPv6 literal, optionally followed by ":port".
        match = trimmed.match(/\A\[(?<address>[^\]]+)\](?::(?<port>\d+))?\z/)
        return nil unless match
        address = match[:address]
        port = match[:port]
        return nil if port && !valid_port?(port)
        begin
          IPAddr.new(address)
        rescue IPAddr::InvalidAddressError
          return nil
        end
        sanitized_address = downcase ? address.downcase : address
        return "[#{sanitized_address}]#{port ? ":#{port}" : ""}"
      end
      domain = trimmed
      port = nil
      if domain.include?(":")
        host_part, port_part = domain.split(":", 2)
        return nil if host_part.nil? || host_part.empty?
        # The digits-only match below also guarantees the port cannot contain
        # another ":" — a former redundant check for that was removed.
        return nil unless port_part && port_part.match?(/\A\d+\z/)
        return nil unless valid_port?(port_part)
        domain = host_part
        port = port_part
      end
      unless valid_hostname?(domain) || valid_ipv4_literal?(domain)
        return nil
      end
      sanitized_domain = downcase ? domain.downcase : domain
      port ? "#{sanitized_domain}:#{port}" : sanitized_domain
    end

    # Determine whether the supplied hostname conforms to RFC 1035 label
    # requirements and includes a valid top-level domain.
    #
    # @param hostname [String] host component without any port information.
    # @return [Boolean] true when the hostname is valid.
    def valid_hostname?(hostname)
      return false if hostname.length > 253
      labels = hostname.split(".")
      return false if labels.length < 2
      return false unless labels.all? { |label| valid_hostname_label?(label) }
      # The TLD must contain at least one alphabetic character so bare
      # numeric strings are not mistaken for hostnames.
      top_level = labels.last
      top_level.match?(/[a-z]/i)
    end

    # Validate a single hostname label ensuring the first and last characters
    # are alphanumeric and that no unsupported symbols are present.
    #
    # @param label [String] hostname component between dots.
    # @return [Boolean] true when the label is valid.
    def valid_hostname_label?(label)
      return false if label.empty?
      return false if label.length > 63
      label.match?(/\A[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\z/i)
    end

    # Validate whether a candidate represents a dotted decimal IPv4 literal.
    #
    # @param address [String] IP address string without port information.
    # @return [Boolean] true when the address is a valid IPv4 literal.
    def valid_ipv4_literal?(address)
      return false unless address.match?(/\A\d{1,3}(?:\.\d{1,3}){3}\z/)
      address.split(".").all? { |octet| octet.to_i.between?(0, 255) }
    end

    # Determine whether a port string represents a valid TCP/UDP port.
    #
    # @param port [String] numeric port representation.
    # @return [Boolean] true when the port falls within the acceptable range.
    def valid_port?(port)
      value = port.to_i
      value.positive? && value <= 65_535
    end

    # Extract the host component from a potentially bracketed domain literal.
    #
    # @param domain [String, nil] raw domain string received from the user.
    # @return [String, nil] host portion of the domain, or +nil+ when invalid.
    def instance_domain_host(domain)
      return nil if domain.nil?
      candidate = domain.strip
      return nil if candidate.empty?
      if candidate.start_with?("[")
        match = candidate.match(/\A\[(?<host>[^\]]+)\](?::(?<port>\d+))?\z/)
        return match[:host] if match
        return nil
      end
      host, port = candidate.split(":", 2)
      # Only strip a trailing ":port" when the remainder is not itself an
      # IPv6-style address containing colons.
      if port && !host.include?(":") && port.match?(/\A\d+\z/)
        return host
      end
      candidate
    end

    # Resolve a validated domain string into an IP address object.
    #
    # @param domain [String, nil] domain literal potentially including port.
    # @return [IPAddr, nil] parsed IP address when valid.
    def ip_from_domain(domain)
      host = instance_domain_host(domain)
      return nil unless host
      IPAddr.new(host)
    rescue IPAddr::InvalidAddressError
      nil
    end

    # Normalise a value into a trimmed string representation.
    #
    # @param value [Object] arbitrary object to coerce into text.
    # @return [String] trimmed string version of the supplied value.
    def sanitized_string(value)
      value.to_s.strip
    end

    # Retrieve the configured site name as a cleaned string.
    #
    # @return [String] trimmed configuration value.
    def sanitized_site_name
      sanitized_string(Config.site_name)
    end

    # Retrieve the configured channel as a cleaned string.
    #
    # @return [String] trimmed configuration value.
    def sanitized_channel
      sanitized_string(Config.channel)
    end

    # Retrieve the configured frequency as a cleaned string.
    #
    # @return [String] trimmed configuration value.
    def sanitized_frequency
      sanitized_string(Config.frequency)
    end

    # Retrieve the configured contact link and normalise blank values to nil.
    #
    # @return [String, nil] contact link identifier or +nil+ when blank.
    def sanitized_contact_link
      value = sanitized_string(Config.contact_link)
      value.empty? ? nil : value
    end

    # Retrieve the best effort URL for the configured contact link.
    #
    # @return [String, nil] contact hyperlink when derivable.
    def sanitized_contact_link_url
      Config.contact_link_url
    end

    # Return a positive numeric maximum distance when configured.
    #
    # @return [Numeric, nil] distance value in kilometres.
    def sanitized_max_distance_km
      distance = Config.max_distance_km
      return nil unless distance.is_a?(Numeric)
      return nil unless distance.positive?
      distance
    end
  end
end
+173
View File
@@ -0,0 +1,173 @@
{
"name": "potato-mesh",
"version": "0.5.5",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "potato-mesh",
"version": "0.5.5",
"devDependencies": {
"istanbul-lib-coverage": "^3.2.2",
"istanbul-lib-report": "^3.0.1",
"istanbul-reports": "^3.2.0",
"v8-to-istanbul": "^9.3.0"
}
},
"node_modules/@jridgewell/resolve-uri": {
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
"integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=6.0.0"
}
},
"node_modules/@jridgewell/sourcemap-codec": {
"version": "1.5.5",
"resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
"integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
"dev": true,
"license": "MIT"
},
"node_modules/@jridgewell/trace-mapping": {
"version": "0.3.31",
"resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz",
"integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jridgewell/resolve-uri": "^3.1.0",
"@jridgewell/sourcemap-codec": "^1.4.14"
}
},
"node_modules/@types/istanbul-lib-coverage": {
"version": "2.0.6",
"resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz",
"integrity": "sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==",
"dev": true,
"license": "MIT"
},
"node_modules/convert-source-map": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz",
"integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==",
"dev": true,
"license": "MIT"
},
"node_modules/has-flag": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=8"
}
},
"node_modules/html-escaper": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz",
"integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==",
"dev": true,
"license": "MIT"
},
"node_modules/istanbul-lib-coverage": {
"version": "3.2.2",
"resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz",
"integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==",
"dev": true,
"license": "BSD-3-Clause",
"engines": {
"node": ">=8"
}
},
"node_modules/istanbul-lib-report": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz",
"integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"istanbul-lib-coverage": "^3.0.0",
"make-dir": "^4.0.0",
"supports-color": "^7.1.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/istanbul-reports": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz",
"integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==",
"dev": true,
"license": "BSD-3-Clause",
"dependencies": {
"html-escaper": "^2.0.0",
"istanbul-lib-report": "^3.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/make-dir": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz",
"integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==",
"dev": true,
"license": "MIT",
"dependencies": {
"semver": "^7.5.3"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/semver": {
"version": "7.7.3",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz",
"integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==",
"dev": true,
"license": "ISC",
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/supports-color": {
"version": "7.2.0",
"resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
"integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
"dev": true,
"license": "MIT",
"dependencies": {
"has-flag": "^4.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/v8-to-istanbul": {
"version": "9.3.0",
"resolved": "https://registry.npmjs.org/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz",
"integrity": "sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA==",
"dev": true,
"license": "ISC",
"dependencies": {
"@jridgewell/trace-mapping": "^0.3.12",
"@types/istanbul-lib-coverage": "^2.0.1",
"convert-source-map": "^2.0.0"
},
"engines": {
"node": ">=10.12.0"
}
}
}
}
+15
View File
@@ -0,0 +1,15 @@
{
"name": "potato-mesh",
"version": "0.5.5",
"type": "module",
"private": true,
"scripts": {
"test": "mkdir -p reports coverage && NODE_V8_COVERAGE=coverage node --test --experimental-test-coverage --test-reporter=spec --test-reporter-destination=stdout --test-reporter=junit --test-reporter-destination=reports/javascript-junit.xml && node ./scripts/export-coverage.js"
},
"devDependencies": {
"istanbul-lib-coverage": "^3.2.2",
"istanbul-lib-report": "^3.0.1",
"istanbul-reports": "^3.2.0",
"v8-to-istanbul": "^9.3.0"
}
}
@@ -0,0 +1,149 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import {
fetchAggregatedTelemetry,
initializeChartsPage,
buildMovingAverageSeries,
} from '../charts-page.js';
/**
 * Build a minimal fetch Response stub for these tests.
 *
 * @param {number} status - HTTP status code the stub should report.
 * @param {*} body - Value resolved by the async `json()` method.
 * @returns {{ok: boolean, status: number, json: () => Promise<*>}} response stub.
 */
function createResponse(status, body) {
  const ok = status >= 200 && status < 300;
  return {
    ok,
    status,
    json: async () => body,
  };
}
// Verifies the aggregated endpoint is hit exactly once with the capped limit.
test('fetchAggregatedTelemetry requests the latest 1000 telemetry entries', async () => {
  const requests = [];
  const fetchImpl = async url => {
    requests.push(url);
    return createResponse(200, [{ rx_time: 1_700_000_000, node_id: '!demo' }]);
  };
  const snapshots = await fetchAggregatedTelemetry({ fetchImpl });
  assert.equal(requests.length, 1);
  assert.equal(requests[0], '/api/telemetry?limit=1000');
  assert.equal(Array.isArray(snapshots), true);
  assert.equal(snapshots[0].node_id, '!demo');
});
// A missing fetch implementation and non-2xx responses must both reject.
test('fetchAggregatedTelemetry validates fetch availability and response codes', async () => {
  await assert.rejects(() => fetchAggregatedTelemetry({ fetchImpl: null }), /fetch implementation/i);
  const fetchImpl = async () => createResponse(503, []);
  await assert.rejects(() => fetchAggregatedTelemetry({ fetchImpl }), /Failed to fetch telemetry/);
});
// Happy path: markup is injected and chart options (24h window, reducer) are forwarded.
test('initializeChartsPage renders the telemetry charts when snapshots are available', async () => {
  const container = { innerHTML: '' };
  const documentStub = {
    getElementById(id) {
      return id === 'chartsPage' ? container : null;
    },
  };
  const fetchImpl = async () => createResponse(200, [{ rx_time: 1_700_000_000, temperature: 22.5 }]);
  let receivedOptions = null;
  const renderCharts = (node, options) => {
    receivedOptions = options;
    return '<section class="node-detail__charts">Charts</section>';
  };
  const result = await initializeChartsPage({ document: documentStub, fetchImpl, renderCharts });
  assert.equal(result, true);
  assert.equal(container.innerHTML.includes('node-detail__charts'), true);
  assert.ok(receivedOptions);
  // 86_400_000 ms = 24 hours — the expected moving-average window.
  assert.equal(receivedOptions.chartOptions.windowMs, 86_400_000);
  assert.equal(typeof receivedOptions.chartOptions.lineReducer, 'function');
  const average = receivedOptions.chartOptions.lineReducer(
    [
      { timestamp: 0, value: 0 },
      { timestamp: 1_800_000, value: 10 },
      { timestamp: 3_600_000, value: 20 },
    ],
  );
  assert.equal(Array.isArray(average), true);
});
// Fetch failures surface a user-facing error message and a false result.
test('initializeChartsPage shows an error message when fetching fails', async () => {
  const container = { innerHTML: '' };
  const documentStub = {
    getElementById() {
      return container;
    },
  };
  const fetchImpl = async () => {
    throw new Error('network');
  };
  const renderCharts = () => '<section>unused</section>';
  const result = await initializeChartsPage({ document: documentStub, fetchImpl, renderCharts });
  assert.equal(result, false);
  assert.equal(container.innerHTML.includes('Failed to load telemetry charts.'), true);
});
// No container → false; empty snapshot list → true with an "unavailable" notice.
test('initializeChartsPage handles missing containers and empty telemetry snapshots', async () => {
  const documentMissing = { getElementById() { return null; } };
  const noneResult = await initializeChartsPage({ document: documentMissing });
  assert.equal(noneResult, false);
  const container = { innerHTML: '' };
  const documentStub = {
    getElementById() {
      return container;
    },
  };
  const fetchImpl = async () => createResponse(200, []);
  const renderCharts = () => '';
  const result = await initializeChartsPage({ document: documentStub, fetchImpl, renderCharts });
  assert.equal(result, true);
  assert.equal(container.innerHTML.includes('Telemetry snapshots are unavailable.'), true);
});
// An empty render result is treated the same as having no snapshots.
test('initializeChartsPage shows a status when rendering produces no markup', async () => {
  const container = { innerHTML: '' };
  const documentStub = {
    getElementById() {
      return container;
    },
  };
  const fetchImpl = async () => createResponse(200, [{ rx_time: 1_700_000_000 }]);
  const renderCharts = () => '';
  const result = await initializeChartsPage({ document: documentStub, fetchImpl, renderCharts });
  assert.equal(result, true);
  assert.equal(container.innerHTML.includes('Telemetry snapshots are unavailable.'), true);
});
// A document without getElementById must be rejected up front.
test('initializeChartsPage validates the document contract', async () => {
  await assert.rejects(() => initializeChartsPage({ document: {} }), /getElementById/);
});
// Rolling mean over a 1h window: each point averages itself plus in-window predecessors.
test('buildMovingAverageSeries computes a rolling mean across the window', () => {
  const points = [
    { timestamp: 0, value: 0 },
    { timestamp: 30 * 60 * 1000, value: 30 },
    { timestamp: 60 * 60 * 1000, value: 60 },
    { timestamp: 90 * 60 * 1000, value: 90 },
  ];
  const averages = buildMovingAverageSeries(points, 60 * 60 * 1000);
  assert.equal(averages.length, points.length);
  assert.equal(Math.round(averages[0].value), 0);
  assert.equal(Math.round(averages[1].value), 15);
  assert.equal(Math.round(averages[2].value), 30);
  assert.equal(Math.round(averages[3].value), 60);
});
@@ -0,0 +1,180 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import {
extractChatMessageMetadata,
formatChatMessagePrefix,
formatChatChannelTag,
formatChatPresetTag,
formatNodeAnnouncementPrefix,
__test__
} from '../chat-format.js';
const {
firstNonNull,
normalizeString,
normalizeFrequency,
normalizeFrequencySlot,
FREQUENCY_PLACEHOLDER,
resolveModemPresetCandidate,
normalizePresetString,
abbreviatePreset,
derivePresetInitials,
normalizePresetSlot,
PRESET_PLACEHOLDER
} = __test__;
// extractChatMessageMetadata: source preference, fallbacks, and invalid input.
test('extractChatMessageMetadata prefers explicit region_frequency and channel_name', () => {
  const metadata = extractChatMessageMetadata({
    region_frequency: 868,
    channel_name: ' Test Channel ',
    lora_freq: 915,
    channelName: 'Ignored'
  });
  assert.deepEqual(metadata, { frequency: '868', channelName: 'Test Channel', presetCode: null });
});
test('extractChatMessageMetadata falls back to LoRa metadata', () => {
  const metadata = extractChatMessageMetadata({
    lora_freq: 915,
    channelName: 'SpecChannel',
    modem_preset: 'MediumFast'
  });
  assert.deepEqual(metadata, { frequency: '915', channelName: 'SpecChannel', presetCode: 'MF' });
});
test('extractChatMessageMetadata returns null metadata for invalid input', () => {
  const empty = { frequency: null, channelName: null, presetCode: null };
  assert.deepEqual(extractChatMessageMetadata(null), empty);
  assert.deepEqual(extractChatMessageMetadata(undefined), empty);
});
test('extractChatMessageMetadata inspects nested node payloads for modem presets', () => {
  const metadata = extractChatMessageMetadata({ node: { modem_preset: 'ShortTurbo' } });
  assert.equal(metadata.presetCode, 'ST');
});
// Table-driven checks for the small normalisation helpers.
test('firstNonNull returns the first non-null candidate', () => {
  assert.equal(firstNonNull(null, undefined, '', 'value'), '');
  assert.equal(firstNonNull(undefined, null), null);
});
test('normalizeString trims strings and rejects empties', () => {
  const cases = [
    [' Spec ', 'Spec'],
    [' ', null],
    [123, '123'],
    [Number.POSITIVE_INFINITY, null]
  ];
  for (const [input, expected] of cases) {
    assert.equal(normalizeString(input), expected);
  }
});
test('normalizeFrequency handles numeric and string inputs', () => {
  const cases = [
    [915, '915'],
    [868.125, '868.125'],
    [' 868MHz ', '868'],
    ['n/a', 'n/a'],
    [-5, null],
    [null, null]
  ];
  for (const [input, expected] of cases) {
    assert.equal(normalizeFrequency(input), expected);
  }
});
// Prefix/tag formatters: brackets are always emitted, with placeholders when
// the underlying value is missing.
test('formatChatMessagePrefix preserves bracket placeholders', () => {
  assert.equal(formatChatMessagePrefix({ timestamp: '11:46:48', frequency: '868' }), '[11:46:48][868]');
  assert.equal(formatChatMessagePrefix({ timestamp: '16:19:19', frequency: null }), `[16:19:19][${FREQUENCY_PLACEHOLDER}]`);
  assert.equal(formatChatMessagePrefix({ timestamp: '09:00:00', frequency: '' }), `[09:00:00][${FREQUENCY_PLACEHOLDER}]`);
});
test('formatChatChannelTag wraps channel names after the short name slot', () => {
  assert.equal(formatChatChannelTag({ channelName: 'TEST' }), '[TEST]');
  assert.equal(formatChatChannelTag({ channelName: '' }), '[]');
  assert.equal(formatChatChannelTag({ channelName: null }), '[]');
});
test('formatChatPresetTag renders preset hints with placeholders', () => {
  assert.equal(formatChatPresetTag({ presetCode: 'MF' }), '[MF]');
  assert.equal(formatChatPresetTag({ presetCode: null }), `[${PRESET_PLACEHOLDER}]`);
});
test('formatNodeAnnouncementPrefix includes optional frequency bracket', () => {
  assert.equal(formatNodeAnnouncementPrefix({ timestamp: '12:34:56', frequency: '868' }), '[12:34:56][868]');
  assert.equal(formatNodeAnnouncementPrefix({ timestamp: '01:02:03', frequency: null }), `[01:02:03][${FREQUENCY_PLACEHOLDER}]`);
});
// Slot and preset helpers: placeholder substitution and abbreviation rules.
test('normalizeFrequencySlot returns placeholder when frequency is missing', () => {
  for (const missing of [null, '', undefined]) {
    assert.equal(normalizeFrequencySlot(missing), FREQUENCY_PLACEHOLDER);
  }
  assert.equal(normalizeFrequencySlot('915'), '915');
});
test('resolveModemPresetCandidate walks nested payloads', () => {
  assert.equal(resolveModemPresetCandidate({ node: { modemPreset: 'LongFast' } }), 'LongFast');
});
test('normalizePresetString trims strings and ignores empties', () => {
  assert.equal(normalizePresetString(' MediumSlow '), 'MediumSlow');
  assert.equal(normalizePresetString(' '), null);
  assert.equal(normalizePresetString(null), null);
});
test('abbreviatePreset maps known presets to codes', () => {
  const cases = [
    ['VeryLongSlow', 'VL'],
    ['customPreset', 'CP'],
    ['X', 'X?']
  ];
  for (const [preset, code] of cases) {
    assert.equal(abbreviatePreset(preset), code);
  }
});
test('derivePresetInitials falls back to segmented tokens', () => {
  const cases = [
    ['Long Moderate', 'LM'],
    ['ShortTurbo', 'ST'],
    ['Z', 'Z?']
  ];
  for (const [preset, initials] of cases) {
    assert.equal(derivePresetInitials(preset), initials);
  }
});
test('normalizePresetSlot enforces placeholders and uppercase output', () => {
  assert.equal(normalizePresetSlot('mf'), 'MF');
  assert.equal(normalizePresetSlot(''), PRESET_PLACEHOLDER);
  assert.equal(normalizePresetSlot(null), PRESET_PLACEHOLDER);
});
@@ -0,0 +1,100 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { formatPositionHighlights, formatTelemetryHighlights } from '../chat-log-highlights.js';
// Telemetry highlights: formatted numeric metrics and nested-value preference.
test('formatTelemetryHighlights includes formatted numeric metrics', () => {
  const h = (label, value) => ({ label, value });
  const highlights = formatTelemetryHighlights({ temperature: 21.44, relative_humidity: 54.27 });
  assert.deepEqual(highlights, [h('Temperature', '21.4°C'), h('Humidity', '54.3%')]);
});
test('formatTelemetryHighlights prefers nested telemetry when top-level values are stale', () => {
  const highlights = formatTelemetryHighlights({
    channel_utilization: 0,
    device_metrics: { channelUtilization: 0.561 },
  });
  assert.deepEqual(highlights, [{ label: 'Channel Util', value: '0.561%' }]);
});
// Position highlights: coordinates, movement metrics, microdegree inputs, and
// zero-value filtering.
test('formatPositionHighlights renders coordinate and movement data', () => {
  const h = (label, value) => ({ label, value });
  const highlights = formatPositionHighlights({
    latitude: 52.1234567,
    longitude: 13.7654321,
    altitude: 150.5,
    accuracy: 3.2,
    speed: 1.234,
    heading: 181.6,
    satellites: 7,
  });
  assert.deepEqual(highlights, [
    h('Lat', '52.12346'),
    h('Lon', '13.76543'),
    h('Alt', '150.5m'),
    h('Accuracy', '3.2m'),
    h('Speed', '1.2 m/s'),
    h('Heading', '182°'),
    h('Sats', '7'),
  ]);
});
test('formatPositionHighlights normalises integer microdegree fields', () => {
  const h = (label, value) => ({ label, value });
  const highlights = formatPositionHighlights({
    position: { latitude_i: 52_123_456, longitude_i: 13_765_432 },
  });
  assert.deepEqual(highlights.slice(0, 2), [h('Lat', '52.12346'), h('Lon', '13.76543')]);
});
test('formatters return empty arrays when payloads are missing', () => {
  assert.deepEqual(formatTelemetryHighlights(null), []);
  assert.deepEqual(formatPositionHighlights(undefined), []);
  assert.deepEqual(formatPositionHighlights({}), []);
});
test('formatPositionHighlights omits zero-valued movement metrics while keeping coordinates', () => {
  const h = (label, value) => ({ label, value });
  const highlights = formatPositionHighlights({
    latitude: 0,
    longitude: 0,
    altitude: 0,
    speed: '0',
    accuracy: 0,
  });
  assert.deepEqual(highlights, [h('Lat', '0.00000'), h('Lon', '0.00000')]);
});
@@ -0,0 +1,251 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import {
CHAT_LOG_ENTRY_TYPES,
buildChatTabModel,
MAX_CHANNEL_INDEX,
normaliseChannelIndex,
normaliseChannelName,
resolveTimestampSeconds
} from '../chat-log-tabs.js';
const NOW = 1_000_000;
const WINDOW = 60 * 60; // one hour

/** Node fixtures: one recent, one outside the window, one with only an ISO stamp. */
function fixtureNodes() {
  const recent = { id: 'recent-node', first_heard: NOW - 120 };
  const stale = { id: 'stale-node', first_heard: NOW - WINDOW - 1 };
  const isoOnly = {
    id: 'iso-node',
    firstHeard: null,
    first_heard_iso: new Date((NOW - 30) * 1000).toISOString()
  };
  return [recent, stale, isoOnly];
}

/** Message fixtures covering every channel-bucketing edge case exercised below. */
function fixtureMessages() {
  return [
    { id: 'recent-default', rx_time: NOW - 5, channel: 0, channel_name: ' MediumFast ' },
    { id: 'primary-preset', rx_time: NOW - 8, channel: 0, modem_preset: ' ShortFast ' },
    { id: 'env-default', rx_time: NOW - 12, channel: 0 },
    { id: 'recent-alt', rx_time: NOW - 10, channel_index: '1', channel_name: ' BerlinMesh ' },
    { id: 'stale', rx_time: NOW - WINDOW - 5, channel: 2 },
    { id: 'encrypted', rx_time: NOW - 20, channel: 3, encrypted: true },
    { id: 'no-index', rx_time: NOW - 15, channel_name: 'Fallback' },
    { id: 'too-high', rx_time: NOW - 25, channel: MAX_CHANNEL_INDEX + 5, channel_name: 'Ignored' },
    { id: 'iso-ts', rxTime: null, rx_iso: new Date((NOW - 40) * 1000).toISOString(), channel: 1 }
  ];
}

/** Invoke buildChatTabModel with the shared fixtures plus optional overrides. */
function buildModel(overrides = {}) {
  const baseOptions = {
    nodes: fixtureNodes(),
    messages: fixtureMessages(),
    nowSeconds: NOW,
    windowSeconds: WINDOW,
    primaryChannelFallbackLabel: '#EnvDefault'
  };
  return buildChatTabModel({ ...baseOptions, ...overrides });
}
// End-to-end check of log-entry ordering and channel bucketing for the fixtures.
test('buildChatTabModel returns sorted nodes and channel buckets', () => {
  const tabModel = buildModel();
  assert.equal(tabModel.logEntries.length, 3);
  const entryTypes = tabModel.logEntries.map(entry => entry.type);
  assert.deepEqual(entryTypes, [
    CHAT_LOG_ENTRY_TYPES.NODE_NEW,
    CHAT_LOG_ENTRY_TYPES.NODE_NEW,
    CHAT_LOG_ENTRY_TYPES.MESSAGE_ENCRYPTED
  ]);
  const entryIds = tabModel.logEntries.map(entry =>
    entry.type === CHAT_LOG_ENTRY_TYPES.MESSAGE_ENCRYPTED ? entry.message.id : entry.node.id
  );
  assert.deepEqual(entryIds, ['recent-node', 'iso-node', 'encrypted']);
  assert.equal(tabModel.channels.length, 5);
  assert.deepEqual(
    tabModel.channels.map(channel => channel.label),
    ['EnvDefault', 'Fallback', 'MediumFast', 'ShortFast', 'BerlinMesh']
  );
  const byLabel = new Map(tabModel.channels.map(channel => [channel.label, channel]));
  // Each labelled bucket keeps its channel index, stable id, and message order.
  const expectBucket = (label, index, id, messageIds) => {
    const channel = byLabel.get(label);
    assert.equal(channel.index, index);
    assert.equal(channel.id, id);
    assert.deepEqual(channel.entries.map(entry => entry.message.id), messageIds);
  };
  expectBucket('EnvDefault', 0, 'channel-0-envdefault', ['env-default']);
  expectBucket('Fallback', 0, 'channel-0-fallback', ['no-index']);
  expectBucket('MediumFast', 0, 'channel-0-mediumfast', ['recent-default']);
  expectBucket('ShortFast', 0, 'channel-0-shortfast', ['primary-preset']);
  expectBucket('BerlinMesh', 1, 'channel-1', ['iso-ts', 'recent-alt']);
});
test('buildChatTabModel always includes channel zero bucket', () => {
  const model = buildChatTabModel({ nodes: [], messages: [], nowSeconds: NOW, windowSeconds: WINDOW });
  assert.equal(model.channels.length, 1);
  const [primary] = model.channels;
  assert.equal(primary.index, 0);
  assert.equal(primary.entries.length, 0);
});
test('buildChatTabModel falls back to numeric label when no metadata provided', () => {
  const model = buildChatTabModel({
    nodes: [],
    messages: [{ id: 'plain', rx_time: NOW - 5, channel: 0 }],
    nowSeconds: NOW,
    windowSeconds: WINDOW,
    primaryChannelFallbackLabel: ''
  });
  assert.equal(model.channels.length, 1);
  const [primary] = model.channels;
  assert.equal(primary.label, '0');
  assert.equal(primary.id, 'channel-0');
});
test('normaliseChannelIndex handles numeric and textual input', () => {
  const cases = [
    [2.9, 2],
    [' 7 ', 7],
    ['bad', null],
    [null, null]
  ];
  for (const [input, expected] of cases) {
    assert.equal(normaliseChannelIndex(input), expected);
  }
});
test('normaliseChannelName trims strings and allows numeric values', () => {
  assert.equal(normaliseChannelName(' Berlin '), 'Berlin');
  assert.equal(normaliseChannelName(5), '5');
  assert.equal(normaliseChannelName(''), null);
  assert.equal(normaliseChannelName(undefined), null);
});
test('resolveTimestampSeconds prefers numeric but falls back to ISO parsing', () => {
  assert.equal(resolveTimestampSeconds(1234, null), 1234);
  assert.equal(resolveTimestampSeconds('not-numeric', '1970-01-01T00:10:00Z'), 600);
  assert.equal(resolveTimestampSeconds('bad', 'invalid'), null);
});
test('buildChatTabModel includes telemetry, position, and neighbor events', () => {
  const nodeId = '!node';
  const neighborId = '!peer';
  const model = buildChatTabModel({
    nodes: [
      {
        node_id: nodeId,
        first_heard: NOW - 50,
        last_heard: NOW - 40,
        short_name: 'NODE',
        long_name: 'Node Example'
      }
    ],
    telemetry: [{ node_id: nodeId, rx_time: NOW - 30 }],
    positions: [{ node_id: nodeId, rx_time: NOW - 20 }],
    neighbors: [{ node_id: nodeId, neighbor_id: neighborId, rx_time: NOW - 10 }],
    messages: [],
    nowSeconds: NOW,
    windowSeconds: WINDOW
  });
  // Events are ordered: node discovery, node info, then per-feed events.
  const types = model.logEntries.map(entry => entry.type);
  assert.deepEqual(types, [
    CHAT_LOG_ENTRY_TYPES.NODE_NEW,
    CHAT_LOG_ENTRY_TYPES.NODE_INFO,
    CHAT_LOG_ENTRY_TYPES.TELEMETRY,
    CHAT_LOG_ENTRY_TYPES.POSITION,
    CHAT_LOG_ENTRY_TYPES.NEIGHBOR
  ]);
  assert.equal(model.logEntries[0].nodeId, nodeId);
  assert.equal(model.logEntries.at(-1).neighborId, neighborId);
});
test('buildChatTabModel merges dedicated encrypted log feed without altering channels', () => {
  const plainFeed = fixtureMessages().filter(message => !message.encrypted);
  const model = buildChatTabModel({
    nodes: [],
    messages: plainFeed,
    logOnlyMessages: [{ id: 'log-only', encrypted: true, rx_time: NOW - 3, channel: 7 }],
    nowSeconds: NOW,
    windowSeconds: WINDOW
  });
  const encryptedEntries = model.logEntries.filter(
    entry => entry.type === CHAT_LOG_ENTRY_TYPES.MESSAGE_ENCRYPTED
  );
  assert.equal(encryptedEntries.length, 1);
  assert.equal(encryptedEntries[0]?.message?.id, 'log-only');
  // The log-only message must never surface in any channel bucket.
  const channelMessageIds = model.channels.flatMap(channel =>
    Array.isArray(channel?.entries)
      ? channel.entries.map(entry => entry?.message?.id).filter(Boolean)
      : []
  );
  assert.ok(!channelMessageIds.includes('log-only'));
});
test('buildChatTabModel de-duplicates encrypted messages across feeds', () => {
  const duplicate = { id: 'dup', encrypted: true, rx_time: NOW - 4 };
  const model = buildChatTabModel({
    nodes: [],
    messages: [duplicate],
    logOnlyMessages: [duplicate],
    nowSeconds: NOW,
    windowSeconds: WINDOW
  });
  const encrypted = model.logEntries.filter(
    entry => entry.type === CHAT_LOG_ENTRY_TYPES.MESSAGE_ENCRYPTED
  );
  assert.equal(encrypted.length, 1);
  assert.equal(encrypted[0]?.message?.id, 'dup');
});
test('buildChatTabModel ignores plaintext log-only entries', () => {
  const model = buildChatTabModel({
    nodes: [],
    messages: [],
    logOnlyMessages: [
      { id: 'plain', encrypted: false, rx_time: NOW - 5 },
      { id: 'enc', encrypted: true, rx_time: NOW - 4 }
    ],
    nowSeconds: NOW,
    windowSeconds: WINDOW
  });
  const encrypted = model.logEntries.filter(
    entry => entry.type === CHAT_LOG_ENTRY_TYPES.MESSAGE_ENCRYPTED
  );
  assert.equal(encrypted.length, 1);
  assert.equal(encrypted[0]?.message?.id, 'enc');
});
@@ -0,0 +1,109 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { CHAT_LOG_ENTRY_TYPES } from '../chat-log-tabs.js';
import {
chatLogEntryMatchesQuery,
chatMessageMatchesQuery,
filterChatModel,
normaliseChatFilterQuery
} from '../chat-search.js';
test('normaliseChatFilterQuery lower-cases and trims user input', () => {
  assert.equal(normaliseChatFilterQuery(' MIXED Case '), 'mixed case');
  assert.equal(normaliseChatFilterQuery(null), '');
});
test('chatMessageMatchesQuery inspects text and node metadata', () => {
  const message = { text: 'Hello Mesh', node: { short_name: 'ALFA', long_name: 'Alpha Node' } };
  const expectations = [
    ['mesh', true],
    ['alfa', true],
    ['bravo', false]
  ];
  for (const [rawQuery, expected] of expectations) {
    assert.equal(chatMessageMatchesQuery(message, normaliseChatFilterQuery(rawQuery)), expected);
  }
});
test('chatLogEntryMatchesQuery recognises position highlight values', () => {
  const positionEntry = {
    type: CHAT_LOG_ENTRY_TYPES.POSITION,
    ts: 1,
    position: { latitude: 51.5, longitude: 0 },
    node: { node_id: '!alpha', short_name: 'Alpha' }
  };
  assert.equal(chatLogEntryMatchesQuery(positionEntry, normaliseChatFilterQuery('51.50000')), true);
  assert.equal(chatLogEntryMatchesQuery(positionEntry, normaliseChatFilterQuery('bravo')), false);
});
test('chatLogEntryMatchesQuery uses enriched node context for lookups', () => {
  const telemetryEntry = {
    type: CHAT_LOG_ENTRY_TYPES.TELEMETRY,
    nodeId: '!alpha',
    telemetry: { voltage: 12.1 },
    node: { short_name: 'ALFA', long_name: 'Alpha Node' }
  };
  assert.equal(chatLogEntryMatchesQuery(telemetryEntry, normaliseChatFilterQuery('alpha node')), true);
});
test('chatLogEntryMatchesQuery inspects neighbor node context', () => {
  const neighborEntry = {
    type: CHAT_LOG_ENTRY_TYPES.NEIGHBOR,
    neighborId: '!bravo',
    neighborNode: { short_name: 'BRAV', long_name: 'Bravo Station' }
  };
  assert.equal(chatLogEntryMatchesQuery(neighborEntry, normaliseChatFilterQuery('bravo station')), true);
});
test('filterChatModel filters both log entries and channel messages', () => {
  const inputModel = {
    logEntries: [
      { type: CHAT_LOG_ENTRY_TYPES.NODE_INFO, nodeId: '!alpha', node: { short_name: 'Alpha' } },
      { type: CHAT_LOG_ENTRY_TYPES.NODE_INFO, nodeId: '!bravo', node: { short_name: 'Bravo' } }
    ],
    channels: [
      {
        index: 0,
        label: '0',
        entries: [
          { ts: 1, message: { text: 'Ping Alpha', node: { short_name: 'Alpha' } } },
          { ts: 2, message: { text: 'Ack Bravo', node: { short_name: 'Bravo' } } }
        ]
      }
    ]
  };
  const filtered = filterChatModel(inputModel, 'bravo');
  assert.equal(filtered.logEntries.length, 1);
  assert.equal(filtered.logEntries[0].nodeId, '!bravo');
  assert.equal(filtered.channels.length, 1);
  assert.deepEqual(filtered.channels[0].entries.map(entry => entry.message.text), ['Ack Bravo']);
});
test('filterChatModel returns original references when query is empty', () => {
  const inputModel = {
    logEntries: [{ type: CHAT_LOG_ENTRY_TYPES.NODE_INFO, nodeId: '!alpha', node: { short_name: 'Alpha' } }],
    channels: [{ index: 0, label: '0', entries: [] }]
  };
  const filtered = filterChatModel(inputModel, ' ');
  // Blank queries must be a no-op, returning the very same arrays.
  assert.strictEqual(filtered.logEntries, inputModel.logEntries);
  assert.strictEqual(filtered.channels, inputModel.channels);
});
@@ -0,0 +1,194 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { renderChatTabs } from '../chat-tabs.js';
/** Tiny DOMTokenList substitute supporting add/remove/contains only. */
class MockClassList {
  constructor() {
    this._values = new Set();
  }

  /** Add each truthy class name; falsy entries are ignored. */
  add(...names) {
    for (const name of names) {
      if (name) this._values.add(name);
    }
  }

  /** Remove each truthy class name; falsy entries are ignored. */
  remove(...names) {
    for (const name of names) {
      if (name) this._values.delete(name);
    }
  }

  /** @returns {boolean} whether the class is currently present. */
  contains(name) {
    return this._values.has(name);
  }
}
/** Stand-in for DocumentFragment: collects children and flags itself. */
class MockFragment {
  constructor() {
    this.children = [];
    // MockElement.replaceChildren flattens nodes carrying this flag.
    this.isFragment = true;
  }

  /** Append and return the node, mirroring the DOM contract. */
  appendChild(node) {
    this.children.push(node);
    return node;
  }
}
/**
 * Minimal DOM element mock: tracks children, attributes, dataset, listeners,
 * and the visibility/scroll state renderChatTabs manipulates.
 */
class MockElement {
  constructor(tagName) {
    this.tagName = tagName.toUpperCase();
    this.children = [];
    this.attributes = new Map();
    this.dataset = {};
    this.classList = new MockClassList();
    this.listeners = new Map();
    this.hidden = false;
    this.scrollTop = 0;
    this.scrollHeight = 200;
  }

  /** Append and return the node, mirroring the DOM contract. */
  appendChild(node) {
    this.children.push(node);
    return node;
  }

  /** Replace all children; fragments are flattened, falsy nodes dropped. */
  replaceChildren(...nodes) {
    const flattened = [];
    for (const node of nodes) {
      if (!node) continue;
      if (node.isFragment && Array.isArray(node.children)) {
        flattened.push(...node.children);
      } else {
        flattened.push(node);
      }
    }
    this.children = flattened;
  }

  /** Store an attribute; mirrors `id` and `data-*` onto id/dataset. */
  setAttribute(name, value) {
    const text = String(value);
    this.attributes.set(name, text);
    if (name === 'id') {
      this.id = text;
    }
    if (name.startsWith('data-')) {
      // data-foo-bar -> dataset.fooBar, like real DOMStringMap.
      const datasetKey = name.slice(5).replace(/-([a-z])/g, (_, ch) => ch.toUpperCase());
      this.dataset[datasetKey] = text;
    }
  }

  /** @returns {?string} stored attribute value or null when never set. */
  getAttribute(name) {
    const value = this.attributes.get(name);
    return value === undefined ? null : value;
  }

  /** Register a single handler per event name. */
  addEventListener(event, handler) {
    this.listeners.set(event, handler);
  }

  /** Fire the registered handler (if any) with an empty event object. */
  dispatch(event) {
    this.listeners.get(event)?.({});
  }
}
/** Produce a document-like factory backed by the mock element classes. */
function createMockDocument() {
  return {
    createElement: tag => new MockElement(tag),
    createDocumentFragment: () => new MockFragment()
  };
}
test('renderChatTabs creates tab markup and selects default active tab', () => {
  const document = createMockDocument();
  const host = new MockElement('div');
  const tabs = [
    { id: 'log', label: 'Log', content: new MockElement('div') },
    { id: 'channel-0', label: 'Default', content: new MockElement('div') },
    { id: 'channel-1', label: 'Alt', content: new MockElement('div') }
  ];
  const activeId = renderChatTabs({ document, container: host, tabs, defaultActiveTabId: 'channel-0' });
  assert.equal(activeId, 'channel-0');
  assert.equal(host.dataset.activeTab, 'channel-0');
  assert.equal(host.children.length, 2);
  const [tabStrip, panelHost] = host.children;
  assert.equal(tabStrip.children.length, 3);
  assert.equal(panelHost.children.length, 3);
  const [logPanel, defaultPanel] = panelHost.children;
  // The active panel is visible and scrolled to the bottom.
  assert.equal(defaultPanel.hidden, false);
  assert.equal(defaultPanel.scrollTop, defaultPanel.scrollHeight);
  assert.equal(logPanel.hidden, true);
  // Clicking another tab swaps visibility and records the new active id.
  tabStrip.children[0].dispatch('click');
  assert.equal(host.dataset.activeTab, 'log');
  assert.equal(logPanel.hidden, false);
  assert.equal(defaultPanel.hidden, true);
});
test('renderChatTabs reuses previous active tab when still available', () => {
  const document = createMockDocument();
  const host = new MockElement('div');
  host.dataset.activeTab = 'log';
  const tabs = [
    { id: 'log', label: 'Log', content: new MockElement('div') },
    { id: 'channel-0', label: 'Default', content: new MockElement('div') }
  ];
  const activeId = renderChatTabs({
    document,
    container: host,
    tabs,
    previousActiveTabId: 'log',
    defaultActiveTabId: 'channel-0'
  });
  assert.equal(activeId, 'log');
  const [tabStrip, panelHost] = host.children;
  assert.equal(tabStrip.children[0].getAttribute('aria-selected'), 'true');
  assert.equal(panelHost.children[0].hidden, false);
});
test('renderChatTabs clears container when no tabs exist', () => {
  const document = createMockDocument();
  const host = new MockElement('div');
  host.replaceChildren(new MockElement('span'));
  assert.equal(renderChatTabs({ document, container: host, tabs: [] }), null);
  assert.equal(host.children.length, 0);
  assert.equal(host.dataset.activeTab, '');
});
@@ -0,0 +1,128 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { documentStub, resetDocumentStub } from './document-stub.js';
import { readAppConfig } from '../config.js';
import { DEFAULT_CONFIG, mergeConfig } from '../settings.js';
test('readAppConfig returns an empty object when the configuration element is missing', () => {
  resetDocumentStub();
  assert.deepEqual(readAppConfig(), {});
});
test('readAppConfig returns an empty object when the attribute is empty', () => {
  resetDocumentStub();
  documentStub.setConfigElement({ getAttribute: () => '' });
  assert.deepEqual(readAppConfig(), {});
});
test('readAppConfig parses configuration JSON from the DOM attribute', () => {
  resetDocumentStub();
  const expected = { refreshMs: 5000, chatEnabled: false };
  documentStub.setConfigElement({
    getAttribute: attribute => (attribute === 'data-app-config' ? JSON.stringify(expected) : null)
  });
  assert.deepEqual(readAppConfig(), expected);
});
test('readAppConfig returns an empty object and logs on parse failure', () => {
  resetDocumentStub();
  let called = false;
  const originalError = console.error;
  // Silence the expected parse-error log while recording that it happened.
  console.error = () => {
    called = true;
  };
  try {
    documentStub.setConfigElement({
      getAttribute: name => (name === 'data-app-config' ? 'not-json' : null)
    });
    assert.deepEqual(readAppConfig(), {});
    assert.equal(called, true);
  } finally {
    // Bug fix: restore console.error even when an assertion above throws;
    // previously a failure here leaked the no-op stub into every later test.
    console.error = originalError;
  }
});
test('readAppConfig ignores non-object JSON payloads', () => {
  resetDocumentStub();
  documentStub.setConfigElement({
    getAttribute: name => (name === 'data-app-config' ? '42' : null)
  });
  assert.deepEqual(readAppConfig(), {});
});
test('mergeConfig applies default values when fields are missing', () => {
  assert.deepEqual(mergeConfig({}), {
    ...DEFAULT_CONFIG,
    mapCenter: { ...DEFAULT_CONFIG.mapCenter },
    tileFilters: { ...DEFAULT_CONFIG.tileFilters }
  });
});
test('mergeConfig coerces numeric values and nested objects', () => {
  const merged = mergeConfig({
    refreshIntervalSeconds: '30',
    refreshMs: '45000',
    mapCenter: { lat: '10.5', lon: '20.1' },
    tileFilters: { dark: 'contrast(2)' },
    mapZoom: '12',
    chatEnabled: 0,
    channel: '#Custom',
    frequency: '915MHz',
    contactLink: 'https://example.org/chat',
    contactLinkUrl: 'https://example.org/chat',
    maxDistanceKm: '55.5'
  });
  assert.equal(merged.refreshIntervalSeconds, 30);
  assert.equal(merged.refreshMs, 45000);
  assert.deepEqual(merged.mapCenter, { lat: 10.5, lon: 20.1 });
  assert.deepEqual(merged.tileFilters, { light: DEFAULT_CONFIG.tileFilters.light, dark: 'contrast(2)' });
  assert.equal(merged.mapZoom, 12);
  assert.equal(merged.chatEnabled, false);
  assert.equal(merged.channel, '#Custom');
  assert.equal(merged.frequency, '915MHz');
  assert.equal(merged.contactLink, 'https://example.org/chat');
  assert.equal(merged.contactLinkUrl, 'https://example.org/chat');
  assert.equal(merged.maxDistanceKm, 55.5);
});
test('mergeConfig falls back to defaults for invalid numeric values', () => {
  const merged = mergeConfig({
    refreshIntervalSeconds: 'NaN',
    refreshMs: 'NaN',
    maxDistanceKm: 'oops',
    mapZoom: 'not-a-number'
  });
  assert.equal(merged.refreshIntervalSeconds, DEFAULT_CONFIG.refreshIntervalSeconds);
  assert.equal(merged.refreshMs, DEFAULT_CONFIG.refreshMs);
  assert.equal(merged.maxDistanceKm, DEFAULT_CONFIG.maxDistanceKm);
  assert.equal(merged.mapZoom, null);
});
test('mergeConfig treats blank mapZoom as null', () => {
  assert.equal(mergeConfig({ mapZoom: '' }).mapZoom, null);
});
test('document stub returns null for unrelated selectors', () => {
  resetDocumentStub();
  assert.equal(documentStub.querySelector('#missing'), null);
});
@@ -0,0 +1,101 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Bare-bones stand-in for the browser `document` object, implementing only the
 * APIs the front-end modules touch while running under Node.
 */
class DocumentStub {
  /** Create a stub starting from a pristine state. */
  constructor() {
    this.reset();
  }

  /**
   * Drop the registered config element and every event listener.
   *
   * @returns {void}
   */
  reset() {
    this.configElement = null;
    this.listeners = new Map();
  }

  /**
   * Install the element returned for the `[data-app-config]` selector.
   *
   * @param {?Element} element Node exposing `getAttribute`.
   * @returns {void}
   */
  setConfigElement(element) {
    this.configElement = element;
  }

  /**
   * Resolve the config selector; any other selector yields `null`.
   *
   * @param {string} selector CSS selector requested by the module under test.
   * @returns {?Element} The registered element, or `null` when unavailable.
   */
  querySelector(selector) {
    return selector === '[data-app-config]' ? this.configElement : null;
  }

  /**
   * Record an event handler, mirroring `EventTarget.addEventListener`.
   *
   * @param {string} event Event identifier.
   * @param {Function} handler Callback fired by `dispatchEvent`.
   * @returns {void}
   */
  addEventListener(event, handler) {
    this.listeners.set(event, handler);
  }

  /**
   * Invoke the handler registered for `event`, if any.
   *
   * @param {string} event Event identifier used at registration time.
   * @returns {void}
   */
  dispatchEvent(event) {
    this.listeners.get(event)?.();
  }
}
// Shared singleton stub consumed by the individual test modules.
export const documentStub = new DocumentStub();
/**
 * Reset the shared stub between test cases to avoid state bleed.
 *
 * @returns {void}
 */
export function resetDocumentStub() {
  documentStub.reset();
}
// Module-load side effect: importing this module installs the stub as the
// global `document`, so modules under test that touch the DOM at import time
// resolve against the stub instead of crashing under Node.
globalThis.document = documentStub;
@@ -0,0 +1,292 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Lightweight DOMTokenList replacement covering the calls the UI code makes,
 * including `toggle` with an optional forced state.
 */
class MockClassList {
  constructor() {
    this._values = new Set();
  }

  /**
   * Insert the given class names, skipping falsy entries.
   *
   * @param {...string} names Class names to add.
   * @returns {void}
   */
  add(...names) {
    for (const name of names) {
      if (name) this._values.add(name);
    }
  }

  /**
   * Drop the given class names, skipping falsy entries.
   *
   * @param {...string} names Class names to remove.
   * @returns {void}
   */
  remove(...names) {
    for (const name of names) {
      if (name) this._values.delete(name);
    }
  }

  /**
   * Report whether `name` is currently present.
   *
   * @param {string} name Class name to look up.
   * @returns {boolean} `true` when present.
   */
  contains(name) {
    return this._values.has(name);
  }

  /**
   * Flip (or force) the presence of `name`, mirroring `DOMTokenList.toggle`.
   * Only a strict boolean `force` short-circuits; anything else plain-toggles.
   *
   * @param {string} name Class name to toggle.
   * @param {boolean} [force] Optional forced end state.
   * @returns {boolean} `true` when the class is present after toggling.
   */
  toggle(name, force) {
    if (force === true) {
      this._values.add(name);
      return true;
    }
    if (force === false) {
      this._values.delete(name);
      return false;
    }
    const nowPresent = !this._values.has(name);
    if (nowPresent) {
      this._values.add(name);
    } else {
      this._values.delete(name);
    }
    return nowPresent;
  }
}
/**
 * Minimal DOM element stand-in exposing the attribute, dataset, style, and
 * class-list handling exercised by the frontend entrypoints.
 */
class MockElement {
  /**
   * @param {string} tagName Element name used for diagnostics.
   * @param {Map<string, MockElement>} registry Shared id → element map backing
   *     the owning document's `getElementById`.
   */
  constructor(tagName, registry) {
    this.tagName = tagName.toUpperCase();
    this._registry = registry;
    this.attributes = new Map();
    this.dataset = {};
    this.style = {};
    this.textContent = '';
    this.classList = new MockClassList();
  }

  /**
   * Store an attribute; `id` assignments are mirrored into the registry so the
   * document can resolve them later.
   *
   * @param {string} name Attribute identifier.
   * @param {string} value Attribute value (stringified before storage).
   * @returns {void}
   */
  setAttribute(name, value) {
    const text = String(value);
    this.attributes.set(name, text);
    if (name === 'id' && this._registry) {
      this._registry.set(text, this);
    }
  }

  /**
   * Look up an attribute value.
   *
   * @param {string} name Attribute identifier.
   * @returns {?string} Stored value, or `null` when never set.
   */
  getAttribute(name) {
    const value = this.attributes.get(name);
    return value === undefined ? null : value;
  }
}
/**
 * Create a deterministic DOM environment that provides just enough behaviour
 * for the UI scripts to execute inside Node.js unit tests.
 *
 * The returned environment installs mock ``window``/``document`` objects on
 * ``globalThis``; callers MUST invoke ``cleanup()`` (typically in a
 * ``finally`` block) to restore the previous globals.
 *
 * @param {{
 *   readyState?: 'loading' | 'interactive' | 'complete',
 *   cookie?: string,
 *   includeBody?: boolean,
 *   bodyHasDarkClass?: boolean
 * }} [options]
 * @returns {{
 *   window: Window & { dispatchEvent: Function },
 *   document: Document,
 *   createElement: (tagName?: string, id?: string) => MockElement,
 *   registerElement: (id: string, element: MockElement) => void,
 *   setComputedStyleImplementation: (impl: Function) => void,
 *   triggerDOMContentLoaded: () => void,
 *   dispatchWindowEvent: (event: string) => void,
 *   getCookieString: () => string,
 *   setCookieString: (value: string) => void,
 *   cleanup: () => void
 * }}
 */
export function createDomEnvironment(options = {}) {
  const {
    readyState = 'complete',
    cookie = '',
    includeBody = true,
    bodyHasDarkClass = true
  } = options;
  // Capture the real globals so cleanup() can restore them afterwards.
  const originalWindow = globalThis.window;
  const originalDocument = globalThis.document;
  // Shared id -> element map backing document.getElementById.
  const registry = new Map();
  // One handler per event name is enough for the scripts under test.
  const documentListeners = new Map();
  const windowListeners = new Map();
  let computedStyleImpl = null;
  let cookieStore = cookie;
  const document = {
    readyState,
    documentElement: new MockElement('html', registry),
    body: includeBody ? new MockElement('body', registry) : null,
    addEventListener(event, handler) {
      documentListeners.set(event, handler);
    },
    // NOTE: removes whichever handler is registered for the event name; the
    // handler argument of the real API is intentionally not compared here.
    removeEventListener(event) {
      documentListeners.delete(event);
    },
    // Dispatch is keyed by the plain event-name string, not an Event object.
    dispatchEvent(event) {
      const handler = documentListeners.get(event);
      if (handler) handler();
    },
    getElementById(id) {
      return registry.get(id) || null;
    },
    querySelector() {
      return null;
    },
    createElement(tagName) {
      return new MockElement(tagName, registry);
    }
  };
  if (document.body && bodyHasDarkClass) {
    document.body.classList.add('dark');
  }
  // Mimic document.cookie's append-on-assign semantics: each write is joined
  // onto the existing cookie string rather than replacing it.
  Object.defineProperty(document, 'cookie', {
    get() {
      return cookieStore;
    },
    set(value) {
      cookieStore = cookieStore ? `${cookieStore}; ${value}` : value;
    }
  });
  const window = {
    document,
    addEventListener(event, handler) {
      windowListeners.set(event, handler);
    },
    removeEventListener(event) {
      windowListeners.delete(event);
    },
    dispatchEvent(event) {
      const handler = windowListeners.get(event);
      if (handler) handler();
    },
    // Delegates to the implementation installed via
    // setComputedStyleImplementation, falling back to empty property values.
    getComputedStyle(target) {
      if (typeof computedStyleImpl === 'function') {
        return computedStyleImpl(target);
      }
      return {
        getPropertyValue() {
          return '';
        }
      };
    }
  };
  // Install the mocks globally so scripts that reference bare
  // window/document resolve to this environment.
  globalThis.window = window;
  globalThis.document = document;
  /**
   * Create and optionally register a mock element.
   *
   * @param {string} [tagName='div'] Tag name of the element.
   * @param {string} [id] Optional identifier registered with the document.
   * @returns {MockElement} New mock element instance.
   */
  function createElement(tagName = 'div', id) {
    const element = new MockElement(tagName, registry);
    if (id) {
      element.setAttribute('id', id);
    }
    return element;
  }
  /**
   * Register an element instance so that ``getElementById`` can resolve it.
   *
   * @param {string} id Element identifier.
   * @param {MockElement} element Element instance to register.
   * @returns {void}
   */
  function registerElement(id, element) {
    registry.set(id, element);
  }
  return {
    window,
    document,
    createElement,
    registerElement,
    setComputedStyleImplementation(impl) {
      computedStyleImpl = impl;
    },
    triggerDOMContentLoaded() {
      const handler = documentListeners.get('DOMContentLoaded');
      if (handler) handler();
    },
    dispatchWindowEvent(event) {
      const handler = windowListeners.get(event);
      if (handler) handler();
    },
    getCookieString() {
      return cookieStore;
    },
    // Replaces the cookie store directly, bypassing the append-on-assign
    // behaviour of the document.cookie setter.
    setCookieString(value) {
      cookieStore = value;
    },
    cleanup() {
      globalThis.window = originalWindow;
      globalThis.document = originalDocument;
    }
  };
}
@@ -0,0 +1,174 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { createDomEnvironment } from './dom-environment.js';
import { buildInstanceUrl, initializeInstanceSelector, __test__ } from '../instance-selector.js';
const { resolveInstanceLabel } = __test__;
/**
 * Decorate a mock ``<select>`` element with option tracking, a
 * ``value``/``selectedIndex`` pair, and a tiny event system so the instance
 * selector code can drive it like a real DOM select.
 *
 * @param {Document} document Mock document used to create the element.
 * @returns {object} Select element augmented with options/value/events.
 */
function setupSelectElement(document) {
  const select = document.createElement('select');
  const handlers = new Map();
  const optionList = [];
  Object.defineProperty(select, 'options', {
    get() {
      return optionList;
    }
  });
  Object.defineProperty(select, 'value', {
    // Resolve the value through selectedIndex, like a real select element.
    get() {
      if (typeof select.selectedIndex !== 'number') {
        return '';
      }
      const selected = optionList[select.selectedIndex];
      return selected ? selected.value : '';
    },
    // Assigning a value moves selectedIndex to the matching option (or -1).
    set(nextValue) {
      select.selectedIndex = optionList.findIndex(option => option.value === nextValue);
    }
  });
  select.selectedIndex = -1;
  select.appendChild = option => {
    optionList.push(option);
    // The first appended option becomes the selection, mirroring the DOM.
    if (select.selectedIndex === -1) {
      select.selectedIndex = 0;
    }
    return option;
  };
  select.remove = index => {
    if (index < 0 || index >= optionList.length) {
      return;
    }
    optionList.splice(index, 1);
    if (optionList.length === 0) {
      select.selectedIndex = -1;
    } else if (select.selectedIndex >= optionList.length) {
      // Clamp the selection when the removed entry shrank the list.
      select.selectedIndex = optionList.length - 1;
    }
  };
  select.addEventListener = (event, handler) => {
    handlers.set(event, handler);
  };
  select.dispatchEvent = event => {
    // Accept either a bare event-name string or an object carrying `type`.
    const name = typeof event === 'string' ? event : event?.type;
    const handler = handlers.get(name);
    if (handler) {
      handler(event);
    }
  };
  return select;
}
// Label resolution should prefer the trimmed display name, fall back to the
// domain, and degrade to '' for missing input.
test('resolveInstanceLabel falls back to the domain when the name is missing', () => {
  assert.equal(resolveInstanceLabel({ domain: 'mesh.example' }), 'mesh.example');
  assert.equal(resolveInstanceLabel({ name: ' Mesh Name ' }), 'Mesh Name');
  assert.equal(resolveInstanceLabel(null), '');
});

// Bare domains gain an https:// prefix, already-prefixed input is trimmed,
// and empty/null input yields null rather than a bogus URL.
test('buildInstanceUrl normalises domains into navigable HTTPS URLs', () => {
  assert.equal(buildInstanceUrl('mesh.example'), 'https://mesh.example');
  assert.equal(buildInstanceUrl(' https://mesh.example '), 'https://mesh.example');
  assert.equal(buildInstanceUrl(''), null);
  assert.equal(buildInstanceUrl(null), null);
});

// The selector should fetch the instance list once, render the default label
// first, sort the remaining options alphabetically by label, and pre-select
// the configured instanceDomain.
test('initializeInstanceSelector populates options alphabetically and selects the configured domain', async () => {
  const env = createDomEnvironment();
  const select = setupSelectElement(env.document);
  const fetchCalls = [];
  const fetchImpl = async url => {
    fetchCalls.push(url);
    return {
      ok: true,
      async json() {
        // Deliberately unsorted, with one entry lacking a display name so the
        // domain fallback path is exercised.
        return [
          { name: 'Zulu Mesh', domain: 'zulu.mesh' },
          { name: 'Alpha Mesh', domain: 'alpha.mesh' },
          { domain: 'beta.mesh' }
        ];
      }
    };
  };
  try {
    await initializeInstanceSelector({
      selectElement: select,
      fetchImpl,
      windowObject: env.window,
      documentObject: env.document,
      instanceDomain: 'beta.mesh',
      defaultLabel: 'Select region ...'
    });
    assert.equal(fetchCalls.length, 1);
    assert.equal(select.options.length, 4);
    assert.equal(select.options[0].textContent, 'Select region ...');
    assert.equal(select.options[1].textContent, 'Alpha Mesh');
    assert.equal(select.options[2].textContent, 'beta.mesh');
    assert.equal(select.options[3].textContent, 'Zulu Mesh');
    assert.equal(select.options[select.selectedIndex].value, 'beta.mesh');
  } finally {
    // Restore the real globals even when assertions fail.
    env.cleanup();
  }
});

// Choosing an option should route through the injected navigate callback with
// the normalised https URL for the selected domain.
test('initializeInstanceSelector navigates to the chosen instance domain', async () => {
  const env = createDomEnvironment();
  const select = setupSelectElement(env.document);
  const fetchImpl = async () => ({
    ok: true,
    async json() {
      return [{ domain: 'mesh.example' }];
    }
  });
  let navigatedTo = null;
  const navigate = url => {
    navigatedTo = url;
  };
  try {
    await initializeInstanceSelector({
      selectElement: select,
      fetchImpl,
      windowObject: env.window,
      documentObject: env.document,
      navigate,
      defaultLabel: 'Select region ...'
    });
    assert.equal(select.options.length, 2);
    assert.equal(select.options[1].value, 'mesh.example');
    select.value = 'mesh.example';
    select.dispatchEvent({ type: 'change', target: select });
    assert.equal(navigatedTo, 'https://mesh.example');
  } finally {
    env.cleanup();
  }
});
@@ -0,0 +1,162 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { createMapAutoFitController } from '../map-auto-fit-controller.js';
/**
 * Minimal stand-in for a checkbox input: an ``EventTarget`` carrying a
 * mutable ``checked`` flag, as consumed by ``createMapAutoFitController``.
 *
 * The previous ``dispatchEvent`` override only forwarded to
 * ``super.dispatchEvent`` and has been removed; the inherited implementation
 * behaves identically.
 */
class ToggleStub extends EventTarget {
  /**
   * @param {boolean} [checked=true] Initial checked state of the toggle.
   */
  constructor(checked = true) {
    super();
    this.checked = checked;
  }
}
/**
 * Tiny window double that records at most one listener per event type and
 * can replay events on demand via ``emit``.
 */
class WindowStub {
  constructor() {
    // type -> listener; a Map keeps lookup and removal trivial.
    this.listeners = new Map();
  }
  /**
   * Register (or replace) the listener for ``type``.
   */
  addEventListener(type, listener) {
    this.listeners.set(type, listener);
  }
  /**
   * Remove the listener for ``type`` only when it is the exact function that
   * was registered, mirroring real removeEventListener semantics.
   */
  removeEventListener(type, listener) {
    if (this.listeners.get(type) === listener) {
      this.listeners.delete(type);
    }
  }
  /**
   * Invoke the listener registered for ``type``, if any.
   */
  emit(type) {
    const handler = this.listeners.get(type);
    if (!handler) {
      return;
    }
    handler();
  }
}
// getLastFit must return defensive copies: mutating a returned snapshot may
// not corrupt the controller's stored bounds/options.
test('recordFit stores and clones the last fit snapshot', () => {
  const toggle = new ToggleStub(true);
  const controller = createMapAutoFitController({ toggleEl: toggle, defaultPaddingPx: 20 });
  assert.equal(controller.getLastFit(), null);
  controller.recordFit([[10, 20], [30, 40]], { paddingPx: 12, maxZoom: 9 });
  const snapshot = controller.getLastFit();
  assert.ok(snapshot);
  assert.deepEqual(snapshot.bounds, [[10, 20], [30, 40]]);
  assert.deepEqual(snapshot.options, { paddingPx: 12, maxZoom: 9 });
  // Mutate the returned copy; the stored snapshot must remain intact.
  snapshot.bounds[0][0] = -999;
  snapshot.options.paddingPx = -1;
  const secondSnapshot = controller.getLastFit();
  assert.deepEqual(secondSnapshot?.bounds, [[10, 20], [30, 40]]);
  assert.deepEqual(secondSnapshot?.options, { paddingPx: 12, maxZoom: 9 });
});

// Invalid bounds (null, NaN coordinates) are dropped, and out-of-range fit
// options fall back to the configured default padding.
test('recordFit ignores invalid bounds and normalises fit options', () => {
  const controller = createMapAutoFitController({ defaultPaddingPx: 16 });
  controller.recordFit(null);
  assert.equal(controller.getLastFit(), null);
  controller.recordFit([[10, Number.NaN], [20, 30]]);
  assert.equal(controller.getLastFit(), null);
  controller.recordFit([[10, 11], [12, 13]], { paddingPx: -5, maxZoom: 0 });
  const snapshot = controller.getLastFit();
  assert.ok(snapshot);
  assert.deepEqual(snapshot.options, { paddingPx: 16 });
});

// User interaction turns the auto-fit toggle off exactly once (emitting one
// change event), except while a controller-driven fit operation is running.
test('handleUserInteraction disables auto-fit unless suppressed', () => {
  const toggle = new ToggleStub(true);
  let changeEvents = 0;
  toggle.addEventListener('change', () => {
    changeEvents += 1;
  });
  const controller = createMapAutoFitController({ toggleEl: toggle });
  // Interactions during a programmatic fit are suppressed.
  controller.runAutoFitOperation(() => {
    assert.equal(controller.handleUserInteraction(), false);
    assert.equal(toggle.checked, true);
  });
  assert.equal(changeEvents, 0);
  assert.equal(controller.handleUserInteraction(), true);
  assert.equal(toggle.checked, false);
  assert.equal(changeEvents, 1);
  // A second interaction while already disabled is a no-op.
  assert.equal(controller.handleUserInteraction(), false);
  assert.equal(changeEvents, 1);
});

// The controller reads the toggle's live checked state rather than caching it.
test('isAutoFitEnabled reflects the toggle state', () => {
  const toggle = new ToggleStub(false);
  const controller = createMapAutoFitController({ toggleEl: toggle });
  assert.equal(controller.isAutoFitEnabled(), false);
  toggle.checked = true;
  assert.equal(controller.isAutoFitEnabled(), true);
});

// runAutoFitOperation forwards the callback's return value and tolerates
// being called without a callback.
test('runAutoFitOperation returns callback results and tolerates missing functions', () => {
  const controller = createMapAutoFitController();
  assert.equal(controller.runAutoFitOperation(), undefined);
  let active = false;
  const result = controller.runAutoFitOperation(() => {
    active = true;
    return 42;
  });
  assert.equal(active, true);
  assert.equal(result, 42);
});

// Resize/orientationchange events replay the last fit snapshot to the
// handler; the returned detach function tears the listeners down, and a
// missing handler still yields a callable no-op detacher.
test('attachResizeListener forwards snapshots and supports teardown', () => {
  const windowStub = new WindowStub();
  const controller = createMapAutoFitController({ windowObject: windowStub, defaultPaddingPx: 24 });
  controller.recordFit([[1, 2], [3, 4]], { paddingPx: 30 });
  let snapshots = [];
  const detach = controller.attachResizeListener(snapshot => {
    snapshots.push(snapshot);
  });
  windowStub.emit('resize');
  windowStub.emit('orientationchange');
  assert.equal(snapshots.length, 2);
  assert.deepEqual(snapshots[0], { bounds: [[1, 2], [3, 4]], options: { paddingPx: 30 } });
  detach();
  windowStub.emit('resize');
  assert.equal(snapshots.length, 2);
  const noop = controller.attachResizeListener();
  assert.equal(typeof noop, 'function');
  noop();
});
@@ -0,0 +1,47 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { resolveAutoFitBoundsConfig, __testUtils } from '../map-auto-fit-settings.js';
const { MINIMUM_AUTO_FIT_RANGE_KM, AUTO_FIT_PADDING_FRACTION } = __testUtils;
// Without a distance limit the exported defaults are used verbatim.
test('resolveAutoFitBoundsConfig returns defaults without a distance limit', () => {
  const config = resolveAutoFitBoundsConfig({ hasDistanceLimit: false, maxDistanceKm: null });
  assert.equal(config.paddingFraction, AUTO_FIT_PADDING_FRACTION);
  assert.equal(config.minimumRangeKm, MINIMUM_AUTO_FIT_RANGE_KM);
});

// A distance limit caps the minimum range so the auto-fit never zooms out
// beyond the configured radius.
test('resolveAutoFitBoundsConfig constrains minimum range by the limit radius', () => {
  const config = resolveAutoFitBoundsConfig({ hasDistanceLimit: true, maxDistanceKm: 2 });
  assert.equal(config.paddingFraction, AUTO_FIT_PADDING_FRACTION);
  assert.ok(config.minimumRangeKm >= MINIMUM_AUTO_FIT_RANGE_KM);
  assert.ok(config.minimumRangeKm <= 2);
});

// Limits smaller than the default minimum win outright.
test('resolveAutoFitBoundsConfig respects small distance limits', () => {
  const config = resolveAutoFitBoundsConfig({ hasDistanceLimit: true, maxDistanceKm: 0.1 });
  assert.equal(config.paddingFraction, AUTO_FIT_PADDING_FRACTION);
  assert.equal(config.minimumRangeKm, 0.1);
});

// Negative/invalid limits fall back to the default minimum range.
test('resolveAutoFitBoundsConfig tolerates invalid input', () => {
  const config = resolveAutoFitBoundsConfig({ hasDistanceLimit: true, maxDistanceKm: -5 });
  assert.equal(config.paddingFraction, AUTO_FIT_PADDING_FRACTION);
  assert.equal(config.minimumRangeKm, MINIMUM_AUTO_FIT_RANGE_KM);
});
@@ -0,0 +1,138 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import {
computeBoundingBox,
computeBoundsForPoints,
haversineDistanceKm,
__testUtils
} from '../map-bounds.js';
const { clampLatitude, clampLongitude, normaliseRange, normaliseLongitudeAround } = __testUtils;
function approximatelyEqual(actual, expected, epsilon = 1e-3) {
assert.ok(Math.abs(actual - expected) <= epsilon, `${actual} is not within ${epsilon} of ${expected}`);
}
// Latitude clamps to [-90, 90] and longitude to [-180, 180], including for
// non-finite input.
test('clamp helpers bound invalid coordinates', () => {
  assert.equal(clampLatitude(120), 90);
  assert.equal(clampLatitude(-95), -90);
  assert.equal(clampLatitude(Number.POSITIVE_INFINITY), 90);
  assert.equal(clampLatitude(Number.NEGATIVE_INFINITY), -90);
  assert.equal(clampLongitude(200), 180);
  assert.equal(clampLongitude(-220), -180);
  assert.equal(clampLongitude(Number.POSITIVE_INFINITY), 180);
  assert.equal(clampLongitude(Number.NEGATIVE_INFINITY), -180);
});

// Negative, NaN, and zero ranges are replaced by the supplied minimum; valid
// ranges pass through unchanged.
test('normaliseRange enforces minimum distance for invalid inputs', () => {
  assert.equal(normaliseRange(-1, 2), 2);
  assert.equal(normaliseRange(Number.NaN, 3), 3);
  assert.equal(normaliseRange(0, 1), 1);
  assert.equal(normaliseRange(4, 2), 4);
});

// Non-numeric or NaN centre coordinates yield no bounding box at all.
test('computeBoundingBox returns null for invalid centres', () => {
  assert.equal(computeBoundingBox(null, 10), null);
  assert.equal(computeBoundingBox({ lat: 'x', lon: 0 }, 5), null);
  assert.equal(computeBoundingBox({ lat: 0, lon: NaN }, 5), null);
});

// At the equator the box should be symmetric about the centre in both axes.
test('computeBoundingBox returns symmetric bounds for mid-latitude centre', () => {
  const bounds = computeBoundingBox({ lat: 0, lon: 0 }, 10);
  assert.ok(bounds);
  const [[south, west], [north, east]] = bounds;
  approximatelyEqual(north, -south, 1e-4);
  approximatelyEqual(east, -west, 1e-4);
  assert.ok(north > 0 && east > 0);
});

// Near a pole a large radius must clamp to the full longitude range and the
// 90° latitude limit instead of producing out-of-range values.
test('computeBoundingBox clamps longitude span near the poles', () => {
  const bounds = computeBoundingBox({ lat: 89.9, lon: 45 }, 2000);
  assert.ok(bounds);
  const [[south, west], [north, east]] = bounds;
  approximatelyEqual(south, 72.0, 1e-1);
  assert.equal(west, -180);
  assert.equal(east, 180);
  assert.equal(north, 90);
});

test('haversineDistanceKm matches known city distance', () => {
  // Approximate distance between Paris (48.8566, 2.3522) and Berlin (52.52, 13.4050)
  const distance = haversineDistanceKm(48.8566, 2.3522, 52.52, 13.405);
  approximatelyEqual(distance, 878.8, 2);
});

test('computeBoundsForPoints returns null when no valid points exist', () => {
  assert.equal(computeBoundsForPoints([]), null);
  assert.equal(computeBoundsForPoints([[Number.NaN, 0]]), null);
});

// Padding and the minimum radius must push the bounds strictly outside the
// tight hull of the input points.
test('computeBoundsForPoints expands bounds with padding and minimum radius', () => {
  const bounds = computeBoundsForPoints(
    [
      [38.0, -27.1],
      [38.05, -27.08]
    ],
    { paddingFraction: 0.2, minimumRangeKm: 2 }
  );
  assert.ok(bounds);
  const [[south, west], [north, east]] = bounds;
  assert.ok(north > 38.05);
  assert.ok(south < 38.0);
  assert.ok(east > -27.08);
  assert.ok(west < -27.1);
});

// A single point still produces a non-degenerate box sized by minimumRangeKm.
test('computeBoundsForPoints respects the configured minimum range for single points', () => {
  const bounds = computeBoundsForPoints([[12.34, 56.78]], { minimumRangeKm: 5 });
  assert.ok(bounds);
  const [[south], [north]] = bounds;
  assert.ok(north - south > 0.05);
});

// Points straddling ±180° longitude must not explode into a near-global box;
// the bounds may extend past the canonical range to wrap the dateline.
test('computeBoundsForPoints preserves tight bounds across the antimeridian', () => {
  const points = [
    [10.0, 179.5],
    [11.2, -179.7],
    [9.5, 179.2]
  ];
  const bounds = computeBoundsForPoints(points, { paddingFraction: 0.1 });
  assert.ok(bounds);
  const [[south, west], [north, east]] = bounds;
  assert.ok(north - south < 10, 'latitude span should remain tight');
  const lonSpan = Math.abs(east - west);
  const normalizedSpan = lonSpan > 180 ? 360 - lonSpan : lonSpan;
  assert.ok(normalizedSpan < 40, 'longitude span should wrap tightly around the dateline');
  for (const [, lon] of points) {
    // Re-project each input longitude into the box's frame before checking
    // containment, since the box may lie outside [-180, 180].
    const adjustedLon = normaliseLongitudeAround(lon, (west + east) / 2);
    assert.ok(adjustedLon >= west - 1e-6 && adjustedLon <= east + 1e-6, 'point longitude should lie within bounds');
  }
  assert.ok(east > 180 || west < -180, 'bounds should extend beyond the canonical range when necessary');
});
@@ -0,0 +1,244 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { attachNodeInfoRefreshToMarker, overlayToPopupNode } from '../map-marker-node-info.js';
/**
 * Build a Leaflet-marker double that records event handlers by name, exposes
 * a fixed anchor element, and lets tests replay events via ``trigger``.
 *
 * @param {object} anchor Element returned by ``getElement``.
 * @returns {object} Fake marker with ``handlers``/``on``/``getElement``/``trigger``.
 */
function createFakeMarker(anchor) {
  const handlers = {};
  return {
    handlers,
    // Chainable like the real Leaflet API; multiple handlers per event stack.
    on(name, handler) {
      (handlers[name] ??= []).push(handler);
      return this;
    },
    getElement() {
      return anchor;
    },
    // Invoke every handler registered for `name` with the given payload.
    trigger(name, payload) {
      const registered = handlers[name] ?? [];
      for (const handler of registered) {
        handler(payload);
      }
    },
  };
}
// Happy path: a marker click prevents default navigation, fetches fresh node
// details, merges them over the overlay fallback, and pushes the merged info
// to both the popup and the detail panel.
test('attachNodeInfoRefreshToMarker refreshes markers with merged overlay details', async () => {
  const anchor = { id: 'anchor-el' };
  const marker = createFakeMarker(anchor);
  const popupUpdates = [];
  const detailCalls = [];
  let prevented = false;
  let stopped = false;
  let token = 0;
  const refreshCalls = [];
  attachNodeInfoRefreshToMarker({
    marker,
    getOverlayFallback: () => ({ nodeId: '!foo', shortName: 'Foo', role: 'CLIENT', neighbors: [] }),
    refreshNodeInformation: async reference => {
      refreshCalls.push(reference);
      return { battery: 55.5, telemetryTime: 123, neighbors: [{ neighbor_id: '!bar', snr: 9.5 }] };
    },
    mergeOverlayDetails: (primary, fallback) => ({ ...fallback, ...primary }),
    // Token hooks verify the stale-request guard is wired to the anchor.
    createRequestToken: el => {
      assert.equal(el, anchor);
      return ++token;
    },
    isTokenCurrent: (el, candidate) => {
      assert.equal(el, anchor);
      return candidate === token;
    },
    showLoading: (el, info) => {
      assert.equal(el, anchor);
      assert.equal(info.nodeId, '!foo');
    },
    showDetails: (el, info) => {
      detailCalls.push({ el, info });
    },
    showError: () => {
      assert.fail('showError should not be invoked on success');
    },
    updatePopup: info => {
      popupUpdates.push(info);
    },
  });
  const clickEvent = {
    originalEvent: {
      preventDefault() {
        prevented = true;
      },
      stopPropagation() {
        stopped = true;
      },
    },
  };
  marker.trigger('click', clickEvent);
  // Let the async click handler settle before asserting.
  await new Promise(resolve => setImmediate(resolve));
  assert.equal(prevented, true);
  assert.equal(stopped, true);
  assert.equal(refreshCalls.length, 1);
  assert.deepEqual(refreshCalls[0], {
    nodeId: '!foo',
    fallback: { nodeId: '!foo', shortName: 'Foo', role: 'CLIENT', neighbors: [] },
  });
  assert.ok(popupUpdates.length >= 1);
  const merged = popupUpdates[popupUpdates.length - 1];
  assert.equal(merged.battery, 55.5);
  assert.equal(merged.telemetryTime, 123);
  assert.equal(detailCalls.length, 1);
  assert.equal(detailCalls[0].el, anchor);
  assert.equal(detailCalls[0].info.battery, 55.5);
});

// Failure path: a rejecting refresh routes through showError with the
// fallback overlay, never calls showDetails, and still updates the popup
// (loading + fallback render).
test('attachNodeInfoRefreshToMarker surfaces errors with fallback overlays', async () => {
  const anchor = { id: 'anchor' };
  const marker = createFakeMarker(anchor);
  let token = 0;
  let errorCaptured = null;
  let detailCalls = 0;
  let updateCalls = 0;
  attachNodeInfoRefreshToMarker({
    marker,
    getOverlayFallback: () => ({ nodeId: '!oops', shortName: 'Oops' }),
    refreshNodeInformation: async () => {
      throw new Error('boom');
    },
    mergeOverlayDetails: (primary, fallback) => ({ ...fallback, ...primary }),
    createRequestToken: el => {
      assert.equal(el, anchor);
      return ++token;
    },
    isTokenCurrent: (el, candidate) => {
      assert.equal(el, anchor);
      return candidate === token;
    },
    showLoading: () => {},
    showDetails: () => {
      detailCalls += 1;
    },
    showError: (el, info, error) => {
      assert.equal(el, anchor);
      assert.equal(info.nodeId, '!oops');
      errorCaptured = error;
    },
    updatePopup: () => {
      updateCalls += 1;
    },
  });
  marker.trigger('click', { originalEvent: {} });
  await new Promise(resolve => setImmediate(resolve));
  assert.ok(errorCaptured instanceof Error);
  assert.equal(errorCaptured.message, 'boom');
  assert.equal(detailCalls, 0);
  assert.equal(updateCalls, 2);
});

// A fallback without nodeId/nodeNum cannot be refreshed: the handler renders
// the fallback details directly and never triggers loading or a fetch.
test('attachNodeInfoRefreshToMarker skips refresh when identifiers are missing', async () => {
  const anchor = { id: 'anchor' };
  const marker = createFakeMarker(anchor);
  let token = 0;
  let refreshed = false;
  let detailsShown = 0;
  attachNodeInfoRefreshToMarker({
    marker,
    getOverlayFallback: () => ({ shortName: 'Unknown' }),
    refreshNodeInformation: async () => {
      refreshed = true;
    },
    mergeOverlayDetails: (primary, fallback) => ({ ...fallback, ...primary }),
    createRequestToken: el => {
      assert.equal(el, anchor);
      return ++token;
    },
    isTokenCurrent: (el, candidate) => {
      assert.equal(el, anchor);
      return candidate === token;
    },
    showLoading: () => {
      assert.fail('showLoading should not run without identifiers');
    },
    showDetails: (el, info) => {
      assert.equal(el, anchor);
      assert.equal(info.shortName, 'Unknown');
      detailsShown += 1;
    },
  });
  marker.trigger('click', { originalEvent: {} });
  await new Promise(resolve => setImmediate(resolve));
  assert.equal(refreshed, false);
  assert.equal(detailsShown, 1);
});

// A shouldHandleClick predicate returning false short-circuits the whole
// click pipeline, so no refresh is attempted.
test('attachNodeInfoRefreshToMarker honours shouldHandleClick predicate', async () => {
  const marker = createFakeMarker({ id: 'anchor' });
  let token = 0;
  let refreshed = false;
  attachNodeInfoRefreshToMarker({
    marker,
    getOverlayFallback: () => ({ nodeId: '!skip' }),
    refreshNodeInformation: async () => {
      refreshed = true;
    },
    mergeOverlayDetails: (primary, fallback) => ({ ...fallback, ...primary }),
    createRequestToken: () => ++token,
    isTokenCurrent: (el, candidate) => candidate === token,
    shouldHandleClick: () => false,
  });
  marker.trigger('click', { originalEvent: {} });
  await new Promise(resolve => setImmediate(resolve));
  assert.equal(refreshed, false);
});

// Overlay payloads are converted to the popup's snake_case node shape:
// numeric strings coerced, null neighbor entries dropped, neighbor records
// nested under `node`.
test('overlayToPopupNode normalises raw overlay payloads', () => {
  const overlay = {
    nodeId: '!foo',
    nodeNum: 42,
    shortName: 'Foo',
    role: 'ROUTER',
    battery: '77.5',
    neighbors: [
      { neighbor_id: '!bar', snr: '12.5', neighbor_short_name: 'Bar' },
      null,
    ],
  };
  const popupNode = overlayToPopupNode(overlay);
  assert.equal(popupNode.node_id, '!foo');
  assert.equal(popupNode.node_num, 42);
  assert.equal(popupNode.short_name, 'Foo');
  assert.equal(popupNode.role, 'ROUTER');
  assert.equal(popupNode.battery_level, 77.5);
  assert.equal(Array.isArray(popupNode.neighbors), true);
  assert.equal(popupNode.neighbors.length, 1);
  assert.equal(popupNode.neighbors[0].node.node_id, '!bar');
  assert.equal(popupNode.neighbors[0].snr, 12.5);
});
@@ -0,0 +1,41 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { MESSAGE_LIMIT, normaliseMessageLimit } from '../message-limit.js';
// Missing, non-numeric, non-positive, and non-finite input all collapse to
// the default MESSAGE_LIMIT.
test('normaliseMessageLimit defaults to the message limit for invalid input', () => {
  assert.equal(normaliseMessageLimit(undefined), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit(null), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit(''), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit('abc'), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit(-100), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit(0), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit(Number.POSITIVE_INFINITY), MESSAGE_LIMIT);
});

// Values above the ceiling are clamped down to MESSAGE_LIMIT.
test('normaliseMessageLimit clamps numeric input to the upper bound', () => {
  assert.equal(normaliseMessageLimit(MESSAGE_LIMIT + 1), MESSAGE_LIMIT);
  assert.equal(normaliseMessageLimit(MESSAGE_LIMIT * 2), MESSAGE_LIMIT);
});

// In-range values pass through; numeric strings are parsed and fractional
// values are truncated toward zero.
test('normaliseMessageLimit accepts positive finite values', () => {
  assert.equal(normaliseMessageLimit(250), 250);
  assert.equal(normaliseMessageLimit('750'), 750);
  assert.equal(normaliseMessageLimit(42.9), 42);
});
@@ -0,0 +1,123 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { createMessageNodeHydrator } from '../message-node-hydrator.js';
/**
 * Test logger that captures every ``warn`` call's arguments so assertions
 * can inspect (or count) the warnings emitted during a run.
 */
class LoggerStub {
  constructor() {
    // Each entry is the full argument list of one warn() invocation.
    this.messages = [];
  }
  /**
   * Record one warning invocation for later inspection.
   *
   * @param {...*} details Warning arguments as passed by the caller.
   * @returns {void}
   */
  warn(...details) {
    this.messages.push(details);
  }
}
// A node already present in the cache is attached directly; the fetch
// callback must never fire and the cache size stays unchanged.
test('hydrate attaches cached nodes without performing lookups', async () => {
  const node = { node_id: '!abc', short_name: 'Node' };
  const nodesById = new Map([[node.node_id, node]]);
  const hydrator = createMessageNodeHydrator({
    fetchNodeById: async () => {
      throw new Error('fetch should not be called');
    },
    applyNodeFallback: () => {}
  });
  const messages = [{ node_id: '!abc', text: 'Hello' }];
  const result = await hydrator.hydrate(messages, nodesById);
  assert.equal(result.length, 1);
  assert.strictEqual(result[0].node, node);
  assert.equal(nodesById.size, 1);
});

// Two messages referencing the same unknown node trigger exactly one fetch;
// the fetched node is cached and shared by reference across both messages.
test('hydrate fetches missing nodes once and caches the result', async () => {
  let fetchCalls = 0;
  const fetchedNode = { node_id: '!fetch', short_name: 'Fetched' };
  const hydrator = createMessageNodeHydrator({
    fetchNodeById: async id => {
      fetchCalls += 1;
      assert.equal(id, '!fetch');
      return { ...fetchedNode };
    },
    applyNodeFallback: () => {}
  });
  const nodesById = new Map();
  const messages = [{ from_id: '!fetch', text: 'one' }, { node_id: '!fetch', text: 'two' }];
  const result = await hydrator.hydrate(messages, nodesById);
  assert.equal(fetchCalls, 1);
  assert.strictEqual(nodesById.get('!fetch').short_name, 'Fetched');
  assert.strictEqual(result[0].node, nodesById.get('!fetch'));
  assert.strictEqual(result[1].node, nodesById.get('!fetch'));
});

// A fetch that resolves to null yields a placeholder node built via
// applyNodeFallback; the failed lookup is not cached and nothing is logged.
test('hydrate falls back to placeholders when lookups fail', async () => {
  const logger = new LoggerStub();
  let fallbackCalls = 0;
  const hydrator = createMessageNodeHydrator({
    fetchNodeById: async () => null,
    applyNodeFallback: node => {
      fallbackCalls += 1;
      if (!node.short_name) {
        node.short_name = 'Fallback';
      }
    },
    logger
  });
  const nodesById = new Map();
  const messages = [{ from_id: '!missing', text: 'hi' }];
  const result = await hydrator.hydrate(messages, nodesById);
  assert.equal(nodesById.has('!missing'), false);
  assert.equal(fallbackCalls, 1);
  assert.ok(result[0].node);
  assert.equal(result[0].node.short_name, 'Fallback');
  assert.equal(logger.messages.length, 0);
});

// A fetch that rejects still produces a usable placeholder node, logs a
// warning through the injected logger, and leaves the cache untouched.
test('hydrate records warning when fetch rejects', async () => {
  const logger = new LoggerStub();
  const hydrator = createMessageNodeHydrator({
    fetchNodeById: async () => {
      throw new Error('network error');
    },
    applyNodeFallback: () => {},
    logger
  });
  const nodesById = new Map();
  const messages = [{ from_id: '!warn', text: 'warn' }];
  const result = await hydrator.hydrate(messages, nodesById);
  assert.equal(result[0].node.node_id, '!warn');
  assert.ok(logger.messages.length >= 1);
  assert.equal(nodesById.has('!warn'), false);
});
@@ -0,0 +1,104 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import {
buildMessageBody,
buildMessageIndex,
normaliseMessageId,
resolveReplyPrefix
} from '../message-replies.js';
// normaliseMessageId should strip whitespace/leading zeros from numeric ids,
// pass non-numeric strings through unchanged, and map nullish input to null.
test('normaliseMessageId coerces numeric identifiers', () => {
  const cases = [
    [42, '42'],
    [' 0042 ', '42'],
    ['alpha', 'alpha'],
    [null, null],
  ];
  for (const [input, expected] of cases) {
    assert.equal(normaliseMessageId(input), expected);
  }
});
// buildMessageIndex keys messages by normalised id (falling back to
// packet_id) and keeps the first entry when two messages collide on an id.
test('buildMessageIndex normalises identifiers and ignores duplicates', () => {
  const first = { id: '001', text: 'first' };
  const duplicate = { packet_id: 1, text: 'second' };
  const other = { id: '2', text: 'third' };
  const index = buildMessageIndex([first, duplicate, other]);
  assert.equal(index.size, 2);
  assert.equal(index.get('1'), first);
  assert.equal(index.get('2'), other);
});
// Covers the reply-rendering helpers: resolveReplyPrefix should emit the
// "[in reply to …]" badge for a reaction whose parent is in the index, and
// buildMessageBody should append the rendered emoji after the escaped text.
test('resolveReplyPrefix renders reply badge and buildMessageBody joins emoji', () => {
  const parent = {
    id: 99,
    node: { short_name: 'BEEF', long_name: 'Parent Node', role: 'CLIENT' },
    text: 'parent message',
  };
  const reaction = { id: 100, reply_id: 99, emoji: '🔥' };
  const prefix = resolveReplyPrefix({
    message: reaction,
    messagesById: buildMessageIndex([parent, reaction]),
    nodesById: new Map(),
    renderShortHtml: (short, role, longName) => `SHORT(${short}|${role}|${longName})`,
    escapeHtml: (value) => `ESC(${value})`,
  });
  assert.equal(
    prefix,
    '<span class="chat-entry-reply">[ESC(in reply to) SHORT(BEEF|CLIENT|Parent Node)]</span>'
  );
  const body = buildMessageBody({
    message: { text: 'Hello', emoji: ' 🔥 ' },
    escapeHtml: (value) => `ESC(${value})`,
    renderEmojiHtml: (value) => `EMOJI(${value})`,
  });
  assert.equal(body, 'ESC(Hello) EMOJI(🔥)');
});
// Reaction packets carrying only the slot marker text ("1") should render
// just the emoji, while genuine counts appear as "×N" before the emoji.
test('buildMessageBody suppresses reaction slot markers and formats counts', () => {
  const renderers = {
    escapeHtml: (value) => `ESC(${value})`,
    renderEmojiHtml: (value) => `EMOJI(${value})`,
  };
  const slotOnly = buildMessageBody({
    message: { text: ' 1 ', emoji: '👍', portnum: 'REACTION_APP', reply_id: 123 },
    ...renderers,
  });
  assert.equal(slotOnly, 'EMOJI(👍)');
  const counted = buildMessageBody({
    message: { text: '2', emoji: '✨', reply_id: 123 },
    ...renderers,
  });
  assert.equal(counted, 'ESC(×2) EMOJI(✨)');
});
@@ -0,0 +1,134 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import { createNodeDetailOverlayManager } from '../node-detail-overlay.js';
/**
 * Builds a minimal DOM-like fixture for exercising the node-detail overlay
 * manager without a browser: a fake document whose getElementById resolves
 * only the overlay element, stubbed dialog/close-button/content nodes, and
 * listener registries that tests can trigger manually.
 *
 * @returns {{document: object, overlay: object, content: object, closeButton: object}}
 */
function createOverlayHarness() {
  const overlayListeners = new Map();
  const documentListeners = new Map();
  const content = { innerHTML: '' };

  const closeButton = {
    listeners: new Map(),
    focusCalled: false,
    addEventListener(event, handler) {
      this.listeners.set(event, handler);
    },
    // Simulates a user click by invoking the registered 'click' handler.
    click() {
      this.listeners.get('click')?.({ preventDefault() {} });
    },
    focus() {
      this.focusCalled = true;
    },
  };

  const dialog = {
    focusCalled: false,
    focus() {
      this.focusCalled = true;
    },
  };

  const overlay = {
    hidden: true,
    style: {},
    addEventListener(event, handler) {
      overlayListeners.set(event, handler);
    },
    // Fires a previously registered overlay listener with the given payload.
    trigger(event, payload) {
      overlayListeners.get(event)?.(payload);
    },
    querySelector(selector) {
      switch (selector) {
        case '.node-detail-overlay__dialog':
          return dialog;
        case '.node-detail-overlay__close':
          return closeButton;
        case '.node-detail-overlay__content':
          return content;
        default:
          return null;
      }
    },
  };

  const body = {
    style: {
      overflow: '',
      // Mirrors CSSStyleDeclaration.removeProperty by clearing the entry.
      removeProperty(prop) {
        this[prop] = '';
      },
    },
  };

  const document = {
    body,
    getElementById(id) {
      return id === 'nodeDetailOverlay' ? overlay : null;
    },
    addEventListener(event, handler) {
      documentListeners.set(event, handler);
    },
    removeEventListener(event) {
      documentListeners.delete(event);
    },
    // Dispatches a fake keydown event to the registered document listener.
    triggerKeydown(key) {
      documentListeners.get('keydown')?.({ key, preventDefault() {} });
    },
  };

  return { document, overlay, content, closeButton };
}
// Happy path: opening the overlay fetches markup for the node, injects it into
// the content element, focuses the close button, and closing restores focus
// to the original trigger element.
test('createNodeDetailOverlayManager renders fetched markup and restores focus', async () => {
  const { document, overlay, content, closeButton } = createOverlayHarness();
  const focusTarget = {
    focusCalled: false,
    focus() {
      this.focusCalled = true;
    },
  };
  const manager = createNodeDetailOverlayManager({
    document,
    fetchNodeDetail: async (reference) =>
      `<section class="node-detail">${reference.nodeId}</section>`,
  });
  assert.ok(manager);
  await manager.open({ nodeId: '!alpha' }, { trigger: focusTarget, label: 'Alpha' });
  assert.equal(overlay.hidden, false);
  assert.ok(content.innerHTML.includes('!alpha'));
  assert.equal(closeButton.focusCalled, true);
  manager.close();
  assert.equal(overlay.hidden, true);
  assert.equal(focusTarget.focusCalled, true);
});
// Failure path: a rejected fetch renders the error placeholder, reports the
// error to the injected logger, and Escape still closes the overlay.
test('createNodeDetailOverlayManager surfaces errors and supports escape closing', async () => {
  const { document, overlay, content } = createOverlayHarness();
  const errors = [];
  const manager = createNodeDetailOverlayManager({
    document,
    fetchNodeDetail: async () => {
      throw new Error('boom');
    },
    logger: {
      error(err) {
        errors.push(err);
      },
    },
  });
  assert.ok(manager);
  await manager.open({ nodeId: '!fail' });
  assert.ok(content.innerHTML.includes('Failed to load node details.'));
  assert.equal(errors.length, 1);
  document.triggerKeydown?.('Escape');
  assert.equal(overlay.hidden, true);
});

Some files were not shown because too many files have changed in this diff. Show More