Mirror of https://github.com/l5yth/potato-mesh.git, synced 2026-03-28 17:42:48 +01:00
Compare commits
214 Commits
Commits in this comparison, by abbreviated SHA1 (the author, date, and message columns of the commit table were not captured in this mirror):

dc2fa9d247, a32125996c, 506a1ab5f6, db7b67d859, 49f08a7f75, b2d35d3edf, a9d618cdbc, 6a65abd2e3,
a3aef8cadd, cff89a8c88, 26c1366412, 28f5b49f4d, a46da284e5, 22a31b6c80, b7ef0bbfcd, 03b5a10fe4,
e97498d09f, 7db76ec2fc, 63beb2ea6b, ffad84f18a, 2642ff7a95, 40b6eda096, dee6ad7e4a, ea9c633eff,
9c73fceea7, 5133e9d498, b63e5328b1, d66b09ddee, 009965f2fb, 51e6479ab6, 874c8fd73c, e4c48682b0,
00444f7611, 511e6d377c, e6974a683a, c0d68b23d4, ee904633a8, 4329605e6f, 772c5888c3, f04e917cd9,
9e939194ba, e328a20929, aba94b197d, 80f2bbdb25, 522213c040, 58998ba274, 4ad718e164, 707786e222,
868bf08fd1, 1316d4f2d1, 9be390ee09, d9ed006b4c, d09fc842b8, 73bdd809bd, f1dba89d4b, 131a63845c,
2240be1f2d, a048a83c6c, 4ef1e29034, b21df3de5c, 678af5e55b, c4fd59626f, 0a26e4252a, d19e032b40,
ab9ae796f3, 0f2f2f447c, 3a031694db, 3cfbffc155, 4f5aec45b3, 2acfca20d9, f2ed5f5c03, db04b85134,
ba66ac5cea, a592b655c4, a5a2ae5edc, 363b4c5525, 16e1304ded, b89347938a, 6969ae6c4a, 64f8862676,
6660986211, 5dfcc1a5fe, 2efd28766b, c9bba25e5a, 41976a3b43, 5a47a8f8e4, c13f3c913f, 2e9b54b6cf,
7e844be627, b37e55c29a, 332ba044f2, 09a2d849ec, a3fb9b0d5c, 192978acf9, 581aaea93b, 299752a4f1,
142c0aa539, 78168ce3db, 332abbc183, c136c5cf26, 2a65e89eee, d6f1e7bc80, 5ac5f3ec3f, bb4cbfa62c,
f0d600e5d7, e0f0a6390d, d4a27dccf7, 74c4596dc5, 1f2328613c, eeca67f6ea, 4ae8a1cfca, ff06129a6f,
6d7aa4dd56, 4548f750d3, 31f02010d3, ec1ea5cbba, 8500c59755, 556dd6b51c, 3863e2d63d, 9e62621819,
c8c7c8cc05, 5116313ab0, 66389dd27c, ee6501243f, 8dd912175d, 02f9fb45e2, 4254dbda91, a46bed1c33,
d711300442, 98a8203591, 084c5ae158, 17018aeb19, 74b3da6f00, ab1217a8bf, 62de1480f7, ab2e9b06e1,
e91ad24cf9, 2e543b7cd4, db4353ccdc, 5a610cf08a, 71b854998c, 0a70ae4b3e, 6e709b0b67, a4256cee83,
89f0b1bcfe, e8af3b2397, 812d3c851f, 608d1e0396, 63787454ca, 55c1384f80, 6750d7bc12, d33fcaf5db,
7974fd9597, dcb512636c, 7c6bf801e9, 71e9f89aae, 0936c6087b, 95e3e8723a, 671a910936, 3b64e829a8,
84ed739a61, cffdb7dca6, 4182a9f83c, 9873f6105d, 8d3829cc4e, e424485761, baf7f5d137, 3edf60c625,
1beb343501, 0c0f877b13, f7a1b5c5ad, 051d09dcaf, eb900aecb6, f16393eafd, 49dcfebfb3, 1c13b99f3b,
54a1eb5b42, 2818c6d2b8, f4aa5d3873, 542f4dd0e2, 4a72cdda75, 4b9d581448, 1d3b3f11e9, e97824fd0b,
1cd9058685, 47e23ea14c, afd18794c7, 203bd623bd, 2b6b44a31d, 0059a6aab3, fc30a080ff, 7399c02be9,
02e985d2a8, 954352809f, 7eb36a5a3d, 0768b4d91a, be1306c9c0, 7904717597, e2c19e1611, b230e79ab0,
31727e35bb, 22127bbfb4, 413278544a, 580a588df7, b39b83fb51, 6d948603c9, 648bcc9b92, 4dc1227be7,
3b097feaae, da2e5fbde1, 003db7c36a, 9aa640338d, 3c24b71f16, eee6738a9c
`.codecov.yml` (new file, 6 lines)

```yaml
coverage:
  status:
    project:
      default:
        target: 99%
        threshold: 1%
```
`.dockerignore` (new file, 76 lines)

```
# Git
.git
.gitignore

# Documentation
README.md
CHANGELOG.md
*.md

# Docker files
docker-compose*.yml
.dockerignore

# Environment files
.env*
!.env.example

# Logs
*.log
logs/

# Runtime data
*.pid
*.seed
*.pid.lock

# Coverage directory used by tools like istanbul
coverage/

# nyc test coverage
.nyc_output

# Dependency directories
node_modules/
vendor/

# Optional npm cache directory
.npm

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variables file
.env

# IDE files
.vscode/
.idea/
*.swp
*.swo
*~

# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Test files
tests/
spec/
test_*
*_test.py
*_spec.rb

# Development files
ai_docs/
```
`.env.example` (new file, 71 lines)

```env
# PotatoMesh Environment Configuration
# Copy this file to .env and customize for your setup

# =============================================================================
# REQUIRED SETTINGS
# =============================================================================

# API authentication token (required for ingestor communication)
# Generate a secure token: openssl rand -hex 32
API_TOKEN=your-secure-api-token-here

# Meshtastic connection target (required for ingestor)
# Common serial paths:
# - Linux: /dev/ttyACM0, /dev/ttyUSB0
# - macOS: /dev/cu.usbserial-*
# - Windows (WSL): /dev/ttyS*
# You may also provide an IP:PORT pair (e.g. 192.168.1.20:4403) or a
# Bluetooth address (e.g. ED:4D:9E:95:CF:60).
CONNECTION=/dev/ttyACM0

# =============================================================================
# SITE CUSTOMIZATION
# =============================================================================

# Your mesh network name
SITE_NAME=My Meshtastic Network

# Default Meshtastic channel
CHANNEL=#LongFast

# Default frequency for your region
# Common frequencies: 868MHz (Europe), 915MHz (US), 433MHz (Worldwide)
FREQUENCY=915MHz

# Map center coordinates (latitude, longitude)
# Berlin, Germany: 52.502889, 13.404194
# Denver, Colorado: 39.7392, -104.9903
# London, UK: 51.5074, -0.1278
MAP_CENTER="38.761944,-27.090833"

# Maximum distance to show nodes (kilometers)
MAX_DISTANCE=42

# =============================================================================
# OPTIONAL INTEGRATIONS
# =============================================================================

# Community chat link or Matrix room for your community (optional)
# Matrix aliases (e.g. #meshtastic-berlin:matrix.org) will be linked via matrix.to automatically.
CONTACT_LINK='#potatomesh:dod.ngo'


# =============================================================================
# ADVANCED SETTINGS
# =============================================================================

# Debug mode (0=off, 1=on)
DEBUG=0

# Docker image architecture (linux-amd64, linux-arm64, linux-armv7)
POTATOMESH_IMAGE_ARCH=linux-amd64

# Docker Compose networking profile
# Leave unset for Linux hosts (default host networking).
# Set to "bridge" on Docker Desktop (macOS/Windows) if host networking
# is unavailable.
# COMPOSE_PROFILES=bridge

# Meshtastic channel index (0=primary, 1=secondary, etc.)
CHANNEL_INDEX=0
```
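The template's own comments already point at `openssl rand -hex 32` for the token; a minimal sketch of turning the template into a working `.env` (both commands come straight from the comments above, nothing else is assumed):

```bash
# Start from the template and generate a strong token, as the comments suggest.
cp .env.example .env
openssl rand -hex 32    # paste the output into API_TOKEN in .env
```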
`.github/dependabot.yml` (13 changed lines)

```diff
@@ -1,15 +1,10 @@
-# To get started with Dependabot version updates, you'll need to specify which
-# package ecosystems to update and where the package manifests are located.
-# Please see the documentation for all configuration options:
-# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
-
 version: 2
 updates:
-  - package-ecosystem: "ruby" # See documentation for possible values
-    directory: "/web" # Location of package manifests
+  - package-ecosystem: "ruby"
+    directory: "/web"
     schedule:
       interval: "weekly"
-  - package-ecosystem: "python" # See documentation for possible values
-    directory: "/data" # Location of package manifests
+  - package-ecosystem: "python"
+    directory: "/"
     schedule:
       interval: "weekly"
```
`.github/workflows/README.md` (new file, 19 lines)

# GitHub Actions Workflows

## Workflows

- **`docker.yml`** - Build and push Docker images to GHCR
- **`codeql.yml`** - Security scanning
- **`python.yml`** - Python ingestor pipeline
- **`ruby.yml`** - Ruby Sinatra app testing
- **`javascript.yml`** - Frontend test suite

## Usage

```bash
# Build locally
docker-compose build

# Deploy
docker-compose up -d
```
`.github/workflows/codeql.yml` (62 changed lines)

```yaml
@@ -1,14 +1,3 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL Advanced"

on:
@@ -20,20 +9,10 @@ on:
jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    # Runner size impacts CodeQL analysis time. To learn more, please see:
    #   - https://gh.io/recommended-hardware-resources-for-running-codeql
    #   - https://gh.io/supported-runners-and-hardware-resources
    #   - https://gh.io/using-larger-runners (GitHub.com only)
    # Consider using larger runners or machines with greater resources for possible analysis time improvements.
    runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
    permissions:
      # required for all workflows
      security-events: write

      # required to fetch internal or private CodeQL packs
      packages: read

      # only required for workflows in private repositories
      actions: read
      contents: read

@@ -47,53 +26,14 @@ jobs:
          build-mode: none
        - language: javascript-typescript
          build-mode: none
        # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'rust', 'swift'
        # Use `c-cpp` to analyze code written in C, C++ or both
        # Use 'java-kotlin' to analyze code written in Java, Kotlin or both
        # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
        # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
        # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
        # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
        # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

        # Add any setup steps before running the `github/codeql-action/init` action.
        # This includes steps like installing compilers or runtimes (`actions/setup-node`
        # or others). This is typically only required for manual builds.
        # - name: Setup runtime (example)
        #   uses: actions/setup-example@v1

      # Initializes the CodeQL tools for scanning.
        uses: actions/checkout@v5
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.

          # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
          # queries: security-extended,security-and-quality

      # If the analyze step fails for one of the languages you are analyzing with
      # "We were unable to automatically build your code", modify the matrix above
      # to set the build mode to "manual" for that language. Then modify this step
      # to build your code.
      # ℹ️ Command-line programs to run using the OS shell.
      # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
      - if: matrix.build-mode == 'manual'
        shell: bash
        run: |
          echo 'If you are using a "manual" build mode for one or more of the' \
            'languages you are analyzing, replace this with the commands to build' \
            'your code, for example:'
          echo ' make bootstrap'
          echo ' make release'
          exit 1

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
```
`.github/workflows/docker.yml` (new file, 174 lines)

```yaml
name: Build and Push Docker Images

on:
  push:
    tags: [ 'v*' ]
  workflow_dispatch:
    inputs:
      version:
        description: 'Version to publish (e.g., 1.0.0)'
        required: true
        default: '1.0.0'
      publish_all_variants:
        description: 'Publish all Docker image variants (latest tag)'
        type: boolean
        default: false

env:
  REGISTRY: ghcr.io
  IMAGE_PREFIX: l5yth/potato-mesh

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    if: (startsWith(github.ref, 'refs/tags/v') && github.event_name == 'push') || github.event_name == 'workflow_dispatch'
    environment: production
    permissions:
      contents: read
      packages: write

    strategy:
      matrix:
        service: [web, ingestor]
        architecture:
          - { name: linux-amd64, platform: linux/amd64, label: "Linux x86_64" }
          - { name: linux-arm64, platform: linux/arm64, label: "Linux ARM64" }
          - { name: linux-armv7, platform: linux/arm/v7, label: "Linux ARMv7" }

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up QEMU emulation
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract version from tag or input
        id: version
        run: |
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            VERSION="${{ github.event.inputs.version }}"
          else
            VERSION=${GITHUB_REF#refs/tags/v}
          fi
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo "Published version: $VERSION"

      - name: Build and push ${{ matrix.service }} for ${{ matrix.architecture.name }}
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./${{ matrix.service == 'web' && 'web/Dockerfile' || 'data/Dockerfile' }}
          target: production
          platforms: ${{ matrix.architecture.platform }}
          push: true
          tags: |
            ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service }}-${{ matrix.architecture.name }}:latest
            ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service }}-${{ matrix.architecture.name }}:${{ steps.version.outputs.version }}
          labels: |
            org.opencontainers.image.source=https://github.com/${{ github.repository }}
            org.opencontainers.image.description=PotatoMesh ${{ matrix.service == 'web' && 'Web Application' || 'Python Ingestor' }} for ${{ matrix.architecture.label }}
            org.opencontainers.image.licenses=Apache-2.0
            org.opencontainers.image.version=${{ steps.version.outputs.version }}
            org.opencontainers.image.created=${{ github.event.head_commit.timestamp }}
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.title=PotatoMesh ${{ matrix.service == 'web' && 'Web' || 'Ingestor' }} (${{ matrix.architecture.label }})
            org.opencontainers.image.vendor=PotatoMesh
            org.opencontainers.image.architecture=${{ matrix.architecture.name }}
            org.opencontainers.image.os=linux
            org.opencontainers.image.arch=${{ matrix.architecture.name }}
          cache-from: type=gha,scope=${{ matrix.service }}-${{ matrix.architecture.name }}
          cache-to: type=gha,mode=max,scope=${{ matrix.service }}-${{ matrix.architecture.name }}

  test-images:
    runs-on: ubuntu-latest
    needs: build-and-push
    if: startsWith(github.ref, 'refs/tags/v') && github.event_name == 'push'

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract version from tag
        id: version
        run: |
          VERSION=${GITHUB_REF#refs/tags/v}
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Test web application (Linux AMD64)
        run: |
          docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-amd64:${{ steps.version.outputs.version }}
          docker run --rm -d --name web-test -p 41447:41447 \
            -e API_TOKEN=test-token \
            -e DEBUG=1 \
            ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-amd64:${{ steps.version.outputs.version }}
          sleep 10
          curl -f http://localhost:41447/ || exit 1
          docker stop web-test

      - name: Test ingestor (Linux AMD64)
        run: |
          docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:${{ steps.version.outputs.version }}
          docker run --rm --name ingestor-test \
            -e POTATOMESH_INSTANCE=http://localhost:41447 \
            -e API_TOKEN=test-token \
            -e CONNECTION=mock \
            -e DEBUG=1 \
            ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:${{ steps.version.outputs.version }} &
          sleep 5
          docker stop ingestor-test || true

  publish-summary:
    runs-on: ubuntu-latest
    needs: [build-and-push, test-images]
    if: always() && startsWith(github.ref, 'refs/tags/v') && github.event_name == 'push'

    steps:
      - name: Extract version from tag
        id: version
        run: |
          VERSION=${GITHUB_REF#refs/tags/v}
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Publish release summary
        run: |
          echo "## 🚀 PotatoMesh Images Published to GHCR" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Version:** ${{ steps.version.outputs.version }}" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Published Images:**" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          # Web images
          echo "### 🌐 Web Application" >> $GITHUB_STEP_SUMMARY
          echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-amd64:latest\` - Linux x86_64" >> $GITHUB_STEP_SUMMARY
          echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-arm64:latest\` - Linux ARM64" >> $GITHUB_STEP_SUMMARY
          echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-armv7:latest\` - Linux ARMv7" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          # Ingestor images
          echo "### 📡 Ingestor Service" >> $GITHUB_STEP_SUMMARY
          echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:latest\` - Linux x86_64" >> $GITHUB_STEP_SUMMARY
          echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-arm64:latest\` - Linux ARM64" >> $GITHUB_STEP_SUMMARY
          echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-armv7:latest\` - Linux ARMv7" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
```
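The version-extraction steps above rely on plain shell parameter expansion to strip the `refs/tags/v` prefix; a tiny standalone sketch (the sample tag value is made up) shows what it produces:

```bash
# Simulate the workflow's version extraction locally.
GITHUB_REF="refs/tags/v1.2.3"        # hypothetical tag ref
VERSION=${GITHUB_REF#refs/tags/v}    # strip the leading "refs/tags/v"
echo "$VERSION"                      # prints: 1.2.3
```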
`.github/workflows/javascript.yml` (new file, 43 lines)

```yaml
name: JavaScript

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]

permissions:
  contents: read

jobs:
  frontend:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: web
    steps:
      - uses: actions/checkout@v5
      - name: Set up Node.js 22
        uses: actions/setup-node@v4
        with:
          node-version: '22'
      - name: Install dependencies
        run: npm ci
      - name: Run JavaScript tests
        run: npm test
      - name: Upload coverage to Codecov
        if: always()
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: web/reports/javascript-coverage.json
          flags: frontend
          name: frontend
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
      - name: Upload test results to Codecov
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: web/reports/javascript-junit.xml
          flags: frontend
```
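The same checks can be run locally before pushing; a hedged sketch mirroring the steps above (it assumes Node 22 and the `web/` working directory used by the workflow):

```bash
# Mirror the CI steps from .github/workflows/javascript.yml locally.
cd web
npm ci     # install exact dependency versions
npm test   # run the frontend test suite (produces the reports uploaded to Codecov)
```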
`.github/workflows/python.yml` (new file, 47 lines)

```yaml
name: Python

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]

permissions:
  contents: read

jobs:
  ingestor:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - name: Set up Python 3.13
        uses: actions/setup-python@v5
        with:
          python-version: "3.13"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install black pytest pytest-cov meshtastic
      - name: Test with pytest and coverage
        run: |
          mkdir -p reports
          pytest --cov=data --cov-report=term --cov-report=xml:reports/python-coverage.xml --junitxml=reports/python-junit.xml
      - name: Upload coverage to Codecov
        if: always()
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: reports/python-coverage.xml
          flags: python-ingestor
          name: python-ingestor
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
      - name: Upload test results to Codecov
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: reports/python-junit.xml
          flags: python-ingestor
      - name: Lint with black
        run: |
          black --check ./
```
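The ingestor pipeline reduces to a handful of commands locally; a sketch taken directly from the workflow above (it assumes Python 3.13 is already on PATH):

```bash
# Mirror the CI steps from .github/workflows/python.yml locally.
python -m pip install --upgrade pip
pip install black pytest pytest-cov meshtastic
mkdir -p reports
pytest --cov=data --cov-report=term \
  --cov-report=xml:reports/python-coverage.xml \
  --junitxml=reports/python-junit.xml
black --check ./   # formatting check runs after the tests, as in CI
```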
`.github/workflows/ruby.yml` (new file, 55 lines)

```yaml
name: Ruby

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]

permissions:
  contents: read

jobs:
  sinatra:
    defaults:
      run:
        working-directory: ./web
    runs-on: ubuntu-latest
    strategy:
      matrix:
        ruby-version: ['3.3', '3.4']

    steps:
      - uses: actions/checkout@v5
      - name: Set up Ruby
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: ${{ matrix.ruby-version }}
          bundler-cache: true
          working-directory: ./web
      - name: Set up dependencies
        run: bundle install
      - name: Run tests
        run: |
          mkdir -p tmp/test-results
          bundle exec rspec \
            --require rspec_junit_formatter \
            --format progress \
            --format RspecJunitFormatter \
            --out tmp/test-results/rspec.xml
      - name: Upload test results to Codecov
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./web/tmp/test-results/rspec.xml
          flags: sinatra-${{ matrix.ruby-version }}
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          fail_ci_if_error: false
          flags: sinatra-${{ matrix.ruby-version }}
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
      - name: Run rufo
        run: bundle exec rufo --check .
```
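Locally, the Sinatra suite and formatter check come down to a few commands; a sketch assuming Ruby 3.3 or 3.4 and Bundler are installed:

```bash
# Mirror the CI steps from .github/workflows/ruby.yml locally.
cd web
bundle install
mkdir -p tmp/test-results
bundle exec rspec \
  --require rspec_junit_formatter \
  --format progress \
  --format RspecJunitFormatter \
  --out tmp/test-results/rspec.xml
bundle exec rufo --check .   # formatting check, as in CI
```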
`.gitignore` (14 changed lines)

```diff
@@ -11,7 +11,7 @@
 /tmp/

 # Used by dotenv library to load environment variables.
-# .env
+.env

 # Ignore Byebug command history file.
 .byebug_history
@@ -57,3 +57,15 @@ Gemfile.lock

 # Python cache directories
 __pycache__/
+.coverage
+coverage/
+coverage.xml
+htmlcov/
+reports/
+
+# AI planning and documentation
+ai_docs/
+*.log
+
+# Generated credentials for the instance
+web/.config
```
`CHANGELOG.md` (new file, 243 lines)

# CHANGELOG

## v0.5.0

* Ensure node overlays appear above fullscreen map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/333>
* Adjust node table columns responsively by @l5yth in <https://github.com/l5yth/potato-mesh/pull/332>
* Add LoRa metadata fields to nodes and messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/331>
* Add channel metadata capture for message tagging by @l5yth in <https://github.com/l5yth/potato-mesh/pull/329>
* Capture radio metadata for ingestor payloads by @l5yth in <https://github.com/l5yth/potato-mesh/pull/327>
* Fix FrozenError when filtering node query results by @l5yth in <https://github.com/l5yth/potato-mesh/pull/324>
* Ensure frontend reports git-aware version strings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/321>
* Ensure web Docker image ships application sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/322>
* Refine stacked short info overlays on the map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/319>
* Refine environment configuration defaults by @l5yth in <https://github.com/l5yth/potato-mesh/pull/318>
* Fix legacy configuration migration to XDG directories by @l5yth in <https://github.com/l5yth/potato-mesh/pull/317>
* Adopt XDG base directories for app data and config by @l5yth in <https://github.com/l5yth/potato-mesh/pull/316>
* Refactor: streamline ingestor environment variables by @l5yth in <https://github.com/l5yth/potato-mesh/pull/314>
* Adjust map auto-fit padding and default zoom by @l5yth in <https://github.com/l5yth/potato-mesh/pull/315>
* Ensure APIs filter stale data and refresh node details from latest sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/312>
* Improve offline tile fallback initialization by @l5yth in <https://github.com/l5yth/potato-mesh/pull/307>
* Add fallback for offline tile rendering errors by @l5yth in <https://github.com/l5yth/potato-mesh/pull/306>
* Fix map auto-fit handling and add controller by @l5yth in <https://github.com/l5yth/potato-mesh/pull/311>
* Fix map initialization bounds and add coverage by @l5yth in <https://github.com/l5yth/potato-mesh/pull/305>
* Increase coverage for configuration and sanitizer helpers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/303>
* Add comprehensive theme and background front-end tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/302>
* Document sanitization and helper modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/301>
* Add in-repo Meshtastic protobuf stubs for tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/300>
* Handle CRL lookup failures during federation TLS by @l5yth in <https://github.com/l5yth/potato-mesh/pull/299>
* Ensure JavaScript workflow runs frontend tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/298>
* Unify structured logging across application and ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/296>
* Add Apache license headers to missing sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/297>
* Update workflows for ingestor, sinatra, and frontend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/295>
* Fix IPv6 instance domain canonicalization by @l5yth in <https://github.com/l5yth/potato-mesh/pull/294>
* Handle federation HTTPS CRL verification failures by @l5yth in <https://github.com/l5yth/potato-mesh/pull/293>
* Adjust federation announcement interval to eight hours by @l5yth in <https://github.com/l5yth/potato-mesh/pull/292>
* Restore modular app functionality by @l5yth in <https://github.com/l5yth/potato-mesh/pull/291>
* Refactor config and metadata helpers into PotatoMesh modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/290>
* Update default site configuration defaults by @l5yth in <https://github.com/l5yth/potato-mesh/pull/288>
* Add regression test for queue drain concurrency by @l5yth in <https://github.com/l5yth/potato-mesh/pull/287>
* Ensure Docker config directories are created for non-root user by @l5yth in <https://github.com/l5yth/potato-mesh/pull/286>
* Clarify numeric address requirement for network target parsing by @l5yth in <https://github.com/l5yth/potato-mesh/pull/285>
* Ensure mesh ingestor queue resets active flag when idle by @l5yth in <https://github.com/l5yth/potato-mesh/pull/284>
* Clarify BLE connection description in README by @l5yth in <https://github.com/l5yth/potato-mesh/pull/283>
* Configure web container for production mode by @l5yth in <https://github.com/l5yth/potato-mesh/pull/282>
* Normalize INSTANCE_DOMAIN configuration to require hostnames by @l5yth in <https://github.com/l5yth/potato-mesh/pull/280>
* Avoid blocking startup on federation announcements by @l5yth in <https://github.com/l5yth/potato-mesh/pull/281>
* Fix production Docker builds for web and ingestor images by @l5yth in <https://github.com/l5yth/potato-mesh/pull/279>
* Improve instance domain detection logic by @l5yth in <https://github.com/l5yth/potato-mesh/pull/278>
* Implement federation announcements and instances API by @l5yth in <https://github.com/l5yth/potato-mesh/pull/277>
* Fix federation signature handling and IP guard by @l5yth in <https://github.com/l5yth/potato-mesh/pull/276>
* Add persistent federation metadata endpoint by @l5yth in <https://github.com/l5yth/potato-mesh/pull/274>
* Add configurable instance domain with reverse DNS fallback by @l5yth in <https://github.com/l5yth/potato-mesh/pull/272>
* Document production deployment configuration by @l5yth in <https://github.com/l5yth/potato-mesh/pull/273>
* Add targeted API endpoints and expose version metadata by @l5yth in <https://github.com/l5yth/potato-mesh/pull/271>
* Prometheus metrics updates on startup and for position/telemetry by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/270>
* Add hourly reconnect handling for inactive mesh interface by @l5yth in <https://github.com/l5yth/potato-mesh/pull/267>
* Dockerfile fixes by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/268>
* Added prometheus /metrics endpoint by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/262>
* Add fullscreen toggle to map view by @l5yth in <https://github.com/l5yth/potato-mesh/pull/263>
* Relocate JS coverage export script into web directory by @l5yth in <https://github.com/l5yth/potato-mesh/pull/266>
* V0.4.0 version string in web UI by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/265>
* Add energy saving cycle to ingestor daemon by @l5yth in <https://github.com/l5yth/potato-mesh/pull/256>
* Chore: restore apache headers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/260>
* Docs: add matrix to readme by @l5yth in <https://github.com/l5yth/potato-mesh/pull/259>
* Force dark theme default based on sanitized cookie by @l5yth in <https://github.com/l5yth/potato-mesh/pull/252>
* Document mesh ingestor modules with PDoc-style docstrings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/255>
* Handle missing node IDs in Meshtastic nodeinfo packets by @l5yth in <https://github.com/l5yth/potato-mesh/pull/251>
* Document Ruby helper methods with RDoc comments by @l5yth in <https://github.com/l5yth/potato-mesh/pull/254>
* Add JSDoc documentation across client scripts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/253>
* Fix mesh ingestor telemetry and neighbor handling by @l5yth in <https://github.com/l5yth/potato-mesh/pull/249>
* Refactor front-end assets into external modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/245>
* Add tests for helper utilities and asset routes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/243>
* Docs: add ingestor inline docstrings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/244>
* Add comprehensive coverage tests for mesh ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/241>
* Add inline documentation to config helpers and frontend scripts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/240>
* Update changelog by @l5yth in <https://github.com/l5yth/potato-mesh/pull/238>

## v0.4.0

* Reformat neighbor overlay layout by @l5yth in <https://github.com/l5yth/potato-mesh/pull/237>
* Add legend toggle for neighbor lines by @l5yth in <https://github.com/l5yth/potato-mesh/pull/236>
* Hide Air Util Tx column on mobile by @l5yth in <https://github.com/l5yth/potato-mesh/pull/235>
* Add overlay for clickable neighbor links on map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/234>
* Hide humidity and pressure columns on mobile by @l5yth in <https://github.com/l5yth/potato-mesh/pull/232>
* Remove last position timestamp from map info overlay by @l5yth in <https://github.com/l5yth/potato-mesh/pull/233>
* Improve live node positions and expose precision metadata by @l5yth in <https://github.com/l5yth/potato-mesh/pull/231>
* Show neighbor short names in info overlays by @l5yth in <https://github.com/l5yth/potato-mesh/pull/228>
* Add telemetry environment metrics to node UI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/227>
* Reduce neighbor line opacity by @l5yth in <https://github.com/l5yth/potato-mesh/pull/226>
* Visualize neighbor connections on map canvas by @l5yth in <https://github.com/l5yth/potato-mesh/pull/224>
* Add clear control to filter input by @l5yth in <https://github.com/l5yth/potato-mesh/pull/225>
* Handle Bluetooth shutdown hangs gracefully by @l5yth in <https://github.com/l5yth/potato-mesh/pull/221>
* Adjust mesh priorities and receive topics by @l5yth in <https://github.com/l5yth/potato-mesh/pull/220>
* Add BLE and fallback mesh interface handling by @l5yth in <https://github.com/l5yth/potato-mesh/pull/219>
* Add neighbor info ingestion and API endpoints by @l5yth in <https://github.com/l5yth/potato-mesh/pull/218>
* Add debug logs for unknown node creation and last-heard updates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/214>
* Update node last seen when events are received by @l5yth in <https://github.com/l5yth/potato-mesh/pull/212>
* Improve debug logging for node and telemetry data by @l5yth in <https://github.com/l5yth/potato-mesh/pull/213>
* Normalize stored message debug output by @l5yth in <https://github.com/l5yth/potato-mesh/pull/211>
* Stop repeating ingestor node info snapshot and timestamp debug logs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/210>
* Add telemetry API and ingestion support by @l5yth in <https://github.com/l5yth/potato-mesh/pull/205>
* Add private mode to hide chat and message APIs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/204>
* Handle offline-ready map fallback by @l5yth in <https://github.com/l5yth/potato-mesh/pull/202>
* Add linux/armv7 container builds and configuration options by @l5yth in <https://github.com/l5yth/potato-mesh/pull/201>
* Update Docker documentation by @l5yth in <https://github.com/l5yth/potato-mesh/pull/200>
* Update node last seen when ingesting encrypted messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/198>
* Fix api in readme by @l5yth in <https://github.com/l5yth/potato-mesh/pull/197>

## v0.3.0

* Add connection recovery for TCP interface by @l5yth in <https://github.com/l5yth/potato-mesh/pull/186>
* Bump version to 0.3 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/191>
* Pgrade styles and fix interface issues by @l5yth in <https://github.com/l5yth/potato-mesh/pull/190>
* Some updates in the front by @dkorotkih2014-hub in <https://github.com/l5yth/potato-mesh/pull/188>
* Update last heard on node entry change by @l5yth in <https://github.com/l5yth/potato-mesh/pull/185>
* Populate chat metadata for unknown nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/182>
* Update role color theme to latest palette by @l5yth in <https://github.com/l5yth/potato-mesh/pull/183>
* Add placeholder nodes for unknown senders by @l5yth in <https://github.com/l5yth/potato-mesh/pull/181>
* Update role colors and ordering for firmware 2.7.10 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/180>
* Handle plain IP addresses in mesh TCP detection by @l5yth in <https://github.com/l5yth/potato-mesh/pull/154>
* Handle encrypted messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/173>
* Add fallback display names for unnamed nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/171>
* Ensure routers render above other node types by @l5yth in <https://github.com/l5yth/potato-mesh/pull/169>
* Move lint checks after tests in CI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/168>
* Handle proto values in nodeinfo payloads by @l5yth in <https://github.com/l5yth/potato-mesh/pull/167>
* Remove raw payload storage from database schema by @l5yth in <https://github.com/l5yth/potato-mesh/pull/166>
* Add POSITION_APP ingestion and API support by @l5yth in <https://github.com/l5yth/potato-mesh/pull/160>
* Add support for NODEINFO_APP packets by @l5yth in <https://github.com/l5yth/potato-mesh/pull/159>
* Derive SEO metadata from existing config values by @l5yth in <https://github.com/l5yth/potato-mesh/pull/153>
* Tests: create helper script to dump all mesh data from serial by @l5yth in <https://github.com/l5yth/potato-mesh/pull/152>
* Limit chat log to recent entries by @l5yth in <https://github.com/l5yth/potato-mesh/pull/151>
* Require time library before formatting ISO timestamps by @l5yth in <https://github.com/l5yth/potato-mesh/pull/149>
* Define docker compose network by @l5yth in <https://github.com/l5yth/potato-mesh/pull/148>
* Fix sqlite3 native extension on Alpine by @l5yth in <https://github.com/l5yth/potato-mesh/pull/146>
* Fix web app startup binding by @l5yth in <https://github.com/l5yth/potato-mesh/pull/147>
* Ensure sqlite3 builds from source on Alpine by @l5yth in <https://github.com/l5yth/potato-mesh/pull/145>
* Support mock serial interface in CI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/143>
* Fix Docker workflow matrix for supported platforms by @l5yth in <https://github.com/l5yth/potato-mesh/pull/142>
* Add clickable role filters to the map legend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/140>
* Rebuild chat log on each refresh by @l5yth in <https://github.com/l5yth/potato-mesh/pull/139>
* Fix: retain alpine runtime libs after removing build deps by @l5yth in <https://github.com/l5yth/potato-mesh/pull/138>
* Fix: support windows ingestor build by @l5yth in <https://github.com/l5yth/potato-mesh/pull/136>
* Fix: use supported ruby image by @l5yth in <https://github.com/l5yth/potato-mesh/pull/135>
* Feat: Add comprehensive Docker support by @trose in <https://github.com/l5yth/potato-mesh/pull/122>
* Chore: bump version to 0.2.1 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/134>
* Fix dark mode tile styling on new map tiles by @l5yth in <https://github.com/l5yth/potato-mesh/pull/132>
* Switch map tiles to OSM HOT and add theme filters by @l5yth in <https://github.com/l5yth/potato-mesh/pull/130>
* Add footer version display by @l5yth in <https://github.com/l5yth/potato-mesh/pull/128>
* Add responsive controls for map legend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/129>
* Update changelog by @l5yth in <https://github.com/l5yth/potato-mesh/pull/119>

## v0.2.0

* Update readme for 0.2 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/118>
* Add PotatoMesh logo to header and favicon by @l5yth in <https://github.com/l5yth/potato-mesh/pull/117>
* Harden API auth and request limits by @l5yth in <https://github.com/l5yth/potato-mesh/pull/116>
* Add client-side sorting to node table by @l5yth in <https://github.com/l5yth/potato-mesh/pull/114>
* Add short name overlay for node details by @l5yth in <https://github.com/l5yth/potato-mesh/pull/111>
* Adjust python ingestor interval to 60 seconds by @l5yth in <https://github.com/l5yth/potato-mesh/pull/112>
* Hide location columns on medium screens by @l5yth in <https://github.com/l5yth/potato-mesh/pull/109>
* Handle message updates based on sender info by @l5yth in <https://github.com/l5yth/potato-mesh/pull/108>
* Prioritize node posts in queued API updates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/107>
* Add auto-refresh toggle to UI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/105>
* Adjust Leaflet popup styling for dark mode by @l5yth in <https://github.com/l5yth/potato-mesh/pull/104>
* Add site info overlay by @l5yth in <https://github.com/l5yth/potato-mesh/pull/103>
* Add long name tooltip to short name badge by @l5yth in <https://github.com/l5yth/potato-mesh/pull/102>
* Ensure node numeric aliases are derived from canonical IDs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/101>
* Chore: clean up repository by @l5yth in <https://github.com/l5yth/potato-mesh/pull/96>
* Handle SQLite busy errors when upserting nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/100>
* Configure Sinatra logging level from DEBUG flag by @l5yth in <https://github.com/l5yth/potato-mesh/pull/97>
* Add penetration tests for authentication and SQL injection by @l5yth in <https://github.com/l5yth/potato-mesh/pull/95>
* Document Python and Ruby source modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/94>
* Add tests covering mesh helper edge cases by @l5yth in <https://github.com/l5yth/potato-mesh/pull/93>
* Fix py code cov by @l5yth in <https://github.com/l5yth/potato-mesh/pull/92>
* Add Codecov reporting to Python CI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/91>
* Skip null identifiers when selecting packet fields by @l5yth in <https://github.com/l5yth/potato-mesh/pull/88>
* Create python yml ga by @l5yth in <https://github.com/l5yth/potato-mesh/pull/90>
* Add unit tests for mesh ingestor script by @l5yth in <https://github.com/l5yth/potato-mesh/pull/89>
* Add coverage for debug logging on messages without sender by @l5yth in <https://github.com/l5yth/potato-mesh/pull/86>
* Handle concurrent node snapshot updates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/85>
* Fix ingestion mapping for message sender IDs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/84>
* Add coverage for API authentication and payload edge cases by @l5yth in <https://github.com/l5yth/potato-mesh/pull/83>
* Add JUnit test reporting to Ruby CI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/82>
* Configure SimpleCov reporting for Codecov by @l5yth in <https://github.com/l5yth/potato-mesh/pull/81>
* Update codecov job by @l5yth in <https://github.com/l5yth/potato-mesh/pull/80>
* Fix readme badges by @l5yth in <https://github.com/l5yth/potato-mesh/pull/79>
* Add Codecov upload step to Ruby workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/78>
* Add Apache license headers to source files by @l5yth in <https://github.com/l5yth/potato-mesh/pull/77>
* Add integration specs for node and message APIs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/76>
* Docs: update for 0.2.0 release by @l5yth in <https://github.com/l5yth/potato-mesh/pull/75>
* Create ruby workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/74>
* Add RSpec smoke tests for app boot and database init by @l5yth in <https://github.com/l5yth/potato-mesh/pull/73>
* Align refresh controls with status text by @l5yth in <https://github.com/l5yth/potato-mesh/pull/72>
* Improve mobile layout by @l5yth in <https://github.com/l5yth/potato-mesh/pull/68>
* Normalize message sender IDs using node numbers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/67>
* Style: condense node table by @l5yth in <https://github.com/l5yth/potato-mesh/pull/65>
* Log debug details for messages without sender by @l5yth in <https://github.com/l5yth/potato-mesh/pull/64>
* Fix nested dataclass serialization for node snapshots by @l5yth in <https://github.com/l5yth/potato-mesh/pull/63>
* Log node object on snapshot update failure by @l5yth in <https://github.com/l5yth/potato-mesh/pull/62>
* Initialize database on startup by @l5yth in <https://github.com/l5yth/potato-mesh/pull/61>
* Send mesh data to Potatomesh API by @l5yth in <https://github.com/l5yth/potato-mesh/pull/60>
* Convert boolean flags for SQLite binding by @l5yth in <https://github.com/l5yth/potato-mesh/pull/59>
* Use packet id as message primary key by @l5yth in <https://github.com/l5yth/potato-mesh/pull/58>
* Add message ingestion API and stricter auth by @l5yth in <https://github.com/l5yth/potato-mesh/pull/56>
* Feat: parameterize community info by @l5yth in <https://github.com/l5yth/potato-mesh/pull/55>
* Feat: add dark mode toggle by @l5yth in <https://github.com/l5yth/potato-mesh/pull/54>

## v0.1.0

* Show daily node count in title and header by @l5yth in <https://github.com/l5yth/potato-mesh/pull/49>
* Add daily date separators to chat log by @l5yth in <https://github.com/l5yth/potato-mesh/pull/47>
* Feat: make frontend responsive for mobile by @l5yth in <https://github.com/l5yth/potato-mesh/pull/46>
* Harden mesh utilities by @l5yth in <https://github.com/l5yth/potato-mesh/pull/45>
* Filter out distant nodes from Berlin map view by @l5yth in <https://github.com/l5yth/potato-mesh/pull/43>
* Display filtered active node counts in #MediumFast subheading by @l5yth in <https://github.com/l5yth/potato-mesh/pull/44>
* Limit chat log and highlight short names by role by @l5yth in <https://github.com/l5yth/potato-mesh/pull/42>
* Fix string/integer comparison in node query by @l5yth in <https://github.com/l5yth/potato-mesh/pull/40>
* Escape chat message and node entries by @l5yth in <https://github.com/l5yth/potato-mesh/pull/39>
* Sort chat entries by timestamp by @l5yth in <https://github.com/l5yth/potato-mesh/pull/38>
* Feat: append messages to chat log by @l5yth in <https://github.com/l5yth/potato-mesh/pull/36>
* Normalize future timestamps for nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/35>
* Optimize web frontend and Ruby app by @l5yth in <https://github.com/l5yth/potato-mesh/pull/32>
* Add messages API endpoint with node details by @l5yth in <https://github.com/l5yth/potato-mesh/pull/33>
* Clamp node timestamps and sync last_heard with position time by @l5yth in <https://github.com/l5yth/potato-mesh/pull/31>
* Refactor: replace deprecated utcfromtimestamp by @l5yth in <https://github.com/l5yth/potato-mesh/pull/30>
* Add optional debug logging for node and message operations by @l5yth in <https://github.com/l5yth/potato-mesh/pull/29>
* Data: enable serial collection of messages on channel 0 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/25>
* Add first_heard timestamp by @l5yth in <https://github.com/l5yth/potato-mesh/pull/23>
* Add persistent footer with contact information by @l5yth in <https://github.com/l5yth/potato-mesh/pull/22>
* Sort initial chat entries by last-heard by @l5yth in <https://github.com/l5yth/potato-mesh/pull/20>
* Display position time in relative 'time ago' format by @l5yth in <https://github.com/l5yth/potato-mesh/pull/19>
* Adjust marker size and map tile opacity by @l5yth in <https://github.com/l5yth/potato-mesh/pull/18>
* Add chat box for node notifications by @l5yth in <https://github.com/l5yth/potato-mesh/pull/17>
* Color markers by role with grayscale map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/16>
* Default missing node role to client by @l5yth in <https://github.com/l5yth/potato-mesh/pull/15>
* Show live node count in nodes page titles by @l5yth in <https://github.com/l5yth/potato-mesh/pull/14>
* Filter stale nodes and add live search by @l5yth in <https://github.com/l5yth/potato-mesh/pull/13>
* Remove raw node JSON column by @l5yth in <https://github.com/l5yth/potato-mesh/pull/12>
* Add JSON ingest API for node updates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/11>
* Ignore Python __pycache__ directories by @l5yth in <https://github.com/l5yth/potato-mesh/pull/10>
* Feat: load nodes from json for tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/8>
* Handle dataclass fields in node snapshots by @l5yth in <https://github.com/l5yth/potato-mesh/pull/6>
* Add index page and /nodes route for node map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/4>
`DOCKER.md` (new file, 94 lines)

# PotatoMesh Docker Guide

PotatoMesh publishes ready-to-run container images to the GitHub Packages container
registry (GHCR). You do not need to clone the repository to deploy them—Compose
will pull the latest release images for you.

## Prerequisites

- Docker Engine 24+ or Docker Desktop with the Compose plugin
- Access to `/dev/ttyACM*` (or equivalent) if you plan to attach a Meshtastic
  device to the ingestor container
- An API token that authorises the ingestor to post to your PotatoMesh instance

## Images on GHCR

| Service  | Image                                                   |
|----------|---------------------------------------------------------|
| Web UI   | `ghcr.io/l5yth/potato-mesh-web-linux-amd64:latest`      |
| Ingestor | `ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:latest` |

Images are published for every tagged release. Replace `latest` with a
specific version tag if you prefer pinned deployments.

## Configure environment

Create a `.env` file alongside your Compose file and populate the variables you
need. At a minimum you must set `API_TOKEN` so the ingestor can authenticate
against the web API.

```env
API_TOKEN=replace-with-a-strong-token
SITE_NAME=PotatoMesh Demo
CONNECTION=/dev/ttyACM0
```

Additional environment variables are optional:

- `CHANNEL`, `FREQUENCY`, `MAP_CENTER`, `MAX_DISTANCE`, and `CONTACT_LINK`
  customise the UI.
- `POTATOMESH_INSTANCE` (defaults to `http://web:41447`) lets the ingestor post
  to a remote PotatoMesh instance if you do not run both services together.
- `CONNECTION` overrides the default serial device or network endpoint used by
  the ingestor.
- `CHANNEL_INDEX` selects the LoRa channel when using serial or Bluetooth
  connections.
- `DEBUG` enables verbose logging across the stack.

## Docker Compose file

Use the `docker-compose.yml` file provided in the repository (or download the
[raw file from GitHub](https://raw.githubusercontent.com/l5yth/potato-mesh/main/docker-compose.yml)).
It already references the published GHCR images, defines persistent volumes for
data, configuration, and logs, and includes optional bridge-profile services for
environments that require classic port mapping. Place this file in the same
directory as your `.env` file so Compose can pick up both.
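If you prefer to fetch the file directly, a minimal sketch using the raw GitHub URL quoted above (run it in the directory that holds your `.env`):

```bash
# Download the published compose file next to your .env, then start the stack.
curl -fsSLO https://raw.githubusercontent.com/l5yth/potato-mesh/main/docker-compose.yml
docker compose up -d
```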
The dedicated configuration volume binds to `/app/.config/potato-mesh` inside
|
||||
the container. This path stores the instance private key and staged
|
||||
`/.well-known/potato-mesh` documents. Because the volume persists independently
|
||||
of container lifecycle events, generated credentials are not replaced on reboot
|
||||
or re-deploy.
|
||||
|
||||
## Start the stack
|
||||
|
||||
From the directory containing the Compose file:
|
||||
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Docker automatically pulls the GHCR images when they are not present locally.
|
||||
The dashboard becomes available at `http://127.0.0.1:41447`. Use the bridge
|
||||
profile when you need to map the port explicitly:
|
||||
|
||||
```bash
|
||||
COMPOSE_PROFILES=bridge docker compose up -d
|
||||
```
|
||||
|
||||
## Updating
|
||||
|
||||
```bash
|
||||
docker compose pull
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- **Serial device permissions (Linux/macOS):** grant access with `sudo chmod 666
|
||||
/dev/ttyACM0` or add your user to the `dialout` group (see the sketch after this list).
|
||||
- **Port already in use:** identify the conflicting service with `sudo lsof -i
|
||||
:41447`.
|
||||
- **Viewing logs:** `docker compose logs -f` tails output from both services.
|
||||
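A minimal sketch of the group-based approach on Linux, assuming the serial device is owned by the `dialout` group (a new login session is required before the change takes effect):

```bash
# Add the current user to the dialout group instead of chmod-ing the device.
sudo usermod -aG dialout "$USER"

# Verify the device's owning group and permissions.
ls -l /dev/ttyACM0
```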
|
||||
For general Docker support, consult the [Docker Compose documentation](https://docs.docker.com/compose/).
|
||||
Dockerfile (Normal file, 78 lines)
@@ -0,0 +1,78 @@
|
||||
# NOTE: This Dockerfile is kept for backward compatibility. The canonical build
|
||||
# instructions live in `web/Dockerfile`; keep the two files in sync.
|
||||
|
||||
# Main application builder stage
|
||||
FROM ruby:3.3-alpine AS builder
|
||||
|
||||
# Ensure native extensions are built against musl libc rather than
|
||||
# using glibc precompiled binaries (which fail on Alpine).
|
||||
ENV BUNDLE_FORCE_RUBY_PLATFORM=true
|
||||
|
||||
# Install build dependencies and SQLite3
|
||||
RUN apk add --no-cache \
|
||||
build-base \
|
||||
sqlite-dev \
|
||||
linux-headers \
|
||||
pkgconfig
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy Gemfile and install dependencies
|
||||
COPY web/Gemfile web/Gemfile.lock* ./
|
||||
|
||||
# Install gems with SQLite3 support
|
||||
RUN bundle config set --local force_ruby_platform true && \
|
||||
bundle config set --local without 'development test' && \
|
||||
bundle install --jobs=4 --retry=3
|
||||
|
||||
# Production stage
|
||||
FROM ruby:3.3-alpine AS production
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apk add --no-cache \
|
||||
sqlite \
|
||||
tzdata \
|
||||
curl
|
||||
|
||||
# Create non-root user
|
||||
RUN addgroup -g 1000 -S potatomesh && \
|
||||
adduser -u 1000 -S potatomesh -G potatomesh
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy installed gems from builder stage
|
||||
COPY --from=builder /usr/local/bundle /usr/local/bundle
|
||||
|
||||
# Copy application code (exclude Dockerfile from web directory)
|
||||
COPY --chown=potatomesh:potatomesh web/app.rb web/app.sh web/Gemfile web/Gemfile.lock* web/spec/ ./
|
||||
COPY --chown=potatomesh:potatomesh web/public ./public
|
||||
COPY --chown=potatomesh:potatomesh web/views/ ./views/
|
||||
|
||||
# Copy SQL schema files from data directory
|
||||
COPY --chown=potatomesh:potatomesh data/*.sql /data/
|
||||
|
||||
# Create data directory for SQLite database
|
||||
RUN mkdir -p /app/data /app/.local/share/potato-mesh && \
|
||||
chown -R potatomesh:potatomesh /app/data /app/.local
|
||||
|
||||
# Switch to non-root user
|
||||
USER potatomesh
|
||||
|
||||
# Expose port
|
||||
EXPOSE 41447
|
||||
|
||||
# Default environment variables (can be overridden by host)
|
||||
ENV APP_ENV=production \
|
||||
RACK_ENV=production \
|
||||
SITE_NAME="PotatoMesh Demo" \
|
||||
CHANNEL="#LongFast" \
|
||||
FREQUENCY="915MHz" \
|
||||
MAP_CENTER="38.761944,-27.090833" \
|
||||
MAX_DISTANCE=42 \
|
||||
CONTACT_LINK="#potatomesh:dod.ngo" \
|
||||
DEBUG=0
|
||||
|
||||
# Start the application
|
||||
CMD ["ruby", "app.rb", "-p", "41447", "-o", "0.0.0.0"]
|
||||
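As a sketch of how this image can be built and run locally, assuming the repository root as the build context (the image name and token below are placeholders, not project conventions):

```bash
# Build the web image from the repository root so web/ and data/*.sql are in context.
docker build -t potato-mesh-web .

# Run it on the exposed port; API_TOKEN is a placeholder value.
docker run --rm -p 41447:41447 -e API_TOKEN=replace-me potato-mesh-web
```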
LICENSE (4 lines changed)
@@ -33,7 +33,7 @@
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
Object form, made available under the Licen2se, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
@@ -186,7 +186,7 @@
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
Copyright (C) 2025 l5yth
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
README.md (223 lines changed)
@@ -1,70 +1,37 @@
|
||||
# potato-mesh
|
||||
# 🥔 PotatoMesh
|
||||
|
||||
a simple meshtastic node dashboard for your local community. here: berlin mediumfast.
|
||||
[](https://github.com/l5yth/potato-mesh/actions)
|
||||
[](https://github.com/l5yth/potato-mesh/releases)
|
||||
[](https://codecov.io/gh/l5yth/potato-mesh)
|
||||
[](LICENSE)
|
||||
[](https://github.com/l5yth/potato-mesh/issues)
|
||||
|
||||

|
||||
A simple Meshtastic-powered node dashboard for your local community. _No MQTT clutter, just local LoRa aether._
|
||||
|
||||
## status
|
||||
* Web app with chat window and map view showing nodes, neighbors, telemetry, and messages.
|
||||
* API to POST (authenticated) and to GET nodes and messages.
|
||||
* Supplemental Python ingestor to feed the POST APIs of the Web app with data remotely.
|
||||
* Shows new node notifications (first seen) in chat.
|
||||
* Allows searching and filtering for nodes in map and table view.
|
||||
|
||||
_in active development._
|
||||
Live demo for Berlin #MediumFast: [potatomesh.net](https://potatomesh.net)
|
||||
|
||||
what works:
|
||||

|
||||
|
||||
* updating nodes from a locally connected meshtastic device (via serial)
|
||||
* awaiting messages on default channel (0) from a local meshtastic device
|
||||
* storing nodes and messages in a local database (sqlite3)
|
||||
* displaying nodes ordered by last seen in a web app table view
|
||||
* displaying nodes by geographic coordinates on a map layer, coloured by device role
|
||||
* displaying new node notifications and chat messages in default channel in chat box
|
||||
* displaying active node count and filtering nodes by name
|
||||
* exposing nodes and messages to api endpoints
|
||||
## Web App
|
||||
|
||||
what does not work _(yet):_
|
||||
|
||||
* posting nodes and messages to the api endpoints _(wip)_
|
||||
|
||||
## requirements
|
||||
|
||||
requires a meshtastic node connected (via serial) to gather mesh data and the meshtastic cli.
|
||||
|
||||
requires the meshtastic python api for the database.
|
||||
Requires Ruby for the Sinatra web app and SQLite3 for the app's database.
|
||||
|
||||
```bash
|
||||
python -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install -U meshtastic
|
||||
```
|
||||
|
||||
requires latest ruby and ruby gems for the sinatra web app.
|
||||
|
||||
```bash
|
||||
gem install bundler
|
||||
pacman -S ruby sqlite3
|
||||
gem install sinatra sqlite3 rackup puma rspec rack-test rufo
|
||||
cd ./web
|
||||
bundle install
|
||||
```
|
||||
|
||||
### database
|
||||
### Run
|
||||
|
||||
uses python meshtastic library to ingest mesh data into an sqlite3 database locally
|
||||
|
||||
run `mesh.sh` in `data/` to keep updating node records and parsing new incoming messages.
|
||||
|
||||
```bash
|
||||
MESH_SERIAL=/dev/ttyACM0 DEBUG=1 ./mesh.sh
|
||||
[...]
|
||||
[debug] upserted node !849b7154 shortName='7154'
|
||||
[debug] upserted node !ba653ae8 shortName='3ae8'
|
||||
[debug] upserted node !16ced364 shortName='Pat'
|
||||
[debug] stored message from '!9ee71c38' to '^all' ch=0 text='Guten Morgen!'
|
||||
```
|
||||
|
||||
enable debug output with `DEBUG=1`, specify the serial port with `MESH_SERIAL` (default `/dev/ttyACM0`).
|
||||
|
||||
### web app
|
||||
|
||||
uses a ruby sinatra webapp to display data from the sqlite database
|
||||
|
||||
run `app.sh` in `web/` to run the sinatra webserver and check
|
||||
[127.0.0.1:41447](http://127.0.0.1:41447/) for the correct node map.
|
||||
Check out the `app.sh` run script in the `./web` directory.
|
||||
|
||||
```bash
|
||||
API_TOKEN="1eb140fd-cab4-40be-b862-41c607762246" ./app.sh
|
||||
@@ -76,17 +43,149 @@ Puma starting in single mode...
|
||||
* Listening on http://127.0.0.1:41447
|
||||
```
|
||||
|
||||
set `API_TOKEN` required for authorizations on the api post-endpoints (wip).
|
||||
Check [127.0.0.1:41447](http://127.0.0.1:41447/) for the development preview
|
||||
of the node map. Set `API_TOKEN`, which is required to authorize requests to the API's POST endpoints.
|
||||
|
||||
## api
|
||||
### Production
|
||||
|
||||
the web app contains an api:
|
||||
When promoting the app to production, run the server with the minimum required
|
||||
configuration to ensure secure access and proper routing:
|
||||
|
||||
* GET `/api/nodes?limit=1000` - returns the latest 1000 nodes reported to the app
|
||||
* GET `/api/messages?limit=1000` - returns the latest 1000 messages
|
||||
```bash
|
||||
RACK_ENV="production" \
|
||||
APP_ENV="production" \
|
||||
API_TOKEN="SuperSecureTokenReally" \
|
||||
INSTANCE_DOMAIN="https://potatomesh.net" \
|
||||
exec ruby app.rb -p 41447 -o 0.0.0.0
|
||||
```
|
||||
|
||||
the `POST` apis are _currently being worked on (tm)._
|
||||
* `RACK_ENV` and `APP_ENV` must be set to `production` to enable optimized
|
||||
settings suited for live deployments.
|
||||
* Bind the server to a production port and all interfaces (`-p 41447 -o 0.0.0.0`)
|
||||
so that clients can reach the dashboard over the network.
|
||||
* Provide a strong `API_TOKEN` value to authorize POST requests against the API.
|
||||
* Configure `INSTANCE_DOMAIN` with the public URL of your deployment so vanity
|
||||
links and generated metadata resolve correctly.
|
||||
|
||||
## license
|
||||
### Configuration storage
|
||||
|
||||
apache v2.0
|
||||
PotatoMesh stores its runtime assets using the XDG base directory specification.
|
||||
During startup the web application migrates existing configuration from
|
||||
`web/.config` and `web/config` into the resolved `XDG_CONFIG_HOME` directory.
|
||||
This preserves previously generated instance key material and
|
||||
`/.well-known/potato-mesh` documents so upgrades do not create new credentials
|
||||
unnecessarily. When XDG directories are not provided, the application falls back
|
||||
to the repository root.
|
||||
|
||||
The migrated key is written to `<XDG_CONFIG_HOME>/potato-mesh/keyfile` and the
|
||||
well-known document is staged in
|
||||
`<XDG_CONFIG_HOME>/potato-mesh/well-known/potato-mesh`.
|
||||
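A quick way to confirm the migration, assuming the conventional `XDG_CONFIG_HOME` default of `~/.config`:

```bash
# Instance private key migrated from web/.config or web/config.
ls -l "${XDG_CONFIG_HOME:-$HOME/.config}/potato-mesh/keyfile"

# Staged document that backs /.well-known/potato-mesh.
cat "${XDG_CONFIG_HOME:-$HOME/.config}/potato-mesh/well-known/potato-mesh"
```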
|
||||
When deploying with Docker Compose, the default `docker-compose.yml` mounts a
|
||||
named volume at `/app/.config/potato-mesh` to persist these files. Avoid
|
||||
removing this volume once a key has been generated so the instance identity and
|
||||
well-known metadata remain stable across restarts.
|
||||
|
||||
The web app can be configured with environment variables (defaults shown):
|
||||
|
||||
* `SITE_NAME` - title and header shown in the UI (default: "PotatoMesh Demo")
|
||||
* `CHANNEL` - default channel shown in the UI (default: "#LongFast")
|
||||
* `FREQUENCY` - default frequency shown in the UI (default: "915MHz")
|
||||
* `MAP_CENTER` - default map center coordinates (default: `38.761944,-27.090833`)
|
||||
* `MAX_DISTANCE` - hide nodes farther than this distance from the center (default: `42`)
|
||||
* `CONTACT_LINK` - chat link or Matrix alias for footer and overlay (default: `#potatomesh:dod.ngo`)
|
||||
* `PRIVATE` - set to `1` to hide the chat UI, disable message APIs, and exclude hidden clients (default: unset)
|
||||
|
||||
The application derives SEO-friendly document titles, descriptions, and social
|
||||
preview tags from these existing configuration values and reuses the bundled
|
||||
logo for Open Graph and Twitter cards.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
SITE_NAME="PotatoMesh Demo" MAP_CENTER=38.761944,-27.090833 MAX_DISTANCE=42 CONTACT_LINK="#potatomesh:dod.ngo" ./app.sh
|
||||
```
|
||||
|
||||
### API
|
||||
|
||||
The web app contains an API:
|
||||
|
||||
* GET `/api/nodes?limit=100` - returns the latest 100 nodes reported to the app
|
||||
* GET `/api/positions?limit=100` - returns the latest 100 position data
|
||||
* GET `/api/messages?limit=100` - returns the latest 100 messages (disabled when `PRIVATE=1`)
|
||||
* GET `/api/telemetry?limit=100` - returns the latest 100 telemetry data
|
||||
* GET `/api/neighbors?limit=100` - returns the latest 100 neighbor tuples
|
||||
* GET `/metrics` - Prometheus metrics endpoint
|
||||
* POST `/api/nodes` - upserts nodes provided as JSON object mapping node ids to node data (requires `Authorization: Bearer <API_TOKEN>`)
|
||||
* POST `/api/positions` - appends positions provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
|
||||
* POST `/api/messages` - appends messages provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`; disabled when `PRIVATE=1`)
|
||||
* POST `/api/telemetry` - appends telemetry provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
|
||||
* POST `/api/neighbors` - appends neighbor tuples provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
|
||||
|
||||
The `API_TOKEN` environment variable must be set to a non-empty value and match the token supplied in the `Authorization` header for `POST` requests.
|
||||
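A minimal sketch of exercising these endpoints with `curl`; the host, token, and node fields are illustrative only, and the exact node payload depends on what your ingestor sends:

```bash
# Read the latest nodes (GET endpoints need no authentication).
curl "http://127.0.0.1:41447/api/nodes?limit=10"

# Upsert a node; POST endpoints require the bearer token.
curl -X POST "http://127.0.0.1:41447/api/nodes" \
  -H "Authorization: Bearer $API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"!849b7154": {"user": {"shortName": "7154"}}}'
```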
|
||||
## Python Ingestor
|
||||
|
||||
The web app is not meant to run directly connected to a Meshtastic node; it is designed
to run on a remote host without access to a physical Meshtastic device. Therefore, it only
accepts data through the API POST endpoints. The benefit is that multiple nodes across the
community can feed the dashboard with data; the web app handles messages and nodes
by ID, so there is no duplication.
|
||||
|
||||
For convenience, the directory `./data` contains a Python ingestor. It connects to a
|
||||
Meshtastic node via serial port or to a remote device that exposes the Meshtastic TCP
|
||||
or Bluetooth (BLE) interfaces to gather nodes and messages seen by the node.
|
||||
|
||||
```bash
|
||||
pacman -S python
|
||||
cd ./data
|
||||
python -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install -U meshtastic
|
||||
```
|
||||
|
||||
It uses the Meshtastic Python library to ingest mesh data and post nodes and messages
|
||||
to the configured potato-mesh instance.
|
||||
|
||||
Check out the `mesh.sh` ingestor script in the `./data` directory.
|
||||
|
||||
```bash
|
||||
POTATOMESH_INSTANCE=http://127.0.0.1:41447 API_TOKEN=1eb140fd-cab4-40be-b862-41c607762246 CONNECTION=/dev/ttyACM0 DEBUG=1 ./mesh.sh
|
||||
[2025-02-20T12:34:56.789012Z] [potato-mesh] [info] channel=0 context=daemon.main port='41447' target='http://127.0.0.1' Mesh daemon starting
|
||||
[...]
|
||||
[2025-02-20T12:34:57.012345Z] [potato-mesh] [debug] context=handlers.upsert_node node_id=!849b7154 short_name='7154' long_name='7154' Queued node upsert payload
|
||||
[2025-02-20T12:34:57.456789Z] [potato-mesh] [debug] context=handlers.upsert_node node_id=!ba653ae8 short_name='3ae8' long_name='3ae8' Queued node upsert payload
|
||||
[2025-02-20T12:34:58.001122Z] [potato-mesh] [debug] context=handlers.store_packet_dict channel=0 from_id='!9ee71c38' payload='Guten Morgen!' to_id='^all' Queued message payload
|
||||
```
|
||||
|
||||
Run the script with `POTATOMESH_INSTANCE` and `API_TOKEN` to keep updating
|
||||
node records and parsing new incoming messages. Enable debug output with `DEBUG=1`,
|
||||
specify the connection target with `CONNECTION` (default `/dev/ttyACM0`) or set it to
|
||||
an IP address (for example `192.168.1.20:4403`) to use the Meshtastic TCP
|
||||
interface. `CONNECTION` also accepts Bluetooth device addresses (e.g.,
|
||||
`ED:4D:9E:95:CF:60`) and the script attempts a BLE connection if available.
|
||||
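For example, the same script can target a serial device, a TCP-connected node, or a BLE address (the addresses reuse the examples above; the token is a placeholder):

```bash
# Serial device (default).
CONNECTION=/dev/ttyACM0 POTATOMESH_INSTANCE=http://127.0.0.1:41447 API_TOKEN=replace-me ./mesh.sh

# Meshtastic TCP interface on a remote node.
CONNECTION=192.168.1.20:4403 POTATOMESH_INSTANCE=http://127.0.0.1:41447 API_TOKEN=replace-me ./mesh.sh

# Bluetooth (BLE) device address.
CONNECTION=ED:4D:9E:95:CF:60 POTATOMESH_INSTANCE=http://127.0.0.1:41447 API_TOKEN=replace-me ./mesh.sh
```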
|
||||
## Demos
|
||||
|
||||
* <https://potatomesh.net/>
|
||||
* <https://vrs.kdd2105.ru/>
|
||||
* <https://potatomesh.stratospire.com/>
|
||||
* <https://es1tem.uk/>
|
||||
|
||||
## Docker
|
||||
|
||||
Docker images are published on GitHub for each release:
|
||||
|
||||
```bash
|
||||
docker pull ghcr.io/l5yth/potato-mesh/web:latest
|
||||
docker pull ghcr.io/l5yth/potato-mesh/ingestor:latest
|
||||
```
|
||||
|
||||
See the [Docker guide](DOCKER.md) for more details and custom deployment instructions.
|
||||
|
||||
## License
|
||||
|
||||
Apache v2.0, Contact <COM0@l5y.tech>
|
||||
|
||||
Join our community chat to discuss the dashboard or ask for technical support:
|
||||
[#potatomesh:dod.ngo](https://matrix.to/#/#potatomesh:dod.ngo)
|
||||
|
||||
configure.sh (Executable file, 183 lines)
@@ -0,0 +1,183 @@
|
||||
#!/bin/bash
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# PotatoMesh Configuration Script
|
||||
# This script helps you configure your PotatoMesh instance with your local settings
|
||||
|
||||
set -e
|
||||
|
||||
echo "🥔 PotatoMesh Configuration"
|
||||
echo "=========================="
|
||||
echo ""
|
||||
|
||||
# Check if .env exists, if not create from .env.example
|
||||
if [ ! -f .env ]; then
|
||||
if [ -f .env.example ]; then
|
||||
echo "📋 Creating .env file from .env.example..."
|
||||
cp .env.example .env
|
||||
else
|
||||
echo "📋 Creating new .env file..."
|
||||
touch .env
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "🔧 Let's configure your PotatoMesh instance!"
|
||||
echo ""
|
||||
|
||||
# Function to read input with default
|
||||
read_with_default() {
|
||||
local prompt="$1"
|
||||
local default="$2"
|
||||
local var_name="$3"
|
||||
|
||||
if [ -n "$default" ]; then
|
||||
read -p "$prompt [$default]: " input
|
||||
input=${input:-$default}
|
||||
else
|
||||
read -p "$prompt: " input
|
||||
fi
|
||||
|
||||
eval "$var_name='$input'"
|
||||
}
|
||||
|
||||
# Function to update .env file
|
||||
update_env() {
|
||||
local key="$1"
|
||||
local value="$2"
|
||||
|
||||
if grep -q "^$key=" .env; then
|
||||
# Update existing value
|
||||
sed -i.bak "s/^$key=.*/$key=$value/" .env
|
||||
else
|
||||
# Add new value
|
||||
echo "$key=$value" >> .env
|
||||
fi
|
||||
}
|
||||
|
||||
# Get current values from .env if they exist
|
||||
SITE_NAME=$(grep "^SITE_NAME=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "PotatoMesh Demo")
|
||||
CHANNEL=$(grep "^CHANNEL=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "#LongFast")
|
||||
FREQUENCY=$(grep "^FREQUENCY=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "915MHz")
|
||||
MAP_CENTER=$(grep "^MAP_CENTER=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "38.761944,-27.090833")
|
||||
MAX_DISTANCE=$(grep "^MAX_DISTANCE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "42")
|
||||
CONTACT_LINK=$(grep "^CONTACT_LINK=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "#potatomesh:dod.ngo")
|
||||
API_TOKEN=$(grep "^API_TOKEN=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
|
||||
POTATOMESH_IMAGE_ARCH=$(grep "^POTATOMESH_IMAGE_ARCH=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "linux-amd64")
|
||||
|
||||
echo "📍 Location Settings"
|
||||
echo "-------------------"
|
||||
read_with_default "Site Name (your mesh network name)" "$SITE_NAME" SITE_NAME
|
||||
read_with_default "Map Center (lat,lon)" "$MAP_CENTER" MAP_CENTER
|
||||
read_with_default "Max Distance (km)" "$MAX_DISTANCE" MAX_DISTANCE
|
||||
|
||||
echo ""
|
||||
echo "📡 Meshtastic Settings"
|
||||
echo "---------------------"
|
||||
read_with_default "Channel" "$CHANNEL" CHANNEL
|
||||
read_with_default "Frequency (868MHz, 915MHz, etc.)" "$FREQUENCY" FREQUENCY
|
||||
|
||||
echo ""
|
||||
echo "💬 Optional Settings"
|
||||
echo "-------------------"
|
||||
read_with_default "Chat link or Matrix room (optional)" "$CONTACT_LINK" CONTACT_LINK
|
||||
|
||||
echo ""
|
||||
echo "🛠 Docker Settings"
|
||||
echo "------------------"
|
||||
echo "Specify the Docker image architecture for your host (linux-amd64, linux-arm64, linux-armv7)."
|
||||
read_with_default "Docker image architecture" "$POTATOMESH_IMAGE_ARCH" POTATOMESH_IMAGE_ARCH
|
||||
|
||||
echo ""
|
||||
echo "🔐 Security Settings"
|
||||
echo "-------------------"
|
||||
echo "The API token is used for secure communication between the web app and ingestor."
|
||||
echo "You can provide your own custom token or let us generate a secure one for you."
|
||||
echo ""
|
||||
|
||||
if [ -z "$API_TOKEN" ]; then
|
||||
echo "No existing API token found. Generating a secure token..."
|
||||
API_TOKEN=$(openssl rand -hex 32 2>/dev/null || python3 -c "import secrets; print(secrets.token_hex(32))" 2>/dev/null || echo "your-secure-api-token-here")
|
||||
echo "✅ Generated secure API token: ${API_TOKEN:0:8}..."
|
||||
echo ""
|
||||
read -p "Use this generated token? (Y/n): " use_generated
|
||||
if [[ "$use_generated" =~ ^[Nn]$ ]]; then
|
||||
read -p "Enter your custom API token: " API_TOKEN
|
||||
fi
|
||||
else
|
||||
echo "Existing API token found: ${API_TOKEN:0:8}..."
|
||||
read -p "Keep existing token? (Y/n): " keep_existing
|
||||
if [[ "$keep_existing" =~ ^[Nn]$ ]]; then
|
||||
read -p "Enter new API token (or press Enter to generate): " new_token
|
||||
if [ -n "$new_token" ]; then
|
||||
API_TOKEN="$new_token"
|
||||
else
|
||||
echo "Generating new secure token..."
|
||||
API_TOKEN=$(openssl rand -hex 32 2>/dev/null || python3 -c "import secrets; print(secrets.token_hex(32))" 2>/dev/null || echo "your-secure-api-token-here")
|
||||
echo "✅ Generated new API token: ${API_TOKEN:0:8}..."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "📝 Updating .env file..."
|
||||
|
||||
# Update .env file
|
||||
update_env "SITE_NAME" "\"$SITE_NAME\""
|
||||
update_env "CHANNEL" "\"$CHANNEL\""
|
||||
update_env "FREQUENCY" "\"$FREQUENCY\""
|
||||
update_env "MAP_CENTER" "\"$MAP_CENTER\""
|
||||
update_env "MAX_DISTANCE" "$MAX_DISTANCE"
|
||||
update_env "CONTACT_LINK" "\"$CONTACT_LINK\""
|
||||
update_env "API_TOKEN" "$API_TOKEN"
|
||||
update_env "POTATOMESH_IMAGE_ARCH" "$POTATOMESH_IMAGE_ARCH"
|
||||
|
||||
# Migrate legacy connection settings and ensure defaults exist
|
||||
if grep -q "^MESH_SERIAL=" .env; then
|
||||
legacy_connection=$(grep "^MESH_SERIAL=" .env | head -n1 | cut -d'=' -f2-)
|
||||
if [ -n "$legacy_connection" ] && ! grep -q "^CONNECTION=" .env; then
|
||||
echo "♻️ Migrating legacy MESH_SERIAL value to CONNECTION"
|
||||
update_env "CONNECTION" "$legacy_connection"
|
||||
fi
|
||||
sed -i.bak '/^MESH_SERIAL=.*/d' .env
|
||||
fi
|
||||
|
||||
if ! grep -q "^CONNECTION=" .env; then
|
||||
echo "CONNECTION=/dev/ttyACM0" >> .env
|
||||
fi
|
||||
|
||||
if ! grep -q "^DEBUG=" .env; then
|
||||
echo "DEBUG=0" >> .env
|
||||
fi
|
||||
|
||||
# Clean up backup file
|
||||
rm -f .env.bak
|
||||
|
||||
echo ""
|
||||
echo "✅ Configuration complete!"
|
||||
echo ""
|
||||
echo "📋 Your settings:"
|
||||
echo " Site Name: $SITE_NAME"
|
||||
echo " Map Center: $MAP_CENTER"
|
||||
echo " Max Distance: ${MAX_DISTANCE}km"
|
||||
echo " Channel: $CHANNEL"
|
||||
echo " Frequency: $FREQUENCY"
|
||||
echo " Chat: ${CONTACT_LINK:-'Not set'}"
|
||||
echo " API Token: ${API_TOKEN:0:8}..."
|
||||
echo " Docker Image Arch: $POTATOMESH_IMAGE_ARCH"
|
||||
echo ""
|
||||
echo "🚀 You can now start PotatoMesh with:"
|
||||
echo " docker-compose up -d"
|
||||
echo ""
|
||||
echo "📖 For more configuration options, see the README.md"
|
||||
data/.gitignore (vendored, 2 lines changed)
@@ -2,3 +2,5 @@
|
||||
*.db-wal
|
||||
*.db-shm
|
||||
*.backup
|
||||
*.copy
|
||||
*.log
|
||||
|
||||
data/Dockerfile (Normal file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
# syntax=docker/dockerfile:1.6
|
||||
|
||||
ARG TARGETOS=linux
|
||||
ARG PYTHON_VERSION=3.12.6
|
||||
|
||||
# Linux production image
|
||||
FROM python:${PYTHON_VERSION}-alpine AS production-linux
|
||||
|
||||
ENV PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY data/requirements.txt ./
|
||||
RUN set -eux; \
|
||||
apk add --no-cache \
|
||||
tzdata \
|
||||
curl \
|
||||
libstdc++ \
|
||||
libgcc; \
|
||||
apk add --no-cache --virtual .build-deps \
|
||||
gcc \
|
||||
musl-dev \
|
||||
linux-headers \
|
||||
build-base; \
|
||||
python -m pip install --no-cache-dir -r requirements.txt; \
|
||||
apk del .build-deps
|
||||
|
||||
COPY data /app/data
|
||||
RUN addgroup -S potatomesh && \
|
||||
adduser -S potatomesh -G potatomesh && \
|
||||
adduser potatomesh dialout && \
|
||||
chown -R potatomesh:potatomesh /app
|
||||
|
||||
USER potatomesh
|
||||
|
||||
ENV CONNECTION=/dev/ttyACM0 \
|
||||
CHANNEL_INDEX=0 \
|
||||
DEBUG=0 \
|
||||
POTATOMESH_INSTANCE="" \
|
||||
API_TOKEN=""
|
||||
|
||||
CMD ["python", "-m", "data.mesh"]
|
||||
|
||||
# Windows production image
|
||||
FROM python:${PYTHON_VERSION}-windowsservercore-ltsc2022 AS production-windows
|
||||
|
||||
SHELL ["cmd", "/S", "/C"]
|
||||
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY data/requirements.txt ./
|
||||
RUN python -m pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
COPY data /app/data
|
||||
|
||||
USER ContainerUser
|
||||
|
||||
ENV CONNECTION=/dev/ttyACM0 \
|
||||
CHANNEL_INDEX=0 \
|
||||
DEBUG=0 \
|
||||
POTATOMESH_INSTANCE="" \
|
||||
API_TOKEN=""
|
||||
|
||||
CMD ["python", "-m", "data.mesh"]
|
||||
|
||||
FROM production-${TARGETOS} AS production
|
||||
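A minimal sketch for building the Linux variant of this image, assuming the repository root as the build context (the tag name is a placeholder):

```bash
# TARGETOS selects the final stage; the context must be the repository root
# because the Dockerfile copies data/requirements.txt and data/.
docker build -f data/Dockerfile --build-arg TARGETOS=linux -t potato-mesh-ingestor .
```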
@@ -0,0 +1,19 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Data utilities for the Potato Mesh synchronisation daemon.
|
||||
|
||||
The ``data.mesh`` module exposes helpers for reading Meshtastic node and
|
||||
message information before forwarding it to the accompanying web application.
|
||||
"""
|
||||
|
||||
data/instances.sql (Normal file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
-- Copyright (C) 2025 l5yth
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
PRAGMA journal_mode=WAL;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS instances (
|
||||
id TEXT PRIMARY KEY,
|
||||
domain TEXT NOT NULL,
|
||||
pubkey TEXT NOT NULL,
|
||||
name TEXT,
|
||||
version TEXT,
|
||||
channel TEXT,
|
||||
frequency TEXT,
|
||||
latitude REAL,
|
||||
longitude REAL,
|
||||
last_update_time INTEGER,
|
||||
is_private BOOLEAN NOT NULL DEFAULT 0,
|
||||
signature TEXT
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_instances_domain ON instances(domain);
|
||||
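As a quick way to inspect this schema, it can be loaded into a scratch SQLite database with the `sqlite3` CLI; the database filename here is only an illustration, not the path the web app uses:

```bash
# Create the instances table in a scratch database and list known instances.
sqlite3 instances.db < data/instances.sql
sqlite3 instances.db "SELECT domain, name, channel, frequency FROM instances;"
```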
data/mesh.py (282 lines changed)
@@ -1,253 +1,43 @@
|
||||
#!/usr/bin/env python3
|
||||
import json, os, sqlite3, time, threading, signal
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Backward-compatible entry point for the mesh ingestor daemon."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
from meshtastic.serial_interface import SerialInterface
|
||||
from meshtastic.mesh_interface import MeshInterface
|
||||
from pubsub import pub
|
||||
from google.protobuf.json_format import MessageToDict
|
||||
from google.protobuf.message import Message as ProtoMessage
|
||||
try:
|
||||
from . import mesh_ingestor as _mesh_ingestor
|
||||
except ImportError:
|
||||
if __package__ in {None, ""}:
|
||||
package_dir = Path(__file__).resolve().parent
|
||||
project_root = str(package_dir.parent)
|
||||
if project_root not in sys.path:
|
||||
sys.path.insert(0, project_root)
|
||||
_mesh_ingestor = importlib.import_module("data.mesh_ingestor")
|
||||
else:
|
||||
raise
|
||||
|
||||
# --- Config (env overrides) ---------------------------------------------------
|
||||
DB = os.environ.get("MESH_DB", "mesh.db")
|
||||
PORT = os.environ.get("MESH_SERIAL", "/dev/ttyACM0")
|
||||
SNAPSHOT_SECS = int(os.environ.get("MESH_SNAPSHOT_SECS", "30"))
|
||||
CHANNEL_INDEX = int(os.environ.get("MESH_CHANNEL_INDEX", "0"))
|
||||
DEBUG = os.environ.get("DEBUG") == "1"
|
||||
|
||||
# --- DB setup -----------------------------------------------------------------
|
||||
nodeSchema = Path(__file__).with_name("nodes.sql").read_text()
|
||||
conn = sqlite3.connect(DB, check_same_thread=False)
|
||||
conn.executescript(nodeSchema)
|
||||
msgSchema = Path(__file__).with_name("messages.sql").read_text()
|
||||
conn.executescript(msgSchema)
|
||||
conn.commit()
|
||||
|
||||
DB_LOCK = threading.Lock()
|
||||
|
||||
|
||||
def _get(obj, key, default=None):
|
||||
"""Return value for key/attribute from dicts or objects."""
|
||||
if isinstance(obj, dict):
|
||||
return obj.get(key, default)
|
||||
return getattr(obj, key, default)
|
||||
|
||||
|
||||
# --- Node upsert --------------------------------------------------------------
|
||||
def upsert_node(node_id, n):
|
||||
user = _get(n, "user") or {}
|
||||
met = _get(n, "deviceMetrics") or {}
|
||||
pos = _get(n, "position") or {}
|
||||
lh = _get(n, "lastHeard")
|
||||
pt = _get(pos, "time")
|
||||
now = int(time.time())
|
||||
if pt is not None and pt > now:
|
||||
pt = None
|
||||
if lh is not None and lh > now:
|
||||
lh = now
|
||||
if pt is not None and (lh is None or lh < pt):
|
||||
lh = pt
|
||||
row = (
|
||||
node_id,
|
||||
_get(n, "num"),
|
||||
_get(user, "shortName"),
|
||||
_get(user, "longName"),
|
||||
_get(user, "macaddr"),
|
||||
_get(user, "hwModel") or _get(n, "hwModel"),
|
||||
_get(user, "role"),
|
||||
_get(user, "publicKey"),
|
||||
_get(user, "isUnmessagable"),
|
||||
_get(n, "isFavorite"),
|
||||
_get(n, "hopsAway"),
|
||||
_get(n, "snr"),
|
||||
lh,
|
||||
lh,
|
||||
_get(met, "batteryLevel"),
|
||||
_get(met, "voltage"),
|
||||
_get(met, "channelUtilization"),
|
||||
_get(met, "airUtilTx"),
|
||||
_get(met, "uptimeSeconds"),
|
||||
pt,
|
||||
_get(pos, "locationSource"),
|
||||
_get(pos, "latitude"),
|
||||
_get(pos, "longitude"),
|
||||
_get(pos, "altitude"),
|
||||
)
|
||||
with DB_LOCK:
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO nodes(node_id,num,short_name,long_name,macaddr,hw_model,role,public_key,is_unmessagable,is_favorite,
|
||||
hops_away,snr,last_heard,first_heard,battery_level,voltage,channel_utilization,air_util_tx,uptime_seconds,
|
||||
position_time,location_source,latitude,longitude,altitude)
|
||||
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
|
||||
ON CONFLICT(node_id) DO UPDATE SET
|
||||
num=excluded.num, short_name=excluded.short_name, long_name=excluded.long_name, macaddr=excluded.macaddr,
|
||||
hw_model=excluded.hw_model, role=excluded.role, public_key=excluded.public_key, is_unmessagable=excluded.is_unmessagable,
|
||||
is_favorite=excluded.is_favorite, hops_away=excluded.hops_away, snr=excluded.snr, last_heard=excluded.last_heard,
|
||||
battery_level=excluded.battery_level, voltage=excluded.voltage, channel_utilization=excluded.channel_utilization,
|
||||
air_util_tx=excluded.air_util_tx, uptime_seconds=excluded.uptime_seconds, position_time=excluded.position_time,
|
||||
location_source=excluded.location_source, latitude=excluded.latitude, longitude=excluded.longitude,
|
||||
altitude=excluded.altitude
|
||||
""",
|
||||
row,
|
||||
)
|
||||
|
||||
if DEBUG:
|
||||
short = _get(user, "shortName")
|
||||
print(f"[debug] upserted node {node_id} shortName={short!r}")
|
||||
|
||||
|
||||
# --- Message logging via PubSub -----------------------------------------------
|
||||
def _iso(ts: int | float) -> str:
|
||||
import datetime
|
||||
|
||||
return (
|
||||
datetime.datetime.fromtimestamp(int(ts), datetime.UTC)
|
||||
.isoformat()
|
||||
.replace("+00:00", "Z")
|
||||
)
|
||||
|
||||
|
||||
def _first(d: dict, *names, default=None):
|
||||
"""Return first present key from names (supports nested 'a.b' lookups)."""
|
||||
for name in names:
|
||||
cur = d
|
||||
parts = name.split(".")
|
||||
ok = True
|
||||
for p in parts:
|
||||
if isinstance(cur, dict) and p in cur:
|
||||
cur = cur[p]
|
||||
else:
|
||||
ok = False
|
||||
break
|
||||
if ok:
|
||||
return cur
|
||||
return default
|
||||
|
||||
|
||||
def _pkt_to_dict(packet) -> dict:
|
||||
"""Convert protobuf MeshPacket or already-dict into a JSON-friendly dict."""
|
||||
if isinstance(packet, dict):
|
||||
return packet
|
||||
if isinstance(packet, ProtoMessage):
|
||||
return MessageToDict(
|
||||
packet, preserving_proto_field_name=True, use_integers_for_enums=False
|
||||
)
|
||||
# Last resort: try to read attributes
|
||||
try:
|
||||
return json.loads(json.dumps(packet, default=lambda o: str(o)))
|
||||
except Exception:
|
||||
return {"_unparsed": str(packet)}
|
||||
|
||||
|
||||
def store_packet_dict(p: dict):
|
||||
"""
|
||||
Store only TEXT messages (decoded.payload.text) to the DB.
|
||||
Safe against snake/camel case differences.
|
||||
"""
|
||||
dec = p.get("decoded") or {}
|
||||
text = _first(dec, "payload.text", "text", default=None)
|
||||
if not text:
|
||||
return # ignore non-text packets
|
||||
|
||||
# port filter: only keep packets from the TEXT_MESSAGE_APP port
|
||||
portnum_raw = _first(dec, "portnum", default=None)
|
||||
portnum = str(portnum_raw).upper() if portnum_raw is not None else None
|
||||
if portnum and portnum not in {"1", "TEXT_MESSAGE_APP"}:
|
||||
return # ignore non-text-message ports
|
||||
|
||||
# channel (prefer decoded.channel if present; else top-level)
|
||||
ch = _first(dec, "channel", default=None)
|
||||
if ch is None:
|
||||
ch = _first(p, "channel", default=0)
|
||||
try:
|
||||
ch = int(ch)
|
||||
except Exception:
|
||||
ch = 0
|
||||
|
||||
# timestamps & ids
|
||||
rx_time = int(_first(p, "rxTime", "rx_time", default=time.time()))
|
||||
from_id = _first(p, "fromId", "from_id", "from", default=None)
|
||||
to_id = _first(p, "toId", "to_id", "to", default=None)
|
||||
|
||||
# link metrics
|
||||
snr = _first(p, "snr", "rx_snr", "rxSnr", default=None)
|
||||
rssi = _first(p, "rssi", "rx_rssi", "rxRssi", default=None)
|
||||
hop = _first(p, "hopLimit", "hop_limit", default=None)
|
||||
|
||||
row = (
|
||||
rx_time,
|
||||
_iso(rx_time),
|
||||
from_id,
|
||||
to_id,
|
||||
ch,
|
||||
str(portnum) if portnum is not None else None,
|
||||
text,
|
||||
float(snr) if snr is not None else None,
|
||||
int(rssi) if rssi is not None else None,
|
||||
int(hop) if hop is not None else None,
|
||||
)
|
||||
with DB_LOCK:
|
||||
conn.execute(
|
||||
"""INSERT INTO messages
|
||||
(rx_time, rx_iso, from_id, to_id, channel, portnum, text, snr, rssi, hop_limit)
|
||||
VALUES (?,?,?,?,?,?,?,?,?,?)""",
|
||||
row,
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
if DEBUG:
|
||||
print(
|
||||
f"[debug] stored message from {from_id!r} to {to_id!r} ch={ch} text={text!r}"
|
||||
)
|
||||
|
||||
|
||||
# PubSub receive handler
|
||||
def on_receive(packet, interface):
|
||||
p = None
|
||||
try:
|
||||
p = _pkt_to_dict(packet)
|
||||
store_packet_dict(p)
|
||||
except Exception as e:
|
||||
info = list(p.keys()) if isinstance(p, dict) else type(packet)
|
||||
print(f"[warn] failed to store packet: {e} | info: {info}")
|
||||
|
||||
|
||||
# --- Main ---------------------------------------------------------------------
|
||||
def main():
|
||||
# Subscribe to PubSub topics (reliable in current meshtastic)
|
||||
pub.subscribe(on_receive, "meshtastic.receive")
|
||||
|
||||
iface = SerialInterface(devPath=PORT)
|
||||
|
||||
stop = threading.Event()
|
||||
|
||||
def handle_sig(*_):
|
||||
stop.set()
|
||||
|
||||
signal.signal(signal.SIGINT, handle_sig)
|
||||
signal.signal(signal.SIGTERM, handle_sig)
|
||||
|
||||
print(f"Mesh daemon: nodes+messages → {DB} | port={PORT} | channel={CHANNEL_INDEX}")
|
||||
while not stop.is_set():
|
||||
try:
|
||||
nodes = getattr(iface, "nodes", {}) or {}
|
||||
for node_id, n in nodes.items():
|
||||
upsert_node(node_id, n)
|
||||
with DB_LOCK:
|
||||
conn.commit()
|
||||
except Exception as e:
|
||||
print(f"[warn] failed to update node snapshot: {e}")
|
||||
stop.wait(SNAPSHOT_SECS)
|
||||
|
||||
try:
|
||||
iface.close()
|
||||
except Exception:
|
||||
pass
|
||||
with DB_LOCK:
|
||||
conn.commit()
|
||||
conn.close()
|
||||
# Expose the refactored mesh ingestor module under the legacy name so existing
|
||||
# imports (``import data.mesh as mesh``) continue to work. Attribute access and
|
||||
# monkeypatching operate directly on the shared module instance.
|
||||
sys.modules[__name__] = _mesh_ingestor
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
_mesh_ingestor.main()
|
||||
|
||||
data/mesh.sh (17 lines changed)
@@ -1,7 +1,22 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
python -m venv .venv
|
||||
source .venv/bin/activate
|
||||
pip install -U meshtastic
|
||||
pip install -U meshtastic black pytest
|
||||
exec python mesh.py
|
||||
|
||||
data/mesh_ingestor/__init__.py (Normal file, 129 lines)
@@ -0,0 +1,129 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""High-level API for the potato-mesh ingestor."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import signal as signal # re-exported for compatibility
|
||||
import threading as threading # re-exported for compatibility
|
||||
import sys
|
||||
import types
|
||||
|
||||
from . import channels, config, daemon, handlers, interfaces, queue, serialization
|
||||
|
||||
__all__: list[str] = []
|
||||
|
||||
|
||||
def _reexport(module) -> None:
|
||||
names = getattr(module, "__all__", [])
|
||||
for name in names:
|
||||
globals()[name] = getattr(module, name)
|
||||
__all__.extend(names)
|
||||
|
||||
|
||||
def _export_constants() -> None:
|
||||
globals()["json"] = queue.json
|
||||
globals()["urllib"] = queue.urllib
|
||||
globals()["glob"] = interfaces.glob
|
||||
__all__.extend(["json", "urllib", "glob", "threading", "signal"])
|
||||
|
||||
|
||||
for _module in (channels, daemon, handlers, interfaces, queue, serialization):
|
||||
_reexport(_module)
|
||||
|
||||
_export_constants()
|
||||
|
||||
_CONFIG_ATTRS = {
|
||||
"CONNECTION",
|
||||
"SNAPSHOT_SECS",
|
||||
"CHANNEL_INDEX",
|
||||
"DEBUG",
|
||||
"INSTANCE",
|
||||
"API_TOKEN",
|
||||
"LORA_FREQ",
|
||||
"MODEM_PRESET",
|
||||
"_RECONNECT_INITIAL_DELAY_SECS",
|
||||
"_RECONNECT_MAX_DELAY_SECS",
|
||||
"_CLOSE_TIMEOUT_SECS",
|
||||
"_debug_log",
|
||||
}
|
||||
|
||||
# Legacy export maintained for backwards compatibility.
|
||||
_CONFIG_ATTRS.add("PORT")
|
||||
|
||||
_INTERFACE_ATTRS = {"BLEInterface", "SerialInterface", "TCPInterface"}
|
||||
|
||||
_QUEUE_ATTRS = set(queue.__all__)
|
||||
_HANDLER_ATTRS = set(handlers.__all__)
|
||||
_DAEMON_ATTRS = set(daemon.__all__)
|
||||
_SERIALIZATION_ATTRS = set(serialization.__all__)
|
||||
_INTERFACE_EXPORTS = set(interfaces.__all__)
|
||||
|
||||
__all__.extend(sorted(_CONFIG_ATTRS))
|
||||
__all__.extend(sorted(_INTERFACE_ATTRS))
|
||||
|
||||
|
||||
class _MeshIngestorModule(types.ModuleType):
|
||||
"""Module proxy that forwards config and interface state."""
|
||||
|
||||
def __getattr__(self, name: str): # type: ignore[override]
|
||||
"""Resolve attributes by delegating to the underlying submodules."""
|
||||
|
||||
if name in _CONFIG_ATTRS:
|
||||
return getattr(config, name)
|
||||
if name in _INTERFACE_ATTRS:
|
||||
return getattr(interfaces, name)
|
||||
if name in _INTERFACE_EXPORTS:
|
||||
return getattr(interfaces, name)
|
||||
raise AttributeError(name)
|
||||
|
||||
def __setattr__(self, name: str, value): # type: ignore[override]
|
||||
"""Propagate assignments to the appropriate submodule."""
|
||||
|
||||
if name in _CONFIG_ATTRS:
|
||||
setattr(config, name, value)
|
||||
super().__setattr__(name, value)
|
||||
return
|
||||
if name in _INTERFACE_ATTRS:
|
||||
setattr(interfaces, name, value)
|
||||
super().__setattr__(name, value)
|
||||
return
|
||||
handled = False
|
||||
if name in _INTERFACE_EXPORTS:
|
||||
setattr(interfaces, name, value)
|
||||
super().__setattr__(name, getattr(interfaces, name, value))
|
||||
handled = True
|
||||
if name in _QUEUE_ATTRS:
|
||||
setattr(queue, name, value)
|
||||
super().__setattr__(name, getattr(queue, name, value))
|
||||
handled = True
|
||||
if name in _HANDLER_ATTRS:
|
||||
setattr(handlers, name, value)
|
||||
super().__setattr__(name, getattr(handlers, name, value))
|
||||
handled = True
|
||||
if name in _DAEMON_ATTRS:
|
||||
setattr(daemon, name, value)
|
||||
super().__setattr__(name, getattr(daemon, name, value))
|
||||
handled = True
|
||||
if name in _SERIALIZATION_ATTRS:
|
||||
setattr(serialization, name, value)
|
||||
super().__setattr__(name, getattr(serialization, name, value))
|
||||
handled = True
|
||||
if handled:
|
||||
return
|
||||
super().__setattr__(name, value)
|
||||
|
||||
|
||||
sys.modules[__name__].__class__ = _MeshIngestorModule
|
||||
data/mesh_ingestor/channels.py (Normal file, 223 lines)
@@ -0,0 +1,223 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Helpers for capturing and exposing mesh channel metadata."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Any, Iterable, Iterator
|
||||
|
||||
from . import config
|
||||
|
||||
try: # pragma: no cover - optional dependency for enum introspection
|
||||
from meshtastic.protobuf import channel_pb2
|
||||
except Exception: # pragma: no cover - exercised in environments without protobufs
|
||||
channel_pb2 = None # type: ignore[assignment]
|
||||
|
||||
_ROLE_PRIMARY = 1
|
||||
_ROLE_SECONDARY = 2
|
||||
|
||||
if channel_pb2 is not None: # pragma: no branch - evaluated once at import time
|
||||
try:
|
||||
_ROLE_PRIMARY = int(channel_pb2.Channel.Role.PRIMARY)
|
||||
_ROLE_SECONDARY = int(channel_pb2.Channel.Role.SECONDARY)
|
||||
except Exception: # pragma: no cover - defensive, version specific
|
||||
_ROLE_PRIMARY = 1
|
||||
_ROLE_SECONDARY = 2
|
||||
|
||||
_CHANNEL_MAPPINGS: tuple[tuple[int, str], ...] = ()
|
||||
_CHANNEL_LOOKUP: dict[int, str] = {}
|
||||
|
||||
|
||||
def _iter_channel_objects(channels_obj: Any) -> Iterator[Any]:
|
||||
"""Yield channel descriptors from ``channels_obj``.
|
||||
|
||||
The real Meshtastic API exposes channels via protobuf containers that are
|
||||
list-like. This helper converts the container into a deterministic iterator
|
||||
while avoiding runtime errors if an unexpected type is supplied.
|
||||
"""
|
||||
|
||||
if channels_obj is None:
|
||||
return iter(())
|
||||
|
||||
if isinstance(channels_obj, dict):
|
||||
return iter(channels_obj.values())
|
||||
|
||||
if isinstance(channels_obj, Iterable):
|
||||
return iter(list(channels_obj))
|
||||
|
||||
length_fn = getattr(channels_obj, "__len__", None)
|
||||
getitem = getattr(channels_obj, "__getitem__", None)
|
||||
if callable(length_fn) and callable(getitem):
|
||||
try:
|
||||
length = int(length_fn())
|
||||
except Exception: # pragma: no cover - defensive only
|
||||
length = None
|
||||
if length is not None and length >= 0:
|
||||
snapshot = []
|
||||
for index in range(length):
|
||||
try:
|
||||
snapshot.append(getitem(index))
|
||||
except Exception: # pragma: no cover - best effort copy
|
||||
break
|
||||
return iter(snapshot)
|
||||
|
||||
return iter(())
|
||||
|
||||
|
||||
def _primary_channel_name() -> str | None:
|
||||
"""Return the name to use for the primary channel when available."""
|
||||
|
||||
preset = getattr(config, "MODEM_PRESET", None)
|
||||
if isinstance(preset, str) and preset.strip():
|
||||
return preset
|
||||
env_name = os.environ.get("CHANNEL", "").strip()
|
||||
if env_name:
|
||||
return env_name
|
||||
return None
|
||||
|
||||
|
||||
def _normalize_role(role: Any) -> int | None:
|
||||
"""Convert a channel role descriptor into an integer value."""
|
||||
|
||||
if isinstance(role, int):
|
||||
return role
|
||||
if isinstance(role, str):
|
||||
value = role.strip().upper()
|
||||
if value == "PRIMARY":
|
||||
return _ROLE_PRIMARY
|
||||
if value == "SECONDARY":
|
||||
return _ROLE_SECONDARY
|
||||
try:
|
||||
return int(value)
|
||||
except ValueError:
|
||||
return None
|
||||
name_attr = getattr(role, "name", None)
|
||||
if isinstance(name_attr, str):
|
||||
return _normalize_role(name_attr)
|
||||
value_attr = getattr(role, "value", None)
|
||||
if isinstance(value_attr, int):
|
||||
return value_attr
|
||||
try:
|
||||
return int(role) # type: ignore[arg-type]
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def _channel_tuple(channel_obj: Any) -> tuple[int, str] | None:
|
||||
"""Return ``(index, name)`` for ``channel_obj`` when resolvable."""
|
||||
|
||||
role_value = _normalize_role(getattr(channel_obj, "role", None))
|
||||
if role_value == _ROLE_PRIMARY:
|
||||
channel_index = 0
|
||||
channel_name = _primary_channel_name()
|
||||
elif role_value == _ROLE_SECONDARY:
|
||||
raw_index = getattr(channel_obj, "index", None)
|
||||
try:
|
||||
channel_index = int(raw_index)
|
||||
except Exception:
|
||||
channel_index = None
|
||||
settings = getattr(channel_obj, "settings", None)
|
||||
channel_name = getattr(settings, "name", None) if settings else None
|
||||
else:
|
||||
return None
|
||||
|
||||
if not isinstance(channel_index, int):
|
||||
return None
|
||||
|
||||
if isinstance(channel_name, str):
|
||||
channel_name = channel_name.strip()
|
||||
else:
|
||||
channel_name = None
|
||||
|
||||
if not channel_name:
|
||||
return None
|
||||
|
||||
return channel_index, channel_name
|
||||
|
||||
|
||||
def capture_from_interface(iface: Any) -> None:
|
||||
"""Populate the channel cache by inspecting ``iface`` when possible."""
|
||||
|
||||
global _CHANNEL_MAPPINGS, _CHANNEL_LOOKUP
|
||||
|
||||
if iface is None or _CHANNEL_MAPPINGS:
|
||||
return
|
||||
|
||||
try:
|
||||
wait_for_config = getattr(iface, "waitForConfig", None)
|
||||
if callable(wait_for_config):
|
||||
wait_for_config()
|
||||
except Exception: # pragma: no cover - hardware dependent safeguard
|
||||
pass
|
||||
|
||||
local_node = getattr(iface, "localNode", None)
|
||||
channels_obj = getattr(local_node, "channels", None) if local_node else None
|
||||
|
||||
channel_entries: list[tuple[int, str]] = []
|
||||
seen_indices: set[int] = set()
|
||||
for candidate in _iter_channel_objects(channels_obj):
|
||||
result = _channel_tuple(candidate)
|
||||
if result is None:
|
||||
continue
|
||||
index, name = result
|
||||
if index in seen_indices:
|
||||
continue
|
||||
channel_entries.append((index, name))
|
||||
seen_indices.add(index)
|
||||
|
||||
if not channel_entries:
|
||||
return
|
||||
|
||||
_CHANNEL_MAPPINGS = tuple(channel_entries)
|
||||
_CHANNEL_LOOKUP = {index: name for index, name in _CHANNEL_MAPPINGS}
|
||||
|
||||
config._debug_log(
|
||||
"Captured channel metadata",
|
||||
context="channels.capture",
|
||||
severity="info",
|
||||
always=True,
|
||||
channels=_CHANNEL_MAPPINGS,
|
||||
)
|
||||
|
||||
|
||||
def channel_mappings() -> tuple[tuple[int, str], ...]:
|
||||
"""Return the cached ``(index, name)`` channel tuples."""
|
||||
|
||||
return _CHANNEL_MAPPINGS
|
||||
|
||||
|
||||
def channel_name(channel_index: int | None) -> str | None:
|
||||
"""Return the channel name for ``channel_index`` when known."""
|
||||
|
||||
if channel_index is None:
|
||||
return None
|
||||
return _CHANNEL_LOOKUP.get(int(channel_index))
|
||||
|
||||
|
||||
def _reset_channel_cache() -> None:
|
||||
"""Clear cached channel data. Intended for use in tests only."""
|
||||
|
||||
global _CHANNEL_MAPPINGS, _CHANNEL_LOOKUP
|
||||
_CHANNEL_MAPPINGS = ()
|
||||
_CHANNEL_LOOKUP = {}
|
||||
|
||||
|
||||
__all__ = [
|
||||
"capture_from_interface",
|
||||
"channel_mappings",
|
||||
"channel_name",
|
||||
"_reset_channel_cache",
|
||||
]
|
||||
data/mesh_ingestor/config.py (Normal file, 153 lines)
@@ -0,0 +1,153 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Configuration helpers for the potato-mesh ingestor."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from types import ModuleType
|
||||
from typing import Any
|
||||
|
||||
DEFAULT_SNAPSHOT_SECS = 60
|
||||
"""Default interval, in seconds, between state snapshot uploads."""
|
||||
|
||||
DEFAULT_CHANNEL_INDEX = 0
|
||||
"""Default LoRa channel index used when none is specified."""
|
||||
|
||||
DEFAULT_RECONNECT_INITIAL_DELAY_SECS = 5.0
|
||||
"""Initial reconnection delay applied after connection loss."""
|
||||
|
||||
DEFAULT_RECONNECT_MAX_DELAY_SECS = 60.0
|
||||
"""Maximum reconnection backoff delay applied by the ingestor."""
|
||||
|
||||
DEFAULT_CLOSE_TIMEOUT_SECS = 5.0
|
||||
"""Grace period for interface shutdown routines to complete."""
|
||||
|
||||
DEFAULT_INACTIVITY_RECONNECT_SECS = float(60 * 60)
|
||||
"""Interval before forcing a reconnect when no packets are observed."""
|
||||
|
||||
DEFAULT_ENERGY_ONLINE_DURATION_SECS = 300.0
|
||||
"""Duration to stay online before entering a low-power sleep cycle."""
|
||||
|
||||
DEFAULT_ENERGY_SLEEP_SECS = float(6 * 60 * 60)
|
||||
"""Sleep duration used when energy saving mode is active."""
|
||||
|
||||
CONNECTION = os.environ.get("CONNECTION") or os.environ.get("MESH_SERIAL")
|
||||
"""Optional connection target for the mesh interface.
|
||||
|
||||
When unset, platform-specific defaults will be inferred by the interface
|
||||
implementations. The legacy :envvar:`MESH_SERIAL` environment variable is still
|
||||
accepted for backwards compatibility.
|
||||
"""
|
||||
|
||||
SNAPSHOT_SECS = DEFAULT_SNAPSHOT_SECS
|
||||
"""Interval, in seconds, between state snapshot uploads."""
|
||||
|
||||
CHANNEL_INDEX = int(os.environ.get("CHANNEL_INDEX", str(DEFAULT_CHANNEL_INDEX)))
|
||||
"""Index of the LoRa channel to select when connecting."""
|
||||
|
||||
DEBUG = os.environ.get("DEBUG") == "1"
|
||||
INSTANCE = os.environ.get("POTATOMESH_INSTANCE", "").rstrip("/")
|
||||
API_TOKEN = os.environ.get("API_TOKEN", "")
|
||||
ENERGY_SAVING = os.environ.get("ENERGY_SAVING") == "1"
|
||||
"""When ``True``, enables the ingestor's energy saving mode."""
|
||||
|
||||
LORA_FREQ: int | None = None
|
||||
"""Frequency of the local node's configured LoRa region in MHz."""
|
||||
|
||||
MODEM_PRESET: str | None = None
|
||||
"""CamelCase modem preset name reported by the local node."""
|
||||
|
||||
_RECONNECT_INITIAL_DELAY_SECS = DEFAULT_RECONNECT_INITIAL_DELAY_SECS
|
||||
_RECONNECT_MAX_DELAY_SECS = DEFAULT_RECONNECT_MAX_DELAY_SECS
|
||||
_CLOSE_TIMEOUT_SECS = DEFAULT_CLOSE_TIMEOUT_SECS
|
||||
_INACTIVITY_RECONNECT_SECS = DEFAULT_INACTIVITY_RECONNECT_SECS
|
||||
_ENERGY_ONLINE_DURATION_SECS = DEFAULT_ENERGY_ONLINE_DURATION_SECS
|
||||
_ENERGY_SLEEP_SECS = DEFAULT_ENERGY_SLEEP_SECS
|
||||
|
||||
# Backwards compatibility shim for legacy imports.
|
||||
PORT = CONNECTION
|
||||
|
||||
|
||||
def _debug_log(
|
||||
message: str,
|
||||
*,
|
||||
context: str | None = None,
|
||||
severity: str = "debug",
|
||||
always: bool = False,
|
||||
**metadata: Any,
|
||||
) -> None:
|
||||
"""Print ``message`` with a UTC timestamp when ``DEBUG`` is enabled.
|
||||
|
||||
Parameters:
|
||||
message: Text to display when debug logging is active.
|
||||
context: Optional logical component emitting the message.
|
||||
severity: Log level label to embed in the formatted output.
|
||||
always: When ``True``, bypasses the :data:`DEBUG` guard.
|
||||
**metadata: Additional structured log metadata.
|
||||
"""
|
||||
|
||||
normalized_severity = severity.lower()
|
||||
|
||||
if not DEBUG and not always and normalized_severity == "debug":
|
||||
return
|
||||
|
||||
timestamp = datetime.now(timezone.utc).isoformat(timespec="milliseconds")
|
||||
timestamp = timestamp.replace("+00:00", "Z")
|
||||
parts = [f"[{timestamp}]", "[potato-mesh]", f"[{normalized_severity}]"]
|
||||
if context:
|
||||
parts.append(f"context={context}")
|
||||
for key, value in sorted(metadata.items()):
|
||||
parts.append(f"{key}={value!r}")
|
||||
parts.append(message)
|
||||
print(" ".join(parts))
|
||||
|
||||
|
||||
__all__ = [
|
||||
"CONNECTION",
|
||||
"SNAPSHOT_SECS",
|
||||
"CHANNEL_INDEX",
|
||||
"DEBUG",
|
||||
"INSTANCE",
|
||||
"API_TOKEN",
|
||||
"ENERGY_SAVING",
|
||||
"LORA_FREQ",
|
||||
"MODEM_PRESET",
|
||||
"_RECONNECT_INITIAL_DELAY_SECS",
|
||||
"_RECONNECT_MAX_DELAY_SECS",
|
||||
"_CLOSE_TIMEOUT_SECS",
|
||||
"_INACTIVITY_RECONNECT_SECS",
|
||||
"_ENERGY_ONLINE_DURATION_SECS",
|
||||
"_ENERGY_SLEEP_SECS",
|
||||
"_debug_log",
|
||||
]
|
||||
|
||||
|
||||
class _ConfigModule(ModuleType):
|
||||
"""Module proxy that keeps connection aliases synchronised."""
|
||||
|
||||
def __setattr__(self, name: str, value: Any) -> None: # type: ignore[override]
|
||||
"""Propagate CONNECTION/PORT assignments to both attributes."""
|
||||
|
||||
if name in {"CONNECTION", "PORT"}:
|
||||
super().__setattr__("CONNECTION", value)
|
||||
super().__setattr__("PORT", value)
|
||||
return
|
||||
super().__setattr__(name, value)
|
||||
|
||||
|
||||
sys.modules[__name__].__class__ = _ConfigModule
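# Aliasing sketch: because the module's class is swapped to _ConfigModule,
# assigning either attribute from outside updates both, so legacy callers that
# still set PORT keep CONNECTION in sync (the import path and device strings
# below are illustrative assumptions only).
#
#   from mesh_ingestor import config
#
#   config.PORT = "/dev/ttyUSB0"
#   assert config.CONNECTION == "/dev/ttyUSB0"
#   config.CONNECTION = "192.168.1.20"
#   assert config.PORT == "192.168.1.20"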
|
||||
data/mesh_ingestor/daemon.py (new file, 472 lines)
@@ -0,0 +1,472 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Runtime entry point for the mesh ingestor."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
import signal
|
||||
import threading
|
||||
import time
|
||||
|
||||
from pubsub import pub
|
||||
|
||||
from . import config, handlers, interfaces
|
||||
|
||||
_RECEIVE_TOPICS = (
|
||||
"meshtastic.receive",
|
||||
"meshtastic.receive.text",
|
||||
"meshtastic.receive.position",
|
||||
"meshtastic.receive.user",
|
||||
"meshtastic.receive.POSITION_APP",
|
||||
"meshtastic.receive.NODEINFO_APP",
|
||||
"meshtastic.receive.NEIGHBORINFO_APP",
|
||||
"meshtastic.receive.TEXT_MESSAGE_APP",
|
||||
"meshtastic.receive.TELEMETRY_APP",
|
||||
)
|
||||
|
||||
|
||||
def _event_wait_allows_default_timeout() -> bool:
|
||||
"""Return ``True`` when :meth:`threading.Event.wait` accepts ``timeout``.
|
||||
|
||||
The behaviour changed between Python versions; this helper shields the
|
||||
daemon from ``TypeError`` when the default timeout parameter is absent.
|
||||
"""
|
||||
|
||||
try:
|
||||
wait_signature = inspect.signature(threading.Event.wait)
|
||||
except (TypeError, ValueError): # pragma: no cover
|
||||
return True
|
||||
|
||||
parameters = list(wait_signature.parameters.values())
|
||||
if len(parameters) <= 1:
|
||||
return True
|
||||
|
||||
timeout_parameter = parameters[1]
|
||||
if timeout_parameter.kind in (
|
||||
inspect.Parameter.VAR_POSITIONAL,
|
||||
inspect.Parameter.VAR_KEYWORD,
|
||||
):
|
||||
return True
|
||||
|
||||
return timeout_parameter.default is not inspect._empty
|
||||
|
||||
|
||||
def _subscribe_receive_topics() -> list[str]:
|
||||
"""Subscribe the packet handler to all receive-related pubsub topics."""
|
||||
|
||||
subscribed = []
|
||||
for topic in _RECEIVE_TOPICS:
|
||||
try:
|
||||
pub.subscribe(handlers.on_receive, topic)
|
||||
subscribed.append(topic)
|
||||
except Exception as exc: # pragma: no cover
|
||||
config._debug_log(f"failed to subscribe to {topic!r}: {exc}")
|
||||
return subscribed
|
||||
|
||||
|
||||
def _node_items_snapshot(
|
||||
nodes_obj, retries: int = 3
|
||||
) -> list[tuple[str, object]] | None:
|
||||
"""Snapshot ``nodes_obj`` to avoid iteration errors during updates.
|
||||
|
||||
Parameters:
|
||||
nodes_obj: Meshtastic nodes mapping or iterable.
|
||||
retries: Number of attempts when encountering "dictionary changed"
|
||||
runtime errors.
|
||||
|
||||
Returns:
|
||||
A list of ``(node_id, node)`` tuples, ``None`` when retries are
|
||||
exhausted, or an empty list when no nodes exist.
|
||||
"""
|
||||
|
||||
if not nodes_obj:
|
||||
return []
|
||||
|
||||
items_callable = getattr(nodes_obj, "items", None)
|
||||
if callable(items_callable):
|
||||
for _ in range(max(1, retries)):
|
||||
try:
|
||||
return list(items_callable())
|
||||
except RuntimeError as err:
|
||||
if "dictionary changed size during iteration" not in str(err):
|
||||
raise
|
||||
time.sleep(0)
|
||||
return None
|
||||
|
||||
if hasattr(nodes_obj, "__iter__") and hasattr(nodes_obj, "__getitem__"):
|
||||
for _ in range(max(1, retries)):
|
||||
try:
|
||||
keys = list(nodes_obj)
|
||||
return [(key, nodes_obj[key]) for key in keys]
|
||||
except RuntimeError as err:
|
||||
if "dictionary changed size during iteration" not in str(err):
|
||||
raise
|
||||
time.sleep(0)
|
||||
return None
|
||||
|
||||
return []
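# Behaviour sketch for the helper above, using an ordinary dict (real callers
# pass the Meshtastic ``iface.nodes`` object): mappings with ``items`` return
# their pairs directly, and empty or missing inputs short-circuit to [].
assert _node_items_snapshot({"!deadbeef": {"num": 1}}) == [("!deadbeef", {"num": 1})]
assert _node_items_snapshot({}) == []
assert _node_items_snapshot(None) == []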
|
||||
|
||||
|
||||
def _close_interface(iface_obj) -> None:
|
||||
"""Close ``iface_obj`` while respecting configured timeouts."""
|
||||
|
||||
if iface_obj is None:
|
||||
return
|
||||
|
||||
def _do_close() -> None:
|
||||
try:
|
||||
iface_obj.close()
|
||||
except Exception as exc: # pragma: no cover
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Error closing mesh interface",
|
||||
context="daemon.close",
|
||||
severity="warn",
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
|
||||
if config._CLOSE_TIMEOUT_SECS <= 0 or not _event_wait_allows_default_timeout():
|
||||
_do_close()
|
||||
return
|
||||
|
||||
close_thread = threading.Thread(target=_do_close, name="mesh-close", daemon=True)
|
||||
close_thread.start()
|
||||
close_thread.join(config._CLOSE_TIMEOUT_SECS)
|
||||
if close_thread.is_alive():
|
||||
config._debug_log(
|
||||
"Mesh interface close timed out",
|
||||
context="daemon.close",
|
||||
severity="warn",
|
||||
timeout_seconds=config._CLOSE_TIMEOUT_SECS,
|
||||
)
|
||||
|
||||
|
||||
def _is_ble_interface(iface_obj) -> bool:
|
||||
"""Return ``True`` when ``iface_obj`` appears to be a BLE interface."""
|
||||
|
||||
if iface_obj is None:
|
||||
return False
|
||||
iface_cls = getattr(iface_obj, "__class__", None)
|
||||
if iface_cls is None:
|
||||
return False
|
||||
module_name = getattr(iface_cls, "__module__", "") or ""
|
||||
return "ble_interface" in module_name
|
||||
|
||||
|
||||
def main() -> None:
|
||||
"""Run the mesh ingestion daemon until interrupted."""
|
||||
|
||||
subscribed = _subscribe_receive_topics()
|
||||
if subscribed:
|
||||
config._debug_log(
|
||||
"Subscribed to receive topics",
|
||||
context="daemon.subscribe",
|
||||
severity="info",
|
||||
topics=subscribed,
|
||||
)
|
||||
|
||||
iface = None
|
||||
resolved_target = None
|
||||
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
|
||||
|
||||
stop = threading.Event()
|
||||
initial_snapshot_sent = False
|
||||
energy_session_deadline = None
|
||||
iface_connected_at: float | None = None
|
||||
last_seen_packet_monotonic = handlers.last_packet_monotonic()
|
||||
last_inactivity_reconnect: float | None = None
|
||||
inactivity_reconnect_secs = max(
|
||||
0.0, getattr(config, "_INACTIVITY_RECONNECT_SECS", 0.0)
|
||||
)
|
||||
|
||||
energy_saving_enabled = config.ENERGY_SAVING
|
||||
energy_online_secs = max(0.0, config._ENERGY_ONLINE_DURATION_SECS)
|
||||
energy_sleep_secs = max(0.0, config._ENERGY_SLEEP_SECS)
|
||||
|
||||
def _energy_sleep(reason: str) -> None:
|
||||
if not energy_saving_enabled or energy_sleep_secs <= 0:
|
||||
return
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
f"energy saving: {reason}; sleeping for {energy_sleep_secs:g}s"
|
||||
)
|
||||
stop.wait(energy_sleep_secs)
|
||||
|
||||
def handle_sigterm(*_args) -> None:
|
||||
stop.set()
|
||||
|
||||
def handle_sigint(signum, frame) -> None:
|
||||
if stop.is_set():
|
||||
signal.default_int_handler(signum, frame)
|
||||
return
|
||||
stop.set()
|
||||
|
||||
signal.signal(signal.SIGINT, handle_sigint)
|
||||
signal.signal(signal.SIGTERM, handle_sigterm)
|
||||
|
||||
target = config.INSTANCE or "(no POTATOMESH_INSTANCE)"
|
||||
configured_port = config.CONNECTION
|
||||
active_candidate = configured_port
|
||||
announced_target = False
|
||||
config._debug_log(
|
||||
"Mesh daemon starting",
|
||||
context="daemon.main",
|
||||
severity="info",
|
||||
target=target,
|
||||
port=configured_port or "auto",
|
||||
channel=config.CHANNEL_INDEX,
|
||||
)
|
||||
try:
|
||||
while not stop.is_set():
|
||||
if iface is None:
|
||||
try:
|
||||
if active_candidate:
|
||||
iface, resolved_target = interfaces._create_serial_interface(
|
||||
active_candidate
|
||||
)
|
||||
else:
|
||||
iface, resolved_target = interfaces._create_default_interface()
|
||||
active_candidate = resolved_target
|
||||
interfaces._ensure_radio_metadata(iface)
|
||||
interfaces._ensure_channel_metadata(iface)
|
||||
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
|
||||
initial_snapshot_sent = False
|
||||
if not announced_target and resolved_target:
|
||||
config._debug_log(
|
||||
"Using mesh interface",
|
||||
context="daemon.interface",
|
||||
severity="info",
|
||||
target=resolved_target,
|
||||
)
|
||||
announced_target = True
|
||||
if energy_saving_enabled and energy_online_secs > 0:
|
||||
energy_session_deadline = time.monotonic() + energy_online_secs
|
||||
else:
|
||||
energy_session_deadline = None
|
||||
iface_connected_at = time.monotonic()
|
||||
# Seed the inactivity tracking from the connection time so a
|
||||
# reconnect is given a full inactivity window even when the
|
||||
# handler still reports the previous packet timestamp.
|
||||
last_seen_packet_monotonic = iface_connected_at
|
||||
last_inactivity_reconnect = None
|
||||
except interfaces.NoAvailableMeshInterface as exc:
|
||||
config._debug_log(
|
||||
"No mesh interface available",
|
||||
context="daemon.interface",
|
||||
severity="error",
|
||||
error_message=str(exc),
|
||||
)
|
||||
_close_interface(iface)
|
||||
raise SystemExit(1) from exc
|
||||
except Exception as exc:
|
||||
candidate_desc = active_candidate or "auto"
|
||||
config._debug_log(
|
||||
"Failed to create mesh interface",
|
||||
context="daemon.interface",
|
||||
severity="warn",
|
||||
candidate=candidate_desc,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
if configured_port is None:
|
||||
active_candidate = None
|
||||
announced_target = False
|
||||
stop.wait(retry_delay)
|
||||
if config._RECONNECT_MAX_DELAY_SECS > 0:
|
||||
retry_delay = min(
|
||||
(
|
||||
retry_delay * 2
|
||||
if retry_delay
|
||||
else config._RECONNECT_INITIAL_DELAY_SECS
|
||||
),
|
||||
config._RECONNECT_MAX_DELAY_SECS,
|
||||
)
|
||||
continue
|
||||
|
||||
if energy_saving_enabled and iface is not None:
|
||||
if (
|
||||
energy_session_deadline is not None
|
||||
and time.monotonic() >= energy_session_deadline
|
||||
):
|
||||
config._debug_log(
|
||||
"Energy saving disconnect",
|
||||
context="daemon.energy",
|
||||
severity="info",
|
||||
)
|
||||
_close_interface(iface)
|
||||
iface = None
|
||||
announced_target = False
|
||||
initial_snapshot_sent = False
|
||||
energy_session_deadline = None
|
||||
_energy_sleep("disconnected after session")
|
||||
continue
|
||||
if (
|
||||
_is_ble_interface(iface)
|
||||
and getattr(iface, "client", object()) is None
|
||||
):
|
||||
config._debug_log(
|
||||
"Energy saving BLE disconnect",
|
||||
context="daemon.energy",
|
||||
severity="info",
|
||||
)
|
||||
_close_interface(iface)
|
||||
iface = None
|
||||
announced_target = False
|
||||
initial_snapshot_sent = False
|
||||
energy_session_deadline = None
|
||||
_energy_sleep("BLE client disconnected")
|
||||
continue
|
||||
|
||||
if not initial_snapshot_sent:
|
||||
try:
|
||||
nodes = getattr(iface, "nodes", {}) or {}
|
||||
node_items = _node_items_snapshot(nodes)
|
||||
if node_items is None:
|
||||
config._debug_log(
|
||||
"Skipping node snapshot due to concurrent modification",
|
||||
context="daemon.snapshot",
|
||||
)
|
||||
else:
|
||||
processed_snapshot_item = False
|
||||
for node_id, node in node_items:
|
||||
processed_snapshot_item = True
|
||||
try:
|
||||
handlers.upsert_node(node_id, node)
|
||||
except Exception as exc:
|
||||
config._debug_log(
|
||||
"Failed to update node snapshot",
|
||||
context="daemon.snapshot",
|
||||
severity="warn",
|
||||
node_id=node_id,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Snapshot node payload",
|
||||
context="daemon.snapshot",
|
||||
node=node,
|
||||
)
|
||||
if processed_snapshot_item:
|
||||
initial_snapshot_sent = True
|
||||
except Exception as exc:
|
||||
config._debug_log(
|
||||
"Snapshot refresh failed",
|
||||
context="daemon.snapshot",
|
||||
severity="warn",
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
_close_interface(iface)
|
||||
iface = None
|
||||
stop.wait(retry_delay)
|
||||
if config._RECONNECT_MAX_DELAY_SECS > 0:
|
||||
retry_delay = min(
|
||||
(
|
||||
retry_delay * 2
|
||||
if retry_delay
|
||||
else config._RECONNECT_INITIAL_DELAY_SECS
|
||||
),
|
||||
config._RECONNECT_MAX_DELAY_SECS,
|
||||
)
|
||||
continue
|
||||
|
||||
if iface is not None and inactivity_reconnect_secs > 0:
|
||||
now_monotonic = time.monotonic()
|
||||
iface_activity = handlers.last_packet_monotonic()
|
||||
if (
|
||||
iface_activity is not None
|
||||
and iface_connected_at is not None
|
||||
and iface_activity < iface_connected_at
|
||||
):
|
||||
iface_activity = iface_connected_at
|
||||
if iface_activity is not None and (
|
||||
last_seen_packet_monotonic is None
|
||||
or iface_activity > last_seen_packet_monotonic
|
||||
):
|
||||
last_seen_packet_monotonic = iface_activity
|
||||
last_inactivity_reconnect = None
|
||||
|
||||
latest_activity = iface_activity
|
||||
if latest_activity is None and iface_connected_at is not None:
|
||||
latest_activity = iface_connected_at
|
||||
if latest_activity is None:
|
||||
latest_activity = now_monotonic
|
||||
|
||||
inactivity_elapsed = now_monotonic - latest_activity
|
||||
|
||||
connected_attr = getattr(iface, "isConnected", None)
|
||||
believed_disconnected = False
|
||||
if callable(connected_attr):
|
||||
try:
|
||||
believed_disconnected = not bool(connected_attr())
|
||||
except Exception:
|
||||
believed_disconnected = False
|
||||
elif connected_attr is not None:
|
||||
believed_disconnected = not bool(connected_attr)
|
||||
|
||||
should_reconnect = believed_disconnected or (
|
||||
inactivity_elapsed >= inactivity_reconnect_secs
|
||||
)
|
||||
|
||||
if should_reconnect:
|
||||
if (
|
||||
last_inactivity_reconnect is None
|
||||
or now_monotonic - last_inactivity_reconnect
|
||||
>= inactivity_reconnect_secs
|
||||
):
|
||||
reason = (
|
||||
"disconnected"
|
||||
if believed_disconnected
|
||||
else f"no data for {inactivity_elapsed:.0f}s"
|
||||
)
|
||||
config._debug_log(
|
||||
"Mesh interface inactivity detected",
|
||||
context="daemon.interface",
|
||||
severity="warn",
|
||||
reason=reason,
|
||||
)
|
||||
last_inactivity_reconnect = now_monotonic
|
||||
_close_interface(iface)
|
||||
iface = None
|
||||
announced_target = False
|
||||
initial_snapshot_sent = False
|
||||
energy_session_deadline = None
|
||||
iface_connected_at = None
|
||||
continue
|
||||
|
||||
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
|
||||
stop.wait(config.SNAPSHOT_SECS)
|
||||
except KeyboardInterrupt: # pragma: no cover - interactive only
|
||||
config._debug_log(
|
||||
"Received KeyboardInterrupt; shutting down",
|
||||
context="daemon.main",
|
||||
severity="info",
|
||||
)
|
||||
stop.set()
|
||||
finally:
|
||||
_close_interface(iface)
|
||||
|
||||
|
||||
__all__ = [
|
||||
"_RECEIVE_TOPICS",
|
||||
"_event_wait_allows_default_timeout",
|
||||
"_node_items_snapshot",
|
||||
"_subscribe_receive_topics",
|
||||
"_is_ble_interface",
|
||||
"main",
|
||||
]
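# One way to launch the daemon (a sketch; the repository may ship its own
# runner or console entry point): run this module directly and let main()
# subscribe, connect and loop until SIGINT/SIGTERM.
if __name__ == "__main__":  # pragma: no cover - manual invocation only
    main()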
|
||||
data/mesh_ingestor/handlers.py (new file, 985 lines)
@@ -0,0 +1,985 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Packet handlers that serialise data and push it to the HTTP queue."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import json
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
|
||||
from . import channels, config, queue
|
||||
from .serialization import (
|
||||
_canonical_node_id,
|
||||
_coerce_float,
|
||||
_coerce_int,
|
||||
_decode_nodeinfo_payload,
|
||||
_extract_payload_bytes,
|
||||
_first,
|
||||
_get,
|
||||
_iso,
|
||||
_merge_mappings,
|
||||
_node_num_from_id,
|
||||
_node_to_dict,
|
||||
_nodeinfo_metrics_dict,
|
||||
_nodeinfo_position_dict,
|
||||
_nodeinfo_user_dict,
|
||||
_pkt_to_dict,
|
||||
upsert_payload,
|
||||
)
|
||||
|
||||
|
||||
def _radio_metadata_fields() -> dict[str, object]:
|
||||
"""Return the shared radio metadata fields for payload enrichment."""
|
||||
|
||||
metadata: dict[str, object] = {}
|
||||
freq = getattr(config, "LORA_FREQ", None)
|
||||
if freq is not None:
|
||||
metadata["lora_freq"] = freq
|
||||
preset = getattr(config, "MODEM_PRESET", None)
|
||||
if preset is not None:
|
||||
metadata["modem_preset"] = preset
|
||||
return metadata
|
||||
|
||||
|
||||
def _apply_radio_metadata(payload: dict) -> dict:
|
||||
"""Augment ``payload`` with radio metadata when available."""
|
||||
|
||||
metadata = _radio_metadata_fields()
|
||||
if metadata:
|
||||
payload.update(metadata)
|
||||
return payload
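# Enrichment sketch: once interfaces._ensure_radio_metadata() has populated
# config.LORA_FREQ / config.MODEM_PRESET (for example 868 and "LongFast"),
# every queued payload gains matching keys; while both are still None the
# payload passes through unchanged.
#
#   config.LORA_FREQ, config.MODEM_PRESET = 868, "LongFast"   # illustrative
#   _apply_radio_metadata({"id": 1})
#   # -> {"id": 1, "lora_freq": 868, "modem_preset": "LongFast"}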
|
||||
|
||||
|
||||
def _is_encrypted_flag(value) -> bool:
|
||||
"""Return ``True`` when ``value`` represents an encrypted payload."""
|
||||
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
if isinstance(value, (int, float)):
|
||||
return value != 0
|
||||
if isinstance(value, str):
|
||||
normalized = value.strip().lower()
|
||||
if normalized in {"", "0", "false", "no"}:
|
||||
return False
|
||||
return True
|
||||
return bool(value)
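# Coercion sketch for the flag above: empty, zero, "false" and "no" strings
# mean plaintext, any other non-empty string (such as a ciphertext blob) is
# treated as encrypted, and None falls through to plain truthiness.
assert _is_encrypted_flag(0) is False
assert _is_encrypted_flag("no") is False
assert _is_encrypted_flag("FALSE") is False
assert _is_encrypted_flag("AQIDBA==") is True
assert _is_encrypted_flag(None) is False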
|
||||
|
||||
|
||||
def _apply_radio_metadata_to_nodes(payload: dict) -> dict:
|
||||
"""Attach radio metadata to each node entry stored in ``payload``."""
|
||||
|
||||
metadata = _radio_metadata_fields()
|
||||
if not metadata:
|
||||
return payload
|
||||
for value in payload.values():
|
||||
if isinstance(value, dict):
|
||||
value.update(metadata)
|
||||
return payload
|
||||
|
||||
|
||||
def upsert_node(node_id, node) -> None:
|
||||
"""Schedule an upsert for a single node.
|
||||
|
||||
Parameters:
|
||||
node_id: Canonical identifier for the node in the ``!xxxxxxxx`` format.
|
||||
node: Node object or mapping to serialise for the API payload.
|
||||
|
||||
Returns:
|
||||
``None``. The payload is forwarded to the shared HTTP queue.
|
||||
"""
|
||||
|
||||
payload = _apply_radio_metadata_to_nodes(upsert_payload(node_id, node))
|
||||
_queue_post_json("/api/nodes", payload, priority=queue._NODE_POST_PRIORITY)
|
||||
|
||||
if config.DEBUG:
|
||||
user = _get(payload[node_id], "user") or {}
|
||||
short = _get(user, "shortName")
|
||||
long = _get(user, "longName")
|
||||
config._debug_log(
|
||||
"Queued node upsert payload",
|
||||
context="handlers.upsert_node",
|
||||
node_id=node_id,
|
||||
short_name=short,
|
||||
long_name=long,
|
||||
)
|
||||
|
||||
|
||||
def store_position_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist a decoded position packet.
|
||||
|
||||
Parameters:
|
||||
packet: Raw packet metadata emitted by Meshtastic.
|
||||
decoded: Decoded payload extracted from ``packet['decoded']``.
|
||||
|
||||
Returns:
|
||||
``None``. The formatted position data is queued for HTTP submission.
|
||||
"""
|
||||
|
||||
node_ref = _first(packet, "fromId", "from_id", "from", default=None)
|
||||
if node_ref is None:
|
||||
node_ref = _first(decoded, "num", default=None)
|
||||
node_id = _canonical_node_id(node_ref)
|
||||
if node_id is None:
|
||||
return
|
||||
|
||||
node_num = _coerce_int(_first(decoded, "num", default=None))
|
||||
if node_num is None:
|
||||
node_num = _node_num_from_id(node_id)
|
||||
|
||||
pkt_id = _coerce_int(_first(packet, "id", "packet_id", "packetId", default=None))
|
||||
if pkt_id is None:
|
||||
return
|
||||
|
||||
rx_time = _coerce_int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
if rx_time is None:
|
||||
rx_time = int(time.time())
|
||||
|
||||
to_id = _first(packet, "toId", "to_id", "to", default=None)
|
||||
to_id = to_id if to_id not in {"", None} else None
|
||||
|
||||
position_section = decoded.get("position") if isinstance(decoded, Mapping) else None
|
||||
if not isinstance(position_section, Mapping):
|
||||
position_section = {}
|
||||
|
||||
latitude = _coerce_float(
|
||||
_first(position_section, "latitude", "raw.latitude", default=None)
|
||||
)
|
||||
if latitude is None:
|
||||
lat_i = _coerce_int(
|
||||
_first(
|
||||
position_section,
|
||||
"latitudeI",
|
||||
"latitude_i",
|
||||
"raw.latitude_i",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
if lat_i is not None:
|
||||
latitude = lat_i / 1e7
|
||||
|
||||
longitude = _coerce_float(
|
||||
_first(position_section, "longitude", "raw.longitude", default=None)
|
||||
)
|
||||
if longitude is None:
|
||||
lon_i = _coerce_int(
|
||||
_first(
|
||||
position_section,
|
||||
"longitudeI",
|
||||
"longitude_i",
|
||||
"raw.longitude_i",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
if lon_i is not None:
|
||||
longitude = lon_i / 1e7
|
||||
|
||||
altitude = _coerce_float(
|
||||
_first(position_section, "altitude", "raw.altitude", default=None)
|
||||
)
|
||||
position_time = _coerce_int(
|
||||
_first(position_section, "time", "raw.time", default=None)
|
||||
)
|
||||
location_source = _first(
|
||||
position_section,
|
||||
"locationSource",
|
||||
"location_source",
|
||||
"raw.location_source",
|
||||
default=None,
|
||||
)
|
||||
location_source = (
|
||||
str(location_source).strip() if location_source not in {None, ""} else None
|
||||
)
|
||||
|
||||
precision_bits = _coerce_int(
|
||||
_first(
|
||||
position_section,
|
||||
"precisionBits",
|
||||
"precision_bits",
|
||||
"raw.precision_bits",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
sats_in_view = _coerce_int(
|
||||
_first(
|
||||
position_section,
|
||||
"satsInView",
|
||||
"sats_in_view",
|
||||
"raw.sats_in_view",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
pdop = _coerce_float(
|
||||
_first(position_section, "PDOP", "pdop", "raw.PDOP", "raw.pdop", default=None)
|
||||
)
|
||||
ground_speed = _coerce_float(
|
||||
_first(
|
||||
position_section,
|
||||
"groundSpeed",
|
||||
"ground_speed",
|
||||
"raw.ground_speed",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
ground_track = _coerce_float(
|
||||
_first(
|
||||
position_section,
|
||||
"groundTrack",
|
||||
"ground_track",
|
||||
"raw.ground_track",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
snr = _coerce_float(_first(packet, "snr", "rx_snr", "rxSnr", default=None))
|
||||
rssi = _coerce_int(_first(packet, "rssi", "rx_rssi", "rxRssi", default=None))
|
||||
hop_limit = _coerce_int(_first(packet, "hopLimit", "hop_limit", default=None))
|
||||
bitfield = _coerce_int(_first(decoded, "bitfield", default=None))
|
||||
|
||||
payload_bytes = _extract_payload_bytes(decoded)
|
||||
payload_b64 = base64_payload(payload_bytes)
|
||||
|
||||
raw_section = decoded.get("raw") if isinstance(decoded, Mapping) else None
|
||||
raw_payload = _node_to_dict(raw_section) if raw_section else None
|
||||
if raw_payload is None and position_section:
|
||||
raw_position = (
|
||||
position_section.get("raw")
|
||||
if isinstance(position_section, Mapping)
|
||||
else None
|
||||
)
|
||||
if raw_position:
|
||||
raw_payload = _node_to_dict(raw_position)
|
||||
|
||||
position_payload = {
|
||||
"id": pkt_id,
|
||||
"node_id": node_id or node_ref,
|
||||
"node_num": node_num,
|
||||
"num": node_num,
|
||||
"from_id": node_id,
|
||||
"to_id": to_id,
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": _iso(rx_time),
|
||||
"latitude": latitude,
|
||||
"longitude": longitude,
|
||||
"altitude": altitude,
|
||||
"position_time": position_time,
|
||||
"location_source": location_source,
|
||||
"precision_bits": precision_bits,
|
||||
"sats_in_view": sats_in_view,
|
||||
"pdop": pdop,
|
||||
"ground_speed": ground_speed,
|
||||
"ground_track": ground_track,
|
||||
"snr": snr,
|
||||
"rssi": rssi,
|
||||
"hop_limit": hop_limit,
|
||||
"bitfield": bitfield,
|
||||
"payload_b64": payload_b64,
|
||||
}
|
||||
if raw_payload:
|
||||
position_payload["raw"] = raw_payload
|
||||
|
||||
_queue_post_json(
|
||||
"/api/positions",
|
||||
_apply_radio_metadata(position_payload),
|
||||
priority=queue._POSITION_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Queued position payload",
|
||||
context="handlers.store_position",
|
||||
node_id=node_id,
|
||||
latitude=latitude,
|
||||
longitude=longitude,
|
||||
position_time=position_time,
|
||||
)
|
||||
|
||||
|
||||
def base64_payload(payload_bytes: bytes | None) -> str | None:
|
||||
"""Encode raw payload bytes for JSON transport.
|
||||
|
||||
Parameters:
|
||||
payload_bytes: Optional payload to encode. ``None`` is returned when
|
||||
the payload is empty or missing.
|
||||
|
||||
Returns:
|
||||
The Base64 encoded payload string or ``None`` when no payload exists.
|
||||
"""
|
||||
|
||||
if not payload_bytes:
|
||||
return None
|
||||
return base64.b64encode(payload_bytes).decode("ascii")
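# Round-trip sketch: payload bytes become an ASCII-safe string for the JSON
# API, empty or missing payloads stay None, and the receiver can decode the
# value back to the original bytes.
assert base64_payload(b"\x01\x02\x03") == "AQID"
assert base64_payload(b"") is None
assert base64_payload(None) is None
assert base64.b64decode(base64_payload(b"hello")) == b"hello"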
|
||||
|
||||
|
||||
def store_telemetry_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist telemetry metrics extracted from a packet.
|
||||
|
||||
Parameters:
|
||||
packet: Packet metadata received from the radio interface.
|
||||
decoded: Meshtastic-decoded view containing telemetry structures.
|
||||
|
||||
Returns:
|
||||
``None``. The telemetry payload is added to the HTTP queue.
|
||||
"""
|
||||
|
||||
telemetry_section = (
|
||||
decoded.get("telemetry") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if not isinstance(telemetry_section, Mapping):
|
||||
return
|
||||
|
||||
pkt_id = _coerce_int(_first(packet, "id", "packet_id", "packetId", default=None))
|
||||
if pkt_id is None:
|
||||
return
|
||||
|
||||
raw_from = _first(packet, "fromId", "from_id", "from", default=None)
|
||||
node_id = _canonical_node_id(raw_from)
|
||||
node_num = _coerce_int(_first(decoded, "num", "node_num", default=None))
|
||||
if node_num is None:
|
||||
node_num = _node_num_from_id(node_id or raw_from)
|
||||
|
||||
to_id = _first(packet, "toId", "to_id", "to", default=None)
|
||||
|
||||
raw_rx_time = _first(packet, "rxTime", "rx_time", default=time.time())
|
||||
try:
|
||||
rx_time = int(raw_rx_time)
|
||||
except (TypeError, ValueError):
|
||||
rx_time = int(time.time())
|
||||
rx_iso = _iso(rx_time)
|
||||
|
||||
telemetry_time = _coerce_int(_first(telemetry_section, "time", default=None))
|
||||
|
||||
channel = _coerce_int(_first(decoded, "channel", default=None))
|
||||
if channel is None:
|
||||
channel = _coerce_int(_first(packet, "channel", default=None))
|
||||
if channel is None:
|
||||
channel = 0
|
||||
|
||||
portnum = _first(decoded, "portnum", default=None)
|
||||
portnum = str(portnum) if portnum not in {None, ""} else None
|
||||
|
||||
bitfield = _coerce_int(_first(decoded, "bitfield", default=None))
|
||||
|
||||
snr = _coerce_float(_first(packet, "snr", "rx_snr", "rxSnr", default=None))
|
||||
rssi = _coerce_int(_first(packet, "rssi", "rx_rssi", "rxRssi", default=None))
|
||||
hop_limit = _coerce_int(_first(packet, "hopLimit", "hop_limit", default=None))
|
||||
|
||||
payload_bytes = _extract_payload_bytes(decoded)
|
||||
payload_b64 = base64_payload(payload_bytes) or ""
|
||||
|
||||
battery_level = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"batteryLevel",
|
||||
"battery_level",
|
||||
"deviceMetrics.batteryLevel",
|
||||
"environmentMetrics.battery_level",
|
||||
"deviceMetrics.battery_level",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
voltage = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"voltage",
|
||||
"environmentMetrics.voltage",
|
||||
"deviceMetrics.voltage",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
channel_utilization = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"channelUtilization",
|
||||
"channel_utilization",
|
||||
"deviceMetrics.channelUtilization",
|
||||
"deviceMetrics.channel_utilization",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
air_util_tx = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"airUtilTx",
|
||||
"air_util_tx",
|
||||
"deviceMetrics.airUtilTx",
|
||||
"deviceMetrics.air_util_tx",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
uptime_seconds = _coerce_int(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"uptimeSeconds",
|
||||
"uptime_seconds",
|
||||
"deviceMetrics.uptimeSeconds",
|
||||
"deviceMetrics.uptime_seconds",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
temperature = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"temperature",
|
||||
"environmentMetrics.temperature",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
relative_humidity = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"relativeHumidity",
|
||||
"relative_humidity",
|
||||
"environmentMetrics.relativeHumidity",
|
||||
"environmentMetrics.relative_humidity",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
barometric_pressure = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"barometricPressure",
|
||||
"barometric_pressure",
|
||||
"environmentMetrics.barometricPressure",
|
||||
"environmentMetrics.barometric_pressure",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
telemetry_payload = {
|
||||
"id": pkt_id,
|
||||
"node_id": node_id,
|
||||
"node_num": node_num,
|
||||
"from_id": node_id or raw_from,
|
||||
"to_id": to_id,
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": rx_iso,
|
||||
"telemetry_time": telemetry_time,
|
||||
"channel": channel,
|
||||
"portnum": portnum,
|
||||
"bitfield": bitfield,
|
||||
"snr": snr,
|
||||
"rssi": rssi,
|
||||
"hop_limit": hop_limit,
|
||||
"payload_b64": payload_b64,
|
||||
}
|
||||
|
||||
if battery_level is not None:
|
||||
telemetry_payload["battery_level"] = battery_level
|
||||
if voltage is not None:
|
||||
telemetry_payload["voltage"] = voltage
|
||||
if channel_utilization is not None:
|
||||
telemetry_payload["channel_utilization"] = channel_utilization
|
||||
if air_util_tx is not None:
|
||||
telemetry_payload["air_util_tx"] = air_util_tx
|
||||
if uptime_seconds is not None:
|
||||
telemetry_payload["uptime_seconds"] = uptime_seconds
|
||||
if temperature is not None:
|
||||
telemetry_payload["temperature"] = temperature
|
||||
if relative_humidity is not None:
|
||||
telemetry_payload["relative_humidity"] = relative_humidity
|
||||
if barometric_pressure is not None:
|
||||
telemetry_payload["barometric_pressure"] = barometric_pressure
|
||||
|
||||
_queue_post_json(
|
||||
"/api/telemetry",
|
||||
_apply_radio_metadata(telemetry_payload),
|
||||
priority=queue._TELEMETRY_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Queued telemetry payload",
|
||||
context="handlers.store_telemetry",
|
||||
node_id=node_id,
|
||||
battery_level=battery_level,
|
||||
voltage=voltage,
|
||||
)
|
||||
|
||||
|
||||
def store_nodeinfo_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist node information updates.
|
||||
|
||||
Parameters:
|
||||
packet: Raw packet metadata describing the update.
|
||||
decoded: Decoded payload that may include ``user`` and ``position``
|
||||
sections.
|
||||
|
||||
Returns:
|
||||
``None``. The node payload is merged into the API queue.
|
||||
"""
|
||||
|
||||
payload_bytes = _extract_payload_bytes(decoded)
|
||||
node_info = _decode_nodeinfo_payload(payload_bytes)
|
||||
decoded_user = decoded.get("user")
|
||||
user_dict = _nodeinfo_user_dict(node_info, decoded_user)
|
||||
|
||||
node_info_fields = set()
|
||||
if node_info:
|
||||
node_info_fields = {field_desc.name for field_desc, _ in node_info.ListFields()}
|
||||
|
||||
node_id = None
|
||||
if isinstance(user_dict, Mapping):
|
||||
node_id = _canonical_node_id(user_dict.get("id"))
|
||||
|
||||
if node_id is None:
|
||||
node_id = _canonical_node_id(
|
||||
_first(packet, "fromId", "from_id", "from", default=None)
|
||||
)
|
||||
|
||||
if node_id is None:
|
||||
return
|
||||
|
||||
node_payload: dict = {}
|
||||
if user_dict:
|
||||
node_payload["user"] = user_dict
|
||||
|
||||
node_num = None
|
||||
if node_info and "num" in node_info_fields:
|
||||
try:
|
||||
node_num = int(node_info.num)
|
||||
except (TypeError, ValueError):
|
||||
node_num = None
|
||||
if node_num is None:
|
||||
decoded_num = decoded.get("num")
|
||||
if decoded_num is not None:
|
||||
try:
|
||||
node_num = int(decoded_num)
|
||||
except (TypeError, ValueError):
|
||||
try:
|
||||
node_num = int(str(decoded_num).strip(), 0)
|
||||
except Exception:
|
||||
node_num = None
|
||||
if node_num is None:
|
||||
node_num = _node_num_from_id(node_id)
|
||||
if node_num is not None:
|
||||
node_payload["num"] = node_num
|
||||
|
||||
rx_time = int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
last_heard = None
|
||||
if node_info and "last_heard" in node_info_fields:
|
||||
try:
|
||||
last_heard = int(node_info.last_heard)
|
||||
except (TypeError, ValueError):
|
||||
last_heard = None
|
||||
if last_heard is None:
|
||||
decoded_last_heard = decoded.get("lastHeard")
|
||||
if decoded_last_heard is not None:
|
||||
try:
|
||||
last_heard = int(decoded_last_heard)
|
||||
except (TypeError, ValueError):
|
||||
last_heard = None
|
||||
if last_heard is None or last_heard < rx_time:
|
||||
last_heard = rx_time
|
||||
node_payload["lastHeard"] = last_heard
|
||||
|
||||
snr = None
|
||||
if node_info and "snr" in node_info_fields:
|
||||
try:
|
||||
snr = float(node_info.snr)
|
||||
except (TypeError, ValueError):
|
||||
snr = None
|
||||
if snr is None:
|
||||
snr = _first(packet, "snr", "rx_snr", "rxSnr", default=None)
|
||||
if snr is not None:
|
||||
try:
|
||||
snr = float(snr)
|
||||
except (TypeError, ValueError):
|
||||
snr = None
|
||||
if snr is not None:
|
||||
node_payload["snr"] = snr
|
||||
|
||||
hops = None
|
||||
if node_info and "hops_away" in node_info_fields:
|
||||
try:
|
||||
hops = int(node_info.hops_away)
|
||||
except (TypeError, ValueError):
|
||||
hops = None
|
||||
if hops is None:
|
||||
hops = decoded.get("hopsAway")
|
||||
if hops is not None:
|
||||
try:
|
||||
hops = int(hops)
|
||||
except (TypeError, ValueError):
|
||||
hops = None
|
||||
if hops is not None:
|
||||
node_payload["hopsAway"] = hops
|
||||
|
||||
if node_info and "channel" in node_info_fields:
|
||||
try:
|
||||
node_payload["channel"] = int(node_info.channel)
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
|
||||
if node_info and "via_mqtt" in node_info_fields:
|
||||
node_payload["viaMqtt"] = bool(node_info.via_mqtt)
|
||||
|
||||
if node_info and "is_favorite" in node_info_fields:
|
||||
node_payload["isFavorite"] = bool(node_info.is_favorite)
|
||||
elif "isFavorite" in decoded:
|
||||
node_payload["isFavorite"] = bool(decoded.get("isFavorite"))
|
||||
|
||||
if node_info and "is_ignored" in node_info_fields:
|
||||
node_payload["isIgnored"] = bool(node_info.is_ignored)
|
||||
if node_info and "is_key_manually_verified" in node_info_fields:
|
||||
node_payload["isKeyManuallyVerified"] = bool(node_info.is_key_manually_verified)
|
||||
|
||||
metrics = _nodeinfo_metrics_dict(node_info)
|
||||
decoded_metrics = decoded.get("deviceMetrics")
|
||||
if isinstance(decoded_metrics, Mapping):
|
||||
metrics = _merge_mappings(metrics, _node_to_dict(decoded_metrics))
|
||||
if metrics:
|
||||
node_payload["deviceMetrics"] = metrics
|
||||
|
||||
position = _nodeinfo_position_dict(node_info)
|
||||
decoded_position = decoded.get("position")
|
||||
if isinstance(decoded_position, Mapping):
|
||||
position = _merge_mappings(position, _node_to_dict(decoded_position))
|
||||
if position:
|
||||
node_payload["position"] = position
|
||||
|
||||
hop_limit = _first(packet, "hopLimit", "hop_limit", default=None)
|
||||
if hop_limit is not None and "hopLimit" not in node_payload:
|
||||
try:
|
||||
node_payload["hopLimit"] = int(hop_limit)
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
|
||||
_queue_post_json(
|
||||
"/api/nodes",
|
||||
_apply_radio_metadata_to_nodes({node_id: node_payload}),
|
||||
priority=queue._NODE_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
short = None
|
||||
long_name = None
|
||||
if isinstance(user_dict, Mapping):
|
||||
short = user_dict.get("shortName")
|
||||
long_name = user_dict.get("longName")
|
||||
config._debug_log(
|
||||
"Queued nodeinfo payload",
|
||||
context="handlers.store_nodeinfo",
|
||||
node_id=node_id,
|
||||
short_name=short,
|
||||
long_name=long_name,
|
||||
)
|
||||
|
||||
|
||||
def store_neighborinfo_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist neighbour information gathered from a packet.
|
||||
|
||||
Parameters:
|
||||
packet: Raw Meshtastic packet metadata.
|
||||
decoded: Decoded view containing the neighbour information section.
|
||||
|
||||
Returns:
|
||||
``None``. The neighbour snapshot is queued for submission.
|
||||
"""
|
||||
|
||||
neighbor_section = (
|
||||
decoded.get("neighborinfo") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if not isinstance(neighbor_section, Mapping):
|
||||
return
|
||||
|
||||
node_ref = _first(
|
||||
neighbor_section,
|
||||
"nodeId",
|
||||
"node_id",
|
||||
default=_first(packet, "fromId", "from_id", "from", default=None),
|
||||
)
|
||||
node_id = _canonical_node_id(node_ref)
|
||||
if node_id is None:
|
||||
return
|
||||
|
||||
node_num = _coerce_int(_first(neighbor_section, "nodeId", "node_id", default=None))
|
||||
if node_num is None:
|
||||
node_num = _node_num_from_id(node_id)
|
||||
|
||||
node_broadcast_interval = _coerce_int(
|
||||
_first(
|
||||
neighbor_section,
|
||||
"nodeBroadcastIntervalSecs",
|
||||
"node_broadcast_interval_secs",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
last_sent_by_ref = _first(
|
||||
neighbor_section,
|
||||
"lastSentById",
|
||||
"last_sent_by_id",
|
||||
default=None,
|
||||
)
|
||||
last_sent_by_id = _canonical_node_id(last_sent_by_ref)
|
||||
|
||||
rx_time = _coerce_int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
if rx_time is None:
|
||||
rx_time = int(time.time())
|
||||
|
||||
neighbors_payload = neighbor_section.get("neighbors")
|
||||
neighbors_iterable = (
|
||||
neighbors_payload if isinstance(neighbors_payload, list) else []
|
||||
)
|
||||
|
||||
neighbor_entries: list[dict] = []
|
||||
for entry in neighbors_iterable:
|
||||
if not isinstance(entry, Mapping):
|
||||
continue
|
||||
neighbor_ref = _first(entry, "nodeId", "node_id", default=None)
|
||||
neighbor_id = _canonical_node_id(neighbor_ref)
|
||||
if neighbor_id is None:
|
||||
continue
|
||||
neighbor_num = _coerce_int(_first(entry, "nodeId", "node_id", default=None))
|
||||
if neighbor_num is None:
|
||||
neighbor_num = _node_num_from_id(neighbor_id)
|
||||
snr = _coerce_float(_first(entry, "snr", default=None))
|
||||
entry_rx_time = _coerce_int(_first(entry, "rxTime", "rx_time", default=None))
|
||||
if entry_rx_time is None:
|
||||
entry_rx_time = rx_time
|
||||
neighbor_entries.append(
|
||||
{
|
||||
"neighbor_id": neighbor_id,
|
||||
"neighbor_num": neighbor_num,
|
||||
"snr": snr,
|
||||
"rx_time": entry_rx_time,
|
||||
"rx_iso": _iso(entry_rx_time),
|
||||
}
|
||||
)
|
||||
|
||||
payload = {
|
||||
"node_id": node_id,
|
||||
"node_num": node_num,
|
||||
"neighbors": neighbor_entries,
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": _iso(rx_time),
|
||||
}
|
||||
|
||||
if node_broadcast_interval is not None:
|
||||
payload["node_broadcast_interval_secs"] = node_broadcast_interval
|
||||
if last_sent_by_id is not None:
|
||||
payload["last_sent_by_id"] = last_sent_by_id
|
||||
|
||||
_queue_post_json(
|
||||
"/api/neighbors",
|
||||
_apply_radio_metadata(payload),
|
||||
priority=queue._NEIGHBOR_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Queued neighborinfo payload",
|
||||
context="handlers.store_neighborinfo",
|
||||
node_id=node_id,
|
||||
neighbors=len(neighbor_entries),
|
||||
)
|
||||
|
||||
|
||||
def store_packet_dict(packet: Mapping) -> None:
|
||||
"""Route a decoded packet to the appropriate storage handler.
|
||||
|
||||
Parameters:
|
||||
packet: Packet dictionary emitted by the mesh interface.
|
||||
|
||||
Returns:
|
||||
``None``. Side-effects depend on the specific handler invoked.
|
||||
"""
|
||||
|
||||
decoded = packet.get("decoded") or {}
|
||||
|
||||
portnum_raw = _first(decoded, "portnum", default=None)
|
||||
portnum = str(portnum_raw).upper() if portnum_raw is not None else None
|
||||
portnum_int = _coerce_int(portnum_raw)
|
||||
|
||||
telemetry_section = (
|
||||
decoded.get("telemetry") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if (
|
||||
portnum == "TELEMETRY_APP"
|
||||
or portnum_int == 65
|
||||
or isinstance(telemetry_section, Mapping)
|
||||
):
|
||||
store_telemetry_packet(packet, decoded)
|
||||
return
|
||||
|
||||
if portnum in {"5", "NODEINFO_APP"}:
|
||||
store_nodeinfo_packet(packet, decoded)
|
||||
return
|
||||
|
||||
if portnum in {"4", "POSITION_APP"}:
|
||||
store_position_packet(packet, decoded)
|
||||
return
|
||||
|
||||
neighborinfo_section = (
|
||||
decoded.get("neighborinfo") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if portnum == "NEIGHBORINFO_APP" or isinstance(neighborinfo_section, Mapping):
|
||||
store_neighborinfo_packet(packet, decoded)
|
||||
return
|
||||
|
||||
text = _first(decoded, "payload.text", "text", default=None)
|
||||
encrypted = _first(decoded, "payload.encrypted", "encrypted", default=None)
|
||||
if encrypted is None:
|
||||
encrypted = _first(packet, "encrypted", default=None)
|
||||
if not text and not encrypted:
|
||||
return
|
||||
|
||||
if portnum and portnum not in {"1", "TEXT_MESSAGE_APP"}:
|
||||
return
|
||||
|
||||
channel = _first(decoded, "channel", default=None)
|
||||
if channel is None:
|
||||
channel = _first(packet, "channel", default=0)
|
||||
try:
|
||||
channel = int(channel)
|
||||
except Exception:
|
||||
channel = 0
|
||||
|
||||
pkt_id = _first(packet, "id", "packet_id", "packetId", default=None)
|
||||
if pkt_id is None:
|
||||
return
|
||||
rx_time = int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
from_id = _first(packet, "fromId", "from_id", "from", default=None)
|
||||
to_id = _first(packet, "toId", "to_id", "to", default=None)
|
||||
|
||||
if (from_id is None or str(from_id) == "") and config.DEBUG:
|
||||
try:
|
||||
raw = json.dumps(packet, default=str)
|
||||
except Exception:
|
||||
raw = str(packet)
|
||||
config._debug_log(
|
||||
"Packet missing from_id",
|
||||
context="handlers.store_packet_dict",
|
||||
packet=raw,
|
||||
)
|
||||
|
||||
snr = _first(packet, "snr", "rx_snr", "rxSnr", default=None)
|
||||
rssi = _first(packet, "rssi", "rx_rssi", "rxRssi", default=None)
|
||||
hop = _first(packet, "hopLimit", "hop_limit", default=None)
|
||||
|
||||
encrypted_flag = _is_encrypted_flag(encrypted)
|
||||
|
||||
message_payload = {
|
||||
"id": int(pkt_id),
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": _iso(rx_time),
|
||||
"from_id": from_id,
|
||||
"to_id": to_id,
|
||||
"channel": channel,
|
||||
"portnum": str(portnum) if portnum is not None else None,
|
||||
"text": text,
|
||||
"encrypted": encrypted,
|
||||
"snr": float(snr) if snr is not None else None,
|
||||
"rssi": int(rssi) if rssi is not None else None,
|
||||
"hop_limit": int(hop) if hop is not None else None,
|
||||
}
|
||||
|
||||
channel_name_value = None
|
||||
if not encrypted_flag:
|
||||
channel_name_value = channels.channel_name(channel)
|
||||
if channel_name_value:
|
||||
message_payload["channel_name"] = channel_name_value
|
||||
_queue_post_json(
|
||||
"/api/messages",
|
||||
_apply_radio_metadata(message_payload),
|
||||
priority=queue._MESSAGE_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
from_label = _canonical_node_id(from_id) or from_id
|
||||
to_label = _canonical_node_id(to_id) or to_id
|
||||
payload_desc = "Encrypted" if text is None and encrypted else text
|
||||
log_kwargs = {
|
||||
"context": "handlers.store_packet_dict",
|
||||
"from_id": from_label,
|
||||
"to_id": to_label,
|
||||
"channel": channel,
|
||||
"channel_display": channel_name_value or channel,
|
||||
"payload": payload_desc,
|
||||
}
|
||||
if channel_name_value:
|
||||
log_kwargs["channel_name"] = channel_name_value
|
||||
config._debug_log("Queued message payload", **log_kwargs)
|
||||
|
||||
|
||||
_last_packet_monotonic: float | None = None
|
||||
|
||||
|
||||
def last_packet_monotonic() -> float | None:
|
||||
"""Return the monotonic timestamp of the most recent packet."""
|
||||
|
||||
return _last_packet_monotonic
|
||||
|
||||
|
||||
def _mark_packet_seen() -> None:
|
||||
"""Record that a packet has been processed."""
|
||||
|
||||
global _last_packet_monotonic
|
||||
_last_packet_monotonic = time.monotonic()
|
||||
|
||||
|
||||
def on_receive(packet, interface) -> None:
|
||||
"""Callback registered with Meshtastic to capture incoming packets.
|
||||
|
||||
Parameters:
|
||||
packet: Packet payload supplied by the Meshtastic pubsub topic.
|
||||
interface: Interface instance that produced the packet. Only used for
|
||||
compatibility with Meshtastic's callback signature.
|
||||
|
||||
Returns:
|
||||
``None``. Packets are serialised and enqueued asynchronously.
|
||||
"""
|
||||
|
||||
if isinstance(packet, dict):
|
||||
if packet.get("_potatomesh_seen"):
|
||||
return
|
||||
packet["_potatomesh_seen"] = True
|
||||
|
||||
_mark_packet_seen()
|
||||
|
||||
packet_dict = None
|
||||
try:
|
||||
packet_dict = _pkt_to_dict(packet)
|
||||
store_packet_dict(packet_dict)
|
||||
except Exception as exc:
|
||||
info = (
|
||||
list(packet_dict.keys()) if isinstance(packet_dict, dict) else type(packet)
|
||||
)
|
||||
config._debug_log(
|
||||
"Failed to store packet",
|
||||
context="handlers.on_receive",
|
||||
severity="warn",
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
packet_info=info,
|
||||
)
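# Dedup sketch: the same dict delivered on several pubsub topics is only
# processed once, because the first delivery stamps a marker before handing
# the packet to the serialisers, for example:
#
#   pkt = {"id": 1, "decoded": {}}
#   on_receive(pkt, interface=None)   # processed; pkt["_potatomesh_seen"] == True
#   on_receive(pkt, interface=None)   # second delivery returns immediately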
|
||||
|
||||
|
||||
__all__ = [
|
||||
"_queue_post_json",
|
||||
"last_packet_monotonic",
|
||||
"on_receive",
|
||||
"store_neighborinfo_packet",
|
||||
"store_nodeinfo_packet",
|
||||
"store_packet_dict",
|
||||
"store_position_packet",
|
||||
"store_telemetry_packet",
|
||||
"upsert_node",
|
||||
]
|
||||
|
||||
_queue_post_json = queue._queue_post_json
|
||||
data/mesh_ingestor/interfaces.py (new file, 615 lines)
@@ -0,0 +1,615 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Mesh interface discovery helpers for interacting with Meshtastic hardware."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import glob
|
||||
import ipaddress
|
||||
import re
|
||||
import urllib.parse
|
||||
from collections.abc import Mapping
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from meshtastic.serial_interface import SerialInterface
|
||||
from meshtastic.tcp_interface import TCPInterface
|
||||
|
||||
from . import channels, config, serialization
|
||||
|
||||
if TYPE_CHECKING: # pragma: no cover - import only used for type checking
|
||||
from meshtastic.ble_interface import BLEInterface as _BLEInterface
|
||||
|
||||
BLEInterface = None
|
||||
|
||||
|
||||
def _patch_meshtastic_nodeinfo_handler() -> None:
|
||||
"""Ensure Meshtastic nodeinfo packets always include an ``id`` field."""
|
||||
|
||||
try:
|
||||
import meshtastic # type: ignore
|
||||
except Exception: # pragma: no cover - dependency optional in tests
|
||||
return
|
||||
|
||||
original = getattr(meshtastic, "_onNodeInfoReceive", None)
|
||||
if not callable(original):
|
||||
return
|
||||
if getattr(original, "_potato_mesh_safe_wrapper", False):
|
||||
return
|
||||
|
||||
def _safe_on_node_info_receive(iface, packet): # type: ignore[override]
|
||||
candidate_mapping: Mapping | None = None
|
||||
if isinstance(packet, Mapping):
|
||||
candidate_mapping = packet
|
||||
elif hasattr(packet, "__dict__") and isinstance(packet.__dict__, Mapping):
|
||||
candidate_mapping = packet.__dict__
|
||||
|
||||
node_id = None
|
||||
if candidate_mapping is not None:
|
||||
node_id = serialization._canonical_node_id(candidate_mapping.get("id"))
|
||||
if node_id is None:
|
||||
user_section = candidate_mapping.get("user")
|
||||
if isinstance(user_section, Mapping):
|
||||
node_id = serialization._canonical_node_id(user_section.get("id"))
|
||||
if node_id is None:
|
||||
for key in ("fromId", "from_id", "from", "num", "nodeId", "node_id"):
|
||||
node_id = serialization._canonical_node_id(
|
||||
candidate_mapping.get(key)
|
||||
)
|
||||
if node_id:
|
||||
break
|
||||
|
||||
if node_id:
|
||||
if not isinstance(candidate_mapping, dict):
|
||||
try:
|
||||
candidate_mapping = dict(candidate_mapping)
|
||||
except Exception:
|
||||
candidate_mapping = {
|
||||
k: candidate_mapping[k] for k in candidate_mapping
|
||||
}
|
||||
if candidate_mapping.get("id") != node_id:
|
||||
candidate_mapping["id"] = node_id
|
||||
packet = candidate_mapping
|
||||
|
||||
try:
|
||||
return original(iface, packet)
|
||||
except KeyError as exc: # pragma: no cover - defensive only
|
||||
if exc.args and exc.args[0] == "id":
|
||||
return None
|
||||
raise
|
||||
|
||||
_safe_on_node_info_receive._potato_mesh_safe_wrapper = True # type: ignore[attr-defined]
|
||||
meshtastic._onNodeInfoReceive = _safe_on_node_info_receive
|
||||
|
||||
|
||||
_patch_meshtastic_nodeinfo_handler()
|
||||
|
||||
|
||||
def _patch_meshtastic_ble_receive_loop() -> None:
|
||||
"""Prevent ``UnboundLocalError`` crashes in Meshtastic's BLE reader."""
|
||||
|
||||
try:
|
||||
from meshtastic import ble_interface as _ble_interface_module # type: ignore
|
||||
except Exception: # pragma: no cover - dependency optional in tests
|
||||
return
|
||||
|
||||
ble_class = getattr(_ble_interface_module, "BLEInterface", None)
|
||||
if ble_class is None:
|
||||
return
|
||||
|
||||
original = getattr(ble_class, "_receiveFromRadioImpl", None)
|
||||
if not callable(original):
|
||||
return
|
||||
if getattr(original, "_potato_mesh_safe_wrapper", False):
|
||||
return
|
||||
|
||||
FROMRADIO_UUID = getattr(_ble_interface_module, "FROMRADIO_UUID", None)
|
||||
BleakDBusError = getattr(_ble_interface_module, "BleakDBusError", ())
|
||||
BleakError = getattr(_ble_interface_module, "BleakError", ())
|
||||
logger = getattr(_ble_interface_module, "logger", None)
|
||||
time = getattr(_ble_interface_module, "time", None)
|
||||
|
||||
if not FROMRADIO_UUID or logger is None or time is None:
|
||||
return
|
||||
|
||||
def _safe_receive_from_radio(self): # type: ignore[override]
|
||||
while self._want_receive:
|
||||
if self.should_read:
|
||||
self.should_read = False
|
||||
retries: int = 0
|
||||
while self._want_receive:
|
||||
if self.client is None:
|
||||
logger.debug("BLE client is None, shutting down")
|
||||
self._want_receive = False
|
||||
continue
|
||||
|
||||
payload: bytes = b""
|
||||
try:
|
||||
payload = bytes(self.client.read_gatt_char(FROMRADIO_UUID))
|
||||
except BleakDBusError as exc:
|
||||
logger.debug("Device disconnected, shutting down %s", exc)
|
||||
self._want_receive = False
|
||||
payload = b""
|
||||
except BleakError as exc:
|
||||
if "Not connected" in str(exc):
|
||||
logger.debug("Device disconnected, shutting down %s", exc)
|
||||
self._want_receive = False
|
||||
payload = b""
|
||||
else:
|
||||
raise ble_class.BLEError("Error reading BLE") from exc
|
||||
|
||||
if not payload:
|
||||
if not self._want_receive:
|
||||
break
|
||||
if retries < 5:
|
||||
time.sleep(0.1)
|
||||
retries += 1
|
||||
continue
|
||||
break
|
||||
|
||||
logger.debug("FROMRADIO read: %s", payload.hex())
|
||||
self._handleFromRadio(payload)
|
||||
else:
|
||||
time.sleep(0.01)
|
||||
|
||||
_safe_receive_from_radio._potato_mesh_safe_wrapper = True # type: ignore[attr-defined]
|
||||
ble_class._receiveFromRadioImpl = _safe_receive_from_radio
|
||||
|
||||
|
||||
_patch_meshtastic_ble_receive_loop()
|
||||
|
||||
|
||||
def _has_field(message: Any, field_name: str) -> bool:
|
||||
"""Return ``True`` when ``message`` advertises ``field_name`` via ``HasField``."""
|
||||
|
||||
if message is None:
|
||||
return False
|
||||
has_field = getattr(message, "HasField", None)
|
||||
if callable(has_field):
|
||||
try:
|
||||
return bool(has_field(field_name))
|
||||
except Exception: # pragma: no cover - defensive guard
|
||||
return False
|
||||
return hasattr(message, field_name)
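# Presence-check sketch: protobuf messages answer through HasField, plain
# objects fall back to hasattr, and None never has a field.
class _Plain:
    lora = object()

assert _has_field(_Plain(), "lora") is True
assert _has_field(_Plain(), "radio") is False
assert _has_field(None, "lora") is False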
|
||||
|
||||
|
||||
def _enum_name_from_field(message: Any, field_name: str, value: Any) -> str | None:
|
||||
"""Return the enum name for ``value`` using ``message`` descriptors."""
|
||||
|
||||
descriptor = getattr(message, "DESCRIPTOR", None)
|
||||
if descriptor is None:
|
||||
return None
|
||||
fields_by_name = getattr(descriptor, "fields_by_name", {})
|
||||
field_desc = fields_by_name.get(field_name)
|
||||
if field_desc is None:
|
||||
return None
|
||||
enum_type = getattr(field_desc, "enum_type", None)
|
||||
if enum_type is None:
|
||||
return None
|
||||
enum_values = getattr(enum_type, "values_by_number", {})
|
||||
enum_value = enum_values.get(value)
|
||||
if enum_value is None:
|
||||
return None
|
||||
return getattr(enum_value, "name", None)
|
||||
|
||||
|
||||
def _resolve_lora_message(local_config: Any) -> Any | None:
|
||||
"""Return the LoRa configuration sub-message from ``local_config``."""
|
||||
|
||||
if local_config is None:
|
||||
return None
|
||||
if _has_field(local_config, "lora"):
|
||||
candidate = getattr(local_config, "lora", None)
|
||||
if candidate is not None:
|
||||
return candidate
|
||||
radio_section = getattr(local_config, "radio", None)
|
||||
if radio_section is not None:
|
||||
if _has_field(radio_section, "lora"):
|
||||
return getattr(radio_section, "lora", None)
|
||||
if hasattr(radio_section, "lora"):
|
||||
return getattr(radio_section, "lora")
|
||||
if hasattr(local_config, "lora"):
|
||||
return getattr(local_config, "lora")
|
||||
return None
|
||||
|
||||
|
||||
def _region_frequency(lora_message: Any) -> int | None:
|
||||
"""Derive the LoRa region frequency in MHz from ``lora_message``."""
|
||||
|
||||
if lora_message is None:
|
||||
return None
|
||||
region_value = getattr(lora_message, "region", None)
|
||||
if region_value is None:
|
||||
return None
|
||||
enum_name = _enum_name_from_field(lora_message, "region", region_value)
|
||||
if enum_name:
|
||||
digits = re.findall(r"\d+", enum_name)
|
||||
for token in digits:
|
||||
try:
|
||||
freq = int(token)
|
||||
except ValueError: # pragma: no cover - regex guarantees digits
|
||||
continue
|
||||
if freq >= 100:
|
||||
return freq
|
||||
for token in reversed(digits):
|
||||
try:
|
||||
return int(token)
|
||||
except ValueError: # pragma: no cover - defensive only
|
||||
continue
|
||||
if isinstance(region_value, int) and region_value >= 100:
|
||||
return region_value
|
||||
return None
|
||||
|
||||
|
||||
def _camelcase_enum_name(name: str | None) -> str | None:
|
||||
"""Convert ``name`` from ``SCREAMING_SNAKE`` to ``CamelCase``."""
|
||||
|
||||
if not name:
|
||||
return None
|
||||
parts = re.split(r"[^0-9A-Za-z]+", name.strip())
|
||||
camel_parts = [part.capitalize() for part in parts if part]
|
||||
if not camel_parts:
|
||||
return None
|
||||
return "".join(camel_parts)
|
||||
|
||||
|
||||
def _modem_preset(lora_message: Any) -> str | None:
|
||||
"""Return the CamelCase modem preset configured on ``lora_message``."""
|
||||
|
||||
if lora_message is None:
|
||||
return None
|
||||
descriptor = getattr(lora_message, "DESCRIPTOR", None)
|
||||
fields_by_name = getattr(descriptor, "fields_by_name", {}) if descriptor else {}
|
||||
if "modem_preset" in fields_by_name:
|
||||
preset_field = "modem_preset"
|
||||
elif "preset" in fields_by_name:
|
||||
preset_field = "preset"
|
||||
elif hasattr(lora_message, "modem_preset"):
|
||||
preset_field = "modem_preset"
|
||||
elif hasattr(lora_message, "preset"):
|
||||
preset_field = "preset"
|
||||
else:
|
||||
return None
|
||||
|
||||
preset_value = getattr(lora_message, preset_field, None)
|
||||
if preset_value is None:
|
||||
return None
|
||||
enum_name = _enum_name_from_field(lora_message, preset_field, preset_value)
|
||||
if isinstance(enum_name, str) and enum_name:
|
||||
return _camelcase_enum_name(enum_name)
|
||||
if isinstance(preset_value, str) and preset_value:
|
||||
return _camelcase_enum_name(preset_value)
|
||||
return None
|
||||
|
||||
|
||||
def _ensure_radio_metadata(iface: Any) -> None:
|
||||
"""Populate cached LoRa metadata by inspecting ``iface`` when available."""
|
||||
|
||||
if iface is None:
|
||||
return
|
||||
|
||||
try:
|
||||
wait_for_config = getattr(iface, "waitForConfig", None)
|
||||
if callable(wait_for_config):
|
||||
wait_for_config()
|
||||
except Exception: # pragma: no cover - hardware dependent guard
|
||||
pass
|
||||
|
||||
local_node = getattr(iface, "localNode", None)
|
||||
local_config = getattr(local_node, "localConfig", None) if local_node else None
|
||||
lora_message = _resolve_lora_message(local_config)
|
||||
if lora_message is None:
|
||||
return
|
||||
|
||||
frequency = _region_frequency(lora_message)
|
||||
preset = _modem_preset(lora_message)
|
||||
|
||||
updated = False
|
||||
if frequency is not None and getattr(config, "LORA_FREQ", None) is None:
|
||||
config.LORA_FREQ = frequency
|
||||
updated = True
|
||||
if preset is not None and getattr(config, "MODEM_PRESET", None) is None:
|
||||
config.MODEM_PRESET = preset
|
||||
updated = True
|
||||
|
||||
if updated:
|
||||
config._debug_log(
|
||||
"Captured LoRa radio metadata",
|
||||
context="interfaces.ensure_radio_metadata",
|
||||
severity="info",
|
||||
always=True,
|
||||
lora_freq=frequency,
|
||||
modem_preset=preset,
|
||||
)
|
||||
|
||||
|
||||
def _ensure_channel_metadata(iface: Any) -> None:
|
||||
"""Capture channel metadata by inspecting ``iface`` once per runtime."""
|
||||
|
||||
if iface is None:
|
||||
return
|
||||
|
||||
try:
|
||||
channels.capture_from_interface(iface)
|
||||
except Exception as exc: # pragma: no cover - defensive instrumentation
|
||||
config._debug_log(
|
||||
"Failed to capture channel metadata",
|
||||
context="interfaces.ensure_channel_metadata",
|
||||
severity="warn",
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
|
||||
|
||||
_DEFAULT_TCP_PORT = 4403
|
||||
_DEFAULT_TCP_TARGET = "http://127.0.0.1"
|
||||
|
||||
_DEFAULT_SERIAL_PATTERNS = (
|
||||
"/dev/ttyACM*",
|
||||
"/dev/ttyUSB*",
|
||||
"/dev/tty.usbmodem*",
|
||||
"/dev/tty.usbserial*",
|
||||
"/dev/cu.usbmodem*",
|
||||
"/dev/cu.usbserial*",
|
||||
)
|
||||
|
||||
_BLE_ADDRESS_RE = re.compile(r"^(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$")
|
||||
|
||||
|
||||
class _DummySerialInterface:
|
||||
"""In-memory replacement for ``meshtastic.serial_interface.SerialInterface``."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.nodes: dict = {}
|
||||
|
||||
def close(self) -> None: # pragma: no cover - nothing to close
|
||||
pass
|
||||
|
||||
|
||||
def _parse_ble_target(value: str) -> str | None:
|
||||
"""Return an uppercase BLE MAC address when ``value`` matches the format.
|
||||
|
||||
Parameters:
|
||||
value: User-provided target string.
|
||||
|
||||
Returns:
|
||||
The normalised MAC address or ``None`` when validation fails.
|
||||
"""
|
||||
|
||||
if not value:
|
||||
return None
|
||||
value = value.strip()
|
||||
if not value:
|
||||
return None
|
||||
if _BLE_ADDRESS_RE.fullmatch(value):
|
||||
return value.upper()
|
||||
return None
|
||||
|
||||
|
||||
def _parse_network_target(value: str) -> tuple[str, int] | None:
|
||||
"""Return ``(host, port)`` when ``value`` is a numeric IP address string.
|
||||
|
||||
Only literal IPv4 or IPv6 addresses are accepted, optionally paired with a
|
||||
port or scheme. Callers that start from hostnames should resolve them to an
|
||||
address before invoking this helper.
|
||||
|
||||
Parameters:
|
||||
value: Numeric IP literal or URL describing the TCP interface.
|
||||
|
||||
Returns:
|
||||
A ``(host, port)`` tuple or ``None`` when parsing fails.
|
||||
"""
|
||||
|
||||
if not value:
|
||||
return None
|
||||
|
||||
value = value.strip()
|
||||
if not value:
|
||||
return None
|
||||
|
||||
def _validated_result(host: str | None, port: int | None) -> tuple[str, int] | None:
|
||||
if not host:
|
||||
return None
|
||||
try:
|
||||
ipaddress.ip_address(host)
|
||||
except ValueError:
|
||||
return None
|
||||
return host, port or _DEFAULT_TCP_PORT
|
||||
|
||||
parsed_values = []
|
||||
if "://" in value:
|
||||
parsed_values.append(urllib.parse.urlparse(value, scheme="tcp"))
|
||||
parsed_values.append(urllib.parse.urlparse(f"//{value}", scheme="tcp"))
|
||||
|
||||
for parsed in parsed_values:
|
||||
try:
|
||||
port = parsed.port
|
||||
except ValueError:
|
||||
port = None
|
||||
result = _validated_result(parsed.hostname, port)
|
||||
if result:
|
||||
return result
|
||||
|
||||
if value.count(":") == 1 and not value.startswith("["):
|
||||
host, _, port_text = value.partition(":")
|
||||
try:
|
||||
port = int(port_text) if port_text else None
|
||||
except ValueError:
|
||||
port = None
|
||||
result = _validated_result(host, port)
|
||||
if result:
|
||||
return result
|
||||
|
||||
return _validated_result(value, None)
|
||||
|
||||
|
||||
def _load_ble_interface():
|
||||
"""Return :class:`meshtastic.ble_interface.BLEInterface` when available.
|
||||
|
||||
Returns:
|
||||
The resolved BLE interface class.
|
||||
|
||||
Raises:
|
||||
RuntimeError: If the BLE dependencies are not installed.
|
||||
"""
|
||||
|
||||
global BLEInterface
|
||||
if BLEInterface is not None:
|
||||
return BLEInterface
|
||||
|
||||
try:
|
||||
from meshtastic.ble_interface import BLEInterface as _resolved_interface
|
||||
except ImportError as exc: # pragma: no cover - exercised in non-BLE envs
|
||||
raise RuntimeError(
|
||||
"BLE interface requested but the Meshtastic BLE dependencies are not installed. "
|
||||
"Install the 'meshtastic[ble]' extra to enable BLE support."
|
||||
) from exc
|
||||
BLEInterface = _resolved_interface
|
||||
try:
|
||||
import sys
|
||||
|
||||
for module_name in ("data.mesh_ingestor", "data.mesh"):
|
||||
mesh_module = sys.modules.get(module_name)
|
||||
if mesh_module is not None:
|
||||
setattr(mesh_module, "BLEInterface", BLEInterface)
|
||||
except Exception: # pragma: no cover - defensive only
|
||||
pass
|
||||
return _resolved_interface
|
||||
|
||||
|
||||
def _create_serial_interface(port: str) -> tuple[object, str]:
|
||||
"""Return an appropriate mesh interface for ``port``.
|
||||
|
||||
Parameters:
|
||||
port: User-supplied port string which may represent serial, BLE or TCP.
|
||||
|
||||
Returns:
|
||||
``(interface, resolved_target)`` describing the created interface.
|
||||
"""
|
||||
|
||||
port_value = (port or "").strip()
|
||||
if port_value.lower() in {"", "mock", "none", "null", "disabled"}:
|
||||
config._debug_log(
|
||||
"Using dummy serial interface",
|
||||
context="interfaces.serial",
|
||||
port=port_value,
|
||||
)
|
||||
return _DummySerialInterface(), "mock"
|
||||
ble_target = _parse_ble_target(port_value)
|
||||
if ble_target:
|
||||
config._debug_log(
|
||||
"Using BLE interface",
|
||||
context="interfaces.ble",
|
||||
address=ble_target,
|
||||
)
|
||||
return _load_ble_interface()(address=ble_target), ble_target
|
||||
network_target = _parse_network_target(port_value)
|
||||
if network_target:
|
||||
host, tcp_port = network_target
|
||||
config._debug_log(
|
||||
"Using TCP interface",
|
||||
context="interfaces.tcp",
|
||||
host=host,
|
||||
port=tcp_port,
|
||||
)
|
||||
return (
|
||||
TCPInterface(hostname=host, portNumber=tcp_port),
|
||||
f"tcp://{host}:{tcp_port}",
|
||||
)
|
||||
config._debug_log(
|
||||
"Using serial interface",
|
||||
context="interfaces.serial",
|
||||
port=port_value,
|
||||
)
|
||||
return SerialInterface(devPath=port_value), port_value
|
||||
|
||||
|
||||
class NoAvailableMeshInterface(RuntimeError):
|
||||
"""Raised when no default mesh interface can be created."""
|
||||
|
||||
|
||||
def _default_serial_targets() -> list[str]:
|
||||
"""Return candidate serial device paths for auto-discovery."""
|
||||
|
||||
candidates: list[str] = []
|
||||
seen: set[str] = set()
|
||||
for pattern in _DEFAULT_SERIAL_PATTERNS:
|
||||
for path in sorted(glob.glob(pattern)):
|
||||
if path not in seen:
|
||||
candidates.append(path)
|
||||
seen.add(path)
|
||||
if "/dev/ttyACM0" not in seen:
|
||||
candidates.append("/dev/ttyACM0")
|
||||
return candidates
|
||||
|
||||
|
||||
def _create_default_interface() -> tuple[object, str]:
|
||||
"""Attempt to create the default mesh interface, raising on failure.
|
||||
|
||||
Returns:
|
||||
``(interface, resolved_target)`` for the discovered connection.
|
||||
|
||||
Raises:
|
||||
NoAvailableMeshInterface: When no usable connection can be created.
|
||||
"""
|
||||
|
||||
errors: list[tuple[str, Exception]] = []
|
||||
for candidate in _default_serial_targets():
|
||||
try:
|
||||
return _create_serial_interface(candidate)
|
||||
except Exception as exc: # pragma: no cover - hardware dependent
|
||||
errors.append((candidate, exc))
|
||||
config._debug_log(
|
||||
"Failed to open serial candidate",
|
||||
context="interfaces.auto_discovery",
|
||||
target=candidate,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
try:
|
||||
return _create_serial_interface(_DEFAULT_TCP_TARGET)
|
||||
except Exception as exc: # pragma: no cover - network dependent
|
||||
errors.append((_DEFAULT_TCP_TARGET, exc))
|
||||
config._debug_log(
|
||||
"Failed to open TCP fallback",
|
||||
context="interfaces.auto_discovery",
|
||||
target=_DEFAULT_TCP_TARGET,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
if errors:
|
||||
summary = "; ".join(f"{target}: {error}" for target, error in errors)
|
||||
raise NoAvailableMeshInterface(
|
||||
f"no mesh interface available ({summary})"
|
||||
) from errors[-1][1]
|
||||
raise NoAvailableMeshInterface("no mesh interface available")
|
||||
|
||||
|
||||
__all__ = [
|
||||
"BLEInterface",
|
||||
"NoAvailableMeshInterface",
|
||||
"_ensure_channel_metadata",
|
||||
"_ensure_radio_metadata",
|
||||
"_DummySerialInterface",
|
||||
"_DEFAULT_TCP_PORT",
|
||||
"_DEFAULT_TCP_TARGET",
|
||||
"_create_default_interface",
|
||||
"_create_serial_interface",
|
||||
"_default_serial_targets",
|
||||
"_load_ble_interface",
|
||||
"_parse_ble_target",
|
||||
"_parse_network_target",
|
||||
"SerialInterface",
|
||||
"TCPInterface",
|
||||
]
|
||||
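The helpers above decide which transport a user-supplied CONNECTION string maps to. As a minimal sketch of that classification (the import path data.mesh_ingestor.interfaces is an assumption based on the file names elsewhere in this diff; the expected values follow from the parsers and defaults defined above):

# Illustrative only; assumes the module above lives at data/mesh_ingestor/interfaces.py.
from data.mesh_ingestor import interfaces

# BLE targets must be colon-separated MAC addresses and are normalised to upper case.
assert interfaces._parse_ble_target("aa:bb:cc:dd:ee:ff") == "AA:BB:CC:DD:EE:FF"
assert interfaces._parse_ble_target("/dev/ttyACM0") is None

# Network targets must be numeric IP literals; the default port is 4403.
assert interfaces._parse_network_target("192.168.1.20") == ("192.168.1.20", 4403)
assert interfaces._parse_network_target("tcp://10.0.0.5:4403") == ("10.0.0.5", 4403)
assert interfaces._parse_network_target("meshtastic.local") is None  # hostnames are rejected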
data/mesh_ingestor/queue.py (new file, 196 lines)
@@ -0,0 +1,196 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Priority queue for POST operations."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import heapq
|
||||
import itertools
|
||||
import json
|
||||
import threading
|
||||
import urllib.request
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Callable, Iterator
|
||||
|
||||
from . import config
|
||||
|
||||
_MESSAGE_POST_PRIORITY = 10
|
||||
_NEIGHBOR_POST_PRIORITY = 20
|
||||
_POSITION_POST_PRIORITY = 30
|
||||
_TELEMETRY_POST_PRIORITY = 40
|
||||
_NODE_POST_PRIORITY = 50
|
||||
_DEFAULT_POST_PRIORITY = 90
|
||||
|
||||
|
||||
@dataclass
|
||||
class QueueState:
|
||||
"""Mutable state for the HTTP POST priority queue."""
|
||||
|
||||
lock: threading.Lock = field(default_factory=threading.Lock)
|
||||
queue: list[tuple[int, int, str, dict]] = field(default_factory=list)
|
||||
counter: Iterator[int] = field(default_factory=itertools.count)
|
||||
active: bool = False
|
||||
|
||||
|
||||
STATE = QueueState()
|
||||
|
||||
|
||||
def _post_json(
|
||||
path: str,
|
||||
payload: dict,
|
||||
*,
|
||||
instance: str | None = None,
|
||||
api_token: str | None = None,
|
||||
) -> None:
|
||||
"""Send a JSON payload to the configured web API.
|
||||
|
||||
Parameters:
|
||||
path: API path relative to the configured instance root.
|
||||
payload: JSON-serialisable body to transmit.
|
||||
instance: Optional override for :data:`config.INSTANCE`.
|
||||
api_token: Optional override for :data:`config.API_TOKEN`.
|
||||
"""
|
||||
|
||||
if instance is None:
|
||||
instance = config.INSTANCE
|
||||
if api_token is None:
|
||||
api_token = config.API_TOKEN
|
||||
|
||||
if not instance:
|
||||
return
|
||||
url = f"{instance}{path}"
|
||||
data = json.dumps(payload).encode("utf-8")
|
||||
req = urllib.request.Request(
|
||||
url, data=data, headers={"Content-Type": "application/json"}
|
||||
)
|
||||
if api_token:
|
||||
req.add_header("Authorization", f"Bearer {api_token}")
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||
resp.read()
|
||||
except Exception as exc: # pragma: no cover - exercised in production
|
||||
config._debug_log(
|
||||
"POST request failed",
|
||||
context="queue.post_json",
|
||||
severity="warn",
|
||||
url=url,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
|
||||
|
||||
def _enqueue_post_json(
|
||||
path: str,
|
||||
payload: dict,
|
||||
priority: int,
|
||||
*,
|
||||
state: QueueState = STATE,
|
||||
) -> None:
|
||||
"""Store a POST request in the priority queue.
|
||||
|
||||
Parameters:
|
||||
path: API path for the queued request.
|
||||
payload: JSON-serialisable body.
|
||||
priority: Lower values execute first.
|
||||
state: Shared queue state, injectable for testing.
|
||||
"""
|
||||
|
||||
with state.lock:
|
||||
counter = next(state.counter)
|
||||
heapq.heappush(state.queue, (priority, counter, path, payload))
|
||||
|
||||
|
||||
def _drain_post_queue(
|
||||
state: QueueState = STATE, send: Callable[[str, dict], None] | None = None
|
||||
) -> None:
|
||||
"""Process queued POST requests in priority order.
|
||||
|
||||
Parameters:
|
||||
state: Queue container holding pending items.
|
||||
send: Optional callable used to transmit requests.
|
||||
"""
|
||||
|
||||
if send is None:
|
||||
send = _post_json
|
||||
|
||||
try:
|
||||
while True:
|
||||
with state.lock:
|
||||
if not state.queue:
|
||||
state.active = False
|
||||
return
|
||||
_priority, _idx, path, payload = heapq.heappop(state.queue)
|
||||
send(path, payload)
|
||||
finally:
|
||||
with state.lock:
|
||||
state.active = False
|
||||
|
||||
|
||||
def _queue_post_json(
|
||||
path: str,
|
||||
payload: dict,
|
||||
*,
|
||||
priority: int = _DEFAULT_POST_PRIORITY,
|
||||
state: QueueState = STATE,
|
||||
send: Callable[[str, dict], None] | None = None,
|
||||
) -> None:
|
||||
"""Queue a POST request and start processing if idle.
|
||||
|
||||
Parameters:
|
||||
path: API path for the request.
|
||||
payload: JSON payload to send.
|
||||
priority: Scheduling priority where lower values run first.
|
||||
state: Queue container used to store pending requests.
|
||||
send: Optional transport override, primarily for tests.
|
||||
"""
|
||||
|
||||
if send is None:
|
||||
send = _post_json
|
||||
|
||||
_enqueue_post_json(path, payload, priority, state=state)
|
||||
with state.lock:
|
||||
if state.active:
|
||||
return
|
||||
state.active = True
|
||||
_drain_post_queue(state, send=send)
|
||||
|
||||
|
||||
def _clear_post_queue(state: QueueState = STATE) -> None:
|
||||
"""Clear the pending POST queue.
|
||||
|
||||
Parameters:
|
||||
state: Queue state to reset. Defaults to the global queue.
|
||||
"""
|
||||
|
||||
with state.lock:
|
||||
state.queue.clear()
|
||||
state.active = False
|
||||
|
||||
|
||||
__all__ = [
|
||||
"STATE",
|
||||
"QueueState",
|
||||
"_DEFAULT_POST_PRIORITY",
|
||||
"_MESSAGE_POST_PRIORITY",
|
||||
"_NEIGHBOR_POST_PRIORITY",
|
||||
"_NODE_POST_PRIORITY",
|
||||
"_POSITION_POST_PRIORITY",
|
||||
"_TELEMETRY_POST_PRIORITY",
|
||||
"_clear_post_queue",
|
||||
"_drain_post_queue",
|
||||
"_enqueue_post_json",
|
||||
"_post_json",
|
||||
"_queue_post_json",
|
||||
]
|
||||
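Because _queue_post_json drains the queue immediately whenever it is idle, the priority ordering is easiest to see by enqueueing first and draining explicitly. A small sketch using an injected QueueState and a stub sender, so no HTTP traffic is produced (the import alias is an assumption):

from data.mesh_ingestor import queue as ingest_queue  # assumed import path

sent: list[str] = []
state = ingest_queue.QueueState()

# Enqueue a low-priority node upsert before a high-priority message...
ingest_queue._enqueue_post_json("/api/nodes", {"!deadbeef": {}}, ingest_queue._NODE_POST_PRIORITY, state=state)
ingest_queue._enqueue_post_json("/api/messages", {"text": "hi"}, ingest_queue._MESSAGE_POST_PRIORITY, state=state)

# ...then drain: lower priority values are transmitted first.
ingest_queue._drain_post_queue(state, send=lambda path, payload: sent.append(path))
assert sent == ["/api/messages", "/api/nodes"]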
data/mesh_ingestor/serialization.py (new file, 613 lines)
@@ -0,0 +1,613 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Utilities for converting Meshtastic structures into JSON-friendly forms.
|
||||
|
||||
The helpers normalise loosely structured Meshtastic packets so they can be
|
||||
forwarded to the web application using predictable field names and types.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import dataclasses
|
||||
import json
|
||||
import math
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
|
||||
from google.protobuf.json_format import MessageToDict
|
||||
from google.protobuf.message import DecodeError
|
||||
from google.protobuf.message import Message as ProtoMessage
|
||||
|
||||
|
||||
def _get(obj, key, default=None):
|
||||
"""Return ``obj[key]`` or ``getattr(obj, key)`` when available.
|
||||
|
||||
Parameters:
|
||||
obj: Mapping or object supplying attributes.
|
||||
key: Name of the attribute or mapping key to retrieve.
|
||||
default: Fallback value when ``key`` is not present.
|
||||
|
||||
Returns:
|
||||
The resolved value or ``default`` if the lookup fails.
|
||||
"""
|
||||
|
||||
if isinstance(obj, dict):
|
||||
return obj.get(key, default)
|
||||
return getattr(obj, key, default)
|
||||
|
||||
|
||||
def _node_to_dict(n) -> dict:
|
||||
"""Convert ``n`` into a JSON-serialisable mapping.
|
||||
|
||||
Parameters:
|
||||
n: Arbitrary data structure, commonly a protobuf message, dataclass or
|
||||
nested containers produced by Meshtastic.
|
||||
|
||||
Returns:
|
||||
A plain dictionary containing recursively converted values.
|
||||
"""
|
||||
|
||||
def _convert(value):
|
||||
if isinstance(value, dict):
|
||||
return {k: _convert(v) for k, v in value.items()}
|
||||
if isinstance(value, (list, tuple, set)):
|
||||
return [_convert(v) for v in value]
|
||||
if dataclasses.is_dataclass(value):
|
||||
return {k: _convert(getattr(value, k)) for k in value.__dataclass_fields__}
|
||||
if isinstance(value, ProtoMessage):
|
||||
try:
|
||||
return MessageToDict(
|
||||
value,
|
||||
preserving_proto_field_name=True,
|
||||
use_integers_for_enums=False,
|
||||
)
|
||||
except Exception:
|
||||
if hasattr(value, "to_dict"):
|
||||
try:
|
||||
return value.to_dict()
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
return json.loads(json.dumps(value, default=str))
|
||||
except Exception:
|
||||
return str(value)
|
||||
if isinstance(value, bytes):
|
||||
try:
|
||||
return value.decode()
|
||||
except Exception:
|
||||
return value.hex()
|
||||
if isinstance(value, (str, int, float, bool)) or value is None:
|
||||
return value
|
||||
try:
|
||||
return json.loads(json.dumps(value, default=str))
|
||||
except Exception:
|
||||
return str(value)
|
||||
|
||||
return _convert(n)
|
||||
|
||||
|
||||
def upsert_payload(node_id, node) -> dict:
|
||||
"""Return the payload expected by ``/api/nodes`` upsert requests.
|
||||
|
||||
Parameters:
|
||||
node_id: Canonical node identifier.
|
||||
node: Node representation to convert with :func:`_node_to_dict`.
|
||||
|
||||
Returns:
|
||||
A mapping keyed by ``node_id`` describing the node.
|
||||
"""
|
||||
|
||||
ndict = _node_to_dict(node)
|
||||
return {node_id: ndict}
|
||||
|
||||
|
||||
def _iso(ts: int | float) -> str:
|
||||
"""Convert ``ts`` into an ISO-8601 timestamp in UTC."""
|
||||
|
||||
import datetime
|
||||
|
||||
return (
|
||||
datetime.datetime.fromtimestamp(int(ts), datetime.UTC)
|
||||
.isoformat()
|
||||
.replace("+00:00", "Z")
|
||||
)
|
||||
|
||||
|
||||
def _first(d, *names, default=None):
|
||||
"""Return the first matching attribute or key from ``d``.
|
||||
|
||||
Parameters:
|
||||
d: Mapping or object providing nested attributes.
|
||||
*names: Candidate names, optionally using ``dot.separated`` notation
|
||||
for nested lookups.
|
||||
default: Value returned when no candidates succeed.
|
||||
|
||||
Returns:
|
||||
The first non-empty value encountered or ``default``.
|
||||
"""
|
||||
|
||||
def _mapping_get(obj, key):
|
||||
if isinstance(obj, Mapping) and key in obj:
|
||||
return True, obj[key]
|
||||
if hasattr(obj, "__getitem__"):
|
||||
try:
|
||||
return True, obj[key]
|
||||
except Exception:
|
||||
pass
|
||||
if hasattr(obj, key):
|
||||
return True, getattr(obj, key)
|
||||
return False, None
|
||||
|
||||
for name in names:
|
||||
cur = d
|
||||
ok = True
|
||||
for part in name.split("."):
|
||||
ok, cur = _mapping_get(cur, part)
|
||||
if not ok:
|
||||
break
|
||||
if ok:
|
||||
if cur is None:
|
||||
continue
|
||||
if isinstance(cur, str) and cur == "":
|
||||
continue
|
||||
return cur
|
||||
return default
|
||||
|
||||
|
||||
def _coerce_int(value):
|
||||
"""Best-effort conversion of ``value`` to an integer.
|
||||
|
||||
Parameters:
|
||||
value: Any type supported by Meshtastic payloads.
|
||||
|
||||
Returns:
|
||||
An integer or ``None`` when conversion is not possible.
|
||||
"""
|
||||
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, bool):
|
||||
return int(value)
|
||||
if isinstance(value, int):
|
||||
return value
|
||||
if isinstance(value, float):
|
||||
return int(value) if math.isfinite(value) else None
|
||||
if isinstance(value, (str, bytes, bytearray)):
|
||||
text = value.decode() if isinstance(value, (bytes, bytearray)) else value
|
||||
stripped = text.strip()
|
||||
if not stripped:
|
||||
return None
|
||||
try:
|
||||
if stripped.lower().startswith("0x"):
|
||||
return int(stripped, 16)
|
||||
return int(stripped, 10)
|
||||
except ValueError:
|
||||
try:
|
||||
return int(float(stripped))
|
||||
except ValueError:
|
||||
return None
|
||||
try:
|
||||
return int(value)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
|
||||
|
||||
def _coerce_float(value):
|
||||
"""Best-effort conversion of ``value`` to a float.
|
||||
|
||||
Parameters:
|
||||
value: Any type supported by Meshtastic payloads.
|
||||
|
||||
Returns:
|
||||
A float or ``None`` when conversion fails or results in ``NaN``.
|
||||
"""
|
||||
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, bool):
|
||||
return float(value)
|
||||
if isinstance(value, (int, float)):
|
||||
result = float(value)
|
||||
return result if math.isfinite(result) else None
|
||||
if isinstance(value, (str, bytes, bytearray)):
|
||||
text = value.decode() if isinstance(value, (bytes, bytearray)) else value
|
||||
stripped = text.strip()
|
||||
if not stripped:
|
||||
return None
|
||||
try:
|
||||
result = float(stripped)
|
||||
except ValueError:
|
||||
return None
|
||||
return result if math.isfinite(result) else None
|
||||
try:
|
||||
result = float(value)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
return result if math.isfinite(result) else None
|
||||
|
||||
|
||||
def _pkt_to_dict(packet) -> dict:
|
||||
"""Normalise a packet into a plain dictionary.
|
||||
|
||||
Parameters:
|
||||
packet: Packet object or mapping emitted by Meshtastic.
|
||||
|
||||
Returns:
|
||||
A dictionary representation suitable for downstream processing.
|
||||
"""
|
||||
|
||||
if isinstance(packet, dict):
|
||||
return packet
|
||||
if isinstance(packet, ProtoMessage):
|
||||
try:
|
||||
return MessageToDict(
|
||||
packet, preserving_proto_field_name=True, use_integers_for_enums=False
|
||||
)
|
||||
except Exception:
|
||||
if hasattr(packet, "to_dict"):
|
||||
try:
|
||||
return packet.to_dict()
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
return json.loads(json.dumps(packet, default=lambda o: str(o)))
|
||||
except Exception:
|
||||
return {"_unparsed": str(packet)}
|
||||
|
||||
|
||||
def _canonical_node_id(value) -> str | None:
|
||||
"""Convert node identifiers into the canonical ``!xxxxxxxx`` format.
|
||||
|
||||
Parameters:
|
||||
value: Input identifier which may be an int, float or string.
|
||||
|
||||
Returns:
|
||||
The canonical identifier or ``None`` if conversion fails.
|
||||
"""
|
||||
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, (int, float)):
|
||||
try:
|
||||
num = int(value)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
if num < 0:
|
||||
return None
|
||||
return f"!{num & 0xFFFFFFFF:08x}"
|
||||
if not isinstance(value, str):
|
||||
return None
|
||||
|
||||
trimmed = value.strip()
|
||||
if not trimmed:
|
||||
return None
|
||||
if trimmed.startswith("^"):
|
||||
return trimmed
|
||||
if trimmed.startswith("!"):
|
||||
body = trimmed[1:]
|
||||
elif trimmed.lower().startswith("0x"):
|
||||
body = trimmed[2:]
|
||||
elif trimmed.isdigit():
|
||||
try:
|
||||
return f"!{int(trimmed, 10) & 0xFFFFFFFF:08x}"
|
||||
except ValueError:
|
||||
return None
|
||||
else:
|
||||
body = trimmed
|
||||
|
||||
if not body:
|
||||
return None
|
||||
try:
|
||||
return f"!{int(body, 16) & 0xFFFFFFFF:08x}"
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def _node_num_from_id(node_id) -> int | None:
|
||||
"""Extract the numeric node ID from a canonical identifier.
|
||||
|
||||
Parameters:
|
||||
node_id: Identifier value accepted by :func:`_canonical_node_id`.
|
||||
|
||||
Returns:
|
||||
The numeric node ID or ``None`` when parsing fails.
|
||||
"""
|
||||
|
||||
if node_id is None:
|
||||
return None
|
||||
if isinstance(node_id, (int, float)):
|
||||
try:
|
||||
num = int(node_id)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
return num if num >= 0 else None
|
||||
if not isinstance(node_id, str):
|
||||
return None
|
||||
|
||||
trimmed = node_id.strip()
|
||||
if not trimmed:
|
||||
return None
|
||||
if trimmed.startswith("!"):
|
||||
trimmed = trimmed[1:]
|
||||
if trimmed.lower().startswith("0x"):
|
||||
trimmed = trimmed[2:]
|
||||
try:
|
||||
return int(trimmed, 16)
|
||||
except ValueError:
|
||||
try:
|
||||
return int(trimmed, 10)
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def _merge_mappings(base, extra):
|
||||
"""Merge two mapping-like objects recursively.
|
||||
|
||||
Parameters:
|
||||
base: Existing mapping or mapping-like structure.
|
||||
extra: Mapping or compatible object whose entries should overlay
|
||||
``base``.
|
||||
|
||||
Returns:
|
||||
A new dictionary containing the merged values.
|
||||
"""
|
||||
|
||||
base_dict: dict
|
||||
if isinstance(base, Mapping):
|
||||
base_dict = dict(base)
|
||||
elif base:
|
||||
converted_base = _node_to_dict(base)
|
||||
base_dict = dict(converted_base) if isinstance(converted_base, Mapping) else {}
|
||||
else:
|
||||
base_dict = {}
|
||||
|
||||
if not isinstance(extra, Mapping):
|
||||
converted_extra = _node_to_dict(extra)
|
||||
if not isinstance(converted_extra, Mapping):
|
||||
return base_dict
|
||||
extra = converted_extra
|
||||
|
||||
for key, value in extra.items():
|
||||
if isinstance(value, Mapping):
|
||||
existing = base_dict.get(key)
|
||||
base_dict[key] = _merge_mappings(existing, value)
|
||||
else:
|
||||
base_dict[key] = _node_to_dict(value)
|
||||
return base_dict
|
||||
|
||||
|
||||
def _extract_payload_bytes(decoded_section: Mapping) -> bytes | None:
|
||||
"""Return raw payload bytes from ``decoded_section`` when available.
|
||||
|
||||
Parameters:
|
||||
decoded_section: Mapping that may include a ``payload`` entry.
|
||||
|
||||
Returns:
|
||||
Raw payload bytes or ``None`` when the payload is missing or invalid.
|
||||
"""
|
||||
|
||||
if not isinstance(decoded_section, Mapping):
|
||||
return None
|
||||
payload = decoded_section.get("payload")
|
||||
if isinstance(payload, Mapping):
|
||||
data = payload.get("__bytes_b64__") or payload.get("bytes")
|
||||
if isinstance(data, str):
|
||||
try:
|
||||
return base64.b64decode(data)
|
||||
except Exception:
|
||||
return None
|
||||
if isinstance(payload, (bytes, bytearray)):
|
||||
return bytes(payload)
|
||||
if isinstance(payload, str):
|
||||
try:
|
||||
return base64.b64decode(payload)
|
||||
except Exception:
|
||||
return None
|
||||
return None
|
||||
|
||||
|
||||
def _decode_nodeinfo_payload(payload_bytes):
|
||||
"""Decode ``NodeInfo`` protobuf payloads from raw bytes.
|
||||
|
||||
Parameters:
|
||||
payload_bytes: Serialized protobuf data from a NODEINFO packet.
|
||||
|
||||
Returns:
|
||||
A :class:`meshtastic.protobuf.mesh_pb2.NodeInfo` instance or ``None``.
|
||||
"""
|
||||
|
||||
if not payload_bytes:
|
||||
return None
|
||||
try:
|
||||
from meshtastic.protobuf import mesh_pb2
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
node_info = mesh_pb2.NodeInfo()
|
||||
try:
|
||||
node_info.ParseFromString(payload_bytes)
|
||||
return node_info
|
||||
except DecodeError:
|
||||
try:
|
||||
user_msg = mesh_pb2.User()
|
||||
user_msg.ParseFromString(payload_bytes)
|
||||
except DecodeError:
|
||||
return None
|
||||
node_info = mesh_pb2.NodeInfo()
|
||||
node_info.user.CopyFrom(user_msg)
|
||||
return node_info
|
||||
|
||||
|
||||
def _nodeinfo_metrics_dict(node_info) -> dict | None:
|
||||
"""Extract device metric fields from a NodeInfo message.
|
||||
|
||||
Parameters:
|
||||
node_info: Parsed NodeInfo protobuf message.
|
||||
|
||||
Returns:
|
||||
A dictionary containing selected metric fields, or ``None`` when no
|
||||
metrics are present.
|
||||
"""
|
||||
|
||||
if not node_info:
|
||||
return None
|
||||
metrics_field_names = {f[0].name for f in node_info.ListFields()}
|
||||
if "device_metrics" not in metrics_field_names:
|
||||
return None
|
||||
metrics = {}
|
||||
for field_desc, value in node_info.device_metrics.ListFields():
|
||||
name = field_desc.name
|
||||
if name == "battery_level":
|
||||
metrics["batteryLevel"] = float(value)
|
||||
elif name == "voltage":
|
||||
metrics["voltage"] = float(value)
|
||||
elif name == "channel_utilization":
|
||||
metrics["channelUtilization"] = float(value)
|
||||
elif name == "air_util_tx":
|
||||
metrics["airUtilTx"] = float(value)
|
||||
elif name == "uptime_seconds":
|
||||
metrics["uptimeSeconds"] = int(value)
|
||||
elif name == "humidity":
|
||||
metrics["humidity"] = float(value)
|
||||
elif name == "temperature":
|
||||
metrics["temperature"] = float(value)
|
||||
elif name == "barometric_pressure":
|
||||
metrics["barometricPressure"] = float(value)
|
||||
return metrics or None
|
||||
|
||||
|
||||
def _nodeinfo_position_dict(node_info) -> dict | None:
|
||||
"""Return a dictionary view of positional data from NodeInfo.
|
||||
|
||||
Parameters:
|
||||
node_info: Parsed NodeInfo protobuf message.
|
||||
|
||||
Returns:
|
||||
A dictionary of positional fields or ``None`` if no data exists.
|
||||
"""
|
||||
|
||||
if not node_info:
|
||||
return None
|
||||
fields = {f[0].name for f in node_info.ListFields()}
|
||||
if "position" not in fields:
|
||||
return None
|
||||
|
||||
result = {}
|
||||
latitude_i = None
|
||||
longitude_i = None
|
||||
|
||||
for field_desc, value in node_info.position.ListFields():
|
||||
name = field_desc.name
|
||||
if name == "latitude_i":
|
||||
latitude_i = int(value)
|
||||
result["latitudeI"] = latitude_i
|
||||
elif name == "longitude_i":
|
||||
longitude_i = int(value)
|
||||
result["longitudeI"] = longitude_i
|
||||
elif name == "latitude":
|
||||
result["latitude"] = float(value)
|
||||
elif name == "longitude":
|
||||
result["longitude"] = float(value)
|
||||
elif name == "altitude":
|
||||
result["altitude"] = int(value)
|
||||
elif name == "time":
|
||||
result["time"] = int(value)
|
||||
elif name == "ground_speed":
|
||||
result["groundSpeed"] = float(value)
|
||||
elif name == "ground_track":
|
||||
result["groundTrack"] = float(value)
|
||||
elif name == "precision_bits":
|
||||
result["precisionBits"] = int(value)
|
||||
elif name == "location_source":
|
||||
# Preserve the raw enum value to allow downstream formatting.
|
||||
result["locationSource"] = int(value)
|
||||
|
||||
if "latitude" not in result and latitude_i is not None:
|
||||
result["latitude"] = latitude_i / 1e7
|
||||
if "longitude" not in result and longitude_i is not None:
|
||||
result["longitude"] = longitude_i / 1e7
|
||||
|
||||
return result or None
|
||||
|
||||
|
||||
def _nodeinfo_user_dict(node_info, decoded_user):
|
||||
"""Combine protobuf and decoded user information into a mapping.
|
||||
|
||||
Parameters:
|
||||
node_info: Parsed NodeInfo protobuf message that may contain a ``user``
|
||||
field.
|
||||
decoded_user: Mapping or protobuf message representing decoded user
|
||||
data from the packet payload.
|
||||
|
||||
Returns:
|
||||
A merged mapping of user information or ``None`` when no data exists.
|
||||
"""
|
||||
|
||||
user_dict = None
|
||||
if node_info:
|
||||
field_names = {f[0].name for f in node_info.ListFields()}
|
||||
if "user" in field_names:
|
||||
try:
|
||||
user_dict = MessageToDict(
|
||||
node_info.user,
|
||||
preserving_proto_field_name=False,
|
||||
use_integers_for_enums=False,
|
||||
)
|
||||
except Exception:
|
||||
user_dict = None
|
||||
|
||||
if isinstance(decoded_user, ProtoMessage):
|
||||
try:
|
||||
decoded_user = MessageToDict(
|
||||
decoded_user,
|
||||
preserving_proto_field_name=False,
|
||||
use_integers_for_enums=False,
|
||||
)
|
||||
except Exception:
|
||||
decoded_user = _node_to_dict(decoded_user)
|
||||
|
||||
if isinstance(decoded_user, Mapping):
|
||||
user_dict = _merge_mappings(user_dict, decoded_user)
|
||||
|
||||
if isinstance(user_dict, Mapping):
|
||||
canonical = _canonical_node_id(user_dict.get("id"))
|
||||
if canonical:
|
||||
user_dict = dict(user_dict)
|
||||
user_dict["id"] = canonical
|
||||
return user_dict
|
||||
|
||||
|
||||
__all__ = [
|
||||
"_canonical_node_id",
|
||||
"_coerce_float",
|
||||
"_coerce_int",
|
||||
"_decode_nodeinfo_payload",
|
||||
"_extract_payload_bytes",
|
||||
"_first",
|
||||
"_get",
|
||||
"_iso",
|
||||
"_merge_mappings",
|
||||
"_node_num_from_id",
|
||||
"_node_to_dict",
|
||||
"_nodeinfo_metrics_dict",
|
||||
"_nodeinfo_position_dict",
|
||||
"_nodeinfo_user_dict",
|
||||
"_pkt_to_dict",
|
||||
"DecodeError",
|
||||
"MessageToDict",
|
||||
"ProtoMessage",
|
||||
"upsert_payload",
|
||||
]
|
||||
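The identifier and coercion helpers above are pure functions, so their behaviour can be sketched with a few spot checks. The expected values follow from the code above; the import path is assumed from the diff's file layout.

from data.mesh_ingestor import serialization as ser  # assumed import path

assert ser._canonical_node_id(0xDEADBEEF) == "!deadbeef"
assert ser._canonical_node_id("0xDEADBEEF") == "!deadbeef"
assert ser._canonical_node_id("3735928559") == "!deadbeef"
assert ser._node_num_from_id("!deadbeef") == 0xDEADBEEF
assert ser._iso(0) == "1970-01-01T00:00:00Z"
assert ser._coerce_int("0x2a") == 42
assert ser._coerce_float("nan") is None  # non-finite floats are rejected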
@@ -1,16 +1,33 @@
|
||||
-- Copyright (C) 2025 l5yth
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
CREATE TABLE IF NOT EXISTS messages (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
rx_time INTEGER NOT NULL, -- unix seconds when received
|
||||
rx_iso TEXT NOT NULL, -- ISO8601 UTC timestamp
|
||||
from_id TEXT, -- sender node id (string form)
|
||||
to_id TEXT, -- recipient node id
|
||||
channel INTEGER, -- channel index
|
||||
portnum TEXT, -- application portnum (e.g. TEXT_MESSAGE_APP)
|
||||
text TEXT, -- decoded text payload if present
|
||||
snr REAL, -- signal-to-noise ratio
|
||||
rssi INTEGER, -- received signal strength
|
||||
hop_limit INTEGER, -- hops left when received
|
||||
raw_json TEXT -- entire packet JSON dump
|
||||
id INTEGER PRIMARY KEY,
|
||||
rx_time INTEGER NOT NULL,
|
||||
rx_iso TEXT NOT NULL,
|
||||
from_id TEXT,
|
||||
to_id TEXT,
|
||||
channel INTEGER,
|
||||
portnum TEXT,
|
||||
text TEXT,
|
||||
encrypted TEXT,
|
||||
snr REAL,
|
||||
rssi INTEGER,
|
||||
hop_limit INTEGER,
|
||||
lora_freq INTEGER,
|
||||
modem_preset TEXT,
|
||||
channel_name TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_rx_time ON messages(rx_time);
|
||||
|
||||
@@ -0,0 +1,4 @@
|
||||
-- Add support for encrypted messages to the existing schema.
|
||||
BEGIN;
|
||||
ALTER TABLE messages ADD COLUMN encrypted TEXT;
|
||||
COMMIT;
|
||||
data/migrations/20250301_add_lora_columns.sql (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
-- Copyright (C) 2025 l5yth
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
-- Extend the nodes and messages tables with LoRa metadata columns.
|
||||
BEGIN;
|
||||
ALTER TABLE nodes ADD COLUMN lora_freq INTEGER;
|
||||
ALTER TABLE nodes ADD COLUMN modem_preset TEXT;
|
||||
ALTER TABLE messages ADD COLUMN lora_freq INTEGER;
|
||||
ALTER TABLE messages ADD COLUMN modem_preset TEXT;
|
||||
ALTER TABLE messages ADD COLUMN channel_name TEXT;
|
||||
COMMIT;
|
||||
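How these migration scripts are applied is not shown in this diff; the snippet below is one hedged way to run every file in data/migrations against a SQLite database with Python's sqlite3 module, skipping scripts whose columns already exist. The mesh.db filename is an assumption.

import pathlib
import sqlite3

db = sqlite3.connect("mesh.db")  # assumed database path
for script in sorted(pathlib.Path("data/migrations").glob("*.sql")):
    try:
        db.executescript(script.read_text())
    except sqlite3.OperationalError as exc:
        # e.g. "duplicate column name" when a migration was already applied
        print(f"skipping {script.name}: {exc}")
db.close()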
data/neighbors.sql (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
-- Copyright (C) 2025 l5yth
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
CREATE TABLE IF NOT EXISTS neighbors (
|
||||
node_id TEXT NOT NULL,
|
||||
neighbor_id TEXT NOT NULL,
|
||||
snr REAL,
|
||||
rx_time INTEGER NOT NULL,
|
||||
PRIMARY KEY (node_id, neighbor_id),
|
||||
FOREIGN KEY (node_id) REFERENCES nodes(node_id) ON DELETE CASCADE,
|
||||
FOREIGN KEY (neighbor_id) REFERENCES nodes(node_id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_neighbors_rx_time ON neighbors(rx_time);
|
||||
CREATE INDEX IF NOT EXISTS idx_neighbors_neighbor_id ON neighbors(neighbor_id);
|
||||
@@ -1,4 +1,17 @@
|
||||
-- nodes.sql
|
||||
-- Copyright (C) 2025 l5yth
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
PRAGMA journal_mode=WAL;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS nodes (
|
||||
@@ -23,9 +36,12 @@ CREATE TABLE IF NOT EXISTS nodes (
|
||||
uptime_seconds INTEGER,
|
||||
position_time INTEGER,
|
||||
location_source TEXT,
|
||||
precision_bits INTEGER,
|
||||
latitude REAL,
|
||||
longitude REAL,
|
||||
altitude REAL
|
||||
altitude REAL,
|
||||
lora_freq INTEGER,
|
||||
modem_preset TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_nodes_last_heard ON nodes(last_heard);
|
||||
|
||||
data/positions.sql (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
-- Copyright (C) 2025 l5yth
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
CREATE TABLE IF NOT EXISTS positions (
|
||||
id INTEGER PRIMARY KEY,
|
||||
node_id TEXT,
|
||||
node_num INTEGER,
|
||||
rx_time INTEGER NOT NULL,
|
||||
rx_iso TEXT NOT NULL,
|
||||
position_time INTEGER,
|
||||
to_id TEXT,
|
||||
latitude REAL,
|
||||
longitude REAL,
|
||||
altitude REAL,
|
||||
location_source TEXT,
|
||||
precision_bits INTEGER,
|
||||
sats_in_view INTEGER,
|
||||
pdop REAL,
|
||||
ground_speed REAL,
|
||||
ground_track REAL,
|
||||
snr REAL,
|
||||
rssi INTEGER,
|
||||
hop_limit INTEGER,
|
||||
bitfield INTEGER,
|
||||
payload_b64 TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_positions_rx_time ON positions(rx_time);
|
||||
CREATE INDEX IF NOT EXISTS idx_positions_node_id ON positions(node_id);
|
||||
data/requirements.txt (new file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
# Production dependencies
|
||||
meshtastic>=2.5.0
|
||||
protobuf>=5.27.2
|
||||
|
||||
# Development dependencies (optional)
|
||||
black>=24.8.0
|
||||
pytest>=8.3.0
|
||||
pytest-cov>=5.0.0
|
||||
data/telemetry.sql (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
-- Copyright (C) 2025 l5yth
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
CREATE TABLE IF NOT EXISTS telemetry (
|
||||
id INTEGER PRIMARY KEY,
|
||||
node_id TEXT,
|
||||
node_num INTEGER,
|
||||
from_id TEXT,
|
||||
to_id TEXT,
|
||||
rx_time INTEGER NOT NULL,
|
||||
rx_iso TEXT NOT NULL,
|
||||
telemetry_time INTEGER,
|
||||
channel INTEGER,
|
||||
portnum TEXT,
|
||||
hop_limit INTEGER,
|
||||
snr REAL,
|
||||
rssi INTEGER,
|
||||
bitfield INTEGER,
|
||||
payload_b64 TEXT,
|
||||
battery_level REAL,
|
||||
voltage REAL,
|
||||
channel_utilization REAL,
|
||||
air_util_tx REAL,
|
||||
uptime_seconds INTEGER,
|
||||
temperature REAL,
|
||||
relative_humidity REAL,
|
||||
barometric_pressure REAL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_telemetry_rx_time ON telemetry(rx_time);
|
||||
CREATE INDEX IF NOT EXISTS idx_telemetry_node_id ON telemetry(node_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_telemetry_time ON telemetry(telemetry_time);
|
||||
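As a hedged illustration of how the telemetry table above might be queried, the snippet below reads the most recent sample per node with Python's sqlite3 module; the mesh.db filename is an assumption that mirrors the other sketches in this diff.

import sqlite3

con = sqlite3.connect("mesh.db")  # assumed database path
rows = con.execute(
    """
    SELECT node_id, MAX(rx_time) AS last_rx, battery_level, temperature
    FROM telemetry
    GROUP BY node_id
    ORDER BY last_rx DESC
    LIMIT 10
    """
).fetchall()
for node_id, last_rx, battery, temperature in rows:
    print(node_id, last_rx, battery, temperature)
con.close()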
docker-compose.dev.yml (new file, 42 lines)
@@ -0,0 +1,42 @@
|
||||
# Development overrides for docker-compose.yml
|
||||
services:
|
||||
web:
|
||||
environment:
|
||||
DEBUG: 1
|
||||
volumes:
|
||||
- ./web:/app
|
||||
- ./data:/app/.local/share/potato-mesh
|
||||
- ./.config/potato-mesh:/app/.config/potato-mesh
|
||||
- /app/vendor/bundle
|
||||
|
||||
web-bridge:
|
||||
environment:
|
||||
DEBUG: 1
|
||||
volumes:
|
||||
- ./web:/app
|
||||
- ./data:/app/.local/share/potato-mesh
|
||||
- ./.config/potato-mesh:/app/.config/potato-mesh
|
||||
- /app/vendor/bundle
|
||||
ports:
|
||||
- "41447:41447"
|
||||
- "9292:9292"
|
||||
|
||||
ingestor:
|
||||
environment:
|
||||
DEBUG: 1
|
||||
volumes:
|
||||
- ./data:/app
|
||||
- ./data:/app/.local/share/potato-mesh
|
||||
- ./.config/potato-mesh:/app/.config/potato-mesh
|
||||
- /app/.local
|
||||
- /dev:/dev
|
||||
|
||||
ingestor-bridge:
|
||||
environment:
|
||||
DEBUG: 1
|
||||
volumes:
|
||||
- ./data:/app
|
||||
- ./data:/app/.local/share/potato-mesh
|
||||
- ./.config/potato-mesh:/app/.config/potato-mesh
|
||||
- /app/.local
|
||||
- /dev:/dev
|
||||
docker-compose.prod.yml (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
# Production overrides for docker-compose.yml
|
||||
services:
|
||||
web:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: web/Dockerfile
|
||||
target: production
|
||||
environment:
|
||||
DEBUG: 0
|
||||
restart: always
|
||||
|
||||
web-bridge:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: web/Dockerfile
|
||||
target: production
|
||||
environment:
|
||||
DEBUG: 0
|
||||
restart: always
|
||||
|
||||
ingestor:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: data/Dockerfile
|
||||
target: production
|
||||
environment:
|
||||
DEBUG: 0
|
||||
restart: always
|
||||
|
||||
ingestor-bridge:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: data/Dockerfile
|
||||
target: production
|
||||
environment:
|
||||
DEBUG: 0
|
||||
restart: always
|
||||
docker-compose.yml (new file, 100 lines)
@@ -0,0 +1,100 @@
|
||||
x-web-base: &web-base
|
||||
image: ghcr.io/l5yth/potato-mesh-web-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:latest
|
||||
environment:
|
||||
APP_ENV: ${APP_ENV:-production}
|
||||
RACK_ENV: ${RACK_ENV:-production}
|
||||
SITE_NAME: ${SITE_NAME:-PotatoMesh Demo}
|
||||
CHANNEL: ${CHANNEL:-#LongFast}
|
||||
FREQUENCY: ${FREQUENCY:-915MHz}
|
||||
MAP_CENTER: ${MAP_CENTER:-38.761944,-27.090833}
|
||||
MAX_DISTANCE: ${MAX_DISTANCE:-42}
|
||||
CONTACT_LINK: ${CONTACT_LINK:-#potatomesh:dod.ngo}
|
||||
API_TOKEN: ${API_TOKEN}
|
||||
DEBUG: ${DEBUG:-0}
|
||||
command: ["ruby", "app.rb", "-p", "41447", "-o", "0.0.0.0"]
|
||||
volumes:
|
||||
- potatomesh_data:/app/.local/share/potato-mesh
|
||||
- potatomesh_config:/app/.config/potato-mesh
|
||||
- potatomesh_logs:/app/logs
|
||||
restart: unless-stopped
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 512M
|
||||
cpus: '0.5'
|
||||
reservations:
|
||||
memory: 256M
|
||||
cpus: '0.25'
|
||||
|
||||
x-ingestor-base: &ingestor-base
|
||||
image: ghcr.io/l5yth/potato-mesh-ingestor-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:latest
|
||||
environment:
|
||||
CONNECTION: ${CONNECTION:-/dev/ttyACM0}
|
||||
CHANNEL_INDEX: ${CHANNEL_INDEX:-0}
|
||||
POTATOMESH_INSTANCE: ${POTATOMESH_INSTANCE:-http://web:41447}
|
||||
API_TOKEN: ${API_TOKEN}
|
||||
DEBUG: ${DEBUG:-0}
|
||||
volumes:
|
||||
- potatomesh_data:/app/.local/share/potato-mesh
|
||||
- potatomesh_config:/app/.config/potato-mesh
|
||||
- potatomesh_logs:/app/logs
|
||||
- /dev:/dev
|
||||
device_cgroup_rules:
|
||||
- 'c 166:* rwm' # ttyACM devices
|
||||
- 'c 188:* rwm' # ttyUSB devices
|
||||
- 'c 4:* rwm' # ttyS devices
|
||||
privileged: false
|
||||
restart: unless-stopped
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 256M
|
||||
cpus: '0.25'
|
||||
reservations:
|
||||
memory: 128M
|
||||
cpus: '0.1'
|
||||
|
||||
services:
|
||||
web:
|
||||
<<: *web-base
|
||||
network_mode: host
|
||||
|
||||
ingestor:
|
||||
<<: *ingestor-base
|
||||
network_mode: host
|
||||
depends_on:
|
||||
- web
|
||||
extra_hosts:
|
||||
- "web:127.0.0.1"
|
||||
|
||||
web-bridge:
|
||||
<<: *web-base
|
||||
container_name: potatomesh-web-bridge
|
||||
networks:
|
||||
- potatomesh-network
|
||||
ports:
|
||||
- "41447:41447"
|
||||
profiles:
|
||||
- bridge
|
||||
|
||||
ingestor-bridge:
|
||||
<<: *ingestor-base
|
||||
container_name: potatomesh-ingestor-bridge
|
||||
networks:
|
||||
- potatomesh-network
|
||||
depends_on:
|
||||
- web-bridge
|
||||
profiles:
|
||||
- bridge
|
||||
|
||||
volumes:
|
||||
potatomesh_data:
|
||||
driver: local
|
||||
potatomesh_config:
|
||||
driver: local
|
||||
potatomesh_logs:
|
||||
driver: local
|
||||
|
||||
networks:
|
||||
potatomesh-network:
|
||||
driver: bridge
|
||||
scrot-0.2.png (new binary file, 748 KiB)
scrot-0.3.png (new binary file, 952 KiB)
scrot-0.4.png (new binary file, 907 KiB)
test/mesh.db (binary file changed, diff not shown)
test/messages.json (1442 changed lines, diff too large to display)
test/nodes.json (2096 changed lines, diff too large to display)
@@ -1,4 +1,21 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Interactive debugging helpers for live Meshtastic sessions."""
|
||||
|
||||
import time, json, base64, threading
|
||||
from pubsub import pub # comes with meshtastic
|
||||
from meshtastic.serial_interface import SerialInterface
|
||||
@@ -13,7 +30,14 @@ stop = threading.Event()
|
||||
|
||||
|
||||
def to_jsonable(obj):
|
||||
"""Recursively convert protobuf/bytes/etc. into JSON-serializable structures."""
|
||||
"""Recursively convert complex objects into JSON-serialisable structures.
|
||||
|
||||
Parameters:
|
||||
obj: Any Meshtastic-related payload or protobuf message.
|
||||
|
||||
Returns:
|
||||
A structure composed of standard Python types.
|
||||
"""
|
||||
if obj is None:
|
||||
return None
|
||||
if isinstance(obj, ProtoMessage):
|
||||
@@ -34,7 +58,14 @@ def to_jsonable(obj):
|
||||
|
||||
|
||||
def extract_text(d):
|
||||
"""Best-effort pull of decoded text from a dict produced by to_jsonable()."""
|
||||
"""Best-effort pull of decoded text from :func:`to_jsonable` output.
|
||||
|
||||
Parameters:
|
||||
d: Mapping derived from :func:`to_jsonable`.
|
||||
|
||||
Returns:
|
||||
The decoded text when available, otherwise ``None``.
|
||||
"""
|
||||
dec = d.get("decoded") or {}
|
||||
# Text packets usually at decoded.payload.text
|
||||
payload = dec.get("payload") or {}
|
||||
@@ -47,6 +78,12 @@ def extract_text(d):
|
||||
|
||||
|
||||
def on_receive(packet, interface):
|
||||
"""Display human-readable output for each received packet.
|
||||
|
||||
Parameters:
|
||||
packet: Packet instance supplied by Meshtastic.
|
||||
interface: Interface that produced the packet.
|
||||
"""
|
||||
global packet_count, last_rx_ts
|
||||
packet_count += 1
|
||||
last_rx_ts = time.time()
|
||||
@@ -71,14 +108,20 @@ def on_receive(packet, interface):
|
||||
|
||||
|
||||
def on_connected(interface, *args, **kwargs):
|
||||
"""Log when a connection is established."""
|
||||
|
||||
print("[info] connection established")
|
||||
|
||||
|
||||
def on_disconnected(interface, *args, **kwargs):
|
||||
"""Log when the interface disconnects."""
|
||||
|
||||
print("[info] disconnected")
|
||||
|
||||
|
||||
def main():
|
||||
"""Run the interactive debugging loop."""
|
||||
|
||||
print(f"Opening Meshtastic on {PORT} …")
|
||||
|
||||
# Use PubSub topics (reliable in current meshtastic)
|
||||
tests/dump.py (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
#!/usr/bin/env python3
# Copyright (C) 2025 l5yth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utility script to dump Meshtastic traffic for offline analysis."""

from __future__ import annotations

import json
import os
import signal
import sys
import time
from datetime import datetime, timezone

from meshtastic.mesh_interface import MeshInterface
from meshtastic.serial_interface import SerialInterface
from pubsub import pub

CONNECTION = os.environ.get("CONNECTION") or os.environ.get(
    "MESH_SERIAL", "/dev/ttyACM0"
)
"""Connection target opened to capture Meshtastic traffic."""
OUT = os.environ.get("MESH_DUMP_FILE", "meshtastic-dump.ndjson")

# line-buffered append so you can tail -f safely
f = open(OUT, "a", buffering=1, encoding="utf-8")


def now() -> str:
    """Return the current UTC timestamp in ISO 8601 format."""

    return datetime.now(timezone.utc).isoformat()


def write(kind: str, payload: dict) -> None:
    """Append a JSON record to the dump file.

    Parameters:
        kind: Logical record type such as ``"packet"`` or ``"node"``.
        payload: Serializable payload containing the record body.
    """

    rec = {"ts": now(), "kind": kind, **payload}
    f.write(json.dumps(rec, ensure_ascii=False, default=str) + "\n")


# Connect to the node
iface: MeshInterface = SerialInterface(CONNECTION)


# Packet callback: every RF/Mesh packet the node receives/decodes lands here
def on_packet(packet, iface):
    """Write packet metadata whenever the radio receives a frame.

    Parameters:
        packet: Meshtastic packet object or dictionary.
        iface: Interface instance delivering the packet.
    """

    # 'packet' already includes decoded fields when available (portnum, payload, position, telemetry, etc.)
    write("packet", {"packet": packet})


# Node callback: topology/metadata updates (nodeinfo, hops, lastHeard, etc.)
def on_node(node, iface):
    """Write node metadata updates produced by Meshtastic.

    Parameters:
        node: Meshtastic node object or mapping.
        iface: Interface instance emitting the update.
    """

    write("node", {"node": node})


iface.onReceive = on_packet
pub.subscribe(on_node, "meshtastic.node")

# Write a little header so you know what you captured
try:
    my = getattr(iface, "myInfo", None)
    write(
        "meta",
        {
            "event": "started",
            "port": CONNECTION,
            "my_node_num": getattr(my, "my_node_num", None) if my else None,
        },
    )
except Exception as e:
    write("meta", {"event": "started", "port": CONNECTION, "error": str(e)})


# Keep the process alive until Ctrl-C
def _stop(signum, frame):
    """Handle termination signals by flushing buffers and exiting."""

    write("meta", {"event": "stopping"})
    try:
        try:
            pub.unsubscribe(on_node, "meshtastic.node")
        except Exception:
            pass
        iface.close()
    finally:
        f.close()
    sys.exit(0)


signal.signal(signal.SIGINT, _stop)
signal.signal(signal.SIGTERM, _stop)

# Simple sleep loop; avoids busy-wait
while True:
    time.sleep(1)
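The dump written above is plain NDJSON, one record per line with `ts` and `kind` fields, so it can be replayed offline with any JSON-aware tool. The snippet below is an illustrative sketch (not part of this change set) that tallies the captured record kinds; it assumes the script's default `meshtastic-dump.ndjson` output path.

#!/usr/bin/env python3
"""Illustrative sketch: summarize an NDJSON dump produced by tests/dump.py."""

import json
from collections import Counter

# Default output path used by tests/dump.py; adjust if MESH_DUMP_FILE was set.
DUMP_PATH = "meshtastic-dump.ndjson"

counts = Counter()
with open(DUMP_PATH, encoding="utf-8") as handle:
    for line in handle:
        line = line.strip()
        if not line:
            continue
        record = json.loads(line)
        # Each record carries "ts" and "kind" plus the original payload.
        counts[record.get("kind", "unknown")] += 1

for kind, total in counts.most_common():
    print(f"{kind}: {total}")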
tests/mesh.db (new binary file)
Binary file not shown.
tests/meshtastic_protobuf_stub.py (new file, 239 lines)
@@ -0,0 +1,239 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""Minimal Meshtastic protobuf stubs for isolated unit testing."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import types
|
||||
from typing import Any, Callable, Dict, Tuple
|
||||
|
||||
|
||||
def _enum_value(name: str, mapping: Dict[str, int]) -> int:
|
||||
normalized = name.upper()
|
||||
if normalized not in mapping:
|
||||
raise KeyError(f"Unknown enum value: {name}")
|
||||
return mapping[normalized]
|
||||
|
||||
|
||||
def build(message_base, decode_error) -> Tuple[types.ModuleType, types.ModuleType]:
|
||||
"""Return ``(config_pb2, mesh_pb2)`` stubs built from protobuf shims."""
|
||||
|
||||
class _ProtoMessage(message_base):
|
||||
"""Base class implementing JSON round-tripping for protobuf stubs."""
|
||||
|
||||
_FIELD_ALIASES: Dict[str, str] = {}
|
||||
_FIELD_FACTORIES: Dict[str, Callable[[], "_ProtoMessage"]] = {}
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
object.__setattr__(self, "_fields", {})
|
||||
|
||||
def __setattr__(
|
||||
self, name: str, value: Any
|
||||
) -> None: # noqa: D401 - behaviour documented on base class
|
||||
object.__setattr__(self, name, value)
|
||||
if not name.startswith("_"):
|
||||
self._fields[name] = value
|
||||
|
||||
def __getattr__(self, name: str) -> Any:
|
||||
factories = getattr(self, "_FIELD_FACTORIES", {})
|
||||
if name in factories:
|
||||
value = factories[name]()
|
||||
self.__setattr__(name, value)
|
||||
return value
|
||||
raise AttributeError(name)
|
||||
|
||||
def _alias_for(self, name: str) -> str:
|
||||
return self._FIELD_ALIASES.get(name, name)
|
||||
|
||||
def _name_for(self, alias: str) -> str:
|
||||
reverse = getattr(self, "_FIELD_ALIASES", {})
|
||||
for key, candidate in reverse.items():
|
||||
if candidate == alias:
|
||||
return key
|
||||
return alias
|
||||
|
||||
def _to_dict(self) -> Dict[str, Any]:
|
||||
result: Dict[str, Any] = {}
|
||||
for name, value in self._fields.items():
|
||||
alias = self._alias_for(name)
|
||||
if isinstance(value, _ProtoMessage):
|
||||
result[alias] = value._to_dict()
|
||||
elif isinstance(value, list):
|
||||
result[alias] = [
|
||||
item._to_dict() if isinstance(item, _ProtoMessage) else item
|
||||
for item in value
|
||||
]
|
||||
else:
|
||||
result[alias] = value
|
||||
return result
|
||||
|
||||
def SerializeToString(self) -> bytes:
|
||||
"""Encode the message contents as a JSON byte string."""
|
||||
|
||||
return json.dumps(self._to_dict(), sort_keys=True).encode("utf-8")
|
||||
|
||||
def ParseFromString(self, payload: bytes) -> None:
|
||||
"""Populate the message from a JSON byte string."""
|
||||
|
||||
try:
|
||||
data = json.loads(payload.decode("utf-8"))
|
||||
except Exception as exc: # pragma: no cover - defensive guard
|
||||
raise decode_error(str(exc)) from exc
|
||||
self._load_from_dict(data)
|
||||
|
||||
def _load_from_dict(self, data: Dict[str, Any]) -> None:
|
||||
factories = getattr(self, "_FIELD_FACTORIES", {})
|
||||
for alias, value in data.items():
|
||||
name = self._name_for(alias)
|
||||
if name in factories and isinstance(value, dict):
|
||||
nested = getattr(self, name, None)
|
||||
if not isinstance(nested, _ProtoMessage):
|
||||
nested = factories[name]()
|
||||
object.__setattr__(self, name, nested)
|
||||
nested._load_from_dict(value)
|
||||
self._fields[name] = nested
|
||||
else:
|
||||
setattr(self, name, value)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Return a JSON-compatible representation of the message."""
|
||||
|
||||
return self._to_dict()
|
||||
|
||||
def ListFields(self):
|
||||
"""Mimic protobuf ``ListFields`` for the subset of tests used."""
|
||||
|
||||
from types import SimpleNamespace
|
||||
|
||||
entries = []
|
||||
for name, value in self._fields.items():
|
||||
descriptor = SimpleNamespace(name=name)
|
||||
entries.append((descriptor, value))
|
||||
return entries
|
||||
|
||||
def CopyFrom(self, other: "_ProtoMessage") -> None:
|
||||
"""Populate this message with values from ``other``."""
|
||||
|
||||
if not isinstance(other, _ProtoMessage):
|
||||
raise TypeError("CopyFrom expects another protobuf message")
|
||||
self._fields.clear()
|
||||
for name, value in other._fields.items():
|
||||
if isinstance(value, _ProtoMessage):
|
||||
copied = type(value)()
|
||||
copied.CopyFrom(value)
|
||||
setattr(self, name, copied)
|
||||
elif isinstance(value, list):
|
||||
converted = []
|
||||
for item in value:
|
||||
if isinstance(item, _ProtoMessage):
|
||||
nested = type(item)()
|
||||
nested.CopyFrom(item)
|
||||
converted.append(nested)
|
||||
else:
|
||||
converted.append(item)
|
||||
setattr(self, name, converted)
|
||||
else:
|
||||
setattr(self, name, value)
|
||||
|
||||
class _DeviceMetrics(_ProtoMessage):
|
||||
_FIELD_ALIASES = {
|
||||
"battery_level": "batteryLevel",
|
||||
"voltage": "voltage",
|
||||
"channel_utilization": "channelUtilization",
|
||||
"air_util_tx": "airUtilTx",
|
||||
"uptime_seconds": "uptimeSeconds",
|
||||
}
|
||||
|
||||
class _Position(_ProtoMessage):
|
||||
_FIELD_ALIASES = {
|
||||
"latitude_i": "latitudeI",
|
||||
"longitude_i": "longitudeI",
|
||||
"location_source": "locationSource",
|
||||
}
|
||||
|
||||
class LocSource:
|
||||
_VALUES = {
|
||||
"LOC_UNSET": 0,
|
||||
"LOC_INTERNAL": 1,
|
||||
"LOC_EXTERNAL": 2,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def Value(cls, name: str) -> int:
|
||||
return _enum_value(name, cls._VALUES)
|
||||
|
||||
class _User(_ProtoMessage):
|
||||
_FIELD_ALIASES = {
|
||||
"short_name": "shortName",
|
||||
"long_name": "longName",
|
||||
"hw_model": "hwModel",
|
||||
}
|
||||
|
||||
class _NodeInfo(_ProtoMessage):
|
||||
_FIELD_ALIASES = {
|
||||
"last_heard": "lastHeard",
|
||||
"is_favorite": "isFavorite",
|
||||
"hops_away": "hopsAway",
|
||||
}
|
||||
_FIELD_FACTORIES = {
|
||||
"user": _User,
|
||||
"device_metrics": _DeviceMetrics,
|
||||
"position": _Position,
|
||||
}
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
|
||||
class _HardwareModel:
|
||||
_VALUES = {
|
||||
"UNKNOWN": 0,
|
||||
"TBEAM": 1,
|
||||
"HELTEC": 2,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def Value(cls, name: str) -> int:
|
||||
return _enum_value(name, cls._VALUES)
|
||||
|
||||
mesh_pb2 = types.ModuleType("mesh_pb2")
|
||||
mesh_pb2.NodeInfo = _NodeInfo
|
||||
mesh_pb2.User = _User
|
||||
mesh_pb2.Position = _Position
|
||||
mesh_pb2.DeviceMetrics = _DeviceMetrics
|
||||
mesh_pb2.HardwareModel = _HardwareModel
|
||||
|
||||
class _RoleEnum:
|
||||
_VALUES = {
|
||||
"UNKNOWN": 0,
|
||||
"CLIENT": 1,
|
||||
"REPEATER": 2,
|
||||
"ROUTER": 3,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def Value(cls, name: str) -> int:
|
||||
return _enum_value(name, cls._VALUES)
|
||||
|
||||
class _DeviceConfig:
|
||||
Role = _RoleEnum
|
||||
|
||||
class _Config:
|
||||
DeviceConfig = _DeviceConfig
|
||||
|
||||
config_pb2 = types.ModuleType("config_pb2")
|
||||
config_pb2.Config = _Config
|
||||
|
||||
return config_pb2, mesh_pb2
|
||||
tests/messages.json (new file, 3908 lines)
File diff suppressed because it is too large
tests/neighbors.json (new file, 20 lines)
@@ -0,0 +1,20 @@
[
  {
    "node_id": "!7c5b0920",
    "rx_time": 1758884186,
    "node_broadcast_interval_secs": 1800,
    "last_sent_by": "!9e99f8c0",
    "neighbors": [
      { "node_id": "!2b22accc", "snr": -6.5, "rx_time": 1758884106 },
      { "node_id": "!43ba26d0", "snr": -5.0, "rx_time": 1758884120 },
      { "node_id": "!69ba6f71", "snr": -13.0, "rx_time": 1758884135 },
      { "node_id": "!fa848384", "snr": -14.75, "rx_time": 1758884150 },
      { "node_id": "!da6a35b4", "snr": -6.5, "rx_time": 1758884165 }
    ]
  },
  {
    "node_id": "!cafebabe",
    "rx_time": 1758883200,
    "neighbors": []
  }
]
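Each fixture entry above is a neighbor report: the reporting `node_id`, its `rx_time`, and a list of heard neighbors with their SNR (dB) and receive time. As a hypothetical sketch (not part of this change set), the fixture can be summarized like this, assuming it is read from `tests/neighbors.json`:

#!/usr/bin/env python3
"""Illustrative sketch: print the strongest neighbor per reporting node."""

import json

with open("tests/neighbors.json", encoding="utf-8") as handle:
    reports = json.load(handle)

for report in reports:
    neighbors = report.get("neighbors", [])
    if not neighbors:
        print(f"{report['node_id']}: no neighbors reported")
        continue
    # Pick the neighbor heard with the best (highest) SNR.
    best = max(neighbors, key=lambda n: n["snr"])
    print(
        f"{report['node_id']}: {len(neighbors)} neighbors, "
        f"best SNR {best['snr']} dB from {best['node_id']}"
    )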
tests/nodes.json (new file, 4013 lines)
File diff suppressed because it is too large
tests/telemetry.json (new file, 84 lines)
@@ -0,0 +1,84 @@
[
  {
    "id": 1256091342,
    "node_id": "!9e95cf60",
    "from_id": "!9e95cf60",
    "to_id": "^all",
    "rx_time": 1758024300,
    "rx_iso": "2025-09-16T12:05:00Z",
    "telemetry_time": 1758024300,
    "channel": 0,
    "portnum": "TELEMETRY_APP",
    "battery_level": 101,
    "bitfield": 1,
    "payload_b64": "DTVr0mgSFQhlFQIrh0AdJb8YPyXYFSA9KJTPEg==",
    "device_metrics": {
      "batteryLevel": 101,
      "voltage": 4.224,
      "channelUtilization": 0.59666663,
      "airUtilTx": 0.03908333,
      "uptimeSeconds": 305044
    },
    "raw": {
      "device_metrics": {
        "battery_level": 101,
        "voltage": 4.224,
        "channel_utilization": 0.59666663,
        "air_util_tx": 0.03908333,
        "uptime_seconds": 305044
      }
    }
  },
  {
    "id": 2817720548,
    "node_id": "!2a2a2a2a",
    "from_id": "!2a2a2a2a",
    "to_id": "^all",
    "rx_time": 1758024400,
    "rx_iso": "2025-09-16T12:06:40Z",
    "telemetry_time": 1758024390,
    "channel": 0,
    "portnum": "TELEMETRY_APP",
    "bitfield": 1,
    "environment_metrics": {
      "temperature": 21.98,
      "relativeHumidity": 39.475586,
      "barometricPressure": 1017.8353
    },
    "raw": {
      "environment_metrics": {
        "temperature": 21.98,
        "relative_humidity": 39.475586,
        "barometric_pressure": 1017.8353
      }
    }
  },
  {
    "id": 345678901,
    "node_id": "!1234abcd",
    "from_id": "!1234abcd",
    "node_num": 305441741,
    "to_id": "^all",
    "rx_time": 1758024500,
    "rx_iso": "2025-09-16T12:08:20Z",
    "telemetry_time": 1758024450,
    "channel": 1,
    "portnum": "TELEMETRY_APP",
    "payload_b64": "AAEC",
    "device_metrics": {
      "battery_level": 58.5,
      "voltage": 3.92,
      "channel_utilization": 0.284,
      "air_util_tx": 0.051,
      "uptime_seconds": 86400
    },
    "local_stats": {
      "numPacketsTx": 1280,
      "numPacketsRx": 1425,
      "numClients": 6,
      "numNodes": 18,
      "freeHeap": 21344,
      "heapLowWater": 19876
    }
  }
]
tests/test_mesh.py (new file, 2187 lines)
File diff suppressed because it is too large
tests/update.sh (new executable file, 21 lines)
@@ -0,0 +1,21 @@
#!/usr/bin/env bash

# Copyright (C) 2025 l5yth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -euo pipefail

sqlite3 ../data/mesh.db ".backup './mesh.db'"
curl http://127.0.0.1:41447/api/nodes | jq > ./nodes.json
curl http://127.0.0.1:41447/api/messages | jq > ./messages.json
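The script above refreshes the fixtures by backing up the SQLite database and pulling `/api/nodes` and `/api/messages` from the local instance on `127.0.0.1:41447`. The same refresh could be done without `curl`/`jq`; the following is a minimal sketch under that assumption, not part of this change set:

#!/usr/bin/env python3
"""Illustrative sketch: refresh the JSON fixtures via the local API."""

import json
import urllib.request

# Base URL matches the local instance address used in tests/update.sh.
BASE = "http://127.0.0.1:41447"

for endpoint, filename in (("/api/nodes", "nodes.json"), ("/api/messages", "messages.json")):
    with urllib.request.urlopen(BASE + endpoint, timeout=10) as response:
        data = json.load(response)
    with open(filename, "w", encoding="utf-8") as handle:
        json.dump(data, handle, indent=2, ensure_ascii=False)
        handle.write("\n")
    print(f"wrote {filename} ({len(data)} records)")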
web/Dockerfile (new file, 86 lines)
@@ -0,0 +1,86 @@
|
||||
# syntax=docker/dockerfile:1.6
|
||||
|
||||
# Main application builder stage
|
||||
FROM ruby:3.3-alpine AS builder
|
||||
|
||||
# Ensure native extensions are built against musl libc rather than
|
||||
# using glibc precompiled binaries (which fail on Alpine).
|
||||
ENV BUNDLE_FORCE_RUBY_PLATFORM=true
|
||||
|
||||
# Install build dependencies and SQLite3
|
||||
RUN apk add --no-cache \
|
||||
build-base \
|
||||
sqlite-dev \
|
||||
linux-headers \
|
||||
pkgconfig
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy Gemfile and install dependencies
|
||||
COPY web/Gemfile web/Gemfile.lock* ./
|
||||
|
||||
# Install gems with SQLite3 support
|
||||
RUN bundle config set --local force_ruby_platform true && \
|
||||
bundle config set --local without 'development test' && \
|
||||
bundle install --jobs=4 --retry=3
|
||||
|
||||
# Production stage
|
||||
FROM ruby:3.3-alpine AS production
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apk add --no-cache \
|
||||
sqlite \
|
||||
tzdata \
|
||||
curl
|
||||
|
||||
# Create non-root user
|
||||
RUN addgroup -g 1000 -S potatomesh && \
|
||||
adduser -u 1000 -S potatomesh -G potatomesh
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy installed gems from builder stage
|
||||
COPY --from=builder /usr/local/bundle /usr/local/bundle
|
||||
|
||||
# Copy application code (excluding the Dockerfile which is not required at runtime)
|
||||
COPY --chown=potatomesh:potatomesh web/app.rb ./
|
||||
COPY --chown=potatomesh:potatomesh web/app.sh ./
|
||||
COPY --chown=potatomesh:potatomesh web/Gemfile ./
|
||||
COPY --chown=potatomesh:potatomesh web/Gemfile.lock* ./
|
||||
COPY --chown=potatomesh:potatomesh web/lib ./lib
|
||||
COPY --chown=potatomesh:potatomesh web/spec ./spec
|
||||
COPY --chown=potatomesh:potatomesh web/public ./public
|
||||
COPY --chown=potatomesh:potatomesh web/views ./views
|
||||
COPY --chown=potatomesh:potatomesh web/scripts ./scripts
|
||||
|
||||
# Copy SQL schema files from data directory
|
||||
COPY --chown=potatomesh:potatomesh data/*.sql /data/
|
||||
|
||||
# Create data and configuration directories with correct ownership
|
||||
RUN mkdir -p /app/.local/share/potato-mesh \
|
||||
&& mkdir -p /app/.config/potato-mesh/well-known \
|
||||
&& chown -R potatomesh:potatomesh /app/.local/share /app/.config
|
||||
|
||||
# Switch to non-root user
|
||||
USER potatomesh
|
||||
|
||||
# Expose port
|
||||
EXPOSE 41447
|
||||
|
||||
# Default environment variables (can be overridden by host)
|
||||
ENV RACK_ENV=production \
|
||||
APP_ENV=production \
|
||||
XDG_DATA_HOME=/app/.local/share \
|
||||
XDG_CONFIG_HOME=/app/.config \
|
||||
SITE_NAME="PotatoMesh Demo" \
|
||||
CHANNEL="#LongFast" \
|
||||
FREQUENCY="915MHz" \
|
||||
MAP_CENTER="38.761944,-27.090833" \
|
||||
MAX_DISTANCE=42 \
|
||||
CONTACT_LINK="#potatomesh:dod.ngo" \
|
||||
DEBUG=0
|
||||
|
||||
# Start the application
|
||||
CMD ["ruby", "app.rb", "-p", "41447", "-o", "0.0.0.0"]
|
||||
web/Gemfile (24 changed lines)
@@ -1,6 +1,30 @@
# Copyright (C) 2025 l5yth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

source "https://rubygems.org"

gem "sinatra", "~> 4.0"
gem "sqlite3", "~> 1.7"
gem "rackup", "~> 2.2"
gem "puma", "~> 7.0"
gem "prometheus-client"

group :test do
  gem "rspec", "~> 3.12"
  gem "rack-test", "~> 2.1"
  gem "rufo", "~> 0.18.1"
  gem "simplecov", "~> 0.22", require: false
  gem "simplecov_json_formatter", "~> 0.1", require: false
  gem "rspec_junit_formatter", "~> 0.6", require: false
end

web/app.rb (170 changed lines)
@@ -1,159 +1,17 @@
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
require "sinatra"
|
||||
require "json"
|
||||
require "sqlite3"
|
||||
|
||||
# run ../data/mesh.sh to populate nodes and messages database
|
||||
DB_PATH = ENV.fetch("MESH_DB", File.join(__dir__, "../data/mesh.db"))
|
||||
WEEK_SECONDS = 7 * 24 * 60 * 60
|
||||
require_relative "lib/potato_mesh/application"
|
||||
|
||||
set :public_folder, File.join(__dir__, "public")
|
||||
|
||||
def query_nodes(limit)
|
||||
db = SQLite3::Database.new(DB_PATH, readonly: true, results_as_hash: true)
|
||||
now = Time.now.to_i
|
||||
min_last_heard = now - WEEK_SECONDS
|
||||
rows = db.execute <<~SQL, [min_last_heard, limit]
|
||||
SELECT node_id, short_name, long_name, hw_model, role, snr,
|
||||
battery_level, voltage, last_heard, first_heard,
|
||||
uptime_seconds, channel_utilization, air_util_tx,
|
||||
position_time, latitude, longitude, altitude
|
||||
FROM nodes
|
||||
WHERE last_heard >= ?
|
||||
ORDER BY last_heard DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
rows.each do |r|
|
||||
r["role"] ||= "CLIENT"
|
||||
lh = r["last_heard"]&.to_i
|
||||
pt = r["position_time"]&.to_i
|
||||
lh = now if lh && lh > now
|
||||
pt = nil if pt && pt > now
|
||||
r["last_heard"] = lh
|
||||
r["position_time"] = pt
|
||||
r["last_seen_iso"] = Time.at(lh).utc.iso8601 if lh
|
||||
r["pos_time_iso"] = Time.at(pt).utc.iso8601 if pt
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
get "/api/nodes" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_nodes(limit).to_json
|
||||
end
|
||||
|
||||
def query_messages(limit)
|
||||
db = SQLite3::Database.new(DB_PATH, readonly: true)
|
||||
db.results_as_hash = true
|
||||
rows = db.execute <<~SQL, [limit]
|
||||
SELECT m.*, n.*, m.snr AS msg_snr
|
||||
FROM messages m
|
||||
LEFT JOIN nodes n ON m.from_id = n.node_id
|
||||
ORDER BY m.rx_time DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
msg_fields = %w[id rx_time rx_iso from_id to_id channel portnum text msg_snr rssi hop_limit raw_json]
|
||||
rows.each do |r|
|
||||
node = {}
|
||||
r.keys.each do |k|
|
||||
next if msg_fields.include?(k)
|
||||
node[k] = r.delete(k)
|
||||
end
|
||||
r["snr"] = r.delete("msg_snr")
|
||||
r["node"] = node unless node.empty?
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
get "/api/messages" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_messages(limit).to_json
|
||||
end
|
||||
|
||||
def upsert_node(db, node_id, n)
|
||||
user = n["user"] || {}
|
||||
met = n["deviceMetrics"] || {}
|
||||
pos = n["position"] || {}
|
||||
role = user["role"] || "CLIENT"
|
||||
lh = n["lastHeard"]
|
||||
pt = pos["time"]
|
||||
now = Time.now.to_i
|
||||
pt = nil if pt && pt > now
|
||||
lh = now if lh && lh > now
|
||||
lh = pt if pt && (!lh || lh < pt)
|
||||
row = [
|
||||
node_id,
|
||||
n["num"],
|
||||
user["shortName"],
|
||||
user["longName"],
|
||||
user["macaddr"],
|
||||
user["hwModel"] || n["hwModel"],
|
||||
role,
|
||||
user["publicKey"],
|
||||
user["isUnmessagable"],
|
||||
n["isFavorite"],
|
||||
n["hopsAway"],
|
||||
n["snr"],
|
||||
lh,
|
||||
lh,
|
||||
met["batteryLevel"],
|
||||
met["voltage"],
|
||||
met["channelUtilization"],
|
||||
met["airUtilTx"],
|
||||
met["uptimeSeconds"],
|
||||
pt,
|
||||
pos["locationSource"],
|
||||
pos["latitude"],
|
||||
pos["longitude"],
|
||||
pos["altitude"],
|
||||
]
|
||||
db.execute <<~SQL, row
|
||||
INSERT INTO nodes(node_id,num,short_name,long_name,macaddr,hw_model,role,public_key,is_unmessagable,is_favorite,
|
||||
hops_away,snr,last_heard,first_heard,battery_level,voltage,channel_utilization,air_util_tx,uptime_seconds,
|
||||
position_time,location_source,latitude,longitude,altitude)
|
||||
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
|
||||
ON CONFLICT(node_id) DO UPDATE SET
|
||||
num=excluded.num, short_name=excluded.short_name, long_name=excluded.long_name, macaddr=excluded.macaddr,
|
||||
hw_model=excluded.hw_model, role=excluded.role, public_key=excluded.public_key, is_unmessagable=excluded.is_unmessagable,
|
||||
is_favorite=excluded.is_favorite, hops_away=excluded.hops_away, snr=excluded.snr, last_heard=excluded.last_heard,
|
||||
battery_level=excluded.battery_level, voltage=excluded.voltage, channel_utilization=excluded.channel_utilization,
|
||||
air_util_tx=excluded.air_util_tx, uptime_seconds=excluded.uptime_seconds, position_time=excluded.position_time,
|
||||
location_source=excluded.location_source, latitude=excluded.latitude, longitude=excluded.longitude,
|
||||
altitude=excluded.altitude
|
||||
WHERE COALESCE(excluded.last_heard,0) >= COALESCE(nodes.last_heard,0)
|
||||
SQL
|
||||
end
|
||||
|
||||
def require_token!
|
||||
token = ENV["API_TOKEN"]
|
||||
provided = request.env["HTTP_AUTHORIZATION"].to_s.sub(/^Bearer\s+/i, "")
|
||||
halt 403, { error: "Forbidden" }.to_json unless token && provided == token
|
||||
end
|
||||
|
||||
post "/api/nodes" do
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(request.body.read)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
halt 400, { error: "too many nodes" }.to_json if data.is_a?(Hash) && data.size > 1000
|
||||
db = SQLite3::Database.new(DB_PATH)
|
||||
data.each do |node_id, node|
|
||||
upsert_node(db, node_id, node)
|
||||
end
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
get "/" do
|
||||
send_file File.join(settings.public_folder, "index.html")
|
||||
end
|
||||
PotatoMesh::Application.run! if $PROGRAM_NAME == __FILE__
|
||||
|
||||
web/app.sh (18 changed lines)
@@ -1,5 +1,21 @@
#!/usr/bin/env bash

# Copyright (C) 2025 l5yth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -euo pipefail

bundle install
exec ruby app.rb -p 41447 -o 127.0.0.1

exec ruby app.rb -p 41447 -o 0.0.0.0

web/lib/potato_mesh/application.rb (new file, 178 lines)
@@ -0,0 +1,178 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "sinatra/base"
|
||||
require "json"
|
||||
require "sqlite3"
|
||||
require "fileutils"
|
||||
require "logger"
|
||||
require "rack/utils"
|
||||
require "open3"
|
||||
require "resolv"
|
||||
require "socket"
|
||||
require "time"
|
||||
require "openssl"
|
||||
require "base64"
|
||||
require "prometheus/client"
|
||||
require "prometheus/client/formats/text"
|
||||
require "prometheus/middleware/collector"
|
||||
require "prometheus/middleware/exporter"
|
||||
require "net/http"
|
||||
require "uri"
|
||||
require "ipaddr"
|
||||
require "set"
|
||||
require "digest"
|
||||
|
||||
require_relative "config"
|
||||
require_relative "sanitizer"
|
||||
require_relative "meta"
|
||||
require_relative "logging"
|
||||
require_relative "application/helpers"
|
||||
require_relative "application/errors"
|
||||
require_relative "application/database"
|
||||
require_relative "application/networking"
|
||||
require_relative "application/identity"
|
||||
require_relative "application/federation"
|
||||
require_relative "application/prometheus"
|
||||
require_relative "application/queries"
|
||||
require_relative "application/data_processing"
|
||||
require_relative "application/filesystem"
|
||||
require_relative "application/instances"
|
||||
require_relative "application/routes/api"
|
||||
require_relative "application/routes/ingest"
|
||||
require_relative "application/routes/root"
|
||||
|
||||
module PotatoMesh
|
||||
class Application < Sinatra::Base
|
||||
extend App::Helpers
|
||||
extend App::Database
|
||||
extend App::Networking
|
||||
extend App::Identity
|
||||
extend App::Federation
|
||||
extend App::Instances
|
||||
extend App::Prometheus
|
||||
extend App::Queries
|
||||
extend App::DataProcessing
|
||||
extend App::Filesystem
|
||||
|
||||
helpers App::Helpers
|
||||
include App::Database
|
||||
include App::Networking
|
||||
include App::Identity
|
||||
include App::Federation
|
||||
include App::Instances
|
||||
include App::Prometheus
|
||||
include App::Queries
|
||||
include App::DataProcessing
|
||||
include App::Filesystem
|
||||
|
||||
register App::Routes::Api
|
||||
register App::Routes::Ingest
|
||||
register App::Routes::Root
|
||||
|
||||
DEFAULT_PORT = 41_447
|
||||
DEFAULT_BIND_ADDRESS = "0.0.0.0"
|
||||
|
||||
APP_VERSION = determine_app_version
|
||||
INSTANCE_PRIVATE_KEY, INSTANCE_KEY_GENERATED = load_or_generate_instance_private_key
|
||||
INSTANCE_PUBLIC_KEY_PEM = INSTANCE_PRIVATE_KEY.public_key.export
|
||||
SELF_INSTANCE_ID = Digest::SHA256.hexdigest(INSTANCE_PUBLIC_KEY_PEM)
|
||||
INSTANCE_DOMAIN, INSTANCE_DOMAIN_SOURCE = determine_instance_domain
|
||||
|
||||
# Adjust the runtime logger severity to match the DEBUG flag.
|
||||
#
|
||||
# @return [void]
|
||||
def self.apply_logger_level!
|
||||
logger = settings.logger
|
||||
return unless logger
|
||||
|
||||
logger.level = PotatoMesh::Config.debug? ? Logger::DEBUG : Logger::WARN
|
||||
end
|
||||
|
||||
# Determine the port the application should listen on.
|
||||
#
|
||||
# @param default_port [Integer] fallback port when ENV['PORT'] is absent or invalid.
|
||||
# @return [Integer] port number for the HTTP server.
|
||||
def self.resolve_port(default_port: DEFAULT_PORT)
|
||||
default_port
|
||||
end
|
||||
|
||||
configure do
|
||||
set :public_folder, File.expand_path("../../public", __dir__)
|
||||
set :views, File.expand_path("../../views", __dir__)
|
||||
set :federation_thread, nil
|
||||
set :port, resolve_port
|
||||
set :bind, DEFAULT_BIND_ADDRESS
|
||||
|
||||
app_logger = PotatoMesh::Logging.build_logger($stdout)
|
||||
set :logger, app_logger
|
||||
use Rack::CommonLogger, app_logger
|
||||
use Rack::Deflater
|
||||
use ::Prometheus::Middleware::Collector
|
||||
use ::Prometheus::Middleware::Exporter
|
||||
|
||||
apply_logger_level!
|
||||
|
||||
perform_initial_filesystem_setup!
|
||||
cleanup_legacy_well_known_artifacts
|
||||
init_db unless db_schema_present?
|
||||
ensure_schema_upgrades
|
||||
|
||||
log_instance_domain_resolution
|
||||
log_instance_public_key
|
||||
refresh_well_known_document_if_stale
|
||||
ensure_self_instance_record!
|
||||
update_all_prometheus_metrics_from_nodes
|
||||
|
||||
if federation_announcements_active?
|
||||
start_initial_federation_announcement!
|
||||
start_federation_announcer!
|
||||
elsif federation_enabled?
|
||||
debug_log(
|
||||
"Federation announcements disabled",
|
||||
context: "federation",
|
||||
reason: "test environment",
|
||||
)
|
||||
else
|
||||
debug_log(
|
||||
"Federation announcements disabled",
|
||||
context: "federation",
|
||||
reason: "configuration",
|
||||
)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if defined?(Sinatra::Application) && Sinatra::Application != PotatoMesh::Application
|
||||
Sinatra.send(:remove_const, :Application)
|
||||
end
|
||||
Sinatra::Application = PotatoMesh::Application unless defined?(Sinatra::Application)
|
||||
|
||||
APP_VERSION = PotatoMesh::Application::APP_VERSION unless defined?(APP_VERSION)
|
||||
SELF_INSTANCE_ID = PotatoMesh::Application::SELF_INSTANCE_ID unless defined?(SELF_INSTANCE_ID)
|
||||
|
||||
[
|
||||
PotatoMesh::App::Helpers,
|
||||
PotatoMesh::App::Database,
|
||||
PotatoMesh::App::Networking,
|
||||
PotatoMesh::App::Identity,
|
||||
PotatoMesh::App::Federation,
|
||||
PotatoMesh::App::Instances,
|
||||
PotatoMesh::App::Prometheus,
|
||||
PotatoMesh::App::Queries,
|
||||
PotatoMesh::App::DataProcessing,
|
||||
].each do |mod|
|
||||
Object.include(mod) unless Object < mod
|
||||
end
|
||||
web/lib/potato_mesh/application/data_processing.rb (new file, 1071 lines)
File diff suppressed because it is too large
web/lib/potato_mesh/application/database.rb (new file, 134 lines)
@@ -0,0 +1,134 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Database
|
||||
# Open a connection to the application database applying common pragmas.
|
||||
#
|
||||
# @param readonly [Boolean] whether to open the database in read-only mode.
|
||||
# @return [SQLite3::Database] configured database handle.
|
||||
def open_database(readonly: false)
|
||||
SQLite3::Database.new(PotatoMesh::Config.db_path, readonly: readonly).tap do |db|
|
||||
db.busy_timeout = PotatoMesh::Config.db_busy_timeout_ms
|
||||
db.execute("PRAGMA foreign_keys = ON")
|
||||
end
|
||||
end
|
||||
|
||||
# Execute the provided block and retry when SQLite reports a busy error.
|
||||
#
|
||||
# @param max_retries [Integer] maximum number of retries when locked.
|
||||
# @param base_delay [Float] incremental back-off delay between retries.
|
||||
# @yield Executes the database operation.
|
||||
# @return [Object] result of the block.
|
||||
def with_busy_retry(
|
||||
max_retries: PotatoMesh::Config.db_busy_max_retries,
|
||||
base_delay: PotatoMesh::Config.db_busy_retry_delay
|
||||
)
|
||||
attempts = 0
|
||||
begin
|
||||
yield
|
||||
rescue SQLite3::BusyException
|
||||
attempts += 1
|
||||
raise if attempts > max_retries
|
||||
|
||||
sleep(base_delay * attempts)
|
||||
retry
|
||||
end
|
||||
end
|
||||
|
||||
# Determine whether the database schema has already been provisioned.
|
||||
#
|
||||
# @return [Boolean] true when all required tables exist.
|
||||
def db_schema_present?
|
||||
return false unless File.exist?(PotatoMesh::Config.db_path)
|
||||
|
||||
db = open_database(readonly: true)
|
||||
required = %w[nodes messages positions telemetry neighbors instances]
|
||||
tables =
|
||||
db.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages','positions','telemetry','neighbors','instances')",
|
||||
).flatten
|
||||
(required - tables).empty?
|
||||
rescue SQLite3::Exception
|
||||
false
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
# Create the database schema using the bundled SQL files.
|
||||
#
|
||||
# @return [void]
|
||||
def init_db
|
||||
FileUtils.mkdir_p(File.dirname(PotatoMesh::Config.db_path))
|
||||
db = open_database
|
||||
%w[nodes messages positions telemetry neighbors instances].each do |schema|
|
||||
sql_file = File.expand_path("../../../../data/#{schema}.sql", __dir__)
|
||||
db.execute_batch(File.read(sql_file))
|
||||
end
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
# Apply any schema migrations required for older installations.
|
||||
#
|
||||
# @return [void]
|
||||
def ensure_schema_upgrades
|
||||
db = open_database
|
||||
node_columns = db.execute("PRAGMA table_info(nodes)").map { |row| row[1] }
|
||||
unless node_columns.include?("precision_bits")
|
||||
db.execute("ALTER TABLE nodes ADD COLUMN precision_bits INTEGER")
|
||||
node_columns << "precision_bits"
|
||||
end
|
||||
|
||||
unless node_columns.include?("lora_freq")
|
||||
db.execute("ALTER TABLE nodes ADD COLUMN lora_freq INTEGER")
|
||||
end
|
||||
|
||||
unless node_columns.include?("modem_preset")
|
||||
db.execute("ALTER TABLE nodes ADD COLUMN modem_preset TEXT")
|
||||
end
|
||||
|
||||
message_columns = db.execute("PRAGMA table_info(messages)").map { |row| row[1] }
|
||||
|
||||
unless message_columns.include?("lora_freq")
|
||||
db.execute("ALTER TABLE messages ADD COLUMN lora_freq INTEGER")
|
||||
end
|
||||
|
||||
unless message_columns.include?("modem_preset")
|
||||
db.execute("ALTER TABLE messages ADD COLUMN modem_preset TEXT")
|
||||
end
|
||||
|
||||
unless message_columns.include?("channel_name")
|
||||
db.execute("ALTER TABLE messages ADD COLUMN channel_name TEXT")
|
||||
end
|
||||
|
||||
tables = db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='instances'").flatten
|
||||
if tables.empty?
|
||||
sql_file = File.expand_path("../../../../data/instances.sql", __dir__)
|
||||
db.execute_batch(File.read(sql_file))
|
||||
end
|
||||
rescue SQLite3::SQLException, Errno::ENOENT => e
|
||||
warn_log(
|
||||
"Failed to apply schema upgrade",
|
||||
context: "database.schema",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
web/lib/potato_mesh/application/errors.rb (new file, 20 lines)
@@ -0,0 +1,20 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# frozen_string_literal: true

module PotatoMesh
  module App
    # Raised when a remote instance fails to provide valid federation data.
    class InstanceFetchError < StandardError; end
  end
end
web/lib/potato_mesh/application/federation.rb (new file, 738 lines)
@@ -0,0 +1,738 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Federation
|
||||
def self_instance_domain
|
||||
sanitized = sanitize_instance_domain(app_constant(:INSTANCE_DOMAIN))
|
||||
return sanitized if sanitized
|
||||
|
||||
raise "INSTANCE_DOMAIN could not be determined"
|
||||
end
|
||||
|
||||
# Determine whether the local instance should persist its own record.
|
||||
#
|
||||
# @param domain [String, nil] candidate domain for the running instance.
|
||||
# @return [Array(Boolean, String, nil)] tuple containing a decision flag and an optional reason.
|
||||
def self_instance_registration_decision(domain)
|
||||
source = app_constant(:INSTANCE_DOMAIN_SOURCE)
|
||||
return [false, "INSTANCE_DOMAIN source is #{source}"] unless source == :environment
|
||||
|
||||
sanitized = sanitize_instance_domain(domain)
|
||||
return [false, "INSTANCE_DOMAIN missing or invalid"] unless sanitized
|
||||
|
||||
ip = ip_from_domain(sanitized)
|
||||
if ip && restricted_ip_address?(ip)
|
||||
return [false, "INSTANCE_DOMAIN resolves to restricted IP"]
|
||||
end
|
||||
|
||||
[true, nil]
|
||||
end
|
||||
|
||||
def self_instance_attributes
|
||||
domain = self_instance_domain
|
||||
last_update = latest_node_update_timestamp || Time.now.to_i
|
||||
{
|
||||
id: app_constant(:SELF_INSTANCE_ID),
|
||||
domain: domain,
|
||||
pubkey: app_constant(:INSTANCE_PUBLIC_KEY_PEM),
|
||||
name: sanitized_site_name,
|
||||
version: app_constant(:APP_VERSION),
|
||||
channel: sanitized_channel,
|
||||
frequency: sanitized_frequency,
|
||||
latitude: PotatoMesh::Config.map_center_lat,
|
||||
longitude: PotatoMesh::Config.map_center_lon,
|
||||
last_update_time: last_update,
|
||||
is_private: private_mode?,
|
||||
}
|
||||
end
|
||||
|
||||
def sign_instance_attributes(attributes)
|
||||
payload = canonical_instance_payload(attributes)
|
||||
Base64.strict_encode64(
|
||||
app_constant(:INSTANCE_PRIVATE_KEY).sign(OpenSSL::Digest::SHA256.new, payload),
|
||||
)
|
||||
end
|
||||
|
||||
def instance_announcement_payload(attributes, signature)
|
||||
payload = {
|
||||
"id" => attributes[:id],
|
||||
"domain" => attributes[:domain],
|
||||
"pubkey" => attributes[:pubkey],
|
||||
"name" => attributes[:name],
|
||||
"version" => attributes[:version],
|
||||
"channel" => attributes[:channel],
|
||||
"frequency" => attributes[:frequency],
|
||||
"latitude" => attributes[:latitude],
|
||||
"longitude" => attributes[:longitude],
|
||||
"lastUpdateTime" => attributes[:last_update_time],
|
||||
"isPrivate" => attributes[:is_private],
|
||||
"signature" => signature,
|
||||
}
|
||||
payload.reject { |_, value| value.nil? }
|
||||
end
|
||||
|
||||
def ensure_self_instance_record!
|
||||
attributes = self_instance_attributes
|
||||
signature = sign_instance_attributes(attributes)
|
||||
db = nil
|
||||
allowed, reason = self_instance_registration_decision(attributes[:domain])
|
||||
if allowed
|
||||
db = open_database
|
||||
upsert_instance_record(db, attributes, signature)
|
||||
debug_log(
|
||||
"Registered self instance record",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
instance_id: attributes[:id],
|
||||
)
|
||||
else
|
||||
debug_log(
|
||||
"Skipped self instance registration",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
reason: reason,
|
||||
)
|
||||
end
|
||||
[attributes, signature]
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
def federation_target_domains(self_domain)
|
||||
normalized_self = sanitize_instance_domain(self_domain)&.downcase
|
||||
ordered = []
|
||||
seen = Set.new
|
||||
|
||||
PotatoMesh::Config.federation_seed_domains.each do |seed|
|
||||
sanitized = sanitize_instance_domain(seed)&.downcase
|
||||
next unless sanitized
|
||||
next if normalized_self && sanitized == normalized_self
|
||||
next if seen.include?(sanitized)
|
||||
|
||||
ordered << sanitized
|
||||
seen << sanitized
|
||||
end
|
||||
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = false
|
||||
rows = with_busy_retry {
|
||||
db.execute("SELECT domain FROM instances WHERE domain IS NOT NULL AND TRIM(domain) != ''")
|
||||
}
|
||||
rows.flatten.compact.each do |raw_domain|
|
||||
sanitized = sanitize_instance_domain(raw_domain)&.downcase
|
||||
next unless sanitized
|
||||
next if normalized_self && sanitized == normalized_self
|
||||
next if seen.include?(sanitized)
|
||||
|
||||
ordered << sanitized
|
||||
seen << sanitized
|
||||
end
|
||||
ordered
|
||||
rescue SQLite3::Exception
|
||||
fallback = PotatoMesh::Config.federation_seed_domains.filter_map do |seed|
|
||||
candidate = sanitize_instance_domain(seed)&.downcase
|
||||
next if normalized_self && candidate == normalized_self
|
||||
|
||||
candidate
|
||||
end
|
||||
fallback.uniq
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
def announce_instance_to_domain(domain, payload_json)
|
||||
return false unless domain && !domain.empty?
|
||||
|
||||
instance_uri_candidates(domain, "/api/instances").each do |uri|
|
||||
begin
|
||||
http = build_remote_http_client(uri)
|
||||
response = http.start do |connection|
|
||||
request = Net::HTTP::Post.new(uri)
|
||||
request["Content-Type"] = "application/json"
|
||||
request.body = payload_json
|
||||
connection.request(request)
|
||||
end
|
||||
if response.is_a?(Net::HTTPSuccess)
|
||||
debug_log(
|
||||
"Published federation announcement",
|
||||
context: "federation.announce",
|
||||
target: uri.to_s,
|
||||
status: response.code,
|
||||
)
|
||||
return true
|
||||
end
|
||||
debug_log(
|
||||
"Federation announcement failed",
|
||||
context: "federation.announce",
|
||||
target: uri.to_s,
|
||||
status: response.code,
|
||||
)
|
||||
rescue StandardError => e
|
||||
warn_log(
|
||||
"Federation announcement raised exception",
|
||||
context: "federation.announce",
|
||||
target: uri.to_s,
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
false
|
||||
end
|
||||
|
||||
def announce_instance_to_all_domains
|
||||
return unless federation_enabled?
|
||||
|
||||
attributes, signature = ensure_self_instance_record!
|
||||
payload_json = JSON.generate(instance_announcement_payload(attributes, signature))
|
||||
domains = federation_target_domains(attributes[:domain])
|
||||
domains.each do |domain|
|
||||
announce_instance_to_domain(domain, payload_json)
|
||||
end
|
||||
unless domains.empty?
|
||||
debug_log(
|
||||
"Federation announcement cycle complete",
|
||||
context: "federation.announce",
|
||||
targets: domains,
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
def start_federation_announcer!
|
||||
existing = settings.federation_thread
|
||||
return existing if existing&.alive?
|
||||
|
||||
thread = Thread.new do
|
||||
loop do
|
||||
sleep PotatoMesh::Config.federation_announcement_interval
|
||||
begin
|
||||
announce_instance_to_all_domains
|
||||
rescue StandardError => e
|
||||
warn_log(
|
||||
"Federation announcement loop error",
|
||||
context: "federation.announce",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
end
|
||||
end
|
||||
end
|
||||
thread.name = "potato-mesh-federation" if thread.respond_to?(:name=)
|
||||
set(:federation_thread, thread)
|
||||
thread
|
||||
end
|
||||
|
||||
def start_initial_federation_announcement!
|
||||
existing = settings.respond_to?(:initial_federation_thread) ? settings.initial_federation_thread : nil
|
||||
return existing if existing&.alive?
|
||||
|
||||
thread = Thread.new do
|
||||
begin
|
||||
announce_instance_to_all_domains
|
||||
rescue StandardError => e
|
||||
warn_log(
|
||||
"Initial federation announcement failed",
|
||||
context: "federation.announce",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
ensure
|
||||
set(:initial_federation_thread, nil)
|
||||
end
|
||||
end
|
||||
thread.name = "potato-mesh-federation-initial" if thread.respond_to?(:name=)
|
||||
thread.report_on_exception = false if thread.respond_to?(:report_on_exception=)
|
||||
set(:initial_federation_thread, thread)
|
||||
thread
|
||||
end
|
||||
|
||||
def canonical_instance_payload(attributes)
|
||||
data = {}
|
||||
data["id"] = attributes[:id] if attributes[:id]
|
||||
data["domain"] = attributes[:domain] if attributes[:domain]
|
||||
data["pubkey"] = attributes[:pubkey] if attributes[:pubkey]
|
||||
data["name"] = attributes[:name] if attributes[:name]
|
||||
data["version"] = attributes[:version] if attributes[:version]
|
||||
data["channel"] = attributes[:channel] if attributes[:channel]
|
||||
data["frequency"] = attributes[:frequency] if attributes[:frequency]
|
||||
data["latitude"] = attributes[:latitude] unless attributes[:latitude].nil?
|
||||
data["longitude"] = attributes[:longitude] unless attributes[:longitude].nil?
|
||||
data["lastUpdateTime"] = attributes[:last_update_time] unless attributes[:last_update_time].nil?
|
||||
data["isPrivate"] = attributes[:is_private] unless attributes[:is_private].nil?
|
||||
|
||||
JSON.generate(data, sort_keys: true)
|
||||
end
|
||||
|
||||
def verify_instance_signature(attributes, signature, public_key_pem)
|
||||
return false unless signature && public_key_pem
|
||||
|
||||
canonical = canonical_instance_payload(attributes)
|
||||
signature_bytes = Base64.strict_decode64(signature)
|
||||
key = OpenSSL::PKey::RSA.new(public_key_pem)
|
||||
key.verify(OpenSSL::Digest::SHA256.new, signature_bytes, canonical)
|
||||
rescue ArgumentError, OpenSSL::PKey::PKeyError
|
||||
false
|
||||
end
|
||||
|
||||
def instance_uri_candidates(domain, path)
|
||||
base = domain
|
||||
[
|
||||
URI.parse("https://#{base}#{path}"),
|
||||
URI.parse("http://#{base}#{path}"),
|
||||
]
|
||||
rescue URI::InvalidURIError
|
||||
[]
|
||||
end
|
||||
|
||||
def perform_instance_http_request(uri)
|
||||
http = build_remote_http_client(uri)
|
||||
http.start do |connection|
|
||||
response = connection.request(Net::HTTP::Get.new(uri))
|
||||
case response
|
||||
when Net::HTTPSuccess
|
||||
response.body
|
||||
else
|
||||
raise InstanceFetchError, "unexpected response #{response.code}"
|
||||
end
|
||||
end
|
||||
rescue StandardError => e
|
||||
raise_instance_fetch_error(e)
|
||||
end
|
||||
|
||||
# Build a human readable error message for a failed instance request.
|
||||
#
|
||||
# @param error [StandardError] failure raised while performing the request.
|
||||
# @return [String] description including the error class when necessary.
|
||||
def instance_fetch_error_message(error)
|
||||
message = error.message.to_s.strip
|
||||
class_name = error.class.name || error.class.to_s
|
||||
return class_name if message.empty?
|
||||
|
||||
message.include?(class_name) ? message : "#{class_name}: #{message}"
|
||||
end
|
||||
|
||||
# Raise an InstanceFetchError that preserves the original context.
|
||||
#
|
||||
# @param error [StandardError] failure raised while performing the request.
|
||||
# @return [void]
|
||||
def raise_instance_fetch_error(error)
|
||||
message = instance_fetch_error_message(error)
|
||||
wrapped = InstanceFetchError.new(message)
|
||||
wrapped.set_backtrace(error.backtrace)
|
||||
raise wrapped
|
||||
end
|
||||
|
||||
def fetch_instance_json(domain, path)
|
||||
errors = []
|
||||
instance_uri_candidates(domain, path).each do |uri|
|
||||
begin
|
||||
body = perform_instance_http_request(uri)
|
||||
return [JSON.parse(body), uri] if body
|
||||
rescue JSON::ParserError => e
|
||||
errors << "#{uri}: invalid JSON (#{e.message})"
|
||||
rescue InstanceFetchError => e
|
||||
errors << "#{uri}: #{e.message}"
|
||||
end
|
||||
end
|
||||
[nil, errors]
|
||||
end
|
||||
|
||||
# Parse a remote federation instance payload into canonical attributes.
|
||||
#
|
||||
# @param payload [Hash] JSON object describing a remote instance.
|
||||
# @return [Array<(Hash, String), String>] tuple containing the attribute
|
||||
# hash and signature when valid or a failure reason when invalid.
|
||||
def remote_instance_attributes_from_payload(payload)
|
||||
unless payload.is_a?(Hash)
|
||||
return [nil, nil, "instance payload is not an object"]
|
||||
end
|
||||
|
||||
id = string_or_nil(payload["id"])
|
||||
return [nil, nil, "missing instance id"] unless id
|
||||
|
||||
domain = sanitize_instance_domain(payload["domain"])
|
||||
return [nil, nil, "missing instance domain"] unless domain
|
||||
|
||||
pubkey = sanitize_public_key_pem(payload["pubkey"])
|
||||
return [nil, nil, "missing instance public key"] unless pubkey
|
||||
|
||||
signature = string_or_nil(payload["signature"])
|
||||
return [nil, nil, "missing instance signature"] unless signature
|
||||
|
||||
private_value = if payload.key?("isPrivate")
|
||||
payload["isPrivate"]
|
||||
else
|
||||
payload["is_private"]
|
||||
end
|
||||
private_flag = coerce_boolean(private_value)
|
||||
if private_flag.nil?
|
||||
numeric_flag = coerce_integer(private_value)
|
||||
private_flag = !numeric_flag.to_i.zero? if numeric_flag
|
||||
end
|
||||
|
||||
attributes = {
|
||||
id: id,
|
||||
domain: domain,
|
||||
pubkey: pubkey,
|
||||
name: string_or_nil(payload["name"]),
|
||||
version: string_or_nil(payload["version"]),
|
||||
channel: string_or_nil(payload["channel"]),
|
||||
frequency: string_or_nil(payload["frequency"]),
|
||||
latitude: coerce_float(payload["latitude"]),
|
||||
longitude: coerce_float(payload["longitude"]),
|
||||
last_update_time: coerce_integer(payload["lastUpdateTime"]),
|
||||
is_private: private_flag,
|
||||
}
|
||||
|
||||
[attributes, signature, nil]
|
||||
rescue StandardError => e
|
||||
[nil, nil, e.message]
|
||||
end
|
||||
|
||||
# Recursively ingest federation records exposed by the supplied domain.
|
||||
#
|
||||
# @param db [SQLite3::Database] open database connection used for writes.
|
||||
# @param domain [String] remote domain to crawl for federation records.
|
||||
# @param visited [Set<String>] domains processed during this crawl.
|
||||
# @return [Set<String>] updated set of visited domains.
|
||||
def ingest_known_instances_from!(db, domain, visited: nil)
|
||||
sanitized = sanitize_instance_domain(domain)
|
||||
return visited || Set.new unless sanitized
|
||||
|
||||
visited ||= Set.new
|
||||
return visited if visited.include?(sanitized)
|
||||
|
||||
visited << sanitized
|
||||
|
||||
payload, metadata = fetch_instance_json(sanitized, "/api/instances")
|
||||
unless payload.is_a?(Array)
|
||||
warn_log(
|
||||
"Failed to load remote federation instances",
|
||||
context: "federation.instances",
|
||||
domain: sanitized,
|
||||
reason: Array(metadata).map(&:to_s).join("; "),
|
||||
)
|
||||
return visited
|
||||
end
|
||||
|
||||
payload.each do |entry|
|
||||
attributes, signature, reason = remote_instance_attributes_from_payload(entry)
|
||||
unless attributes && signature
|
||||
warn_log(
|
||||
"Discarded remote instance entry",
|
||||
context: "federation.instances",
|
||||
domain: sanitized,
|
||||
reason: reason || "invalid payload",
|
||||
)
|
||||
next
|
||||
end
|
||||
|
||||
if attributes[:is_private]
|
||||
debug_log(
|
||||
"Skipped private remote instance",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
)
|
||||
next
|
||||
end
|
||||
|
||||
unless verify_instance_signature(attributes, signature, attributes[:pubkey])
|
||||
warn_log(
|
||||
"Discarded remote instance entry",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
reason: "invalid signature",
|
||||
)
|
||||
next
|
||||
end
|
||||
|
||||
attributes[:is_private] = false if attributes[:is_private].nil?
|
||||
|
||||
remote_nodes, node_metadata = fetch_instance_json(attributes[:domain], "/api/nodes")
|
||||
unless remote_nodes
|
||||
warn_log(
|
||||
"Failed to load remote node data",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
reason: Array(node_metadata).map(&:to_s).join("; "),
|
||||
)
|
||||
next
|
||||
end
|
||||
|
||||
fresh, freshness_reason = validate_remote_nodes(remote_nodes)
|
||||
unless fresh
|
||||
warn_log(
|
||||
"Discarded remote instance entry",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
reason: freshness_reason || "stale node data",
|
||||
)
|
||||
next
|
||||
end
|
||||
|
||||
begin
|
||||
upsert_instance_record(db, attributes, signature)
|
||||
ingest_known_instances_from!(db, attributes[:domain], visited: visited)
|
||||
rescue ArgumentError => e
|
||||
warn_log(
|
||||
"Failed to persist remote instance",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
visited
|
||||
end
|
||||
|
||||
# Build an HTTP client configured for communication with a remote instance.
|
||||
#
|
||||
# @param uri [URI::Generic] target URI describing the remote endpoint.
|
||||
# @return [Net::HTTP] HTTP client ready to execute the request.
|
||||
def build_remote_http_client(uri)
|
||||
http = Net::HTTP.new(uri.host, uri.port)
|
||||
http.open_timeout = PotatoMesh::Config.remote_instance_http_timeout
|
||||
http.read_timeout = PotatoMesh::Config.remote_instance_read_timeout
|
||||
http.use_ssl = uri.scheme == "https"
|
||||
return http unless http.use_ssl?
|
||||
|
||||
http.verify_mode = OpenSSL::SSL::VERIFY_PEER
|
||||
http.min_version = :TLS1_2 if http.respond_to?(:min_version=)
|
||||
store = remote_instance_cert_store
|
||||
http.cert_store = store if store
|
||||
callback = remote_instance_verify_callback
|
||||
http.verify_callback = callback if callback
|
||||
http
|
||||
end
|
||||
|
||||
      # Construct a certificate store that disables strict CRL enforcement.
      #
      # OpenSSL may fail remote requests when certificate revocation lists are
      # unavailable from the issuing authority. The returned store mirrors the
      # default system trust store while clearing CRL-related flags so that
      # federation announcements gracefully succeed when CRLs cannot be fetched.
      #
      # @return [OpenSSL::X509::Store, nil] configured store or nil when setup fails.
      def remote_instance_cert_store
        return @remote_instance_cert_store if defined?(@remote_instance_cert_store) && @remote_instance_cert_store

        store = OpenSSL::X509::Store.new
        store.set_default_paths
        store.flags = 0 if store.respond_to?(:flags=)
        @remote_instance_cert_store = store
      rescue OpenSSL::X509::StoreError => e
        debug_log(
          "Failed to initialize certificate store for federation HTTP: #{e.message}",
        )
        @remote_instance_cert_store = nil
      end

      # Build a TLS verification callback that tolerates CRL availability failures.
      #
      # Some certificate authorities publish CRL endpoints that may occasionally be
      # unreachable. When OpenSSL cannot download the CRL it raises the
      # V_ERR_UNABLE_TO_GET_CRL error which would otherwise cause HTTPS federation
      # announcements to abort. The generated callback accepts those specific
      # failures while preserving strict verification for all other errors.
      #
      # @return [Proc, nil] verification callback or nil when creation fails.
      def remote_instance_verify_callback
        if defined?(@remote_instance_verify_callback) && @remote_instance_verify_callback
          return @remote_instance_verify_callback
        end

        callback = lambda do |preverify_ok, store_context|
          return true if preverify_ok

          if store_context && crl_unavailable_error?(store_context.error)
            debug_log(
              "Ignoring TLS CRL retrieval failure during federation request",
              context: "federation.announce",
            )
            true
          else
            false
          end
        end

        @remote_instance_verify_callback = callback
      rescue StandardError => e
        debug_log(
          "Failed to initialize federation TLS verify callback: #{e.message}",
          context: "federation.announce",
        )
        @remote_instance_verify_callback = nil
      end

      # Determine whether the supplied OpenSSL verification error corresponds to a
      # missing certificate revocation list.
      #
      # @param error_code [Integer, nil] OpenSSL verification error value.
      # @return [Boolean] true when the error should be ignored.
      def crl_unavailable_error?(error_code)
        allowed_errors = [OpenSSL::X509::V_ERR_UNABLE_TO_GET_CRL]
        if defined?(OpenSSL::X509::V_ERR_UNABLE_TO_GET_CRL_ISSUER)
          allowed_errors << OpenSSL::X509::V_ERR_UNABLE_TO_GET_CRL_ISSUER
        end
        allowed_errors.include?(error_code)
      end

      def validate_well_known_document(document, domain, pubkey)
        unless document.is_a?(Hash)
          return [false, "document is not an object"]
        end

        remote_pubkey = sanitize_public_key_pem(document["publicKey"])
        return [false, "public key missing"] unless remote_pubkey
        return [false, "public key mismatch"] unless remote_pubkey == pubkey

        remote_domain = string_or_nil(document["domain"])
        return [false, "domain missing"] unless remote_domain
        return [false, "domain mismatch"] unless remote_domain.casecmp?(domain)

        algorithm = string_or_nil(document["signatureAlgorithm"])
        unless algorithm&.casecmp?(PotatoMesh::Config.instance_signature_algorithm)
          return [false, "unsupported signature algorithm"]
        end

        signed_payload_b64 = string_or_nil(document["signedPayload"])
        signature_b64 = string_or_nil(document["signature"])
        return [false, "missing signed payload"] unless signed_payload_b64
        return [false, "missing signature"] unless signature_b64

        signed_payload = Base64.strict_decode64(signed_payload_b64)
        signature = Base64.strict_decode64(signature_b64)
        key = OpenSSL::PKey::RSA.new(remote_pubkey)
        unless key.verify(OpenSSL::Digest::SHA256.new, signature, signed_payload)
          return [false, "invalid well-known signature"]
        end

        payload = JSON.parse(signed_payload)
        unless payload.is_a?(Hash)
          return [false, "signed payload is not an object"]
        end

        payload_domain = string_or_nil(payload["domain"])
        payload_pubkey = sanitize_public_key_pem(payload["publicKey"])
        return [false, "signed payload domain mismatch"] unless payload_domain&.casecmp?(domain)
        return [false, "signed payload public key mismatch"] unless payload_pubkey == pubkey

        [true, nil]
      rescue ArgumentError, OpenSSL::PKey::PKeyError => e
        [false, e.message]
      rescue JSON::ParserError => e
        [false, "signed payload JSON error: #{e.message}"]
      end

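      # For reference, a well-known document accepted by the validation above has
      # roughly this shape (values abbreviated and hypothetical):
      #
      #   {
      #     "publicKey"          => "-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----\n",
      #     "domain"             => "mesh.example.org",
      #     "signatureAlgorithm" => PotatoMesh::Config.instance_signature_algorithm,
      #     "signedPayload"      => Base64.strict_encode64('{"domain":"mesh.example.org",...}'),
      #     "signature"          => "base64 RSA-SHA256 signature over the decoded signedPayload"
      #   }
      #
      # The decoded signed payload must itself carry the matching "domain" and "publicKey".
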
      def validate_remote_nodes(nodes)
        unless nodes.is_a?(Array)
          return [false, "node response is not an array"]
        end

        if nodes.length < PotatoMesh::Config.remote_instance_min_node_count
          return [false, "insufficient nodes"]
        end

        latest = nodes.filter_map do |node|
          next unless node.is_a?(Hash)

          last_heard_values = []
          last_heard_values << coerce_integer(node["last_heard"])
          last_heard_values << coerce_integer(node["lastHeard"])
          last_heard_values.compact.max
        end.compact.max

        return [false, "missing last_heard data"] unless latest

        cutoff = Time.now.to_i - PotatoMesh::Config.remote_instance_max_node_age
        return [false, "node data is stale"] if latest < cutoff

        [true, nil]
      end

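      # Worked example of the freshness check (numbers are illustrative): with
      # remote_instance_max_node_age = 86_400 seconds, a peer whose newest node
      # reports last_heard = now - 90_000 falls below the cutoff and is rejected
      # as "node data is stale", while last_heard = now - 3_600 passes.
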
      def upsert_instance_record(db, attributes, signature)
        sanitized_domain = sanitize_instance_domain(attributes[:domain])
        raise ArgumentError, "invalid domain" unless sanitized_domain

        ip = ip_from_domain(sanitized_domain)
        if ip && restricted_ip_address?(ip)
          raise ArgumentError, "restricted domain"
        end

        normalized_domain = sanitized_domain
        existing_id = with_busy_retry do
          db.get_first_value(
            "SELECT id FROM instances WHERE domain = ?",
            normalized_domain,
          )
        end
        if existing_id && existing_id != attributes[:id]
          with_busy_retry do
            db.execute("DELETE FROM instances WHERE id = ?", existing_id)
          end
          debug_log(
            "Removed conflicting instance by domain",
            context: "federation.instances",
            domain: normalized_domain,
            replaced_id: existing_id,
            incoming_id: attributes[:id],
          )
        end

        sql = <<~SQL
          INSERT INTO instances (
            id, domain, pubkey, name, version, channel, frequency,
            latitude, longitude, last_update_time, is_private, signature
          ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
          ON CONFLICT(id) DO UPDATE SET
            domain=excluded.domain,
            pubkey=excluded.pubkey,
            name=excluded.name,
            version=excluded.version,
            channel=excluded.channel,
            frequency=excluded.frequency,
            latitude=excluded.latitude,
            longitude=excluded.longitude,
            last_update_time=excluded.last_update_time,
            is_private=excluded.is_private,
            signature=excluded.signature
        SQL

        params = [
          attributes[:id],
          normalized_domain,
          attributes[:pubkey],
          attributes[:name],
          attributes[:version],
          attributes[:channel],
          attributes[:frequency],
          attributes[:latitude],
          attributes[:longitude],
          attributes[:last_update_time],
          attributes[:is_private] ? 1 : 0,
          signature,
        ]

        with_busy_retry do
          db.execute(sql, params)
        end
      end
    end
  end
end
121 web/lib/potato_mesh/application/filesystem.rb (new file)
@@ -0,0 +1,121 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# frozen_string_literal: true

require "fileutils"

module PotatoMesh
  module App
    # Filesystem helpers responsible for migrating legacy assets to XDG compliant
    # directories and preparing runtime storage locations.
    module Filesystem
      # Execute all filesystem migrations required before the application boots.
      #
      # @return [void]
      def perform_initial_filesystem_setup!
        migrate_legacy_database!
        migrate_legacy_keyfile!
        migrate_legacy_well_known_assets!
      end

      private

      # Copy the legacy database file into the configured XDG data directory.
      #
      # @return [void]
      def migrate_legacy_database!
        return unless default_database_destination?

        migrate_legacy_file(
          PotatoMesh::Config.legacy_db_path,
          PotatoMesh::Config.db_path,
          chmod: 0o600,
          context: "filesystem.db",
        )
      end

      # Copy the legacy keyfile into the configured XDG configuration directory.
      #
      # @return [void]
      def migrate_legacy_keyfile!
        PotatoMesh::Config.legacy_keyfile_candidates.each do |candidate|
          migrate_legacy_file(
            candidate,
            PotatoMesh::Config.keyfile_path,
            chmod: 0o600,
            context: "filesystem.keys",
          )
        end
      end

      # Copy the legacy well-known document into the configured XDG directory.
      #
      # @return [void]
      def migrate_legacy_well_known_assets!
        destination = File.join(
          PotatoMesh::Config.well_known_storage_root,
          File.basename(PotatoMesh::Config.well_known_relative_path),
        )

        PotatoMesh::Config.legacy_well_known_candidates.each do |candidate|
          migrate_legacy_file(
            candidate,
            destination,
            chmod: 0o644,
            context: "filesystem.well_known",
          )
        end
      end

      # Migrate a legacy file if it exists and the destination has not been created yet.
      #
      # @param source_path [String] absolute path to the legacy file.
      # @param destination_path [String] absolute path to the new file location.
      # @param chmod [Integer, nil] optional permission bits applied to the destination file.
      # @param context [String] logging context describing the migration target.
      # @return [void]
      def migrate_legacy_file(source_path, destination_path, chmod:, context:)
        return if source_path == destination_path
        return unless File.exist?(source_path)
        return if File.exist?(destination_path)

        FileUtils.mkdir_p(File.dirname(destination_path))
        FileUtils.cp(source_path, destination_path)
        File.chmod(chmod, destination_path) if chmod

        debug_log(
          "Migrated legacy file to XDG directory",
          context: context,
          source: source_path,
          destination: destination_path,
        )
      rescue SystemCallError => e
        warn_log(
          "Failed to migrate legacy file",
          context: context,
          source: source_path,
          destination: destination_path,
          error_class: e.class.name,
          error_message: e.message,
        )
      end

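      # Illustrative call (paths are hypothetical):
      #
      #   migrate_legacy_file(
      #     "/opt/potato-mesh/data.db",
      #     File.join(Dir.home, ".local/share/potato-mesh/data.db"),
      #     chmod: 0o600,
      #     context: "filesystem.db",
      #   )
      #
      # The copy is skipped when the destination already exists, so reruns are safe.
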
      # Determine whether the database destination matches the configured default.
      #
      # @return [Boolean] true when the destination should receive migrated data.
      def default_database_destination?
        PotatoMesh::Config.db_path == PotatoMesh::Config.default_db_path
      end
    end
  end
end
351 web/lib/potato_mesh/application/helpers.rb (new file)
@@ -0,0 +1,351 @@
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
# Shared view and controller helper methods. Each helper is documented with
|
||||
# its intended consumers to ensure consistent behaviour across the Sinatra
|
||||
# application.
|
||||
module Helpers
|
||||
# Fetch an application level constant exposed by {PotatoMesh::Application}.
|
||||
#
|
||||
# @param name [Symbol] constant identifier to retrieve.
|
||||
# @return [Object] constant value stored on the application class.
|
||||
def app_constant(name)
|
||||
PotatoMesh::Application.const_get(name)
|
||||
end
|
||||
|
||||
# Retrieve the configured Prometheus report identifiers as an array.
|
||||
#
|
||||
# @return [Array<String>] list of report IDs used on the metrics page.
|
||||
def prom_report_ids
|
||||
PotatoMesh::Config.prom_report_id_list
|
||||
end
|
||||
|
||||
# Read a text configuration value with a fallback.
|
||||
#
|
||||
# @param key [String] environment variable key.
|
||||
# @param default [String] fallback value when unset.
|
||||
# @return [String] sanitised configuration string.
|
||||
def fetch_config_string(key, default)
|
||||
PotatoMesh::Config.fetch_string(key, default)
|
||||
end
|
||||
|
||||
# Proxy for {PotatoMesh::Sanitizer.string_or_nil}.
|
||||
#
|
||||
# @param value [Object] value to sanitise.
|
||||
# @return [String, nil] cleaned string or nil.
|
||||
def string_or_nil(value)
|
||||
PotatoMesh::Sanitizer.string_or_nil(value)
|
||||
end
|
||||
|
||||
# Proxy for {PotatoMesh::Sanitizer.sanitize_instance_domain}.
|
||||
#
|
||||
# @param value [Object] candidate domain string.
|
||||
# @param downcase [Boolean] whether to force lowercase normalisation.
|
||||
# @return [String, nil] canonical domain or nil.
|
||||
def sanitize_instance_domain(value, downcase: true)
|
||||
PotatoMesh::Sanitizer.sanitize_instance_domain(value, downcase: downcase)
|
||||
end
|
||||
|
||||
# Proxy for {PotatoMesh::Sanitizer.instance_domain_host}.
|
||||
#
|
||||
# @param domain [String] domain literal.
|
||||
# @return [String, nil] host portion of the domain.
|
||||
def instance_domain_host(domain)
|
||||
PotatoMesh::Sanitizer.instance_domain_host(domain)
|
||||
end
|
||||
|
||||
# Proxy for {PotatoMesh::Sanitizer.ip_from_domain}.
|
||||
#
|
||||
# @param domain [String] domain literal.
|
||||
# @return [IPAddr, nil] parsed address object.
|
||||
def ip_from_domain(domain)
|
||||
PotatoMesh::Sanitizer.ip_from_domain(domain)
|
||||
end
|
||||
|
||||
# Proxy for {PotatoMesh::Sanitizer.sanitized_string}.
|
||||
#
|
||||
# @param value [Object] arbitrary input.
|
||||
# @return [String] trimmed string representation.
|
||||
def sanitized_string(value)
|
||||
PotatoMesh::Sanitizer.sanitized_string(value)
|
||||
end
|
||||
|
||||
# Retrieve the site name presented to users.
|
||||
#
|
||||
# @return [String] sanitised site label.
|
||||
def sanitized_site_name
|
||||
PotatoMesh::Sanitizer.sanitized_site_name
|
||||
end
|
||||
|
||||
# Retrieve the configured channel.
|
||||
#
|
||||
# @return [String] sanitised channel identifier.
|
||||
def sanitized_channel
|
||||
PotatoMesh::Sanitizer.sanitized_channel
|
||||
end
|
||||
|
||||
# Retrieve the configured frequency descriptor.
|
||||
#
|
||||
# @return [String] sanitised frequency text.
|
||||
def sanitized_frequency
|
||||
PotatoMesh::Sanitizer.sanitized_frequency
|
||||
end
|
||||
|
||||
# Build the configuration hash exposed to the frontend application.
|
||||
#
|
||||
# @return [Hash] JSON serialisable configuration payload.
|
||||
def frontend_app_config
|
||||
{
|
||||
refreshIntervalSeconds: PotatoMesh::Config.refresh_interval_seconds,
|
||||
refreshMs: PotatoMesh::Config.refresh_interval_seconds * 1000,
|
||||
chatEnabled: !private_mode?,
|
||||
channel: sanitized_channel,
|
||||
frequency: sanitized_frequency,
|
||||
contactLink: sanitized_contact_link,
|
||||
contactLinkUrl: sanitized_contact_link_url,
|
||||
mapCenter: {
|
||||
lat: PotatoMesh::Config.map_center_lat,
|
||||
lon: PotatoMesh::Config.map_center_lon,
|
||||
},
|
||||
maxDistanceKm: PotatoMesh::Config.max_distance_km,
|
||||
tileFilters: PotatoMesh::Config.tile_filters,
|
||||
instanceDomain: app_constant(:INSTANCE_DOMAIN),
|
||||
}
|
||||
end
|
||||
|
||||
# Retrieve the configured contact link or nil when unset.
|
||||
#
|
||||
# @return [String, nil] contact link identifier.
|
||||
def sanitized_contact_link
|
||||
PotatoMesh::Sanitizer.sanitized_contact_link
|
||||
end
|
||||
|
||||
# Retrieve the hyperlink derived from the configured contact link.
|
||||
#
|
||||
# @return [String, nil] hyperlink pointing to the community chat.
|
||||
def sanitized_contact_link_url
|
||||
PotatoMesh::Sanitizer.sanitized_contact_link_url
|
||||
end
|
||||
|
||||
# Retrieve the configured maximum node distance in kilometres.
|
||||
#
|
||||
# @return [Numeric, nil] maximum distance or nil if disabled.
|
||||
def sanitized_max_distance_km
|
||||
PotatoMesh::Sanitizer.sanitized_max_distance_km
|
||||
end
|
||||
|
||||
# Format a kilometre value for human readable output.
|
||||
#
|
||||
# @param distance [Numeric] distance in kilometres.
|
||||
# @return [String] formatted distance value.
|
||||
def formatted_distance_km(distance)
|
||||
PotatoMesh::Meta.formatted_distance_km(distance)
|
||||
end
|
||||
|
||||
# Generate the meta description used in SEO tags.
|
||||
#
|
||||
# @return [String] combined descriptive sentence.
|
||||
def meta_description
|
||||
PotatoMesh::Meta.description(private_mode: private_mode?)
|
||||
end
|
||||
|
||||
# Generate the structured meta configuration for the UI.
|
||||
#
|
||||
# @return [Hash] frozen configuration metadata.
|
||||
def meta_configuration
|
||||
PotatoMesh::Meta.configuration(private_mode: private_mode?)
|
||||
end
|
||||
|
||||
# Coerce an arbitrary value into an integer when possible.
|
||||
#
|
||||
# @param value [Object] user supplied value.
|
||||
# @return [Integer, nil] parsed integer or nil when invalid.
|
||||
def coerce_integer(value)
|
||||
case value
|
||||
when Integer
|
||||
value
|
||||
when Float
|
||||
value.finite? ? value.to_i : nil
|
||||
when Numeric
|
||||
value.to_i
|
||||
when String
|
||||
trimmed = value.strip
|
||||
return nil if trimmed.empty?
|
||||
return trimmed.to_i(16) if trimmed.match?(/\A0[xX][0-9A-Fa-f]+\z/)
|
||||
return trimmed.to_i(10) if trimmed.match?(/\A-?\d+\z/)
|
||||
begin
|
||||
float_val = Float(trimmed)
|
||||
float_val.finite? ? float_val.to_i : nil
|
||||
rescue ArgumentError
|
||||
nil
|
||||
end
|
||||
else
|
||||
nil
|
||||
end
|
||||
end
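# Illustrative coercions (results follow directly from the branches above):
#
#   coerce_integer(7)        # => 7
#   coerce_integer("0x1f")   # => 31
#   coerce_integer(" 42 ")   # => 42
#   coerce_integer("3.9")    # => 3
#   coerce_integer("n/a")    # => nil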
|
||||
|
||||
# Coerce an arbitrary value into a floating point number when possible.
|
||||
#
|
||||
# @param value [Object] user supplied value.
|
||||
# @return [Float, nil] parsed float or nil when invalid.
|
||||
def coerce_float(value)
|
||||
case value
|
||||
when Float
|
||||
value.finite? ? value : nil
|
||||
when Integer
|
||||
value.to_f
|
||||
when Numeric
|
||||
value.to_f
|
||||
when String
|
||||
trimmed = value.strip
|
||||
return nil if trimmed.empty?
|
||||
begin
|
||||
float_val = Float(trimmed)
|
||||
float_val.finite? ? float_val : nil
|
||||
rescue ArgumentError
|
||||
nil
|
||||
end
|
||||
else
|
||||
nil
|
||||
end
|
||||
end
|
||||
|
||||
# Coerce an arbitrary value into a boolean according to common truthy
|
||||
# conventions.
|
||||
#
|
||||
# @param value [Object] user supplied value.
|
||||
# @return [Boolean, nil] boolean interpretation or nil when unknown.
|
||||
def coerce_boolean(value)
|
||||
case value
|
||||
when true, false
|
||||
value
|
||||
when String
|
||||
trimmed = value.strip.downcase
|
||||
return true if %w[true 1 yes y].include?(trimmed)
|
||||
return false if %w[false 0 no n].include?(trimmed)
|
||||
nil
|
||||
when Numeric
|
||||
!value.to_i.zero?
|
||||
else
|
||||
nil
|
||||
end
|
||||
end
|
||||
|
||||
# Normalise PEM encoded public key content into LF line endings.
|
||||
#
|
||||
# @param value [String, #to_s, nil] raw PEM content.
|
||||
# @return [String, nil] cleaned PEM string or nil when blank.
|
||||
def sanitize_public_key_pem(value)
|
||||
return nil if value.nil?
|
||||
|
||||
pem = value.is_a?(String) ? value : value.to_s
|
||||
pem = pem.gsub(/\r\n?/, "\n")
|
||||
return nil if pem.strip.empty?
|
||||
|
||||
pem
|
||||
end
|
||||
|
||||
# Recursively coerce hash keys to strings and normalise nested arrays.
|
||||
#
|
||||
# @param value [Object] JSON compatible value.
|
||||
# @return [Object] structure with canonical string keys.
|
||||
def normalize_json_value(value)
|
||||
case value
|
||||
when Hash
|
||||
value.each_with_object({}) do |(key, val), memo|
|
||||
memo[key.to_s] = normalize_json_value(val)
|
||||
end
|
||||
when Array
|
||||
value.map { |element| normalize_json_value(element) }
|
||||
else
|
||||
value
|
||||
end
|
||||
end
|
||||
|
||||
# Parse JSON payloads or hashes into normalised hashes with string keys.
|
||||
#
|
||||
# @param value [Hash, String, nil] raw JSON object or string representation.
|
||||
# @return [Hash, nil] canonicalised hash or nil when parsing fails.
|
||||
def normalize_json_object(value)
|
||||
case value
|
||||
when Hash
|
||||
normalize_json_value(value)
|
||||
when String
|
||||
trimmed = value.strip
|
||||
return nil if trimmed.empty?
|
||||
begin
|
||||
parsed = JSON.parse(trimmed)
|
||||
rescue JSON::ParserError
|
||||
return nil
|
||||
end
|
||||
parsed.is_a?(Hash) ? normalize_json_value(parsed) : nil
|
||||
else
|
||||
nil
|
||||
end
|
||||
end
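# Illustrative behaviour:
#
#   normalize_json_object('{"domain": "example.org", "nodes": [1, 2]}')
#   # => { "domain" => "example.org", "nodes" => [1, 2] }
#   normalize_json_object("[1, 2, 3]")  # => nil (top level must be an object)
#   normalize_json_object("not json")   # => nil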
|
||||
|
||||
# Emit a structured debug log entry tagged with the calling context.
|
||||
#
|
||||
# @param message [String] text to emit.
|
||||
# @param context [String] logical source of the message.
|
||||
# @param metadata [Hash] additional structured key/value data.
|
||||
# @return [void]
|
||||
def debug_log(message, context: "app", **metadata)
|
||||
logger = PotatoMesh::Logging.logger_for(self)
|
||||
PotatoMesh::Logging.log(logger, :debug, message, context: context, **metadata)
|
||||
end
|
||||
|
||||
# Emit a structured warning log entry tagged with the calling context.
|
||||
#
|
||||
# @param message [String] text to emit.
|
||||
# @param context [String] logical source of the message.
|
||||
# @param metadata [Hash] additional structured key/value data.
|
||||
# @return [void]
|
||||
def warn_log(message, context: "app", **metadata)
|
||||
logger = PotatoMesh::Logging.logger_for(self)
|
||||
PotatoMesh::Logging.log(logger, :warn, message, context: context, **metadata)
|
||||
end
|
||||
|
||||
# Indicate whether private mode has been requested.
|
||||
#
|
||||
# @return [Boolean] true when PRIVATE=1.
|
||||
def private_mode?
|
||||
ENV["PRIVATE"] == "1"
|
||||
end
|
||||
|
||||
# Identify whether the Rack environment corresponds to the test suite.
|
||||
#
|
||||
# @return [Boolean] true when RACK_ENV is "test".
|
||||
def test_environment?
|
||||
ENV["RACK_ENV"] == "test"
|
||||
end
|
||||
|
||||
# Determine whether federation features should be active.
|
||||
#
|
||||
# @return [Boolean] true when federation configuration allows it.
|
||||
def federation_enabled?
|
||||
ENV.fetch("FEDERATION", "1") != "0" && !private_mode?
|
||||
end
|
||||
|
||||
# Determine whether federation announcements should run asynchronously.
|
||||
#
|
||||
# @return [Boolean] true when announcements are enabled.
|
||||
def federation_announcements_active?
|
||||
federation_enabled? && !test_environment?
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
281 web/lib/potato_mesh/application/identity.rb (new file)
@@ -0,0 +1,281 @@
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Identity
|
||||
# Resolve the current application version string using git metadata when available.
|
||||
#
|
||||
# @return [String] semantic version compatible identifier.
|
||||
def determine_app_version
|
||||
repo_root = locate_git_repo_root(File.expand_path("../../..", __dir__))
|
||||
return PotatoMesh::Config.version_fallback unless repo_root
|
||||
|
||||
stdout, status = Open3.capture2("git", "-C", repo_root, "describe", "--tags", "--long", "--abbrev=7")
|
||||
return PotatoMesh::Config.version_fallback unless status.success?
|
||||
|
||||
raw = stdout.strip
|
||||
return PotatoMesh::Config.version_fallback if raw.empty?
|
||||
|
||||
match = /\A(?<tag>.+)-(?<count>\d+)-g(?<hash>[0-9a-f]+)\z/.match(raw)
|
||||
return raw unless match
|
||||
|
||||
tag = match[:tag]
|
||||
count = match[:count].to_i
|
||||
hash = match[:hash]
|
||||
return tag if count.zero?
|
||||
|
||||
"#{tag}+#{count}-#{hash}"
|
||||
rescue StandardError
|
||||
PotatoMesh::Config.version_fallback
|
||||
end
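# Example of the version derivation above (tag and hash are hypothetical):
# `git describe --tags --long --abbrev=7` returning "v1.4.0-0-g1a2b3c4" yields
# "v1.4.0" (zero commits past the tag), while "v1.4.0-12-g1a2b3c4" becomes
# "v1.4.0+12-1a2b3c4". Output that does not match the pattern is returned verbatim.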
|
||||
|
||||
# Discover the root directory of the git repository containing the
|
||||
# application by traversing parent directories until a ``.git`` entry is
|
||||
# located. This supports both traditional repositories where ``.git`` is a
|
||||
# directory and worktree checkouts where it is a plain file.
|
||||
#
|
||||
# @param start_dir [String] absolute path where the search should begin.
|
||||
# @return [String, nil] absolute path to the repository root when found,
|
||||
# otherwise ``nil``.
|
||||
def locate_git_repo_root(start_dir)
|
||||
current = File.expand_path(start_dir)
|
||||
loop do
|
||||
git_entry = File.join(current, ".git")
|
||||
return current if File.exist?(git_entry)
|
||||
|
||||
parent = File.dirname(current)
|
||||
break if parent == current
|
||||
|
||||
current = parent
|
||||
end
|
||||
|
||||
nil
|
||||
end
|
||||
|
||||
# Load the persisted instance private key or generate a new one when absent.
|
||||
#
|
||||
# @return [Array<OpenSSL::PKey::RSA, Boolean>] tuple of key and generation flag.
|
||||
def load_or_generate_instance_private_key
|
||||
keyfile_path = PotatoMesh::Config.keyfile_path
|
||||
migrate_legacy_keyfile_for_identity!(keyfile_path)
|
||||
FileUtils.mkdir_p(File.dirname(keyfile_path))
|
||||
if File.exist?(keyfile_path)
|
||||
contents = File.binread(keyfile_path)
|
||||
return [OpenSSL::PKey.read(contents), false]
|
||||
end
|
||||
|
||||
key = OpenSSL::PKey::RSA.new(2048)
|
||||
File.open(keyfile_path, File::WRONLY | File::CREAT | File::TRUNC, 0o600) do |file|
|
||||
file.write(key.export)
|
||||
end
|
||||
[key, true]
|
||||
rescue OpenSSL::PKey::PKeyError, ArgumentError => e
|
||||
warn_log(
|
||||
"Failed to load instance private key",
|
||||
context: "identity.keys",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
key = OpenSSL::PKey::RSA.new(2048)
|
||||
File.open(keyfile_path, File::WRONLY | File::CREAT | File::TRUNC, 0o600) do |file|
|
||||
file.write(key.export)
|
||||
end
|
||||
[key, true]
|
||||
end
|
||||
|
||||
# Migrate an existing legacy keyfile into the configured destination.
|
||||
#
|
||||
# @param destination_path [String] absolute path where the keyfile should reside.
|
||||
# @return [void]
|
||||
def migrate_legacy_keyfile_for_identity!(destination_path)
|
||||
return if File.exist?(destination_path)
|
||||
|
||||
PotatoMesh::Config.legacy_keyfile_candidates.each do |candidate|
|
||||
next unless File.exist?(candidate)
|
||||
next if candidate == destination_path
|
||||
|
||||
begin
|
||||
FileUtils.mkdir_p(File.dirname(destination_path))
|
||||
FileUtils.cp(candidate, destination_path)
|
||||
File.chmod(0o600, destination_path)
|
||||
|
||||
debug_log(
|
||||
"Migrated legacy keyfile to XDG directory",
|
||||
context: "identity.keys",
|
||||
source: candidate,
|
||||
destination: destination_path,
|
||||
)
|
||||
rescue SystemCallError => e
|
||||
warn_log(
|
||||
"Failed to migrate legacy keyfile",
|
||||
context: "identity.keys",
|
||||
source: candidate,
|
||||
destination: destination_path,
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
next
|
||||
end
|
||||
|
||||
break
|
||||
end
|
||||
end
|
||||
|
||||
private :migrate_legacy_keyfile_for_identity!, :locate_git_repo_root
|
||||
|
||||
# Return the directory used to store well-known documents.
|
||||
#
|
||||
# @return [String] absolute path to the staging directory.
|
||||
def well_known_directory
|
||||
PotatoMesh::Config.well_known_storage_root
|
||||
end
|
||||
|
||||
# Determine the absolute path to the well-known document file.
|
||||
#
|
||||
# @return [String] filesystem path for the JSON document.
|
||||
def well_known_file_path
|
||||
File.join(
|
||||
well_known_directory,
|
||||
File.basename(PotatoMesh::Config.well_known_relative_path),
|
||||
)
|
||||
end
|
||||
|
||||
# Remove legacy well-known artifacts from previous releases.
|
||||
#
|
||||
# @return [void]
|
||||
def cleanup_legacy_well_known_artifacts
|
||||
legacy_path = PotatoMesh::Config.legacy_public_well_known_path
|
||||
FileUtils.rm_f(legacy_path)
|
||||
legacy_dir = File.dirname(legacy_path)
|
||||
FileUtils.rmdir(legacy_dir) if Dir.exist?(legacy_dir) && Dir.empty?(legacy_dir)
|
||||
rescue SystemCallError
|
||||
# Ignore errors removing legacy static files; failure only means the directory
|
||||
# or file did not exist or is in use.
|
||||
end
|
||||
|
||||
# Construct the JSON body and detached signature for the well-known document.
|
||||
#
|
||||
# @return [Array(String, String)] pair of JSON output and base64 signature.
|
||||
def build_well_known_document
|
||||
last_update = latest_node_update_timestamp
|
||||
domain_value = sanitize_instance_domain(app_constant(:INSTANCE_DOMAIN))
|
||||
|
||||
payload = {
|
||||
publicKey: app_constant(:INSTANCE_PUBLIC_KEY_PEM),
|
||||
name: sanitized_site_name,
|
||||
version: app_constant(:APP_VERSION),
|
||||
domain: domain_value,
|
||||
lastUpdate: last_update,
|
||||
}
|
||||
|
||||
signed_payload = JSON.generate(payload, sort_keys: true)
|
||||
signature = Base64.strict_encode64(
|
||||
app_constant(:INSTANCE_PRIVATE_KEY).sign(OpenSSL::Digest::SHA256.new, signed_payload),
|
||||
)
|
||||
|
||||
document = payload.merge(
|
||||
signature: signature,
|
||||
signatureAlgorithm: PotatoMesh::Config.instance_signature_algorithm,
|
||||
signedPayload: Base64.strict_encode64(signed_payload),
|
||||
)
|
||||
|
||||
json_output = JSON.pretty_generate(document)
|
||||
[json_output, signature]
|
||||
end
|
||||
|
||||
# Regenerate the well-known document when the on-disk copy is stale.
|
||||
#
|
||||
# @return [void]
|
||||
def refresh_well_known_document_if_stale
|
||||
FileUtils.mkdir_p(well_known_directory)
|
||||
path = well_known_file_path
|
||||
now = Time.now
|
||||
if File.exist?(path)
|
||||
mtime = File.mtime(path)
|
||||
if (now - mtime) < PotatoMesh::Config.well_known_refresh_interval
|
||||
return
|
||||
end
|
||||
end
|
||||
|
||||
json_output, signature = build_well_known_document
|
||||
File.open(path, File::WRONLY | File::CREAT | File::TRUNC, 0o644) do |file|
|
||||
file.write(json_output)
|
||||
file.write("\n") unless json_output.end_with?("\n")
|
||||
end
|
||||
|
||||
debug_log(
|
||||
"Refreshed well-known document content",
|
||||
context: "identity.well_known",
|
||||
path: PotatoMesh::Config.well_known_relative_path,
|
||||
bytes: json_output.bytesize,
|
||||
document: json_output,
|
||||
)
|
||||
debug_log(
|
||||
"Refreshed well-known document signature",
|
||||
context: "identity.well_known",
|
||||
path: PotatoMesh::Config.well_known_relative_path,
|
||||
algorithm: PotatoMesh::Config.instance_signature_algorithm,
|
||||
signature: signature,
|
||||
)
|
||||
end
|
||||
|
||||
# Retrieve the latest node update timestamp from the database.
|
||||
#
|
||||
# @return [Integer, nil] Unix timestamp or nil when unavailable.
|
||||
def latest_node_update_timestamp
|
||||
return nil unless File.exist?(PotatoMesh::Config.db_path)
|
||||
|
||||
db = open_database(readonly: true)
|
||||
value = db.get_first_value("SELECT MAX(last_heard) FROM nodes")
|
||||
value&.to_i
|
||||
rescue SQLite3::Exception
|
||||
nil
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
# Emit a debug entry describing the active instance key material.
|
||||
#
|
||||
# @return [void]
|
||||
def log_instance_public_key
|
||||
debug_log(
|
||||
"Loaded instance public key",
|
||||
context: "identity.keys",
|
||||
public_key_pem: app_constant(:INSTANCE_PUBLIC_KEY_PEM),
|
||||
)
|
||||
if app_constant(:INSTANCE_KEY_GENERATED)
|
||||
debug_log(
|
||||
"Generated new instance private key",
|
||||
context: "identity.keys",
|
||||
path: PotatoMesh::Config.keyfile_path,
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
# Emit a debug entry describing how the instance domain was derived.
|
||||
#
|
||||
# @return [void]
|
||||
def log_instance_domain_resolution
|
||||
source = app_constant(:INSTANCE_DOMAIN_SOURCE) || :unknown
|
||||
debug_log(
|
||||
"Resolved instance domain",
|
||||
context: "identity.domain",
|
||||
source: source,
|
||||
domain: app_constant(:INSTANCE_DOMAIN),
|
||||
)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
199 web/lib/potato_mesh/application/instances.rb (new file)
@@ -0,0 +1,199 @@
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
# Helper methods for maintaining and presenting instance records.
|
||||
module Instances
|
||||
# Remove duplicate instance records grouped by their canonical domain name
|
||||
# while favouring the most recent entry.
|
||||
#
|
||||
# @return [void]
|
||||
def clean_duplicate_instances!
|
||||
db = open_database
|
||||
rows = with_busy_retry do
|
||||
db.execute(
|
||||
<<~SQL
|
||||
SELECT rowid, domain, last_update_time
|
||||
FROM instances
|
||||
WHERE domain IS NOT NULL AND TRIM(domain) != ''
|
||||
SQL
|
||||
)
|
||||
end
|
||||
|
||||
grouped = rows.group_by do |row|
|
||||
sanitize_instance_domain(row[1])&.downcase
|
||||
rescue StandardError
|
||||
nil
|
||||
end
|
||||
|
||||
deletions = []
|
||||
updates = {}
|
||||
|
||||
grouped.each do |canonical_domain, entries|
|
||||
next if canonical_domain.nil?
|
||||
next if entries.size <= 1
|
||||
|
||||
sorted_entries = entries.sort_by do |entry|
|
||||
timestamp = coerce_integer(entry[2]) || -1
|
||||
[timestamp, entry[0].to_i]
|
||||
end
|
||||
keeper = sorted_entries.last
|
||||
next unless keeper
|
||||
|
||||
deletions.concat(sorted_entries[0...-1].map { |entry| entry[0].to_i })
|
||||
|
||||
current_domain = entries.find { |entry| entry[0] == keeper[0] }&.[](1)
|
||||
if canonical_domain && current_domain != canonical_domain
|
||||
updates[keeper[0].to_i] = canonical_domain
|
||||
end
|
||||
|
||||
removed_count = sorted_entries.length - 1
|
||||
warn_log(
|
||||
"Removed duplicate instance records",
|
||||
context: "instances.cleanup",
|
||||
domain: canonical_domain,
|
||||
removed: removed_count,
|
||||
) if removed_count.positive?
|
||||
end
|
||||
|
||||
unless deletions.empty?
|
||||
placeholders = Array.new(deletions.size, "?").join(",")
|
||||
with_busy_retry do
|
||||
db.execute("DELETE FROM instances WHERE rowid IN (#{placeholders})", deletions)
|
||||
end
|
||||
end
|
||||
|
||||
updates.each do |rowid, canonical_domain|
|
||||
with_busy_retry do
|
||||
db.execute("UPDATE instances SET domain = ? WHERE rowid = ?", [canonical_domain, rowid])
|
||||
end
|
||||
end
|
||||
rescue SQLite3::Exception => e
|
||||
warn_log(
|
||||
"Failed to clean duplicate instances",
|
||||
context: "instances.cleanup",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
# Normalise and validate an instance database row for API presentation.
|
||||
#
|
||||
# @param row [Hash] raw database row with string keys.
|
||||
# @return [Hash, nil] cleaned hash or +nil+ when the row is discarded.
|
||||
def normalize_instance_row(row)
|
||||
unless row.is_a?(Hash)
|
||||
warn_log(
|
||||
"Discarded malformed instance row",
|
||||
context: "instances.normalize",
|
||||
reason: "row not hash",
|
||||
)
|
||||
return nil
|
||||
end
|
||||
|
||||
id = string_or_nil(row["id"])
|
||||
domain = sanitize_instance_domain(row["domain"])&.downcase
|
||||
pubkey = sanitize_public_key_pem(row["pubkey"])
|
||||
signature = string_or_nil(row["signature"])
|
||||
last_update_time = coerce_integer(row["last_update_time"])
|
||||
is_private_raw = row["is_private"]
|
||||
private_flag = coerce_boolean(is_private_raw)
|
||||
if private_flag.nil?
|
||||
numeric_private = coerce_integer(is_private_raw)
|
||||
private_flag = !numeric_private.to_i.zero? if numeric_private
|
||||
end
|
||||
private_flag = false if private_flag.nil?
|
||||
|
||||
if id.nil? || domain.nil? || pubkey.nil?
|
||||
warn_log(
|
||||
"Discarded malformed instance row",
|
||||
context: "instances.normalize",
|
||||
instance_id: row["id"],
|
||||
domain: row["domain"],
|
||||
reason: "missing required fields",
|
||||
)
|
||||
return nil
|
||||
end
|
||||
|
||||
payload = {
|
||||
"id" => id,
|
||||
"domain" => domain,
|
||||
"pubkey" => pubkey,
|
||||
"name" => string_or_nil(row["name"]),
|
||||
"version" => string_or_nil(row["version"]),
|
||||
"channel" => string_or_nil(row["channel"]),
|
||||
"frequency" => string_or_nil(row["frequency"]),
|
||||
"latitude" => coerce_float(row["latitude"]),
|
||||
"longitude" => coerce_float(row["longitude"]),
|
||||
"lastUpdateTime" => last_update_time,
|
||||
"isPrivate" => private_flag,
|
||||
"signature" => signature,
|
||||
}
|
||||
|
||||
payload.reject { |_, value| value.nil? }
|
||||
rescue StandardError => e
|
||||
warn_log(
|
||||
"Failed to normalise instance row",
|
||||
context: "instances.normalize",
|
||||
instance_id: row.respond_to?(:[]) ? row["id"] : nil,
|
||||
domain: row.respond_to?(:[]) ? row["domain"] : nil,
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
nil
|
||||
end
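# Illustrative mapping (values are hypothetical): a row such as
#
#   { "id" => "a1b2", "domain" => "Mesh.Example.org", "pubkey" => "-----BEGIN PUBLIC KEY-----...",
#     "last_update_time" => "1700000000", "is_private" => 0 }
#
# becomes
#
#   { "id" => "a1b2", "domain" => "mesh.example.org", "pubkey" => "-----BEGIN PUBLIC KEY-----...",
#     "lastUpdateTime" => 1700000000, "isPrivate" => false }
#
# while rows missing id, domain, or pubkey are logged and dropped.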
|
||||
|
||||
# Fetch all instance rows ready to be served by the API while handling
|
||||
# malformed rows gracefully.
|
||||
#
|
||||
# @return [Array<Hash>] list of cleaned instance payloads.
|
||||
def load_instances_for_api
|
||||
clean_duplicate_instances!
|
||||
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
rows = with_busy_retry do
|
||||
db.execute(
|
||||
<<~SQL
|
||||
SELECT id, domain, pubkey, name, version, channel, frequency,
|
||||
latitude, longitude, last_update_time, is_private, signature
|
||||
FROM instances
|
||||
WHERE domain IS NOT NULL AND TRIM(domain) != ''
|
||||
AND pubkey IS NOT NULL AND TRIM(pubkey) != ''
|
||||
ORDER BY LOWER(domain)
|
||||
SQL
|
||||
)
|
||||
end
|
||||
|
||||
rows.each_with_object([]) do |row, memo|
|
||||
normalized = normalize_instance_row(row)
|
||||
memo << normalized if normalized
|
||||
end
|
||||
rescue SQLite3::Exception => e
|
||||
warn_log(
|
||||
"Failed to load instance records",
|
||||
context: "instances.load",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
[]
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
339 web/lib/potato_mesh/application/networking.rb (new file)
@@ -0,0 +1,339 @@
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Networking
|
||||
# Normalise the configured instance domain by stripping schemes and verifying structure.
|
||||
#
|
||||
# @param raw [String, nil] environment supplied domain or URL.
|
||||
# @return [String, nil] canonicalised hostname with optional port.
|
||||
def canonicalize_configured_instance_domain(raw)
|
||||
return nil if raw.nil?
|
||||
|
||||
trimmed = raw.to_s.strip
|
||||
return nil if trimmed.empty?
|
||||
|
||||
candidate = trimmed
|
||||
|
||||
if candidate.include?("://")
|
||||
begin
|
||||
uri = URI.parse(candidate)
|
||||
rescue URI::InvalidURIError => e
|
||||
raise "INSTANCE_DOMAIN must be a valid hostname or URL, but parsing #{candidate.inspect} failed: #{e.message}"
|
||||
end
|
||||
|
||||
unless uri.host
|
||||
raise "INSTANCE_DOMAIN URL must include a hostname: #{candidate.inspect}"
|
||||
end
|
||||
|
||||
if uri.userinfo
|
||||
raise "INSTANCE_DOMAIN URL must not include credentials: #{candidate.inspect}"
|
||||
end
|
||||
|
||||
if uri.path && !uri.path.empty? && uri.path != "/"
|
||||
raise "INSTANCE_DOMAIN URL must not include a path component: #{candidate.inspect}"
|
||||
end
|
||||
|
||||
if uri.query || uri.fragment
|
||||
raise "INSTANCE_DOMAIN URL must not include query or fragment data: #{candidate.inspect}"
|
||||
end
|
||||
|
||||
hostname = uri.hostname
|
||||
unless hostname
|
||||
raise "INSTANCE_DOMAIN URL must include a hostname: #{candidate.inspect}"
|
||||
end
|
||||
|
||||
ip_host = ipv6_literal?(hostname)
|
||||
candidate_host = ip_host ? "[#{ip_host}]" : hostname
|
||||
candidate = candidate_host
|
||||
port = uri.port
|
||||
candidate = "#{candidate_host}:#{port}" if port_required?(uri, trimmed)
|
||||
end
|
||||
|
||||
sanitized = sanitize_instance_domain(candidate)
|
||||
unless sanitized
|
||||
raise "INSTANCE_DOMAIN must be a bare hostname (optionally with a port) without schemes or paths: #{raw.inspect}"
|
||||
end
|
||||
|
||||
ensure_ipv6_instance_domain(sanitized).downcase
|
||||
end
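# Illustrative inputs and results (domains are hypothetical):
#
#   canonicalize_configured_instance_domain("https://Mesh.Example.org/")     # => "mesh.example.org"
#   canonicalize_configured_instance_domain("https://mesh.example.org:8443") # => "mesh.example.org:8443"
#   canonicalize_configured_instance_domain("http://user:pw@mesh.example.org")
#   # raises because credentials are not allowed in INSTANCE_DOMAIN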
|
||||
|
||||
# Resolve the best domain for the running instance using configuration and network discovery.
|
||||
#
|
||||
# @return [Array(String, Symbol)] tuple containing the domain and the discovery source.
|
||||
def determine_instance_domain
|
||||
raw = ENV["INSTANCE_DOMAIN"]
|
||||
if raw
|
||||
canonical = canonicalize_configured_instance_domain(raw)
|
||||
return [canonical, :environment] if canonical
|
||||
end
|
||||
|
||||
reverse = sanitize_instance_domain(reverse_dns_domain)
|
||||
return [reverse, :reverse_dns] if reverse
|
||||
|
||||
public_ip = discover_public_ip_address
|
||||
return [public_ip, :public_ip] if public_ip
|
||||
|
||||
protected_ip = discover_protected_ip_address
|
||||
return [protected_ip, :protected_ip] if protected_ip
|
||||
|
||||
[discover_local_ip_address, :local_ip]
|
||||
end
|
||||
|
||||
# Attempt to determine the reverse DNS hostname for the local machine.
|
||||
#
|
||||
# @return [String, nil] resolved hostname or nil when unavailable.
|
||||
def reverse_dns_domain
|
||||
Socket.ip_address_list.each do |address|
|
||||
next unless address.respond_to?(:ip?) && address.ip?
|
||||
|
||||
loopback =
|
||||
(address.respond_to?(:ipv4_loopback?) && address.ipv4_loopback?) ||
|
||||
(address.respond_to?(:ipv6_loopback?) && address.ipv6_loopback?)
|
||||
next if loopback
|
||||
|
||||
link_local =
|
||||
address.respond_to?(:ipv6_linklocal?) && address.ipv6_linklocal?
|
||||
next if link_local
|
||||
|
||||
ip = address.ip_address
|
||||
next if ip.nil? || ip.empty?
|
||||
|
||||
begin
|
||||
hostname = Resolv.getname(ip)
|
||||
trimmed = hostname&.strip
|
||||
return trimmed unless trimmed.nil? || trimmed.empty?
|
||||
rescue Resolv::ResolvError, Resolv::ResolvTimeout, SocketError
|
||||
next
|
||||
end
|
||||
end
|
||||
|
||||
nil
|
||||
end
|
||||
|
||||
# Identify the first public IP address of the current host.
|
||||
#
|
||||
# @return [String, nil] public IP address string or nil.
|
||||
def discover_public_ip_address
|
||||
address = ip_address_candidates.find { |candidate| public_ip_address?(candidate) }
|
||||
address&.ip_address
|
||||
end
|
||||
|
||||
# Identify a private yet non-loopback IP address suitable for protected networks.
|
||||
#
|
||||
# @return [String, nil] protected network address or nil.
|
||||
def discover_protected_ip_address
|
||||
address = ip_address_candidates.find { |candidate| protected_ip_address?(candidate) }
|
||||
address&.ip_address
|
||||
end
|
||||
|
||||
# Collect viable socket addresses for evaluation.
|
||||
#
|
||||
# @return [Array<#ip?>] list of socket addresses supporting IP queries.
|
||||
def ip_address_candidates
|
||||
Socket.ip_address_list.select { |addr| addr.respond_to?(:ip?) && addr.ip? }
|
||||
end
|
||||
|
||||
# Determine whether a socket address represents a public IP.
|
||||
#
|
||||
# @param addr [Addrinfo] candidate socket address.
|
||||
# @return [Boolean] true when the address is publicly routable.
|
||||
def public_ip_address?(addr)
|
||||
ip = ipaddr_from(addr)
|
||||
return false unless ip
|
||||
return false if loopback_address?(addr, ip)
|
||||
return false if link_local_address?(addr, ip)
|
||||
return false if private_address?(addr, ip)
|
||||
return false if unspecified_address?(ip)
|
||||
|
||||
true
|
||||
end
|
||||
|
||||
# Determine whether a socket address resides on a protected private network.
|
||||
#
|
||||
# @param addr [Addrinfo] candidate socket address.
|
||||
# @return [Boolean] true when the address is private but not loopback/link-local.
|
||||
def protected_ip_address?(addr)
|
||||
ip = ipaddr_from(addr)
|
||||
return false unless ip
|
||||
return false if loopback_address?(addr, ip)
|
||||
return false if link_local_address?(addr, ip)
|
||||
|
||||
private_address?(addr, ip)
|
||||
end
|
||||
|
||||
# Parse an IP address from the provided socket address.
|
||||
#
|
||||
# @param addr [Addrinfo] socket address to examine.
|
||||
# @return [IPAddr, nil] parsed IP or nil when invalid.
|
||||
def ipaddr_from(addr)
|
||||
ip = addr.ip_address
|
||||
return nil if ip.nil? || ip.empty?
|
||||
|
||||
IPAddr.new(ip)
|
||||
rescue IPAddr::InvalidAddressError
|
||||
nil
|
||||
end
|
||||
|
||||
# Determine whether a socket address is loopback.
|
||||
#
|
||||
# @param addr [Addrinfo] socket address to inspect.
|
||||
# @param ip [IPAddr] parsed IP representation of the address.
|
||||
# @return [Boolean] true when the address is loopback.
|
||||
def loopback_address?(addr, ip)
|
||||
(addr.respond_to?(:ipv4_loopback?) && addr.ipv4_loopback?) ||
|
||||
(addr.respond_to?(:ipv6_loopback?) && addr.ipv6_loopback?) ||
|
||||
ip.loopback?
|
||||
end
|
||||
|
||||
# Determine whether a socket address is link-local.
|
||||
#
|
||||
# @param addr [Addrinfo] socket address to inspect.
|
||||
# @param ip [IPAddr] parsed IP representation of the address.
|
||||
# @return [Boolean] true when the address is link-local.
|
||||
def link_local_address?(addr, ip)
|
||||
(addr.respond_to?(:ipv6_linklocal?) && addr.ipv6_linklocal?) ||
|
||||
(ip.respond_to?(:link_local?) && ip.link_local?)
|
||||
end
|
||||
|
||||
# Determine whether a socket address is private.
|
||||
#
|
||||
# @param addr [Addrinfo] socket address to inspect.
|
||||
# @param ip [IPAddr] parsed IP representation of the address.
|
||||
# @return [Boolean] true when the address is private.
|
||||
def private_address?(addr, ip)
|
||||
if addr.respond_to?(:ipv4?) && addr.ipv4? && addr.respond_to?(:ipv4_private?)
|
||||
addr.ipv4_private?
|
||||
else
|
||||
ip.private?
|
||||
end
|
||||
end
|
||||
|
||||
# Identify unspecified IP addresses.
|
||||
#
|
||||
# @param ip [IPAddr] parsed IP.
|
||||
# @return [Boolean] true for unspecified addresses (0.0.0.0 / ::).
|
||||
def unspecified_address?(ip)
|
||||
(ip.ipv4? || ip.ipv6?) && ip.to_i.zero?
|
||||
end
|
||||
|
||||
# Choose the most appropriate local IP address for the instance domain.
|
||||
#
|
||||
# @return [String] selected IP address string.
|
||||
def discover_local_ip_address
|
||||
candidates = ip_address_candidates
|
||||
|
||||
ipv4 = candidates.find do |addr|
|
||||
addr.respond_to?(:ipv4?) && addr.ipv4? && !(addr.respond_to?(:ipv4_loopback?) && addr.ipv4_loopback?)
|
||||
end
|
||||
return ipv4.ip_address if ipv4
|
||||
|
||||
non_loopback = candidates.find do |addr|
|
||||
!(addr.respond_to?(:ipv4_loopback?) && addr.ipv4_loopback?) &&
|
||||
!(addr.respond_to?(:ipv6_loopback?) && addr.ipv6_loopback?)
|
||||
end
|
||||
return non_loopback.ip_address if non_loopback
|
||||
|
||||
loopback = candidates.find do |addr|
|
||||
(addr.respond_to?(:ipv4_loopback?) && addr.ipv4_loopback?) ||
|
||||
(addr.respond_to?(:ipv6_loopback?) && addr.ipv6_loopback?)
|
||||
end
|
||||
return loopback.ip_address if loopback
|
||||
|
||||
"127.0.0.1"
|
||||
end
|
||||
|
||||
# Determine whether an IP should be restricted from exposure.
|
||||
#
|
||||
# @param ip [IPAddr] candidate IP address.
|
||||
# @return [Boolean] true when the IP should not be exposed.
|
||||
def restricted_ip_address?(ip)
|
||||
return true if ip.loopback?
|
||||
return true if ip.private?
|
||||
return true if ip.link_local?
|
||||
return true if ip.to_i.zero?
|
||||
|
||||
false
|
||||
end
|
||||
|
||||
# Normalize IPv6 instance domains so that they remain bracketed and URI-compatible.
|
||||
#
|
||||
# @param domain [String] sanitized hostname optionally including a port suffix.
|
||||
# @return [String] domain with IPv6 literals wrapped in brackets when necessary.
|
||||
def ensure_ipv6_instance_domain(domain)
|
||||
bracketed_match = domain.match(/\A\[(?<host>[^\]]+)\](?::(?<port>\d+))?\z/)
|
||||
if bracketed_match
|
||||
host = bracketed_match[:host]
|
||||
port = bracketed_match[:port]
|
||||
ipv6 = ipv6_literal?(host)
|
||||
if ipv6
|
||||
return "[#{ipv6}]#{port ? ":#{port}" : ""}"
|
||||
end
|
||||
|
||||
return domain
|
||||
end
|
||||
|
||||
host_candidate = domain
|
||||
port_candidate = nil
|
||||
split_host, separator, split_port = domain.rpartition(":")
|
||||
if !separator.empty? && split_port.match?(/\A\d+\z/) && !split_host.empty? && !split_host.end_with?(":")
|
||||
host_candidate = split_host
|
||||
port_candidate = split_port
|
||||
end
|
||||
|
||||
if port_candidate
|
||||
ipv6_host = ipv6_literal?(host_candidate)
|
||||
return "[#{ipv6_host}]:#{port_candidate}" if ipv6_host
|
||||
|
||||
host_candidate = domain
|
||||
port_candidate = nil
|
||||
end
|
||||
|
||||
ipv6 = ipv6_literal?(host_candidate)
|
||||
return "[#{ipv6}]" if ipv6
|
||||
|
||||
domain
|
||||
end
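# Illustrative results:
#
#   ensure_ipv6_instance_domain("2001:db8::1")        # => "[2001:db8::1]"
#   ensure_ipv6_instance_domain("[2001:db8::1]:8443") # => "[2001:db8::1]:8443"
#   ensure_ipv6_instance_domain("mesh.example.org")   # => "mesh.example.org"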
|
||||
|
||||
# Parse an IPv6 literal and return its canonical representation when valid.
|
||||
#
|
||||
# @param candidate [String] potential IPv6 literal.
|
||||
# @return [String, nil] normalized IPv6 literal or nil when the candidate is not IPv6.
|
||||
def ipv6_literal?(candidate)
|
||||
IPAddr.new(candidate).yield_self do |ip|
|
||||
return ip.ipv6? ? ip.to_s : nil
|
||||
end
|
||||
rescue IPAddr::InvalidAddressError
|
||||
nil
|
||||
end
|
||||
|
||||
# Determine whether a URI's port should be included in the canonicalized domain.
|
||||
#
|
||||
# @param uri [URI::Generic] parsed URI for the instance domain.
|
||||
# @param raw [String] original sanitized input string.
|
||||
# @return [Boolean] true when the port must be preserved.
|
||||
def port_required?(uri, raw)
|
||||
port = uri.port
|
||||
return false unless port
|
||||
|
||||
return true unless uri.respond_to?(:default_port) && uri.default_port && port == uri.default_port
|
||||
|
||||
raw_port_fragment = ":#{port}"
|
||||
sanitized_raw = raw.strip
|
||||
sanitized_raw.end_with?(raw_port_fragment)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
196 web/lib/potato_mesh/application/prometheus.rb (new file)
@@ -0,0 +1,196 @@
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Prometheus
|
||||
MESSAGES_TOTAL = ::Prometheus::Client::Counter.new(
|
||||
:meshtastic_messages_total,
|
||||
docstring: "Total number of messages received",
|
||||
)
|
||||
|
||||
NODES_GAUGE = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_nodes,
|
||||
docstring: "Number of nodes tracked",
|
||||
)
|
||||
|
||||
NODE_GAUGE = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node,
|
||||
docstring: "Presence of a Meshtastic node",
|
||||
labels: %i[node short_name long_name hw_model role],
|
||||
)
|
||||
|
||||
NODE_BATTERY_LEVEL = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_battery_level,
|
||||
docstring: "Battery level of a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_VOLTAGE = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_voltage,
|
||||
docstring: "Battery voltage of a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_UPTIME = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_uptime_seconds,
|
||||
docstring: "Uptime reported by a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_CHANNEL_UTIL = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_channel_utilization,
|
||||
docstring: "Channel utilization reported by a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_AIR_UTIL_TX = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_transmit_air_utilization,
|
||||
docstring: "Transmit air utilization reported by a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_LATITUDE = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_latitude,
|
||||
docstring: "Latitude of a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_LONGITUDE = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_longitude,
|
||||
docstring: "Longitude of a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_ALTITUDE = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_altitude,
|
||||
docstring: "Altitude of a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
METRICS = [
|
||||
MESSAGES_TOTAL,
|
||||
NODES_GAUGE,
|
||||
NODE_GAUGE,
|
||||
NODE_BATTERY_LEVEL,
|
||||
NODE_VOLTAGE,
|
||||
NODE_UPTIME,
|
||||
NODE_CHANNEL_UTIL,
|
||||
NODE_AIR_UTIL_TX,
|
||||
NODE_LATITUDE,
|
||||
NODE_LONGITUDE,
|
||||
NODE_ALTITUDE,
|
||||
].freeze
|
||||
|
||||
METRICS.each do |metric|
|
||||
::Prometheus::Client.registry.register(metric)
|
||||
rescue ::Prometheus::Client::Registry::AlreadyRegisteredError
|
||||
# Ignore duplicate registrations when the code is reloaded.
|
||||
end
|
||||
|
||||
def update_prometheus_metrics(node_id, user = nil, role = "", met = nil, pos = nil)
|
||||
ids = prom_report_ids
|
||||
return if ids.empty? || !node_id
|
||||
|
||||
return unless ids[0] == "*" || ids.include?(node_id)
|
||||
|
||||
if user && user.is_a?(Hash) && role && role != ""
|
||||
NODE_GAUGE.set(
|
||||
1,
|
||||
labels: {
|
||||
node: node_id,
|
||||
short_name: user["shortName"],
|
||||
long_name: user["longName"],
|
||||
hw_model: user["hwModel"],
|
||||
role: role,
|
||||
},
|
||||
)
|
||||
end
|
||||
|
||||
if met && met.is_a?(Hash)
|
||||
if met["batteryLevel"]
|
||||
NODE_BATTERY_LEVEL.set(met["batteryLevel"], labels: { node: node_id })
|
||||
end
|
||||
|
||||
if met["voltage"]
|
||||
NODE_VOLTAGE.set(met["voltage"], labels: { node: node_id })
|
||||
end
|
||||
|
||||
if met["uptimeSeconds"]
|
||||
NODE_UPTIME.set(met["uptimeSeconds"], labels: { node: node_id })
|
||||
end
|
||||
|
||||
if met["channelUtilization"]
|
||||
NODE_CHANNEL_UTIL.set(met["channelUtilization"], labels: { node: node_id })
|
||||
end
|
||||
|
||||
if met["airUtilTx"]
|
||||
NODE_AIR_UTIL_TX.set(met["airUtilTx"], labels: { node: node_id })
|
||||
end
|
||||
end
|
||||
|
||||
if pos && pos.is_a?(Hash)
|
||||
if pos["latitude"]
|
||||
NODE_LATITUDE.set(pos["latitude"], labels: { node: node_id })
|
||||
end
|
||||
|
||||
if pos["longitude"]
|
||||
NODE_LONGITUDE.set(pos["longitude"], labels: { node: node_id })
|
||||
end
|
||||
|
||||
if pos["altitude"]
|
||||
NODE_ALTITUDE.set(pos["altitude"], labels: { node: node_id })
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def update_all_prometheus_metrics_from_nodes
|
||||
nodes = query_nodes(1000)
|
||||
|
||||
NODES_GAUGE.set(nodes.size)
|
||||
|
||||
ids = prom_report_ids
|
||||
unless ids.empty?
|
||||
nodes.each do |n|
|
||||
node_id = n["node_id"]
|
||||
|
||||
next if ids[0] != "*" && !ids.include?(node_id)
|
||||
|
||||
update_prometheus_metrics(
|
||||
node_id,
|
||||
{
|
||||
"shortName" => n["short_name"] || "",
|
||||
"longName" => n["long_name"] || "",
|
||||
"hwModel" => n["hw_model"] || "",
|
||||
},
|
||||
n["role"] || "",
|
||||
{
|
||||
"batteryLevel" => n["battery_level"],
|
||||
"voltage" => n["voltage"],
|
||||
"uptimeSeconds" => n["uptime_seconds"],
|
||||
"channelUtilization" => n["channel_utilization"],
|
||||
"airUtilTx" => n["air_util_tx"],
|
||||
},
|
||||
{
|
||||
"latitude" => n["latitude"],
|
||||
"longitude" => n["longitude"],
|
||||
"altitude" => n["altitude"],
|
||||
},
|
||||
)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
471
web/lib/potato_mesh/application/queries.rb
Normal file
471
web/lib/potato_mesh/application/queries.rb
Normal file
@@ -0,0 +1,471 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Queries
|
||||
MAX_QUERY_LIMIT = 1000
|
||||
|
||||
# Normalise a caller-provided limit to a sane, positive integer.
|
||||
#
|
||||
# @param limit [Object] value coerced to an integer.
|
||||
# @param default [Integer] fallback used when coercion fails.
|
||||
# @return [Integer] limit clamped between 1 and MAX_QUERY_LIMIT.
|
||||
def coerce_query_limit(limit, default: 200)
|
||||
coerced = begin
|
||||
if limit.is_a?(Integer)
|
||||
limit
|
||||
else
|
||||
Integer(limit, 10)
|
||||
end
|
||||
rescue ArgumentError, TypeError
|
||||
nil
|
||||
end
|
||||
|
||||
coerced = default if coerced.nil? || coerced <= 0
|
||||
coerced = MAX_QUERY_LIMIT if coerced > MAX_QUERY_LIMIT
|
||||
coerced
|
||||
end
|
||||
|
||||
def node_reference_tokens(node_ref)
|
||||
parts = canonical_node_parts(node_ref)
|
||||
canonical_id, numeric_id = parts ? parts[0, 2] : [nil, nil]
|
||||
|
||||
string_values = []
|
||||
numeric_values = []
|
||||
|
||||
case node_ref
|
||||
when Integer
|
||||
numeric_values << node_ref
|
||||
string_values << node_ref.to_s
|
||||
when Numeric
|
||||
coerced = node_ref.to_i
|
||||
numeric_values << coerced
|
||||
string_values << coerced.to_s
|
||||
when String
|
||||
trimmed = node_ref.strip
|
||||
unless trimmed.empty?
|
||||
string_values << trimmed
|
||||
numeric_values << trimmed.to_i if trimmed.match?(/\A-?\d+\z/)
|
||||
end
|
||||
when nil
|
||||
# no-op
|
||||
else
|
||||
coerced = node_ref.to_s.strip
|
||||
string_values << coerced unless coerced.empty?
|
||||
end
|
||||
|
||||
if canonical_id
|
||||
string_values << canonical_id
|
||||
string_values << canonical_id.upcase
|
||||
end
|
||||
|
||||
if numeric_id
|
||||
numeric_values << numeric_id
|
||||
string_values << numeric_id.to_s
|
||||
end
|
||||
|
||||
cleaned_strings = string_values.compact.map(&:to_s).map(&:strip).reject(&:empty?).uniq
|
||||
cleaned_numbers = numeric_values.compact.map do |value|
|
||||
begin
|
||||
Integer(value, 10)
|
||||
rescue ArgumentError, TypeError
|
||||
nil
|
||||
end
|
||||
end.compact.uniq
|
||||
|
||||
{
|
||||
string_values: cleaned_strings,
|
||||
numeric_values: cleaned_numbers,
|
||||
}
|
||||
end
|
||||
|
||||
def node_lookup_clause(node_ref, string_columns:, numeric_columns: [])
|
||||
tokens = node_reference_tokens(node_ref)
|
||||
string_values = tokens[:string_values]
|
||||
numeric_values = tokens[:numeric_values]
|
||||
|
||||
clauses = []
|
||||
params = []
|
||||
|
||||
unless string_columns.empty? || string_values.empty?
|
||||
string_columns.each do |column|
|
||||
placeholders = Array.new(string_values.length, "?").join(", ")
|
||||
clauses << "#{column} IN (#{placeholders})"
|
||||
params.concat(string_values)
|
||||
end
|
||||
end
|
||||
|
||||
unless numeric_columns.empty? || numeric_values.empty?
|
||||
numeric_columns.each do |column|
|
||||
placeholders = Array.new(numeric_values.length, "?").join(", ")
|
||||
clauses << "#{column} IN (#{placeholders})"
|
||||
params.concat(numeric_values)
|
||||
end
|
||||
end
|
||||
|
||||
return nil if clauses.empty?
|
||||
|
||||
["(#{clauses.join(" OR ")})", params]
|
||||
end
|
||||
|
||||
def query_nodes(limit, node_ref: nil)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
now = Time.now.to_i
|
||||
min_last_heard = now - PotatoMesh::Config.week_seconds
|
||||
params = []
|
||||
where_clauses = []
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["num"])
|
||||
return [] unless clause
|
||||
where_clauses << clause.first
|
||||
params.concat(clause.last)
|
||||
else
|
||||
where_clauses << "last_heard >= ?"
|
||||
params << min_last_heard
|
||||
end
|
||||
|
||||
if private_mode?
|
||||
where_clauses << "(role IS NULL OR role <> 'CLIENT_HIDDEN')"
|
||||
end
|
||||
|
||||
sql = <<~SQL
|
||||
SELECT node_id, short_name, long_name, hw_model, role, snr,
|
||||
battery_level, voltage, last_heard, first_heard,
|
||||
uptime_seconds, channel_utilization, air_util_tx,
|
||||
position_time, location_source, precision_bits,
|
||||
latitude, longitude, altitude, lora_freq, modem_preset
|
||||
FROM nodes
|
||||
SQL
|
||||
sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
|
||||
sql += <<~SQL
|
||||
ORDER BY last_heard DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
params << limit
|
||||
|
||||
rows = db.execute(sql, params)
|
||||
rows = rows.select do |r|
|
||||
last_candidate = [r["last_heard"], r["position_time"], r["first_heard"]]
|
||||
.map { |value| coerce_integer(value) }
|
||||
.compact
|
||||
.max
|
||||
last_candidate && last_candidate >= min_last_heard
|
||||
end
|
||||
rows.each do |r|
|
||||
r["role"] ||= "CLIENT"
|
||||
lh = r["last_heard"]&.to_i
|
||||
pt = r["position_time"]&.to_i
|
||||
lh = now if lh && lh > now
|
||||
pt = nil if pt && pt > now
|
||||
r["last_heard"] = lh
|
||||
r["position_time"] = pt
|
||||
r["last_seen_iso"] = Time.at(lh).utc.iso8601 if lh
|
||||
r["pos_time_iso"] = Time.at(pt).utc.iso8601 if pt
|
||||
pb = r["precision_bits"]
|
||||
r["precision_bits"] = pb.to_i if pb
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_messages(limit, node_ref: nil)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
params = []
|
||||
where_clauses = ["COALESCE(TRIM(m.encrypted), '') = ''"]
|
||||
now = Time.now.to_i
|
||||
min_rx_time = now - PotatoMesh::Config.week_seconds
|
||||
where_clauses << "m.rx_time >= ?"
|
||||
params << min_rx_time
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["m.from_id", "m.to_id"])
|
||||
return [] unless clause
|
||||
where_clauses << clause.first
|
||||
params.concat(clause.last)
|
||||
end
|
||||
|
||||
sql = <<~SQL
|
||||
SELECT m.id, m.rx_time, m.rx_iso, m.from_id, m.to_id, m.channel,
|
||||
m.portnum, m.text, m.encrypted, m.rssi, m.hop_limit,
|
||||
m.lora_freq AS msg_lora_freq, m.modem_preset AS msg_modem_preset,
|
||||
m.channel_name AS msg_channel_name, m.snr AS msg_snr,
|
||||
n.node_id AS node_node_id, n.num AS node_num,
|
||||
n.short_name AS node_short_name, n.long_name AS node_long_name,
|
||||
n.macaddr AS node_macaddr, n.hw_model AS node_hw_model,
|
||||
n.role AS node_role, n.public_key AS node_public_key,
|
||||
n.is_unmessagable AS node_is_unmessagable,
|
||||
n.is_favorite AS node_is_favorite,
|
||||
n.hops_away AS node_hops_away, n.snr AS node_snr,
|
||||
n.last_heard AS node_last_heard, n.first_heard AS node_first_heard,
|
||||
n.battery_level AS node_battery_level, n.voltage AS node_voltage,
|
||||
n.channel_utilization AS node_channel_utilization,
|
||||
n.air_util_tx AS node_air_util_tx,
|
||||
n.uptime_seconds AS node_uptime_seconds,
|
||||
n.position_time AS node_position_time,
|
||||
n.location_source AS node_location_source,
|
||||
n.precision_bits AS node_precision_bits,
|
||||
n.latitude AS node_latitude, n.longitude AS node_longitude,
|
||||
n.altitude AS node_altitude,
|
||||
n.lora_freq AS node_lora_freq, n.modem_preset AS node_modem_preset
|
||||
FROM messages m
|
||||
LEFT JOIN nodes n ON (
|
||||
m.from_id IS NOT NULL AND TRIM(m.from_id) <> '' AND (
|
||||
m.from_id = n.node_id OR (
|
||||
m.from_id GLOB '[0-9]*' AND CAST(m.from_id AS INTEGER) = n.num
|
||||
)
|
||||
)
|
||||
)
|
||||
SQL
|
||||
sql += " WHERE #{where_clauses.join(" AND ")}\n"
|
||||
sql += <<~SQL
|
||||
ORDER BY m.rx_time DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
params << limit
|
||||
rows = db.execute(sql, params)
|
||||
rows.each do |r|
|
||||
r.delete_if { |key, _| key.is_a?(Integer) }
|
||||
r["lora_freq"] = r.delete("msg_lora_freq")
|
||||
r["modem_preset"] = r.delete("msg_modem_preset")
|
||||
r["channel_name"] = r.delete("msg_channel_name")
|
||||
snr_value = r.delete("msg_snr")
|
||||
if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.empty?)
|
||||
raw = db.execute("SELECT * FROM messages WHERE id = ?", [r["id"]]).first
|
||||
debug_log(
|
||||
"Message join produced empty sender",
|
||||
context: "queries.messages",
|
||||
stage: "before_join",
|
||||
row: raw,
|
||||
)
|
||||
debug_log(
|
||||
"Message join produced empty sender",
|
||||
context: "queries.messages",
|
||||
stage: "after_join",
|
||||
row: r,
|
||||
)
|
||||
end
|
||||
node = {}
|
||||
r.keys.grep(/^node_/).each do |k|
|
||||
attribute = k.delete_prefix("node_")
|
||||
node[attribute] = r.delete(k)
|
||||
end
|
||||
r["snr"] = snr_value
|
||||
references = [r["from_id"]].compact
|
||||
if references.any? && (node["node_id"].nil? || node["node_id"].to_s.empty?)
|
||||
lookup_keys = []
|
||||
canonical = normalize_node_id(db, r["from_id"])
|
||||
lookup_keys << canonical if canonical
|
||||
raw_ref = r["from_id"].to_s.strip
|
||||
lookup_keys << raw_ref unless raw_ref.empty?
|
||||
lookup_keys << raw_ref.to_i if raw_ref.match?(/\A[0-9]+\z/)
|
||||
fallback = nil
|
||||
lookup_keys.uniq.each do |ref|
|
||||
sql = ref.is_a?(Integer) ? "SELECT * FROM nodes WHERE num = ?" : "SELECT * FROM nodes WHERE node_id = ?"
|
||||
fallback = db.get_first_row(sql, [ref])
|
||||
break if fallback
|
||||
end
|
||||
if fallback
|
||||
fallback.each do |key, value|
|
||||
next unless key.is_a?(String)
|
||||
node[key] = value if node[key].nil?
|
||||
end
|
||||
end
|
||||
end
|
||||
node["role"] = "CLIENT" if node.key?("role") && (node["role"].nil? || node["role"].to_s.empty?)
|
||||
r["node"] = node
|
||||
|
||||
canonical_from_id = string_or_nil(node["node_id"]) || string_or_nil(normalize_node_id(db, r["from_id"]))
|
||||
if canonical_from_id
|
||||
raw_from_id = string_or_nil(r["from_id"])
|
||||
if raw_from_id.nil? || raw_from_id.match?(/\A[0-9]+\z/)
|
||||
r["from_id"] = canonical_from_id
|
||||
elsif raw_from_id.start_with?("!") && raw_from_id.casecmp(canonical_from_id) != 0
|
||||
r["from_id"] = canonical_from_id
|
||||
end
|
||||
end
|
||||
|
||||
if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.empty?)
|
||||
debug_log(
|
||||
"Message row missing sender after processing",
|
||||
context: "queries.messages",
|
||||
stage: "after_processing",
|
||||
row: r,
|
||||
)
|
||||
end
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_positions(limit, node_ref: nil)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
params = []
|
||||
where_clauses = []
|
||||
now = Time.now.to_i
|
||||
min_rx_time = now - PotatoMesh::Config.week_seconds
|
||||
where_clauses << "COALESCE(rx_time, position_time, 0) >= ?"
|
||||
params << min_rx_time
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
|
||||
return [] unless clause
|
||||
where_clauses << clause.first
|
||||
params.concat(clause.last)
|
||||
end
|
||||
|
||||
sql = <<~SQL
|
||||
SELECT * FROM positions
|
||||
SQL
|
||||
sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
|
||||
sql += <<~SQL
|
||||
ORDER BY rx_time DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
params << limit
|
||||
rows = db.execute(sql, params)
|
||||
rows.each do |r|
|
||||
rx_time = coerce_integer(r["rx_time"])
|
||||
r["rx_time"] = rx_time if rx_time
|
||||
r["rx_iso"] = Time.at(rx_time).utc.iso8601 if rx_time && string_or_nil(r["rx_iso"]).nil?
|
||||
|
||||
node_num = coerce_integer(r["node_num"])
|
||||
r["node_num"] = node_num if node_num
|
||||
|
||||
position_time = coerce_integer(r["position_time"])
|
||||
position_time = nil if position_time && position_time > now
|
||||
r["position_time"] = position_time
|
||||
r["position_time_iso"] = Time.at(position_time).utc.iso8601 if position_time
|
||||
|
||||
r["precision_bits"] = coerce_integer(r["precision_bits"])
|
||||
r["sats_in_view"] = coerce_integer(r["sats_in_view"])
|
||||
r["pdop"] = coerce_float(r["pdop"])
|
||||
r["snr"] = coerce_float(r["snr"])
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_neighbors(limit, node_ref: nil)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
params = []
|
||||
where_clauses = []
|
||||
now = Time.now.to_i
|
||||
min_rx_time = now - PotatoMesh::Config.week_seconds
|
||||
where_clauses << "COALESCE(rx_time, 0) >= ?"
|
||||
params << min_rx_time
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["node_id", "neighbor_id"])
|
||||
return [] unless clause
|
||||
where_clauses << clause.first
|
||||
params.concat(clause.last)
|
||||
end
|
||||
|
||||
sql = <<~SQL
|
||||
SELECT * FROM neighbors
|
||||
SQL
|
||||
sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
|
||||
sql += <<~SQL
|
||||
ORDER BY rx_time DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
params << limit
|
||||
rows = db.execute(sql, params)
|
||||
rows.each do |r|
|
||||
rx_time = coerce_integer(r["rx_time"])
|
||||
rx_time = now if rx_time && rx_time > now
|
||||
r["rx_time"] = rx_time if rx_time
|
||||
r["rx_iso"] = Time.at(rx_time).utc.iso8601 if rx_time
|
||||
r["snr"] = coerce_float(r["snr"])
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_telemetry(limit, node_ref: nil)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
params = []
|
||||
where_clauses = []
|
||||
now = Time.now.to_i
|
||||
min_rx_time = now - PotatoMesh::Config.week_seconds
|
||||
where_clauses << "COALESCE(rx_time, telemetry_time, 0) >= ?"
|
||||
params << min_rx_time
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
|
||||
return [] unless clause
|
||||
where_clauses << clause.first
|
||||
params.concat(clause.last)
|
||||
end
|
||||
|
||||
sql = <<~SQL
|
||||
SELECT * FROM telemetry
|
||||
SQL
|
||||
sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
|
||||
sql += <<~SQL
|
||||
ORDER BY rx_time DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
params << limit
|
||||
rows = db.execute(sql, params)
|
||||
rows.each do |r|
|
||||
rx_time = coerce_integer(r["rx_time"])
|
||||
r["rx_time"] = rx_time if rx_time
|
||||
r["rx_iso"] = Time.at(rx_time).utc.iso8601 if rx_time && string_or_nil(r["rx_iso"]).nil?
|
||||
|
||||
node_num = coerce_integer(r["node_num"])
|
||||
r["node_num"] = node_num if node_num
|
||||
|
||||
telemetry_time = coerce_integer(r["telemetry_time"])
|
||||
telemetry_time = nil if telemetry_time && telemetry_time > now
|
||||
r["telemetry_time"] = telemetry_time
|
||||
r["telemetry_time_iso"] = Time.at(telemetry_time).utc.iso8601 if telemetry_time
|
||||
|
||||
r["channel"] = coerce_integer(r["channel"])
|
||||
r["hop_limit"] = coerce_integer(r["hop_limit"])
|
||||
r["rssi"] = coerce_integer(r["rssi"])
|
||||
r["bitfield"] = coerce_integer(r["bitfield"])
|
||||
r["snr"] = coerce_float(r["snr"])
|
||||
r["battery_level"] = coerce_float(r["battery_level"])
|
||||
r["voltage"] = coerce_float(r["voltage"])
|
||||
r["channel_utilization"] = coerce_float(r["channel_utilization"])
|
||||
r["air_util_tx"] = coerce_float(r["air_util_tx"])
|
||||
r["uptime_seconds"] = coerce_integer(r["uptime_seconds"])
|
||||
r["temperature"] = coerce_float(r["temperature"])
|
||||
r["relative_humidity"] = coerce_float(r["relative_humidity"])
|
||||
r["barometric_pressure"] = coerce_float(r["barometric_pressure"])
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
137
web/lib/potato_mesh/application/routes/api.rb
Normal file
137
web/lib/potato_mesh/application/routes/api.rb
Normal file
@@ -0,0 +1,137 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Routes
|
||||
module Api
|
||||
def self.registered(app)
|
||||
app.get "/version" do
|
||||
content_type :json
|
||||
last_update = latest_node_update_timestamp
|
||||
payload = {
|
||||
name: sanitized_site_name,
|
||||
version: app_constant(:APP_VERSION),
|
||||
lastNodeUpdate: last_update,
|
||||
config: {
|
||||
siteName: sanitized_site_name,
|
||||
channel: sanitized_channel,
|
||||
frequency: sanitized_frequency,
|
||||
contactLink: sanitized_contact_link,
|
||||
contactLinkUrl: sanitized_contact_link_url,
|
||||
refreshIntervalSeconds: PotatoMesh::Config.refresh_interval_seconds,
|
||||
mapCenter: {
|
||||
lat: PotatoMesh::Config.map_center_lat,
|
||||
lon: PotatoMesh::Config.map_center_lon,
|
||||
},
|
||||
maxDistanceKm: PotatoMesh::Config.max_distance_km,
|
||||
instanceDomain: app_constant(:INSTANCE_DOMAIN),
|
||||
privateMode: private_mode?,
|
||||
},
|
||||
}
|
||||
payload.to_json
|
||||
end
|
||||
|
||||
app.get "/.well-known/potato-mesh" do
|
||||
refresh_well_known_document_if_stale
|
||||
cache_control :public, max_age: PotatoMesh::Config.well_known_refresh_interval
|
||||
content_type :json
|
||||
send_file well_known_file_path
|
||||
end
|
||||
|
||||
app.get "/api/nodes" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_nodes(limit).to_json
|
||||
end
|
||||
|
||||
app.get "/api/nodes/:id" do
|
||||
content_type :json
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
rows = query_nodes(limit, node_ref: node_ref)
|
||||
halt 404, { error: "not found" }.to_json if rows.empty?
|
||||
rows.first.to_json
|
||||
end
|
||||
|
||||
app.get "/api/messages" do
|
||||
halt 404 if private_mode?
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_messages(limit).to_json
|
||||
end
|
||||
|
||||
app.get "/api/messages/:id" do
|
||||
halt 404 if private_mode?
|
||||
content_type :json
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_messages(limit, node_ref: node_ref).to_json
|
||||
end
|
||||
|
||||
app.get "/api/positions" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_positions(limit).to_json
|
||||
end
|
||||
|
||||
app.get "/api/positions/:id" do
|
||||
content_type :json
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_positions(limit, node_ref: node_ref).to_json
|
||||
end
|
||||
|
||||
app.get "/api/neighbors" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_neighbors(limit).to_json
|
||||
end
|
||||
|
||||
app.get "/api/neighbors/:id" do
|
||||
content_type :json
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_neighbors(limit, node_ref: node_ref).to_json
|
||||
end
|
||||
|
||||
app.get "/api/telemetry" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_telemetry(limit).to_json
|
||||
end
|
||||
|
||||
app.get "/api/telemetry/:id" do
|
||||
content_type :json
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_telemetry(limit, node_ref: node_ref).to_json
|
||||
end
|
||||
|
||||
app.get "/api/instances" do
|
||||
content_type :json
|
||||
ensure_self_instance_record!
|
||||
payload = load_instances_for_api
|
||||
JSON.generate(payload)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
293
web/lib/potato_mesh/application/routes/ingest.rb
Normal file
293
web/lib/potato_mesh/application/routes/ingest.rb
Normal file
@@ -0,0 +1,293 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Routes
|
||||
module Ingest
|
||||
def self.registered(app)
|
||||
app.post "/api/nodes" do
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
unless data.is_a?(Hash)
|
||||
halt 400, { error: "invalid payload" }.to_json
|
||||
end
|
||||
halt 400, { error: "too many nodes" }.to_json if data.size > 1000
|
||||
db = open_database
|
||||
data.each do |node_id, node|
|
||||
upsert_node(db, node_id, node)
|
||||
end
|
||||
PotatoMesh::App::Prometheus::NODES_GAUGE.set(query_nodes(1000).length)
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
app.post "/api/messages" do
|
||||
halt 404 if private_mode?
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
messages = data.is_a?(Array) ? data : [data]
|
||||
halt 400, { error: "too many messages" }.to_json if messages.size > 1000
|
||||
db = open_database
|
||||
messages.each do |msg|
|
||||
insert_message(db, msg)
|
||||
end
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
app.post "/api/instances" do
|
||||
content_type :json
|
||||
begin
|
||||
payload = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError => e
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
reason: "invalid JSON",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
|
||||
unless payload.is_a?(Hash)
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
reason: "payload is not an object",
|
||||
)
|
||||
halt 400, { error: "invalid payload" }.to_json
|
||||
end
|
||||
|
||||
id = string_or_nil(payload["id"]) || string_or_nil(payload["instanceId"])
|
||||
raw_domain = sanitize_instance_domain(payload["domain"], downcase: false)
|
||||
# Normalise the domain for persistence while retaining the caller's
|
||||
# original casing for signature verification fallbacks.
|
||||
normalized_domain = sanitize_instance_domain(raw_domain)
|
||||
pubkey = sanitize_public_key_pem(payload["pubkey"])
|
||||
name = string_or_nil(payload["name"])
|
||||
version = string_or_nil(payload["version"])
|
||||
channel = string_or_nil(payload["channel"])
|
||||
frequency = string_or_nil(payload["frequency"])
|
||||
latitude = coerce_float(payload["latitude"])
|
||||
longitude = coerce_float(payload["longitude"])
|
||||
last_update_time = coerce_integer(payload["last_update_time"] || payload["lastUpdateTime"])
|
||||
raw_private = payload.key?("isPrivate") ? payload["isPrivate"] : payload["is_private"]
|
||||
is_private = coerce_boolean(raw_private)
|
||||
signature = string_or_nil(payload["signature"])
|
||||
|
||||
attributes = {
|
||||
id: id,
|
||||
domain: normalized_domain,
|
||||
pubkey: pubkey,
|
||||
name: name,
|
||||
version: version,
|
||||
channel: channel,
|
||||
frequency: frequency,
|
||||
latitude: latitude,
|
||||
longitude: longitude,
|
||||
last_update_time: last_update_time,
|
||||
is_private: is_private,
|
||||
}
|
||||
|
||||
if [attributes[:id], attributes[:domain], attributes[:pubkey], signature, attributes[:last_update_time]].any?(&:nil?)
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
reason: "missing required fields",
|
||||
)
|
||||
halt 400, { error: "missing required fields" }.to_json
|
||||
end
|
||||
|
||||
signature_valid = verify_instance_signature(attributes, signature, attributes[:pubkey])
|
||||
# Some remote peers sign payloads using a canonicalised lowercase
|
||||
# domain while still sending a mixed-case domain. Retry signature
|
||||
# verification with the original casing when the first attempt
|
||||
# fails to maximise interoperability.
|
||||
if !signature_valid && raw_domain && normalized_domain && raw_domain.casecmp?(normalized_domain) && raw_domain != normalized_domain
|
||||
alternate_attributes = attributes.merge(domain: raw_domain)
|
||||
signature_valid = verify_instance_signature(alternate_attributes, signature, attributes[:pubkey])
|
||||
end
|
||||
|
||||
unless signature_valid
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: raw_domain || attributes[:domain],
|
||||
reason: "invalid signature",
|
||||
)
|
||||
halt 400, { error: "invalid signature" }.to_json
|
||||
end
|
||||
|
||||
if attributes[:is_private]
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: "instance marked private",
|
||||
)
|
||||
halt 403, { error: "instance marked private" }.to_json
|
||||
end
|
||||
|
||||
ip = ip_from_domain(attributes[:domain])
|
||||
if ip && restricted_ip_address?(ip)
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: "restricted IP address",
|
||||
resolved_ip: ip,
|
||||
)
|
||||
halt 400, { error: "restricted domain" }.to_json
|
||||
end
|
||||
|
||||
well_known, well_known_meta = fetch_instance_json(attributes[:domain], "/.well-known/potato-mesh")
|
||||
unless well_known
|
||||
details_list = Array(well_known_meta).map(&:to_s)
|
||||
details = details_list.empty? ? "no response" : details_list.join("; ")
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: "failed to fetch well-known document",
|
||||
details: details,
|
||||
)
|
||||
halt 400, { error: "failed to verify well-known document" }.to_json
|
||||
end
|
||||
|
||||
valid, reason = validate_well_known_document(well_known, attributes[:domain], attributes[:pubkey])
|
||||
unless valid
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: reason || "invalid well-known document",
|
||||
)
|
||||
halt 400, { error: reason || "invalid well-known document" }.to_json
|
||||
end
|
||||
|
||||
remote_nodes, node_source = fetch_instance_json(attributes[:domain], "/api/nodes")
|
||||
unless remote_nodes
|
||||
details_list = Array(node_source).map(&:to_s)
|
||||
details = details_list.empty? ? "no response" : details_list.join("; ")
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: "failed to fetch nodes",
|
||||
details: details,
|
||||
)
|
||||
halt 400, { error: "failed to fetch nodes" }.to_json
|
||||
end
|
||||
|
||||
fresh, freshness_reason = validate_remote_nodes(remote_nodes)
|
||||
unless fresh
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: freshness_reason || "stale node data",
|
||||
)
|
||||
halt 400, { error: freshness_reason || "stale node data" }.to_json
|
||||
end
|
||||
|
||||
db = open_database
|
||||
upsert_instance_record(db, attributes, signature)
|
||||
ingest_known_instances_from!(db, attributes[:domain])
|
||||
debug_log(
|
||||
"Registered remote instance",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
instance_id: attributes[:id],
|
||||
)
|
||||
status 201
|
||||
{ status: "registered" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
app.post "/api/positions" do
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
positions = data.is_a?(Array) ? data : [data]
|
||||
halt 400, { error: "too many positions" }.to_json if positions.size > 1000
|
||||
db = open_database
|
||||
positions.each do |pos|
|
||||
insert_position(db, pos)
|
||||
end
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
app.post "/api/neighbors" do
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
neighbor_payloads = data.is_a?(Array) ? data : [data]
|
||||
halt 400, { error: "too many neighbor packets" }.to_json if neighbor_payloads.size > 1000
|
||||
db = open_database
|
||||
neighbor_payloads.each do |packet|
|
||||
insert_neighbors(db, packet)
|
||||
end
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
app.post "/api/telemetry" do
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
telemetry_packets = data.is_a?(Array) ? data : [data]
|
||||
halt 400, { error: "too many telemetry packets" }.to_json if telemetry_packets.size > 1000
|
||||
db = open_database
|
||||
telemetry_packets.each do |packet|
|
||||
insert_telemetry(db, packet)
|
||||
end
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
79
web/lib/potato_mesh/application/routes/root.rb
Normal file
79
web/lib/potato_mesh/application/routes/root.rb
Normal file
@@ -0,0 +1,79 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Routes
|
||||
module Root
|
||||
def self.registered(app)
|
||||
app.get "/favicon.ico" do
|
||||
cache_control :public, max_age: PotatoMesh::Config.week_seconds
|
||||
ico_path = File.join(settings.public_folder, "favicon.ico")
|
||||
if File.file?(ico_path)
|
||||
send_file ico_path, type: "image/x-icon"
|
||||
else
|
||||
send_file File.join(settings.public_folder, "potatomesh-logo.svg"), type: "image/svg+xml"
|
||||
end
|
||||
end
|
||||
|
||||
app.get "/potatomesh-logo.svg" do
|
||||
path = File.expand_path("potatomesh-logo.svg", settings.public_folder)
|
||||
settings.logger&.info("logo_path=#{path} exist=#{File.exist?(path)} file=#{File.file?(path)}")
|
||||
halt 404, "Not Found" unless File.exist?(path) && File.readable?(path)
|
||||
|
||||
content_type "image/svg+xml"
|
||||
last_modified File.mtime(path)
|
||||
cache_control :public, max_age: 3600
|
||||
send_file path
|
||||
end
|
||||
|
||||
app.get "/" do
|
||||
meta = meta_configuration
|
||||
config = frontend_app_config
|
||||
|
||||
raw_theme = request.cookies["theme"]
|
||||
theme = %w[dark light].include?(raw_theme) ? raw_theme : "dark"
|
||||
if raw_theme != theme
|
||||
response.set_cookie("theme", value: theme, path: "/", max_age: 60 * 60 * 24 * 7, same_site: :lax)
|
||||
end
|
||||
|
||||
erb :index, locals: {
|
||||
site_name: meta[:name],
|
||||
meta_title: meta[:title],
|
||||
meta_name: meta[:name],
|
||||
meta_description: meta[:description],
|
||||
channel: sanitized_channel,
|
||||
frequency: sanitized_frequency,
|
||||
map_center_lat: PotatoMesh::Config.map_center_lat,
|
||||
map_center_lon: PotatoMesh::Config.map_center_lon,
|
||||
max_distance_km: PotatoMesh::Config.max_distance_km,
|
||||
contact_link: sanitized_contact_link,
|
||||
contact_link_url: sanitized_contact_link_url,
|
||||
version: app_constant(:APP_VERSION),
|
||||
private_mode: private_mode?,
|
||||
refresh_interval_seconds: PotatoMesh::Config.refresh_interval_seconds,
|
||||
app_config_json: JSON.generate(config),
|
||||
initial_theme: theme,
|
||||
}
|
||||
end
|
||||
|
||||
app.get "/metrics" do
|
||||
content_type ::Prometheus::Client::Formats::Text::CONTENT_TYPE
|
||||
::Prometheus::Client::Formats::Text.marshal(::Prometheus::Client.registry)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
453
web/lib/potato_mesh/config.rb
Normal file
453
web/lib/potato_mesh/config.rb
Normal file
@@ -0,0 +1,453 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
# Configuration wrapper responsible for exposing ENV backed settings used by
|
||||
# the web and data ingestion services.
|
||||
module Config
|
||||
module_function
|
||||
|
||||
DEFAULT_DB_BUSY_TIMEOUT_MS = 5_000
|
||||
DEFAULT_DB_BUSY_MAX_RETRIES = 5
|
||||
DEFAULT_DB_BUSY_RETRY_DELAY = 0.05
|
||||
DEFAULT_MAX_JSON_BODY_BYTES = 1_048_576
|
||||
DEFAULT_REFRESH_INTERVAL_SECONDS = 60
|
||||
DEFAULT_TILE_FILTER_LIGHT = "grayscale(1) saturate(0) brightness(0.92) contrast(1.05)"
|
||||
DEFAULT_TILE_FILTER_DARK = "grayscale(1) invert(1) brightness(0.9) contrast(1.08)"
|
||||
DEFAULT_MAP_CENTER_LAT = 38.761944
|
||||
DEFAULT_MAP_CENTER_LON = -27.090833
|
||||
DEFAULT_MAP_CENTER = "#{DEFAULT_MAP_CENTER_LAT},#{DEFAULT_MAP_CENTER_LON}"
|
||||
DEFAULT_CHANNEL = "#LongFast"
|
||||
DEFAULT_FREQUENCY = "915MHz"
|
||||
DEFAULT_CONTACT_LINK = "#potatomesh:dod.ngo"
|
||||
DEFAULT_MAX_DISTANCE_KM = 42.0
|
||||
DEFAULT_REMOTE_INSTANCE_CONNECT_TIMEOUT = 5
|
||||
DEFAULT_REMOTE_INSTANCE_READ_TIMEOUT = 12
|
||||
|
||||
# Resolve the absolute path to the web application root directory.
|
||||
#
|
||||
# @return [String] absolute filesystem path of the web folder.
|
||||
def web_root
|
||||
@web_root ||= File.expand_path("../..", __dir__)
|
||||
end
|
||||
|
||||
# Resolve the repository root directory relative to the web folder.
|
||||
#
|
||||
# @return [String] path to the Git repository root.
|
||||
def repo_root
|
||||
@repo_root ||= File.expand_path("..", web_root)
|
||||
end
|
||||
|
||||
# Resolve the current XDG data directory for PotatoMesh content.
|
||||
#
|
||||
# @return [String] absolute path to the PotatoMesh data directory.
|
||||
def data_directory
|
||||
File.join(resolve_xdg_home("XDG_DATA_HOME", %w[.local share]), "potato-mesh")
|
||||
end
|
||||
|
||||
# Resolve the current XDG configuration directory for PotatoMesh files.
|
||||
#
|
||||
# @return [String] absolute path to the PotatoMesh configuration directory.
|
||||
def config_directory
|
||||
File.join(resolve_xdg_home("XDG_CONFIG_HOME", %w[.config]), "potato-mesh")
|
||||
end
|
||||
|
||||
# Build the default SQLite database path inside the data directory.
|
||||
#
|
||||
# @return [String] absolute path to the managed +mesh.db+ file.
|
||||
def default_db_path
|
||||
File.join(data_directory, "mesh.db")
|
||||
end
|
||||
|
||||
# Legacy database path bundled alongside the repository.
|
||||
#
|
||||
# @return [String] absolute path to the repository managed database file.
|
||||
def legacy_db_path
|
||||
File.expand_path("../data/mesh.db", web_root)
|
||||
end
|
||||
|
||||
# Determine the configured database location, defaulting to the bundled
|
||||
# SQLite file.
|
||||
#
|
||||
# @return [String] absolute path to the database file.
|
||||
def db_path
|
||||
default_db_path
|
||||
end
|
||||
|
||||
# Retrieve the SQLite busy timeout duration in milliseconds.
|
||||
#
|
||||
# @return [Integer] timeout value in milliseconds.
|
||||
def db_busy_timeout_ms
|
||||
DEFAULT_DB_BUSY_TIMEOUT_MS
|
||||
end
|
||||
|
||||
# Retrieve the maximum number of retries when encountering SQLITE_BUSY.
|
||||
#
|
||||
# @return [Integer] maximum retry attempts.
|
||||
def db_busy_max_retries
|
||||
DEFAULT_DB_BUSY_MAX_RETRIES
|
||||
end
|
||||
|
||||
# Retrieve the backoff delay between busy retries in seconds.
|
||||
#
|
||||
# @return [Float] seconds to wait between retries.
|
||||
def db_busy_retry_delay
|
||||
DEFAULT_DB_BUSY_RETRY_DELAY
|
||||
end
|
||||
|
||||
# Convenience constant describing the number of seconds in a week.
|
||||
#
|
||||
# @return [Integer] seconds in seven days.
|
||||
def week_seconds
|
||||
7 * 24 * 60 * 60
|
||||
end
|
||||
|
||||
# Default upper bound for accepted JSON payload sizes.
|
||||
#
|
||||
# @return [Integer] byte ceiling for HTTP request bodies.
|
||||
def default_max_json_body_bytes
|
||||
DEFAULT_MAX_JSON_BODY_BYTES
|
||||
end
|
||||
|
||||
# Determine the maximum allowed JSON body size with validation.
|
||||
#
|
||||
# @return [Integer] configured byte limit.
|
||||
def max_json_body_bytes
|
||||
default_max_json_body_bytes
|
||||
end
|
||||
|
||||
# Provide the fallback version string when git metadata is unavailable.
|
||||
#
|
||||
# @return [String] semantic version identifier.
|
||||
def version_fallback
|
||||
"v0.5.1"
|
||||
end
|
||||
|
||||
# Default refresh interval for frontend polling routines.
|
||||
#
|
||||
# @return [Integer] refresh period in seconds.
|
||||
def default_refresh_interval_seconds
|
||||
DEFAULT_REFRESH_INTERVAL_SECONDS
|
||||
end
|
||||
|
||||
# Fetch the refresh interval, ensuring a positive integer value.
|
||||
#
|
||||
# @return [Integer] polling cadence in seconds.
|
||||
def refresh_interval_seconds
|
||||
default_refresh_interval_seconds
|
||||
end
|
||||
|
||||
# Retrieve the CSS filter used for light themed maps.
|
||||
#
|
||||
# @return [String] CSS filter string.
|
||||
def map_tile_filter_light
|
||||
DEFAULT_TILE_FILTER_LIGHT
|
||||
end
|
||||
|
||||
# Retrieve the CSS filter used for dark themed maps.
|
||||
#
|
||||
# @return [String] CSS filter string for dark tiles.
|
||||
def map_tile_filter_dark
|
||||
DEFAULT_TILE_FILTER_DARK
|
||||
end
|
||||
|
||||
# Provide a simple hash of tile filters for template use.
|
||||
#
|
||||
# @return [Hash] frozen mapping of themes to CSS filters.
|
||||
def tile_filters
|
||||
{
|
||||
light: map_tile_filter_light,
|
||||
dark: map_tile_filter_dark,
|
||||
}.freeze
|
||||
end
|
||||
|
||||
# Retrieve the raw comma separated Prometheus report identifiers.
|
||||
#
|
||||
# @return [String] comma separated list of report IDs.
|
||||
def prom_report_ids
|
||||
""
|
||||
end
|
||||
|
||||
# Transform Prometheus report identifiers into a cleaned array.
|
||||
#
|
||||
# @return [Array<String>] list of unique report identifiers.
|
||||
def prom_report_id_list
|
||||
prom_report_ids.split(",").map(&:strip).reject(&:empty?)
|
||||
end
|
||||
|
||||
# Path storing the instance private key used for signing.
|
||||
#
|
||||
# @return [String] absolute location of the PEM file.
|
||||
def keyfile_path
|
||||
File.join(config_directory, "keyfile")
|
||||
end
|
||||
|
||||
# Sub-path used when exposing well known configuration files.
|
||||
#
|
||||
# @return [String] relative path within the public directory.
|
||||
def well_known_relative_path
|
||||
File.join(".well-known", "potato-mesh")
|
||||
end
|
||||
|
||||
# Filesystem directory used to stage /.well-known artifacts.
|
||||
#
|
||||
# @return [String] absolute storage path.
|
||||
def well_known_storage_root
|
||||
File.join(config_directory, "well-known")
|
||||
end
|
||||
|
||||
# Legacy configuration directory bundled with the repository.
|
||||
#
|
||||
# @return [String] absolute path to the repository managed configuration directory.
|
||||
def legacy_config_directory
|
||||
File.join(web_root, ".config")
|
||||
end
|
||||
|
||||
# Legacy keyfile location used before introducing XDG directories.
|
||||
#
|
||||
# @return [String] absolute filesystem path to the legacy keyfile.
|
||||
def legacy_keyfile_path
|
||||
legacy_keyfile_candidates.find { |path| File.exist?(path) } || legacy_keyfile_candidates.first
|
||||
end
|
||||
|
||||
# Enumerate known legacy keyfile locations for migration.
|
||||
#
|
||||
# @return [Array<String>] ordered list of absolute legacy keyfile paths.
|
||||
def legacy_keyfile_candidates
|
||||
[
|
||||
File.join(web_root, ".config", "keyfile"),
|
||||
File.join(web_root, ".config", "potato-mesh", "keyfile"),
|
||||
File.join(web_root, "config", "keyfile"),
|
||||
File.join(web_root, "config", "potato-mesh", "keyfile"),
|
||||
].map { |path| File.expand_path(path) }.uniq
|
||||
end
|
||||
|
||||
# Legacy location for well known assets within the public folder.
|
||||
#
|
||||
# @return [String] absolute path to the legacy output directory.
|
||||
def legacy_public_well_known_path
|
||||
File.join(web_root, "public", well_known_relative_path)
|
||||
end
|
||||
|
||||
# Enumerate known legacy well-known document locations for migration.
|
||||
#
|
||||
# @return [Array<String>] ordered list of absolute legacy well-known document paths.
|
||||
def legacy_well_known_candidates
|
||||
filename = File.basename(well_known_relative_path)
|
||||
[
|
||||
File.join(web_root, ".config", "well-known", filename),
|
||||
File.join(web_root, ".config", ".well-known", filename),
|
||||
File.join(web_root, ".config", "potato-mesh", "well-known", filename),
|
||||
File.join(web_root, ".config", "potato-mesh", ".well-known", filename),
|
||||
File.join(web_root, "config", "well-known", filename),
|
||||
File.join(web_root, "config", ".well-known", filename),
|
||||
File.join(web_root, "config", "potato-mesh", "well-known", filename),
|
||||
File.join(web_root, "config", "potato-mesh", ".well-known", filename),
|
||||
].map { |path| File.expand_path(path) }.uniq
|
||||
end
|
||||
|
||||
# Interval used to refresh well known documents from disk.
|
||||
#
|
||||
# @return [Integer] refresh duration in seconds.
|
||||
def well_known_refresh_interval
|
||||
24 * 60 * 60
|
||||
end
|
||||
|
||||
# Cryptographic algorithm identifier for HTTP signatures.
|
||||
#
|
||||
# @return [String] RFC-compliant algorithm label.
|
||||
def instance_signature_algorithm
|
||||
"rsa-sha256"
|
||||
end
|
||||
|
||||
# Connection timeout used when establishing federation HTTP sockets.
|
||||
#
|
||||
# @return [Integer] connect timeout in seconds.
|
||||
def remote_instance_http_timeout
|
||||
DEFAULT_REMOTE_INSTANCE_CONNECT_TIMEOUT
|
||||
end
|
||||
|
||||
# Read timeout used when streaming federation HTTP responses.
|
||||
#
|
||||
# @return [Integer] read timeout in seconds.
|
||||
def remote_instance_read_timeout
|
||||
DEFAULT_REMOTE_INSTANCE_READ_TIMEOUT
|
||||
end
|
||||
|
||||
# Maximum acceptable age for remote node data.
|
||||
#
|
||||
# @return [Integer] seconds before remote nodes are considered stale.
|
||||
def remote_instance_max_node_age
|
||||
86_400
|
||||
end
|
||||
|
||||
# Minimum node count expected from a remote instance before storing.
|
||||
#
|
||||
# @return [Integer] node threshold for remote ingestion.
|
||||
def remote_instance_min_node_count
|
||||
10
|
||||
end
|
||||
|
||||
# Domains used to seed the federation discovery process.
|
||||
#
|
||||
# @return [Array<String>] list of default seed domains.
|
||||
def federation_seed_domains
|
||||
["potatomesh.net"].freeze
|
||||
end
|
||||
|
||||
# Determine how often we broadcast federation announcements.
|
||||
#
|
||||
# @return [Integer] number of seconds between announcement cycles.
|
||||
def federation_announcement_interval
|
||||
8 * 60 * 60
|
||||
end
|
||||
|
||||
# Retrieve the configured site name for presentation.
|
||||
#
|
||||
# @return [String] human friendly site label.
|
||||
def site_name
|
||||
fetch_string("SITE_NAME", "PotatoMesh Demo")
|
||||
end
|
||||
|
||||
# Retrieve the default radio channel label.
|
||||
#
|
||||
# @return [String] channel name from configuration.
|
||||
def channel
|
||||
fetch_string("CHANNEL", DEFAULT_CHANNEL)
|
||||
end
|
||||
|
||||
# Retrieve the default radio frequency description.
|
||||
#
|
||||
# @return [String] frequency identifier.
|
||||
def frequency
|
||||
fetch_string("FREQUENCY", DEFAULT_FREQUENCY)
|
||||
end
|
||||
|
||||
# Parse the configured map centre coordinates.
|
||||
#
|
||||
# @return [Hash{Symbol=>Float}] latitude and longitude in decimal degrees.
|
||||
def map_center
|
||||
raw = fetch_string("MAP_CENTER", DEFAULT_MAP_CENTER)
|
||||
lat_str, lon_str = raw.split(",", 2).map { |part| part&.strip }.compact
|
||||
lat = Float(lat_str, exception: false)
|
||||
lon = Float(lon_str, exception: false)
|
||||
lat = DEFAULT_MAP_CENTER_LAT unless lat
|
||||
lon = DEFAULT_MAP_CENTER_LON unless lon
|
||||
{ lat: lat, lon: lon }
|
||||
end
|
||||
|
||||
# Map display latitude centre for the frontend map widget.
|
||||
#
|
||||
# @return [Float] latitude in decimal degrees.
|
||||
def map_center_lat
|
||||
map_center[:lat]
|
||||
end
|
||||
|
||||
# Map display longitude centre for the frontend map widget.
|
||||
#
|
||||
# @return [Float] longitude in decimal degrees.
|
||||
def map_center_lon
|
||||
map_center[:lon]
|
||||
end
|
||||
|
||||
# Maximum straight-line distance between nodes before relationships are
|
||||
# hidden.
|
||||
#
|
||||
# @return [Float] distance in kilometres.
|
||||
def max_distance_km
|
||||
raw = fetch_string("MAX_DISTANCE", nil)
|
||||
parsed = raw && Float(raw, exception: false)
|
||||
return parsed if parsed && parsed.positive?
|
||||
|
||||
DEFAULT_MAX_DISTANCE_KM
|
||||
end
|
||||
|
||||
# Contact link for community discussion.
|
||||
#
|
||||
# @return [String] contact URI or identifier.
|
||||
def contact_link
|
||||
fetch_string("CONTACT_LINK", DEFAULT_CONTACT_LINK)
|
||||
end
|
||||
|
||||
# Determine the best URL to represent the configured contact link.
|
||||
#
|
||||
# @return [String, nil] absolute URL when derivable, otherwise nil.
|
||||
def contact_link_url
|
||||
link = contact_link.to_s.strip
|
||||
return nil if link.empty?
|
||||
|
||||
if matrix_alias?(link)
|
||||
"https://matrix.to/#/#{link}"
|
||||
elsif link.match?(%r{\Ahttps?://}i)
|
||||
link
|
||||
else
|
||||
nil
|
||||
end
|
||||
end
|
||||
|
||||
# Check whether a contact link is a Matrix room alias.
|
||||
#
|
||||
# @param link [String] candidate link string.
|
||||
# @return [Boolean] true when the link resembles a Matrix alias.
|
||||
def matrix_alias?(link)
|
||||
link.match?(/\A[#!][^\s:]+:[^\s]+\z/)
|
||||
end
|
||||
|
||||
# Check whether verbose debugging is enabled for the runtime.
|
||||
#
|
||||
# @return [Boolean] true when DEBUG=1.
|
||||
def debug?
|
||||
ENV["DEBUG"] == "1"
|
||||
end
|
||||
|
||||
# Fetch and sanitise string based configuration values.
|
||||
#
|
||||
# @param key [String] environment variable to read.
|
||||
# @param default [String] fallback value when unset or blank.
|
||||
# @return [String] cleaned configuration string.
|
||||
def fetch_string(key, default)
|
||||
value = ENV[key]
|
||||
return default if value.nil?
|
||||
|
||||
trimmed = value.strip
|
||||
trimmed.empty? ? default : trimmed
|
||||
end
|
||||
|
||||
# Resolve the effective XDG directory honoring environment overrides.
|
||||
#
|
||||
# @param env_key [String] name of the environment variable to inspect.
|
||||
# @param fallback_segments [Array<String>] path segments appended to the user home directory.
|
||||
# @return [String] absolute base directory referenced by the XDG variable.
|
||||
def resolve_xdg_home(env_key, fallback_segments)
|
||||
raw = fetch_string(env_key, nil)
|
||||
candidate = raw && !raw.empty? ? raw : nil
|
||||
return File.expand_path(candidate) if candidate
|
||||
|
||||
base_home = safe_home_directory
|
||||
File.expand_path(File.join(base_home, *fallback_segments))
|
||||
end
|
||||
|
||||
# Retrieve the current user's home directory handling runtime failures.
|
||||
#
|
||||
# @return [String] absolute path to the user home or web root fallback.
|
||||
def safe_home_directory
|
||||
home = Dir.home
|
||||
return web_root if home.nil? || home.empty?
|
||||
|
||||
home
|
||||
rescue ArgumentError, RuntimeError
|
||||
web_root
|
||||
end
|
||||
end
|
||||
end
|
||||
87
web/lib/potato_mesh/logging.rb
Normal file
87
web/lib/potato_mesh/logging.rb
Normal file
@@ -0,0 +1,87 @@
|
||||
# frozen_string_literal: true

require "logger"
require "time"

module PotatoMesh
  # Logging utilities shared across the web application.
  module Logging
    LOGGER_NAME = "potato-mesh" # :nodoc:

    module_function

    # Build a logger configured with the potato-mesh formatter.
    #
    # @param io [#write] destination for log output.
    # @return [Logger] configured logger instance.
    def build_logger(io = $stdout)
      logger = Logger.new(io)
      logger.progname = LOGGER_NAME
      logger.formatter = method(:formatter)
      logger
    end

    # Format log entries with a consistent structure understood by the UI.
    #
    # @param severity [String] Ruby logger severity constant (e.g., "DEBUG").
    # @param time [Time] timestamp when the log entry was created.
    # @param progname [String, nil] optional application name emitting the log.
    # @param message [String] body of the log message.
    # @return [String] formatted log entry.
    def formatter(severity, time, progname, message)
      timestamp = time.utc.iso8601(3)
      body = message.is_a?(String) ? message : message.inspect
      "[#{timestamp}] [#{progname || LOGGER_NAME}] [#{severity.downcase}] #{body}\n"
    end

    # Emit a structured log entry to the provided logger instance.
    #
    # @param logger [Logger, nil] logger to emit against.
    # @param severity [Symbol] target severity (e.g., :debug, :info).
    # @param message [String] primary message text.
    # @param context [String, nil] logical component generating the entry.
    # @param metadata [Hash] supplemental structured data for the log.
    # @return [void]
    def log(logger, severity, message, context: nil, **metadata)
      return unless logger

      parts = []
      parts << "context=#{context}" if context
      metadata.each do |key, value|
        parts << format_metadata_pair(key, value)
      end
      parts << message

      logger.public_send(severity, parts.join(" "))
    end

    # Retrieve the canonical logger for the web application.
    #
    # @param target [Object, nil] object with optional +settings.logger+ accessor.
    # @return [Logger, nil] logger instance when available.
    def logger_for(target = nil)
      if target.respond_to?(:settings) && target.settings.respond_to?(:logger)
        return target.settings.logger
      end

      if defined?(PotatoMesh::Application) &&
         PotatoMesh::Application.respond_to?(:settings) &&
         PotatoMesh::Application.settings.respond_to?(:logger)
        return PotatoMesh::Application.settings.logger
      end

      nil
    end

    # Format metadata key/value pairs for structured logging output.
    #
    # @param key [Symbol, String]
    # @param value [Object]
    # @return [String]
    def format_metadata_pair(key, value)
      "#{key}=#{value.inspect}"
    end

    private_class_method :format_metadata_pair
  end
end
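A short usage sketch of the helpers above; the require path is illustrative, and the timestamp in the comment only shows the shape the formatter produces:

require_relative "web/lib/potato_mesh/logging"  # illustrative path

logger = PotatoMesh::Logging.build_logger($stdout)
PotatoMesh::Logging.log(logger, :info, "node table refreshed",
                        context: "ingest", nodes: 42)
# [2025-01-01T12:00:00.000Z] [potato-mesh] [info] context=ingest nodes=42 node table refreshed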
80  web/lib/potato_mesh/meta.rb  Normal file
@@ -0,0 +1,80 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# frozen_string_literal: true

require_relative "config"
require_relative "sanitizer"

module PotatoMesh
  # Helper functions used to generate SEO metadata and formatted values.
  module Meta
    module_function

    # Format a distance in kilometres without trailing decimal precision when unnecessary.
    #
    # @param distance [Numeric] distance in kilometres.
    # @return [String] formatted kilometre value.
    def formatted_distance_km(distance)
      format("%.1f", distance).sub(/\.0\z/, "")
    end

    # Construct the meta description string displayed to search engines and social previews.
    #
    # @param private_mode [Boolean] whether private mode is enabled.
    # @return [String] generated description text.
    def description(private_mode:)
      site = Sanitizer.sanitized_site_name
      channel = Sanitizer.sanitized_channel
      frequency = Sanitizer.sanitized_frequency
      contact = Sanitizer.sanitized_contact_link

      summary = "Live Meshtastic mesh map for #{site}"
      if channel.empty? && frequency.empty?
        summary += "."
      elsif channel.empty?
        summary += " tuned to #{frequency}."
      elsif frequency.empty?
        summary += " on #{channel}."
      else
        summary += " on #{channel} (#{frequency})."
      end

      activity_sentence = if private_mode
        "Track nodes and coverage in real time."
      else
        "Track nodes, messages, and coverage in real time."
      end

      sentences = [summary, activity_sentence]
      if (distance = Sanitizer.sanitized_max_distance_km)
        sentences << "Shows nodes within roughly #{formatted_distance_km(distance)} km of the map center."
      end
      sentences << "Join the community in #{contact} via chat." if contact

      sentences.join(" ")
    end

    # Build a hash of meta configuration values used by templating layers.
    #
    # @param private_mode [Boolean] whether private mode is enabled.
    # @return [Hash] structured metadata for templates.
    def configuration(private_mode:)
      site = Sanitizer.sanitized_site_name
      {
        title: site,
        name: site,
        description: description(private_mode: private_mode),
      }.freeze
    end
  end
end
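A sketch of the resulting description, assuming hypothetical Config values (site "Berlin Mesh", channel "#MediumFast", frequency "868MHz", a 40 km radius, and no contact link):

PotatoMesh::Meta.description(private_mode: false)
# => "Live Meshtastic mesh map for Berlin Mesh on #MediumFast (868MHz). " \
#    "Track nodes, messages, and coverage in real time. " \
#    "Shows nodes within roughly 40 km of the map center."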
150  web/lib/potato_mesh/sanitizer.rb  Normal file
@@ -0,0 +1,150 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# frozen_string_literal: true

require "ipaddr"

require_relative "config"

module PotatoMesh
  # Utility module responsible for coercing and sanitising user provided
  # configuration strings. Each helper is exposed as a module function so it
  # can be consumed both by the web layer and background jobs without
  # instantiation overhead.
  module Sanitizer
    module_function

    # Coerce an arbitrary value into a trimmed string unless the content is
    # empty.
    #
    # @param value [Object, nil] arbitrary input that should be converted.
    # @return [String, nil] trimmed string representation or +nil+ when blank.
    def string_or_nil(value)
      return nil if value.nil?

      str = value.is_a?(String) ? value : value.to_s
      trimmed = str.strip
      trimmed.empty? ? nil : trimmed
    end

    # Ensure a value is a valid instance domain according to RFC 1035/3986
    # rules. This rejects whitespace, path separators, and trailing dots.
    #
    # @param value [String, Object, nil] candidate domain name.
    # @param downcase [Boolean] whether to force the result to lowercase.
    # @return [String, nil] canonical domain value or +nil+ when invalid.
    def sanitize_instance_domain(value, downcase: true)
      host = string_or_nil(value)
      return nil unless host

      trimmed = host.strip
      trimmed = trimmed.delete_suffix(".") while trimmed.end_with?(".")
      return nil if trimmed.empty?
      return nil if trimmed.match?(%r{[\s/\\@]})

      downcase ? trimmed.downcase : trimmed
    end

    # Extract the host component from a potentially bracketed domain literal.
    #
    # @param domain [String, nil] raw domain string received from the user.
    # @return [String, nil] host portion of the domain, or +nil+ when invalid.
    def instance_domain_host(domain)
      return nil if domain.nil?

      candidate = domain.strip
      return nil if candidate.empty?

      if candidate.start_with?("[")
        match = candidate.match(/\A\[(?<host>[^\]]+)\](?::(?<port>\d+))?\z/)
        return match[:host] if match
        return nil
      end

      host, port = candidate.split(":", 2)
      if port && !host.include?(":") && port.match?(/\A\d+\z/)
        return host
      end

      candidate
    end

    # Resolve a validated domain string into an IP address object.
    #
    # @param domain [String, nil] domain literal potentially including port.
    # @return [IPAddr, nil] parsed IP address when valid.
    def ip_from_domain(domain)
      host = instance_domain_host(domain)
      return nil unless host

      IPAddr.new(host)
    rescue IPAddr::InvalidAddressError
      nil
    end

    # Normalise a value into a trimmed string representation.
    #
    # @param value [Object] arbitrary object to coerce into text.
    # @return [String] trimmed string version of the supplied value.
    def sanitized_string(value)
      value.to_s.strip
    end

    # Retrieve the configured site name as a cleaned string.
    #
    # @return [String] trimmed configuration value.
    def sanitized_site_name
      sanitized_string(Config.site_name)
    end

    # Retrieve the configured channel as a cleaned string.
    #
    # @return [String] trimmed configuration value.
    def sanitized_channel
      sanitized_string(Config.channel)
    end

    # Retrieve the configured frequency as a cleaned string.
    #
    # @return [String] trimmed configuration value.
    def sanitized_frequency
      sanitized_string(Config.frequency)
    end

    # Retrieve the configured contact link and normalise blank values to nil.
    #
    # @return [String, nil] contact link identifier or +nil+ when blank.
    def sanitized_contact_link
      value = sanitized_string(Config.contact_link)
      value.empty? ? nil : value
    end

    # Retrieve the best effort URL for the configured contact link.
    #
    # @return [String, nil] contact hyperlink when derivable.
    def sanitized_contact_link_url
      Config.contact_link_url
    end

    # Return a positive numeric maximum distance when configured.
    #
    # @return [Numeric, nil] distance value in kilometres.
    def sanitized_max_distance_km
      distance = Config.max_distance_km
      return nil unless distance.is_a?(Numeric)
      return nil unless distance.positive?

      distance
    end
  end
end
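A few illustrative calls showing how the domain helpers behave on made-up inputs; only the Config-backed readers require application configuration:

PotatoMesh::Sanitizer.sanitize_instance_domain("Mesh.Example.ORG.")  # => "mesh.example.org"
PotatoMesh::Sanitizer.sanitize_instance_domain("bad domain/path")    # => nil
PotatoMesh::Sanitizer.instance_domain_host("[2001:db8::1]:443")      # => "2001:db8::1"
PotatoMesh::Sanitizer.instance_domain_host("node.example.net:8443")  # => "node.example.net"
PotatoMesh::Sanitizer.ip_from_domain("192.0.2.10:80")                # => #<IPAddr: IPv4:192.0.2.10/...>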
12  web/package-lock.json  generated  Normal file
@@ -0,0 +1,12 @@
{
  "name": "potato-mesh",
  "version": "0.5.0",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "potato-mesh",
      "version": "0.5.0"
    }
  }
}
9  web/package.json  Normal file
@@ -0,0 +1,9 @@
{
  "name": "potato-mesh",
  "version": "0.5.0",
  "type": "module",
  "private": true,
  "scripts": {
    "test": "mkdir -p reports coverage && NODE_V8_COVERAGE=coverage node --test --experimental-test-coverage --test-reporter=spec --test-reporter-destination=stdout --test-reporter=junit --test-reporter-destination=reports/javascript-junit.xml && node ./scripts/export-coverage.js"
  }
}
0  web/public/.keep  Normal file

126  web/public/assets/js/app/__tests__/chat-format.test.js  Normal file
@@ -0,0 +1,126 @@
/*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import {
|
||||
extractChatMessageMetadata,
|
||||
formatChatMessagePrefix,
|
||||
formatChatChannelTag,
|
||||
formatNodeAnnouncementPrefix,
|
||||
__test__
|
||||
} from '../chat-format.js';
|
||||
|
||||
const {
|
||||
firstNonNull,
|
||||
normalizeString,
|
||||
normalizeFrequency,
|
||||
normalizeFrequencySlot,
|
||||
FREQUENCY_PLACEHOLDER
|
||||
} = __test__;
|
||||
|
||||
test('extractChatMessageMetadata prefers explicit region_frequency and channel_name', () => {
|
||||
const payload = {
|
||||
region_frequency: 868,
|
||||
channel_name: ' Test Channel ',
|
||||
lora_freq: 915,
|
||||
channelName: 'Ignored'
|
||||
};
|
||||
const result = extractChatMessageMetadata(payload);
|
||||
assert.deepEqual(result, { frequency: '868', channelName: 'Test Channel' });
|
||||
});
|
||||
|
||||
test('extractChatMessageMetadata falls back to LoRa metadata', () => {
|
||||
const payload = {
|
||||
lora_freq: 915,
|
||||
channelName: 'SpecChannel'
|
||||
};
|
||||
const result = extractChatMessageMetadata(payload);
|
||||
assert.deepEqual(result, { frequency: '915', channelName: 'SpecChannel' });
|
||||
});
|
||||
|
||||
test('extractChatMessageMetadata returns null metadata for invalid input', () => {
|
||||
assert.deepEqual(extractChatMessageMetadata(null), { frequency: null, channelName: null });
|
||||
assert.deepEqual(extractChatMessageMetadata(undefined), { frequency: null, channelName: null });
|
||||
});
|
||||
|
||||
test('firstNonNull returns the first non-null candidate', () => {
|
||||
assert.equal(firstNonNull(null, undefined, '', 'value'), '');
|
||||
assert.equal(firstNonNull(undefined, null), null);
|
||||
});
|
||||
|
||||
test('normalizeString trims strings and rejects empties', () => {
|
||||
assert.equal(normalizeString(' Spec '), 'Spec');
|
||||
assert.equal(normalizeString(' '), null);
|
||||
assert.equal(normalizeString(123), '123');
|
||||
assert.equal(normalizeString(Number.POSITIVE_INFINITY), null);
|
||||
});
|
||||
|
||||
test('normalizeFrequency handles numeric and string inputs', () => {
|
||||
assert.equal(normalizeFrequency(915), '915');
|
||||
assert.equal(normalizeFrequency(868.125), '868.125');
|
||||
assert.equal(normalizeFrequency(' 868MHz '), '868');
|
||||
assert.equal(normalizeFrequency('n/a'), 'n/a');
|
||||
assert.equal(normalizeFrequency(-5), null);
|
||||
assert.equal(normalizeFrequency(null), null);
|
||||
});
|
||||
|
||||
test('formatChatMessagePrefix preserves bracket placeholders', () => {
|
||||
assert.equal(
|
||||
formatChatMessagePrefix({ timestamp: '11:46:48', frequency: '868' }),
|
||||
'[11:46:48][868]'
|
||||
);
|
||||
assert.equal(
|
||||
formatChatMessagePrefix({ timestamp: '16:19:19', frequency: null }),
|
||||
`[16:19:19][${FREQUENCY_PLACEHOLDER}]`
|
||||
);
|
||||
assert.equal(
|
||||
formatChatMessagePrefix({ timestamp: '09:00:00', frequency: '' }),
|
||||
`[09:00:00][${FREQUENCY_PLACEHOLDER}]`
|
||||
);
|
||||
});
|
||||
|
||||
test('formatChatChannelTag wraps channel names after the short name slot', () => {
|
||||
assert.equal(
|
||||
formatChatChannelTag({ channelName: 'TEST' }),
|
||||
'[TEST]'
|
||||
);
|
||||
assert.equal(
|
||||
formatChatChannelTag({ channelName: '' }),
|
||||
'[]'
|
||||
);
|
||||
assert.equal(
|
||||
formatChatChannelTag({ channelName: null }),
|
||||
'[]'
|
||||
);
|
||||
});
|
||||
|
||||
test('formatNodeAnnouncementPrefix includes optional frequency bracket', () => {
|
||||
assert.equal(
|
||||
formatNodeAnnouncementPrefix({ timestamp: '12:34:56', frequency: '868' }),
|
||||
'[12:34:56][868]'
|
||||
);
|
||||
assert.equal(
|
||||
formatNodeAnnouncementPrefix({ timestamp: '01:02:03', frequency: null }),
|
||||
`[01:02:03][${FREQUENCY_PLACEHOLDER}]`
|
||||
);
|
||||
});
|
||||
|
||||
test('normalizeFrequencySlot returns placeholder when frequency is missing', () => {
|
||||
assert.equal(normalizeFrequencySlot(null), FREQUENCY_PLACEHOLDER);
|
||||
assert.equal(normalizeFrequencySlot(''), FREQUENCY_PLACEHOLDER);
|
||||
assert.equal(normalizeFrequencySlot(undefined), FREQUENCY_PLACEHOLDER);
|
||||
assert.equal(normalizeFrequencySlot('915'), '915');
|
||||
});
117  web/public/assets/js/app/__tests__/config.test.js  Normal file
@@ -0,0 +1,117 @@
|
||||
/*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
import { documentStub, resetDocumentStub } from './document-stub.js';
|
||||
|
||||
import { readAppConfig } from '../config.js';
|
||||
import { DEFAULT_CONFIG, mergeConfig } from '../settings.js';
|
||||
|
||||
test('readAppConfig returns an empty object when the configuration element is missing', () => {
|
||||
resetDocumentStub();
|
||||
assert.deepEqual(readAppConfig(), {});
|
||||
});
|
||||
|
||||
test('readAppConfig returns an empty object when the attribute is empty', () => {
|
||||
resetDocumentStub();
|
||||
documentStub.setConfigElement({ getAttribute: () => '' });
|
||||
assert.deepEqual(readAppConfig(), {});
|
||||
});
|
||||
|
||||
test('readAppConfig parses configuration JSON from the DOM attribute', () => {
|
||||
resetDocumentStub();
|
||||
const data = { refreshMs: 5000, chatEnabled: false };
|
||||
documentStub.setConfigElement({
|
||||
getAttribute: name => (name === 'data-app-config' ? JSON.stringify(data) : null)
|
||||
});
|
||||
assert.deepEqual(readAppConfig(), data);
|
||||
});
|
||||
|
||||
test('readAppConfig returns an empty object and logs on parse failure', () => {
|
||||
resetDocumentStub();
|
||||
let called = false;
|
||||
const originalError = console.error;
|
||||
console.error = () => {
|
||||
called = true;
|
||||
};
|
||||
documentStub.setConfigElement({
|
||||
getAttribute: name => (name === 'data-app-config' ? 'not-json' : null)
|
||||
});
|
||||
|
||||
assert.deepEqual(readAppConfig(), {});
|
||||
assert.equal(called, true);
|
||||
console.error = originalError;
|
||||
});
|
||||
|
||||
test('readAppConfig ignores non-object JSON payloads', () => {
|
||||
resetDocumentStub();
|
||||
documentStub.setConfigElement({
|
||||
getAttribute: name => (name === 'data-app-config' ? '42' : null)
|
||||
});
|
||||
|
||||
assert.deepEqual(readAppConfig(), {});
|
||||
});
|
||||
|
||||
test('mergeConfig applies default values when fields are missing', () => {
|
||||
const result = mergeConfig({});
|
||||
assert.deepEqual(result, {
|
||||
...DEFAULT_CONFIG,
|
||||
mapCenter: { ...DEFAULT_CONFIG.mapCenter },
|
||||
tileFilters: { ...DEFAULT_CONFIG.tileFilters }
|
||||
});
|
||||
});
|
||||
|
||||
test('mergeConfig coerces numeric values and nested objects', () => {
|
||||
const result = mergeConfig({
|
||||
refreshIntervalSeconds: '30',
|
||||
refreshMs: '45000',
|
||||
mapCenter: { lat: '10.5', lon: '20.1' },
|
||||
tileFilters: { dark: 'contrast(2)' },
|
||||
chatEnabled: 0,
|
||||
channel: '#Custom',
|
||||
frequency: '915MHz',
|
||||
contactLink: 'https://example.org/chat',
|
||||
contactLinkUrl: 'https://example.org/chat',
|
||||
maxDistanceKm: '55.5'
|
||||
});
|
||||
|
||||
assert.equal(result.refreshIntervalSeconds, 30);
|
||||
assert.equal(result.refreshMs, 45000);
|
||||
assert.deepEqual(result.mapCenter, { lat: 10.5, lon: 20.1 });
|
||||
assert.deepEqual(result.tileFilters, { light: DEFAULT_CONFIG.tileFilters.light, dark: 'contrast(2)' });
|
||||
assert.equal(result.chatEnabled, false);
|
||||
assert.equal(result.channel, '#Custom');
|
||||
assert.equal(result.frequency, '915MHz');
|
||||
assert.equal(result.contactLink, 'https://example.org/chat');
|
||||
assert.equal(result.contactLinkUrl, 'https://example.org/chat');
|
||||
assert.equal(result.maxDistanceKm, 55.5);
|
||||
});
|
||||
|
||||
test('mergeConfig falls back to defaults for invalid numeric values', () => {
|
||||
const result = mergeConfig({
|
||||
refreshIntervalSeconds: 'NaN',
|
||||
refreshMs: 'NaN',
|
||||
maxDistanceKm: 'oops'
|
||||
});
|
||||
|
||||
assert.equal(result.refreshIntervalSeconds, DEFAULT_CONFIG.refreshIntervalSeconds);
|
||||
assert.equal(result.refreshMs, DEFAULT_CONFIG.refreshMs);
|
||||
assert.equal(result.maxDistanceKm, DEFAULT_CONFIG.maxDistanceKm);
|
||||
});
|
||||
|
||||
test('document stub returns null for unrelated selectors', () => {
|
||||
resetDocumentStub();
|
||||
assert.equal(documentStub.querySelector('#missing'), null);
|
||||
});
99  web/public/assets/js/app/__tests__/document-stub.js  Normal file
@@ -0,0 +1,99 @@
|
||||
/*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Minimal document implementation that exposes the subset of behaviour needed
|
||||
* by the front-end modules during unit tests.
|
||||
*/
|
||||
class DocumentStub {
|
||||
/**
|
||||
* Instantiate a new stub with a clean internal state.
|
||||
*/
|
||||
constructor() {
|
||||
this.reset();
|
||||
}
|
||||
|
||||
/**
|
||||
* Clear tracked configuration elements and registered event listeners.
|
||||
*
|
||||
* @returns {void}
|
||||
*/
|
||||
reset() {
|
||||
this.configElement = null;
|
||||
this.listeners = new Map();
|
||||
}
|
||||
|
||||
/**
|
||||
* Provide an element that will be returned by ``querySelector`` when the
|
||||
* configuration selector is requested.
|
||||
*
|
||||
* @param {?Element} element DOM node exposing ``getAttribute``.
|
||||
* @returns {void}
|
||||
*/
|
||||
setConfigElement(element) {
|
||||
this.configElement = element;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the registered configuration element when the matching selector is
|
||||
* provided.
|
||||
*
|
||||
* @param {string} selector CSS selector requested by the module under test.
|
||||
* @returns {?Element} Config element or ``null`` when unavailable.
|
||||
*/
|
||||
querySelector(selector) {
|
||||
if (selector === '[data-app-config]') {
|
||||
return this.configElement;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register an event handler, mirroring the DOM ``addEventListener`` API.
|
||||
*
|
||||
* @param {string} event Event identifier.
|
||||
* @param {Function} handler Callback invoked when ``dispatchEvent`` is
|
||||
* called.
|
||||
* @returns {void}
|
||||
*/
|
||||
addEventListener(event, handler) {
|
||||
this.listeners.set(event, handler);
|
||||
}
|
||||
|
||||
/**
|
||||
* Trigger a previously registered listener.
|
||||
*
|
||||
* @param {string} event Event identifier used when registering the handler.
|
||||
* @returns {void}
|
||||
*/
|
||||
dispatchEvent(event) {
|
||||
const handler = this.listeners.get(event);
|
||||
if (handler) {
|
||||
handler();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const documentStub = new DocumentStub();
|
||||
|
||||
/**
|
||||
* Reset the shared stub between test cases to avoid state bleed.
|
||||
*
|
||||
* @returns {void}
|
||||
*/
|
||||
export function resetDocumentStub() {
|
||||
documentStub.reset();
|
||||
}
|
||||
|
||||
globalThis.document = documentStub;
292  web/public/assets/js/app/__tests__/dom-environment.js  Normal file
@@ -0,0 +1,292 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Simple class list implementation supporting the subset of DOMTokenList
|
||||
* behaviour required by the tests.
|
||||
*/
|
||||
class MockClassList {
|
||||
constructor() {
|
||||
this._values = new Set();
|
||||
}
|
||||
|
||||
/**
|
||||
* Add one or more CSS classes to the element.
|
||||
*
|
||||
* @param {...string} names Class names to insert into the list.
|
||||
* @returns {void}
|
||||
*/
|
||||
add(...names) {
|
||||
names.forEach(name => {
|
||||
if (name) {
|
||||
this._values.add(name);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove one or more CSS classes from the element.
|
||||
*
|
||||
* @param {...string} names Class names to delete from the list.
|
||||
* @returns {void}
|
||||
*/
|
||||
remove(...names) {
|
||||
names.forEach(name => {
|
||||
if (name) {
|
||||
this._values.delete(name);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine whether the class list currently contains ``name``.
|
||||
*
|
||||
* @param {string} name Target class name.
|
||||
* @returns {boolean} ``true`` when the class is present.
|
||||
*/
|
||||
contains(name) {
|
||||
return this._values.has(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Toggle the provided class name.
|
||||
*
|
||||
* @param {string} name Class name to toggle.
|
||||
* @param {boolean} [force] Optional forced state mirroring ``DOMTokenList``.
|
||||
* @returns {boolean} ``true`` when the class is present after toggling.
|
||||
*/
|
||||
toggle(name, force) {
|
||||
if (force === true) {
|
||||
this._values.add(name);
|
||||
return true;
|
||||
}
|
||||
if (force === false) {
|
||||
this._values.delete(name);
|
||||
return false;
|
||||
}
|
||||
if (this._values.has(name)) {
|
||||
this._values.delete(name);
|
||||
return false;
|
||||
}
|
||||
this._values.add(name);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Minimal DOM element implementation exposing the subset of behaviour exercised
|
||||
* by the frontend entrypoints.
|
||||
*/
|
||||
class MockElement {
|
||||
/**
|
||||
* @param {string} tagName Element name used for diagnostics.
|
||||
* @param {Map<string, MockElement>} registry Storage shared with the
|
||||
* containing document to support ``getElementById``.
|
||||
*/
|
||||
constructor(tagName, registry) {
|
||||
this.tagName = tagName.toUpperCase();
|
||||
this._registry = registry;
|
||||
this.attributes = new Map();
|
||||
this.dataset = {};
|
||||
this.style = {};
|
||||
this.textContent = '';
|
||||
this.classList = new MockClassList();
|
||||
}
|
||||
|
||||
/**
|
||||
* Associate an attribute with the element.
|
||||
*
|
||||
* @param {string} name Attribute identifier.
|
||||
* @param {string} value Attribute value.
|
||||
* @returns {void}
|
||||
*/
|
||||
setAttribute(name, value) {
|
||||
this.attributes.set(name, String(value));
|
||||
if (name === 'id' && this._registry) {
|
||||
this._registry.set(String(value), this);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve an attribute value.
|
||||
*
|
||||
* @param {string} name Attribute identifier.
|
||||
* @returns {?string} Matching attribute or ``null`` when absent.
|
||||
*/
|
||||
getAttribute(name) {
|
||||
return this.attributes.has(name) ? this.attributes.get(name) : null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a deterministic DOM environment that provides just enough behaviour
|
||||
* for the UI scripts to execute inside Node.js unit tests.
|
||||
*
|
||||
* @param {{
|
||||
* readyState?: 'loading' | 'interactive' | 'complete',
|
||||
* cookie?: string,
|
||||
* includeBody?: boolean,
|
||||
* bodyHasDarkClass?: boolean
|
||||
* }} [options]
|
||||
* @returns {{
|
||||
* window: Window & { dispatchEvent: Function },
|
||||
* document: Document,
|
||||
* createElement: (tagName?: string, id?: string) => MockElement,
|
||||
* registerElement: (id: string, element: MockElement) => void,
|
||||
* setComputedStyleImplementation: (impl: Function) => void,
|
||||
* triggerDOMContentLoaded: () => void,
|
||||
* dispatchWindowEvent: (event: string) => void,
|
||||
* getCookieString: () => string,
|
||||
* setCookieString: (value: string) => void,
|
||||
* cleanup: () => void
|
||||
* }}
|
||||
*/
|
||||
export function createDomEnvironment(options = {}) {
|
||||
const {
|
||||
readyState = 'complete',
|
||||
cookie = '',
|
||||
includeBody = true,
|
||||
bodyHasDarkClass = true
|
||||
} = options;
|
||||
|
||||
const originalWindow = globalThis.window;
|
||||
const originalDocument = globalThis.document;
|
||||
|
||||
const registry = new Map();
|
||||
const documentListeners = new Map();
|
||||
const windowListeners = new Map();
|
||||
let computedStyleImpl = null;
|
||||
let cookieStore = cookie;
|
||||
|
||||
const document = {
|
||||
readyState,
|
||||
documentElement: new MockElement('html', registry),
|
||||
body: includeBody ? new MockElement('body', registry) : null,
|
||||
addEventListener(event, handler) {
|
||||
documentListeners.set(event, handler);
|
||||
},
|
||||
removeEventListener(event) {
|
||||
documentListeners.delete(event);
|
||||
},
|
||||
dispatchEvent(event) {
|
||||
const handler = documentListeners.get(event);
|
||||
if (handler) handler();
|
||||
},
|
||||
getElementById(id) {
|
||||
return registry.get(id) || null;
|
||||
},
|
||||
querySelector() {
|
||||
return null;
|
||||
},
|
||||
createElement(tagName) {
|
||||
return new MockElement(tagName, registry);
|
||||
}
|
||||
};
|
||||
|
||||
if (document.body && bodyHasDarkClass) {
|
||||
document.body.classList.add('dark');
|
||||
}
|
||||
|
||||
Object.defineProperty(document, 'cookie', {
|
||||
get() {
|
||||
return cookieStore;
|
||||
},
|
||||
set(value) {
|
||||
cookieStore = cookieStore ? `${cookieStore}; ${value}` : value;
|
||||
}
|
||||
});
|
||||
|
||||
const window = {
|
||||
document,
|
||||
addEventListener(event, handler) {
|
||||
windowListeners.set(event, handler);
|
||||
},
|
||||
removeEventListener(event) {
|
||||
windowListeners.delete(event);
|
||||
},
|
||||
dispatchEvent(event) {
|
||||
const handler = windowListeners.get(event);
|
||||
if (handler) handler();
|
||||
},
|
||||
getComputedStyle(target) {
|
||||
if (typeof computedStyleImpl === 'function') {
|
||||
return computedStyleImpl(target);
|
||||
}
|
||||
return {
|
||||
getPropertyValue() {
|
||||
return '';
|
||||
}
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
globalThis.window = window;
|
||||
globalThis.document = document;
|
||||
|
||||
/**
|
||||
* Create and optionally register a mock element.
|
||||
*
|
||||
* @param {string} [tagName='div'] Tag name of the element.
|
||||
* @param {string} [id] Optional identifier registered with the document.
|
||||
* @returns {MockElement} New mock element instance.
|
||||
*/
|
||||
function createElement(tagName = 'div', id) {
|
||||
const element = new MockElement(tagName, registry);
|
||||
if (id) {
|
||||
element.setAttribute('id', id);
|
||||
}
|
||||
return element;
|
||||
}
|
||||
|
||||
/**
|
||||
* Register an element instance so that ``getElementById`` can resolve it.
|
||||
*
|
||||
* @param {string} id Element identifier.
|
||||
* @param {MockElement} element Element instance to register.
|
||||
* @returns {void}
|
||||
*/
|
||||
function registerElement(id, element) {
|
||||
registry.set(id, element);
|
||||
}
|
||||
|
||||
return {
|
||||
window,
|
||||
document,
|
||||
createElement,
|
||||
registerElement,
|
||||
setComputedStyleImplementation(impl) {
|
||||
computedStyleImpl = impl;
|
||||
},
|
||||
triggerDOMContentLoaded() {
|
||||
const handler = documentListeners.get('DOMContentLoaded');
|
||||
if (handler) handler();
|
||||
},
|
||||
dispatchWindowEvent(event) {
|
||||
const handler = windowListeners.get(event);
|
||||
if (handler) handler();
|
||||
},
|
||||
getCookieString() {
|
||||
return cookieStore;
|
||||
},
|
||||
setCookieString(value) {
|
||||
cookieStore = value;
|
||||
},
|
||||
cleanup() {
|
||||
globalThis.window = originalWindow;
|
||||
globalThis.document = originalDocument;
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -0,0 +1,162 @@
|
||||
/**
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { createMapAutoFitController } from '../map-auto-fit-controller.js';
|
||||
|
||||
class ToggleStub extends EventTarget {
|
||||
constructor(checked = true) {
|
||||
super();
|
||||
this.checked = checked;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {Event} event - Event to dispatch to listeners.
|
||||
* @returns {boolean} Dispatch status.
|
||||
*/
|
||||
dispatchEvent(event) {
|
||||
return super.dispatchEvent(event);
|
||||
}
|
||||
}
|
||||
|
||||
class WindowStub {
|
||||
constructor() {
|
||||
this.listeners = new Map();
|
||||
}
|
||||
|
||||
addEventListener(type, listener) {
|
||||
this.listeners.set(type, listener);
|
||||
}
|
||||
|
||||
removeEventListener(type, listener) {
|
||||
const existing = this.listeners.get(type);
|
||||
if (existing === listener) {
|
||||
this.listeners.delete(type);
|
||||
}
|
||||
}
|
||||
|
||||
emit(type) {
|
||||
const listener = this.listeners.get(type);
|
||||
if (listener) listener();
|
||||
}
|
||||
}
|
||||
|
||||
test('recordFit stores and clones the last fit snapshot', () => {
|
||||
const toggle = new ToggleStub(true);
|
||||
const controller = createMapAutoFitController({ toggleEl: toggle, defaultPaddingPx: 20 });
|
||||
|
||||
assert.equal(controller.getLastFit(), null);
|
||||
|
||||
controller.recordFit([[10, 20], [30, 40]], { paddingPx: 12, maxZoom: 9 });
|
||||
const snapshot = controller.getLastFit();
|
||||
assert.ok(snapshot);
|
||||
assert.deepEqual(snapshot.bounds, [[10, 20], [30, 40]]);
|
||||
assert.deepEqual(snapshot.options, { paddingPx: 12, maxZoom: 9 });
|
||||
|
||||
snapshot.bounds[0][0] = -999;
|
||||
snapshot.options.paddingPx = -1;
|
||||
const secondSnapshot = controller.getLastFit();
|
||||
assert.deepEqual(secondSnapshot?.bounds, [[10, 20], [30, 40]]);
|
||||
assert.deepEqual(secondSnapshot?.options, { paddingPx: 12, maxZoom: 9 });
|
||||
});
|
||||
|
||||
|
||||
test('recordFit ignores invalid bounds and normalises fit options', () => {
|
||||
const controller = createMapAutoFitController({ defaultPaddingPx: 16 });
|
||||
|
||||
controller.recordFit(null);
|
||||
assert.equal(controller.getLastFit(), null);
|
||||
|
||||
controller.recordFit([[10, Number.NaN], [20, 30]]);
|
||||
assert.equal(controller.getLastFit(), null);
|
||||
|
||||
controller.recordFit([[10, 11], [12, 13]], { paddingPx: -5, maxZoom: 0 });
|
||||
const snapshot = controller.getLastFit();
|
||||
assert.ok(snapshot);
|
||||
assert.deepEqual(snapshot.options, { paddingPx: 16 });
|
||||
});
|
||||
|
||||
|
||||
test('handleUserInteraction disables auto-fit unless suppressed', () => {
|
||||
const toggle = new ToggleStub(true);
|
||||
let changeEvents = 0;
|
||||
toggle.addEventListener('change', () => {
|
||||
changeEvents += 1;
|
||||
});
|
||||
const controller = createMapAutoFitController({ toggleEl: toggle });
|
||||
|
||||
controller.runAutoFitOperation(() => {
|
||||
assert.equal(controller.handleUserInteraction(), false);
|
||||
assert.equal(toggle.checked, true);
|
||||
});
|
||||
assert.equal(changeEvents, 0);
|
||||
|
||||
assert.equal(controller.handleUserInteraction(), true);
|
||||
assert.equal(toggle.checked, false);
|
||||
assert.equal(changeEvents, 1);
|
||||
|
||||
assert.equal(controller.handleUserInteraction(), false);
|
||||
assert.equal(changeEvents, 1);
|
||||
});
|
||||
|
||||
|
||||
test('isAutoFitEnabled reflects the toggle state', () => {
|
||||
const toggle = new ToggleStub(false);
|
||||
const controller = createMapAutoFitController({ toggleEl: toggle });
|
||||
assert.equal(controller.isAutoFitEnabled(), false);
|
||||
toggle.checked = true;
|
||||
assert.equal(controller.isAutoFitEnabled(), true);
|
||||
});
|
||||
|
||||
|
||||
test('runAutoFitOperation returns callback results and tolerates missing functions', () => {
|
||||
const controller = createMapAutoFitController();
|
||||
assert.equal(controller.runAutoFitOperation(), undefined);
|
||||
let active = false;
|
||||
const result = controller.runAutoFitOperation(() => {
|
||||
active = true;
|
||||
return 42;
|
||||
});
|
||||
assert.equal(active, true);
|
||||
assert.equal(result, 42);
|
||||
});
|
||||
|
||||
|
||||
test('attachResizeListener forwards snapshots and supports teardown', () => {
|
||||
const windowStub = new WindowStub();
|
||||
const controller = createMapAutoFitController({ windowObject: windowStub, defaultPaddingPx: 24 });
|
||||
controller.recordFit([[1, 2], [3, 4]], { paddingPx: 30 });
|
||||
|
||||
let snapshots = [];
|
||||
const detach = controller.attachResizeListener(snapshot => {
|
||||
snapshots.push(snapshot);
|
||||
});
|
||||
|
||||
windowStub.emit('resize');
|
||||
windowStub.emit('orientationchange');
|
||||
assert.equal(snapshots.length, 2);
|
||||
assert.deepEqual(snapshots[0], { bounds: [[1, 2], [3, 4]], options: { paddingPx: 30 } });
|
||||
|
||||
detach();
|
||||
windowStub.emit('resize');
|
||||
assert.equal(snapshots.length, 2);
|
||||
|
||||
const noop = controller.attachResizeListener();
|
||||
assert.equal(typeof noop, 'function');
|
||||
noop();
|
||||
});
138  web/public/assets/js/app/__tests__/map-bounds.test.js  Normal file
@@ -0,0 +1,138 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import {
|
||||
computeBoundingBox,
|
||||
computeBoundsForPoints,
|
||||
haversineDistanceKm,
|
||||
__testUtils
|
||||
} from '../map-bounds.js';
|
||||
|
||||
const { clampLatitude, clampLongitude, normaliseRange, normaliseLongitudeAround } = __testUtils;
|
||||
|
||||
function approximatelyEqual(actual, expected, epsilon = 1e-3) {
|
||||
assert.ok(Math.abs(actual - expected) <= epsilon, `${actual} is not within ${epsilon} of ${expected}`);
|
||||
}
|
||||
|
||||
test('clamp helpers bound invalid coordinates', () => {
|
||||
assert.equal(clampLatitude(120), 90);
|
||||
assert.equal(clampLatitude(-95), -90);
|
||||
assert.equal(clampLatitude(Number.POSITIVE_INFINITY), 90);
|
||||
assert.equal(clampLatitude(Number.NEGATIVE_INFINITY), -90);
|
||||
|
||||
assert.equal(clampLongitude(200), 180);
|
||||
assert.equal(clampLongitude(-220), -180);
|
||||
assert.equal(clampLongitude(Number.POSITIVE_INFINITY), 180);
|
||||
assert.equal(clampLongitude(Number.NEGATIVE_INFINITY), -180);
|
||||
});
|
||||
|
||||
|
||||
test('normaliseRange enforces minimum distance for invalid inputs', () => {
|
||||
assert.equal(normaliseRange(-1, 2), 2);
|
||||
assert.equal(normaliseRange(Number.NaN, 3), 3);
|
||||
assert.equal(normaliseRange(0, 1), 1);
|
||||
assert.equal(normaliseRange(4, 2), 4);
|
||||
});
|
||||
|
||||
|
||||
test('computeBoundingBox returns null for invalid centres', () => {
|
||||
assert.equal(computeBoundingBox(null, 10), null);
|
||||
assert.equal(computeBoundingBox({ lat: 'x', lon: 0 }, 5), null);
|
||||
assert.equal(computeBoundingBox({ lat: 0, lon: NaN }, 5), null);
|
||||
});
|
||||
|
||||
|
||||
test('computeBoundingBox returns symmetric bounds for mid-latitude centre', () => {
|
||||
const bounds = computeBoundingBox({ lat: 0, lon: 0 }, 10);
|
||||
assert.ok(bounds);
|
||||
const [[south, west], [north, east]] = bounds;
|
||||
approximatelyEqual(north, -south, 1e-4);
|
||||
approximatelyEqual(east, -west, 1e-4);
|
||||
assert.ok(north > 0 && east > 0);
|
||||
});
|
||||
|
||||
|
||||
test('computeBoundingBox clamps longitude span near the poles', () => {
|
||||
const bounds = computeBoundingBox({ lat: 89.9, lon: 45 }, 2000);
|
||||
assert.ok(bounds);
|
||||
const [[south, west], [north, east]] = bounds;
|
||||
approximatelyEqual(south, 72.0, 1e-1);
|
||||
assert.equal(west, -180);
|
||||
assert.equal(east, 180);
|
||||
assert.equal(north, 90);
|
||||
});
|
||||
|
||||
|
||||
test('haversineDistanceKm matches known city distance', () => {
|
||||
// Approximate distance between Paris (48.8566, 2.3522) and Berlin (52.52, 13.4050)
|
||||
const distance = haversineDistanceKm(48.8566, 2.3522, 52.52, 13.405);
|
||||
approximatelyEqual(distance, 878.8, 2);
|
||||
});
|
||||
|
||||
|
||||
test('computeBoundsForPoints returns null when no valid points exist', () => {
|
||||
assert.equal(computeBoundsForPoints([]), null);
|
||||
assert.equal(computeBoundsForPoints([[Number.NaN, 0]]), null);
|
||||
});
|
||||
|
||||
|
||||
test('computeBoundsForPoints expands bounds with padding and minimum radius', () => {
|
||||
const bounds = computeBoundsForPoints(
|
||||
[
|
||||
[38.0, -27.1],
|
||||
[38.05, -27.08]
|
||||
],
|
||||
{ paddingFraction: 0.2, minimumRangeKm: 2 }
|
||||
);
|
||||
assert.ok(bounds);
|
||||
const [[south, west], [north, east]] = bounds;
|
||||
assert.ok(north > 38.05);
|
||||
assert.ok(south < 38.0);
|
||||
assert.ok(east > -27.08);
|
||||
assert.ok(west < -27.1);
|
||||
});
|
||||
|
||||
|
||||
test('computeBoundsForPoints respects the configured minimum range for single points', () => {
|
||||
const bounds = computeBoundsForPoints([[12.34, 56.78]], { minimumRangeKm: 5 });
|
||||
assert.ok(bounds);
|
||||
const [[south], [north]] = bounds;
|
||||
assert.ok(north - south > 0.05);
|
||||
});
|
||||
|
||||
|
||||
test('computeBoundsForPoints preserves tight bounds across the antimeridian', () => {
|
||||
const points = [
|
||||
[10.0, 179.5],
|
||||
[11.2, -179.7],
|
||||
[9.5, 179.2]
|
||||
];
|
||||
const bounds = computeBoundsForPoints(points, { paddingFraction: 0.1 });
|
||||
assert.ok(bounds);
|
||||
const [[south, west], [north, east]] = bounds;
|
||||
assert.ok(north - south < 10, 'latitude span should remain tight');
|
||||
const lonSpan = Math.abs(east - west);
|
||||
const normalizedSpan = lonSpan > 180 ? 360 - lonSpan : lonSpan;
|
||||
assert.ok(normalizedSpan < 40, 'longitude span should wrap tightly around the dateline');
|
||||
for (const [, lon] of points) {
|
||||
const adjustedLon = normaliseLongitudeAround(lon, (west + east) / 2);
|
||||
assert.ok(adjustedLon >= west - 1e-6 && adjustedLon <= east + 1e-6, 'point longitude should lie within bounds');
|
||||
}
|
||||
assert.ok(east > 180 || west < -180, 'bounds should extend beyond the canonical range when necessary');
|
||||
});
244  web/public/assets/js/app/__tests__/map-marker-node-info.test.js  Normal file
@@ -0,0 +1,244 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { attachNodeInfoRefreshToMarker, overlayToPopupNode } from '../map-marker-node-info.js';
|
||||
|
||||
function createFakeMarker(anchor) {
|
||||
const handlers = {};
|
||||
return {
|
||||
handlers,
|
||||
on(name, handler) {
|
||||
if (!handlers[name]) handlers[name] = [];
|
||||
handlers[name].push(handler);
|
||||
return this;
|
||||
},
|
||||
getElement() {
|
||||
return anchor;
|
||||
},
|
||||
trigger(name, payload) {
|
||||
for (const handler of handlers[name] || []) {
|
||||
handler(payload);
|
||||
}
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
test('attachNodeInfoRefreshToMarker refreshes markers with merged overlay details', async () => {
|
||||
const anchor = { id: 'anchor-el' };
|
||||
const marker = createFakeMarker(anchor);
|
||||
const popupUpdates = [];
|
||||
const detailCalls = [];
|
||||
let prevented = false;
|
||||
let stopped = false;
|
||||
let token = 0;
|
||||
const refreshCalls = [];
|
||||
|
||||
attachNodeInfoRefreshToMarker({
|
||||
marker,
|
||||
getOverlayFallback: () => ({ nodeId: '!foo', shortName: 'Foo', role: 'CLIENT', neighbors: [] }),
|
||||
refreshNodeInformation: async reference => {
|
||||
refreshCalls.push(reference);
|
||||
return { battery: 55.5, telemetryTime: 123, neighbors: [{ neighbor_id: '!bar', snr: 9.5 }] };
|
||||
},
|
||||
mergeOverlayDetails: (primary, fallback) => ({ ...fallback, ...primary }),
|
||||
createRequestToken: el => {
|
||||
assert.equal(el, anchor);
|
||||
return ++token;
|
||||
},
|
||||
isTokenCurrent: (el, candidate) => {
|
||||
assert.equal(el, anchor);
|
||||
return candidate === token;
|
||||
},
|
||||
showLoading: (el, info) => {
|
||||
assert.equal(el, anchor);
|
||||
assert.equal(info.nodeId, '!foo');
|
||||
},
|
||||
showDetails: (el, info) => {
|
||||
detailCalls.push({ el, info });
|
||||
},
|
||||
showError: () => {
|
||||
assert.fail('showError should not be invoked on success');
|
||||
},
|
||||
updatePopup: info => {
|
||||
popupUpdates.push(info);
|
||||
},
|
||||
});
|
||||
|
||||
const clickEvent = {
|
||||
originalEvent: {
|
||||
preventDefault() {
|
||||
prevented = true;
|
||||
},
|
||||
stopPropagation() {
|
||||
stopped = true;
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
marker.trigger('click', clickEvent);
|
||||
await new Promise(resolve => setImmediate(resolve));
|
||||
|
||||
assert.equal(prevented, true);
|
||||
assert.equal(stopped, true);
|
||||
assert.equal(refreshCalls.length, 1);
|
||||
assert.deepEqual(refreshCalls[0], {
|
||||
nodeId: '!foo',
|
||||
fallback: { nodeId: '!foo', shortName: 'Foo', role: 'CLIENT', neighbors: [] },
|
||||
});
|
||||
assert.ok(popupUpdates.length >= 1);
|
||||
const merged = popupUpdates[popupUpdates.length - 1];
|
||||
assert.equal(merged.battery, 55.5);
|
||||
assert.equal(merged.telemetryTime, 123);
|
||||
assert.equal(detailCalls.length, 1);
|
||||
assert.equal(detailCalls[0].el, anchor);
|
||||
assert.equal(detailCalls[0].info.battery, 55.5);
|
||||
});
|
||||
|
||||
test('attachNodeInfoRefreshToMarker surfaces errors with fallback overlays', async () => {
|
||||
const anchor = { id: 'anchor' };
|
||||
const marker = createFakeMarker(anchor);
|
||||
let token = 0;
|
||||
let errorCaptured = null;
|
||||
let detailCalls = 0;
|
||||
let updateCalls = 0;
|
||||
|
||||
attachNodeInfoRefreshToMarker({
|
||||
marker,
|
||||
getOverlayFallback: () => ({ nodeId: '!oops', shortName: 'Oops' }),
|
||||
refreshNodeInformation: async () => {
|
||||
throw new Error('boom');
|
||||
},
|
||||
mergeOverlayDetails: (primary, fallback) => ({ ...fallback, ...primary }),
|
||||
createRequestToken: el => {
|
||||
assert.equal(el, anchor);
|
||||
return ++token;
|
||||
},
|
||||
isTokenCurrent: (el, candidate) => {
|
||||
assert.equal(el, anchor);
|
||||
return candidate === token;
|
||||
},
|
||||
showLoading: () => {},
|
||||
showDetails: () => {
|
||||
detailCalls += 1;
|
||||
},
|
||||
showError: (el, info, error) => {
|
||||
assert.equal(el, anchor);
|
||||
assert.equal(info.nodeId, '!oops');
|
||||
errorCaptured = error;
|
||||
},
|
||||
updatePopup: () => {
|
||||
updateCalls += 1;
|
||||
},
|
||||
});
|
||||
|
||||
marker.trigger('click', { originalEvent: {} });
|
||||
await new Promise(resolve => setImmediate(resolve));
|
||||
|
||||
assert.ok(errorCaptured instanceof Error);
|
||||
assert.equal(errorCaptured.message, 'boom');
|
||||
assert.equal(detailCalls, 0);
|
||||
assert.equal(updateCalls, 2);
|
||||
});
|
||||
|
||||
test('attachNodeInfoRefreshToMarker skips refresh when identifiers are missing', async () => {
|
||||
const anchor = { id: 'anchor' };
|
||||
const marker = createFakeMarker(anchor);
|
||||
let token = 0;
|
||||
let refreshed = false;
|
||||
let detailsShown = 0;
|
||||
|
||||
attachNodeInfoRefreshToMarker({
|
||||
marker,
|
||||
getOverlayFallback: () => ({ shortName: 'Unknown' }),
|
||||
refreshNodeInformation: async () => {
|
||||
refreshed = true;
|
||||
},
|
||||
mergeOverlayDetails: (primary, fallback) => ({ ...fallback, ...primary }),
|
||||
createRequestToken: el => {
|
||||
assert.equal(el, anchor);
|
||||
return ++token;
|
||||
},
|
||||
isTokenCurrent: (el, candidate) => {
|
||||
assert.equal(el, anchor);
|
||||
return candidate === token;
|
||||
},
|
||||
showLoading: () => {
|
||||
assert.fail('showLoading should not run without identifiers');
|
||||
},
|
||||
showDetails: (el, info) => {
|
||||
assert.equal(el, anchor);
|
||||
assert.equal(info.shortName, 'Unknown');
|
||||
detailsShown += 1;
|
||||
},
|
||||
});
|
||||
|
||||
marker.trigger('click', { originalEvent: {} });
|
||||
await new Promise(resolve => setImmediate(resolve));
|
||||
|
||||
assert.equal(refreshed, false);
|
||||
assert.equal(detailsShown, 1);
|
||||
});
|
||||
|
||||
test('attachNodeInfoRefreshToMarker honours shouldHandleClick predicate', async () => {
|
||||
const marker = createFakeMarker({ id: 'anchor' });
|
||||
let token = 0;
|
||||
let refreshed = false;
|
||||
|
||||
attachNodeInfoRefreshToMarker({
|
||||
marker,
|
||||
getOverlayFallback: () => ({ nodeId: '!skip' }),
|
||||
refreshNodeInformation: async () => {
|
||||
refreshed = true;
|
||||
},
|
||||
mergeOverlayDetails: (primary, fallback) => ({ ...fallback, ...primary }),
|
||||
createRequestToken: () => ++token,
|
||||
isTokenCurrent: (el, candidate) => candidate === token,
|
||||
shouldHandleClick: () => false,
|
||||
});
|
||||
|
||||
marker.trigger('click', { originalEvent: {} });
|
||||
await new Promise(resolve => setImmediate(resolve));
|
||||
|
||||
assert.equal(refreshed, false);
|
||||
});
|
||||
|
||||
test('overlayToPopupNode normalises raw overlay payloads', () => {
|
||||
const overlay = {
|
||||
nodeId: '!foo',
|
||||
nodeNum: 42,
|
||||
shortName: 'Foo',
|
||||
role: 'ROUTER',
|
||||
battery: '77.5',
|
||||
neighbors: [
|
||||
{ neighbor_id: '!bar', snr: '12.5', neighbor_short_name: 'Bar' },
|
||||
null,
|
||||
],
|
||||
};
|
||||
|
||||
const popupNode = overlayToPopupNode(overlay);
|
||||
assert.equal(popupNode.node_id, '!foo');
|
||||
assert.equal(popupNode.node_num, 42);
|
||||
assert.equal(popupNode.short_name, 'Foo');
|
||||
assert.equal(popupNode.role, 'ROUTER');
|
||||
assert.equal(popupNode.battery_level, 77.5);
|
||||
assert.equal(Array.isArray(popupNode.neighbors), true);
|
||||
assert.equal(popupNode.neighbors.length, 1);
|
||||
assert.equal(popupNode.neighbors[0].node.node_id, '!bar');
|
||||
assert.equal(popupNode.neighbors[0].snr, 12.5);
|
||||
});
348  web/public/assets/js/app/__tests__/node-details.test.js  Normal file
@@ -0,0 +1,348 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { refreshNodeInformation, __testUtils } from '../node-details.js';
|
||||
|
||||
const {
|
||||
toTrimmedString,
|
||||
toFiniteNumber,
|
||||
extractString,
|
||||
extractNumber,
|
||||
assignString,
|
||||
assignNumber,
|
||||
mergeModemMetadata,
|
||||
mergeNodeFields,
|
||||
mergeTelemetry,
|
||||
mergePosition,
|
||||
parseFallback,
|
||||
normalizeReference,
|
||||
} = __testUtils;
|
||||
|
||||
function createResponse(status, body) {
|
||||
return {
|
||||
status,
|
||||
ok: status >= 200 && status < 300,
|
||||
json: async () => body,
|
||||
};
|
||||
}
|
||||
|
||||
test('refreshNodeInformation merges telemetry metrics when the base node lacks them', async () => {
  const calls = [];
  const responses = new Map([
    ['/api/nodes/!test', createResponse(200, {
      node_id: '!test',
      short_name: 'TST',
      battery_level: null,
      last_heard: 1_000,
      modem_preset: 'MediumFast',
      lora_freq: '868.1',
    })],
    ['/api/telemetry/!test?limit=1', createResponse(200, [{
      node_id: '!test',
      battery_level: 73.5,
      rx_time: 1_200,
      telemetry_time: 1_180,
      voltage: 4.1,
    }])],
    ['/api/positions/!test?limit=1', createResponse(200, [{
      node_id: '!test',
      latitude: 52.5,
      longitude: 13.4,
      rx_time: 1_100,
    }])],
    ['/api/neighbors/!test?limit=1000', createResponse(200, [{
      node_id: '!test',
      neighbor_id: '!peer',
      snr: 9.5,
      rx_time: 1_150,
    }])],
  ]);
  const fetchImpl = async (url, options) => {
    calls.push({ url, options });
    const response = responses.get(url);
    if (!response) {
      return createResponse(404, { error: 'not found' });
    }
    return response;
  };

  const fallback = { shortName: 'fallback', role: 'CLIENT' };
  const node = await refreshNodeInformation({ nodeId: '!test', fallback }, { fetchImpl });

  assert.equal(node.nodeId, '!test');
  assert.equal(node.shortName, 'TST');
  assert.equal(node.battery, 73.5);
  assert.equal(node.voltage, 4.1);
  assert.equal(node.role, 'CLIENT');
  assert.equal(node.modemPreset, 'MediumFast');
  assert.equal(node.loraFreq, 868.1);
  assert.equal(node.lastHeard, 1_200);
  assert.equal(node.telemetryTime, 1_180);
  assert.equal(node.latitude, 52.5);
  assert.equal(node.longitude, 13.4);
  assert.deepEqual(node.neighbors, [{
    node_id: '!test',
    neighbor_id: '!peer',
    snr: 9.5,
    rx_time: 1_150,
  }]);
  assert.ok(node.rawSources);
  assert.ok(node.rawSources.node);
  assert.ok(node.rawSources.telemetry);
  assert.ok(node.rawSources.position);

  assert.equal(calls.length, 4);
  calls.forEach(call => {
    assert.deepEqual(call.options, { cache: 'no-store' });
  });
});

test('refreshNodeInformation preserves fallback metrics when telemetry is unavailable', async () => {
  const responses = new Map([
    ['/api/nodes/42', createResponse(200, {
      node_id: '!num',
      short_name: 'NUM',
    })],
    ['/api/telemetry/42?limit=1', createResponse(404, { error: 'not found' })],
    ['/api/positions/42?limit=1', createResponse(404, { error: 'not found' })],
    ['/api/neighbors/42?limit=1000', createResponse(404, { error: 'not found' })],
  ]);
  const fetchImpl = async (url, options) => {
    const response = responses.get(url);
    return response ?? createResponse(404, { error: 'not found' });
  };

  const fallback = { nodeNum: 42, battery: 12.5, role: 'CLIENT', modemPreset: 'FallbackPreset', loraFreq: 915 };
  const node = await refreshNodeInformation({ nodeNum: 42, fallback }, { fetchImpl });

  assert.equal(node.nodeId, '!num');
  assert.equal(node.nodeNum, 42);
  assert.equal(node.shortName, 'NUM');
  assert.equal(node.battery, 12.5);
  assert.equal(node.role, 'CLIENT');
  assert.equal(node.modemPreset, 'FallbackPreset');
  assert.equal(node.loraFreq, 915);
  assert.equal(Array.isArray(node.neighbors) && node.neighbors.length, 0);
});

test('refreshNodeInformation requires a node identifier', async () => {
  await assert.rejects(() => refreshNodeInformation(null), /node identifier/i);
});

test('refreshNodeInformation handles missing node records by falling back to telemetry data', async () => {
  const responses = new Map([
    ['/api/nodes/!missing', createResponse(404, { error: 'not found' })],
    ['/api/telemetry/!missing?limit=1', createResponse(200, [{
      node_id: '!missing',
      node_num: 77,
      battery_level: 66,
      rx_time: 2_000,
      telemetry_time: 1_950,
    }])],
    ['/api/positions/!missing?limit=1', createResponse(200, [{
      node_id: '!missing',
      latitude: 1.23,
      longitude: 3.21,
      altitude: 42,
      position_time: 1_960,
      rx_time: 1_970,
    }])],
    ['/api/neighbors/!missing?limit=1000', createResponse(200, [null, 'skip', {
      node_id: '!missing',
      neighbor_id: '!ally',
      snr: 8.5,
    }])],
  ]);

  const fetchImpl = async url => responses.get(url) ?? createResponse(404, { error: 'not found' });

  const node = await refreshNodeInformation({ nodeId: '!missing' }, { fetchImpl });

  assert.equal(node.nodeId, '!missing');
  assert.equal(node.nodeNum, 77);
  assert.equal(node.battery, 66);
  assert.equal(node.lastHeard, 2_000);
  assert.equal(node.telemetryTime, 1_950);
  assert.equal(node.positionTime, 1_960);
  assert.equal(node.latitude, 1.23);
  assert.equal(node.longitude, 3.21);
  assert.equal(node.altitude, 42);
  assert.equal(node.role, 'CLIENT');
  assert.deepEqual(node.neighbors, [{
    node_id: '!missing',
    neighbor_id: '!ally',
    snr: 8.5,
  }]);
});

test('refreshNodeInformation enforces a fetch implementation', async () => {
  const originalFetch = globalThis.fetch;
  // eslint-disable-next-line no-global-assign
  globalThis.fetch = undefined;
  try {
    await assert.rejects(() => refreshNodeInformation('!test', { fetchImpl: null }), /fetch implementation/i);
  } finally {
    // eslint-disable-next-line no-global-assign
    globalThis.fetch = originalFetch;
  }
});

test('mergeModemMetadata respects preference flags', () => {
  const target = {};
  mergeModemMetadata(target, { modem_preset: 'Base', lora_freq: '915.5' });
  assert.equal(target.modemPreset, 'Base');
  assert.equal(target.loraFreq, 915.5);

  mergeModemMetadata(target, { modem_preset: 'New', lora_freq: '433' }, { preferExisting: true });
  assert.equal(target.modemPreset, 'Base');
  assert.equal(target.loraFreq, 915.5);

  mergeModemMetadata(target, { modem_preset: 'Updated', lora_freq: '433' }, { preferExisting: false });
  assert.equal(target.modemPreset, 'Updated');
  assert.equal(target.loraFreq, 433);
});

test('helper utilities normalise primitive values', () => {
  assert.equal(toTrimmedString(' hello '), 'hello');
  assert.equal(toTrimmedString(''), null);
  assert.equal(toTrimmedString(null), null);

  assert.equal(toFiniteNumber('42.5'), 42.5);
  assert.equal(toFiniteNumber('bad'), null);
  assert.equal(toFiniteNumber(Infinity), null);

  assert.equal(extractString({ name: ' Alice ' }, ['missing', 'name']), 'Alice');
  assert.equal(extractString(null, ['name']), null);

  assert.equal(extractNumber({ value: ' 13 ' }, ['missing', 'value']), 13);
  assert.equal(extractNumber({}, ['value']), null);
});

test('assign helpers respect preferExisting semantics', () => {
  const target = {};
  assignString(target, 'name', ' primary ');
  assignString(target, 'name', 'secondary', { preferExisting: true });
  assignString(target, 'description', '');
  assignNumber(target, 'count', '25');
  assignNumber(target, 'count', 13, { preferExisting: true });
  assignNumber(target, 'ignored', 'oops');

  assert.deepEqual(target, { name: 'primary', count: 25 });
});

test('merge helpers combine node, telemetry, and position data', () => {
  const node = {};
  mergeNodeFields(node, {
    node_id: '!node',
    node_num: 55,
    short_name: 'NODE',
    battery_level: null,
    last_heard: 1_000,
    position_time: 900,
  });

  node.battery = 50;

  mergeTelemetry(node, {
    node_id: '!node',
    battery_level: 75,
    voltage: 3.8,
    rx_time: 1_200,
    rx_iso: '2025-01-01T00:00:00Z',
    telemetry_time: 1_150,
  });

  mergePosition(node, {
    node_id: '!node',
    latitude: 52.5,
    longitude: 13.4,
    altitude: 80,
    position_time: 1_180,
    position_time_iso: '2025-01-01T00:19:40Z',
    rx_time: 1_100,
    rx_iso: '2025-01-01T00:18:20Z',
  });

  assert.equal(node.nodeId, '!node');
  assert.equal(node.nodeNum, 55);
  assert.equal(node.shortName, 'NODE');
  assert.equal(node.battery, 50);
  assert.equal(node.voltage, 3.8);
  assert.equal(node.lastHeard, 1_200);
  assert.equal(node.lastSeenIso, '2025-01-01T00:00:00Z');
  assert.equal(node.telemetryTime, 1_150);
  assert.equal(node.positionTime, 1_180);
  assert.equal(node.positionTimeIso, '2025-01-01T00:19:40Z');
  assert.equal(node.latitude, 52.5);
  assert.equal(node.longitude, 13.4);
  assert.equal(node.altitude, 80);
  assert.ok(node.telemetry);
  assert.ok(node.position);
});

test('normalizeReference extracts identifiers and tolerates malformed fallback payloads', () => {
  const originalWarn = console.warn;
  const warnings = [];
  console.warn = (...args) => warnings.push(args);

  try {
    const parsed = normalizeReference({
      nodeId: ' ',
      fallback: '{"node_id":"!parsed","nodeNum":99}',
    });
    assert.equal(parsed.nodeId, '!parsed');
    assert.equal(parsed.nodeNum, 99);
    assert.ok(parsed.fallback);

    const invalid = normalizeReference({ fallback: '{not json}' });
    assert.equal(invalid.nodeId, null);
    assert.equal(invalid.nodeNum, null);
    assert.equal(invalid.fallback, null);

    const strRef = normalizeReference('!direct');
    assert.equal(strRef.nodeId, '!direct');
    assert.equal(strRef.nodeNum, null);

    const numRef = normalizeReference(57);
    assert.equal(numRef.nodeId, null);
    assert.equal(numRef.nodeNum, 57);

    const emptyRef = normalizeReference(undefined);
    assert.equal(emptyRef.nodeId, null);
    assert.equal(emptyRef.nodeNum, null);
    assert.equal(emptyRef.fallback, null);
  } finally {
    console.warn = originalWarn;
  }

  assert.ok(warnings.length >= 1);
});

test('parseFallback duplicates object references and rejects primitives', () => {
  const fallbackObject = { nodeId: '!object' };
  const parsedObject = parseFallback(fallbackObject);
  assert.notEqual(parsedObject, fallbackObject);
  assert.deepEqual(parsedObject, fallbackObject);

  const parsedString = parseFallback('{"nodeId":"!string"}');
  assert.ok(parsedString);
  assert.equal(parsedString.nodeId, '!string');
  assert.equal(parseFallback('not json'), null);
  assert.equal(parseFallback(42), null);
});
@@ -0,0 +1,65 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { describe, it } from 'node:test';
import assert from 'node:assert/strict';
import { extractModemMetadata, formatLoraFrequencyMHz, formatModemDisplay, __testUtils } from '../node-modem-metadata.js';

describe('node-modem-metadata', () => {
  it('extracts modem preset and frequency from mixed payloads', () => {
    const payload = {
      modem_preset: ' MediumFast ',
      lora_freq: '915',
    };
    assert.deepEqual(extractModemMetadata(payload), { modemPreset: 'MediumFast', loraFreq: 915 });
  });

  it('falls back across naming conventions when extracting metadata', () => {
    const payload = {
      modemPreset: 'LongSlow',
      frequency: 868,
    };
    assert.deepEqual(extractModemMetadata(payload), { modemPreset: 'LongSlow', loraFreq: 868 });
  });

  it('ignores invalid modem metadata entries', () => {
    assert.deepEqual(extractModemMetadata({ modem_preset: ' ', lora_freq: 'NaN' }), {
      modemPreset: null,
      loraFreq: null,
    });
  });

  it('formats positive frequencies with MHz suffix', () => {
    assert.equal(formatLoraFrequencyMHz(915), '915MHz');
    assert.equal(formatLoraFrequencyMHz(867.5), '867.5MHz');
    assert.equal(formatLoraFrequencyMHz('433.1234'), '433.123MHz');
    assert.equal(formatLoraFrequencyMHz(null), null);
  });

  it('combines preset and frequency for overlay display', () => {
    assert.equal(formatModemDisplay('MediumFast', 868), 'MediumFast (868MHz)');
    assert.equal(formatModemDisplay('ShortSlow', null), 'ShortSlow');
    assert.equal(formatModemDisplay(null, 433), '433MHz');
    assert.equal(formatModemDisplay(undefined, undefined), null);
  });

  it('exposes trimmed string helper for targeted assertions', () => {
    const { toTrimmedString } = __testUtils;
    assert.equal(toTrimmedString(' hello '), 'hello');
    assert.equal(toTrimmedString(''), null);
    assert.equal(toTrimmedString(null), null);
  });
});
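
Note: for context, a minimal sketch of how the helpers verified above could feed an overlay label; the input payload shown here is an arbitrary example, not data from the repository.

// Sketch only: combine the extraction and formatting helpers for display.
import { extractModemMetadata, formatModemDisplay } from '../node-modem-metadata.js';

const { modemPreset, loraFreq } = extractModemMetadata({ modem_preset: 'MediumFast', lora_freq: '868' });
const label = formatModemDisplay(modemPreset, loraFreq); // 'MediumFast (868MHz)'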
@@ -0,0 +1,397 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';

import { createShortInfoOverlayStack } from '../short-info-overlay-manager.js';

/**
 * Minimal DOM element implementation tailored for overlay manager tests.
 */
class StubElement {
  /**
   * @param {string} [tagName='div'] Element tag identifier.
   */
  constructor(tagName = 'div') {
    this.tagName = tagName.toUpperCase();
    this.children = [];
    this.parentNode = null;
    this.style = {};
    this.dataset = {};
    this.className = '';
    this.innerHTML = '';
    this.attributes = new Map();
    this.eventHandlers = new Map();
    this._rect = { left: 0, top: 0, width: 120, height: 80 };
  }

  /**
   * Append ``child`` to the element.
   *
   * @param {StubElement} child Child node to append.
   * @returns {StubElement} Appended node.
   */
  appendChild(child) {
    this.children.push(child);
    child.parentNode = this;
    return child;
  }

  /**
   * Remove ``child`` from the element.
   *
   * @param {StubElement} child Child node to remove.
   * @returns {void}
   */
  removeChild(child) {
    const idx = this.children.indexOf(child);
    if (idx >= 0) {
      this.children.splice(idx, 1);
      child.parentNode = null;
    }
  }

  /**
   * Remove the element from its parent tree.
   *
   * @returns {void}
   */
  remove() {
    if (this.parentNode) {
      this.parentNode.removeChild(this);
    }
  }

  /**
   * Assign an attribute to the element.
   *
   * @param {string} name Attribute identifier.
   * @param {string} value Stored attribute value.
   * @returns {void}
   */
  setAttribute(name, value) {
    this.attributes.set(name, String(value));
    if (name === 'class' || name === 'className') {
      this.className = String(value);
    }
  }

  /**
   * Remove an attribute from the element.
   *
   * @param {string} name Attribute identifier.
   * @returns {void}
   */
  removeAttribute(name) {
    this.attributes.delete(name);
    if (name === 'class' || name === 'className') {
      this.className = '';
    }
  }

  /**
   * Register an event handler for the element.
   *
   * @param {string} event Event identifier.
   * @param {Function} handler Handler invoked during tests.
   * @returns {void}
   */
  addEventListener(event, handler) {
    this.eventHandlers.set(event, handler);
  }

  /**
   * Retrieve the first descendant matching a simple class selector.
   *
   * @param {string} selector CSS selector (class only).
   * @returns {?StubElement} Matching element or ``null``.
   */
  querySelector(selector) {
    if (!selector || selector[0] !== '.') {
      return null;
    }
    const className = selector.slice(1);
    return this._findByClass(className);
  }

  /**
   * Recursively search for an element with ``className``.
   *
   * @param {string} className Class identifier to match.
   * @returns {?StubElement} Matching element or ``null``.
   */
  _findByClass(className) {
    const classes = (this.className || '').split(/\s+/).filter(Boolean);
    if (classes.includes(className)) {
      return this;
    }
    for (const child of this.children) {
      const found = child._findByClass(className);
      if (found) {
        return found;
      }
    }
    return null;
  }

  /**
   * Determine whether ``candidate`` is a descendant of the element.
   *
   * @param {StubElement} candidate Potential child node.
   * @returns {boolean} ``true`` when the node is contained within the element.
   */
  contains(candidate) {
    if (this === candidate) {
      return true;
    }
    for (const child of this.children) {
      if (child.contains(candidate)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Return the mock bounding rectangle for the element.
   *
   * @returns {{ left: number, top: number, width: number, height: number }}
   */
  getBoundingClientRect() {
    return { ...this._rect };
  }

  /**
   * Override the bounding rectangle used during positioning tests.
   *
   * @param {{ left?: number, top?: number, width?: number, height?: number }} rect
   * @returns {void}
   */
  setBoundingRect(rect) {
    this._rect = { ...this._rect, ...rect };
  }

  /**
   * Create a deep clone of the element.
   *
   * @param {boolean} [deep=false] When ``true`` clone the children as well.
   * @returns {StubElement} Cloned element instance.
   */
  cloneNode(deep = false) {
    const clone = new StubElement(this.tagName);
    clone.className = this.className;
    clone.style = { ...this.style };
    clone.dataset = { ...this.dataset };
    clone.innerHTML = this.innerHTML;
    clone._rect = { ...this._rect };
    clone.attributes = new Map(this.attributes);
    if (deep) {
      for (const child of this.children) {
        clone.appendChild(child.cloneNode(true));
      }
    }
    return clone;
  }

  /**
   * Locate the nearest ancestor carrying ``selector``.
   *
   * @param {string} selector CSS selector (class only).
   * @returns {?StubElement} Matching ancestor or ``null``.
   */
  closest(selector) {
    if (!selector || selector[0] !== '.') {
      return null;
    }
    const className = selector.slice(1);
    let current = this;
    while (current) {
      const classes = (current.className || '').split(/\s+/).filter(Boolean);
      if (classes.includes(className)) {
        return current;
      }
      current = current.parentNode;
    }
    return null;
  }
}

/**
 * Build a minimal DOM document stub for overlay manager tests.
 *
 * @returns {{ document: Document, window: Window, factory: Function, anchor: StubElement, body: StubElement }}
 */
function createStubDom() {
  const body = new StubElement('body');
  body.contains = body.contains.bind(body);
  const listenerMap = new Map();
  const document = {
    body,
    documentElement: { clientWidth: 640, clientHeight: 480 },
    createElement(tagName) {
      return new StubElement(tagName);
    },
    getElementById() {
      return null;
    },
    addEventListener(event, handler) {
      if (!listenerMap.has(event)) {
        listenerMap.set(event, new Set());
      }
      listenerMap.get(event).add(handler);
    },
    removeEventListener(event, handler) {
      if (!listenerMap.has(event)) {
        return;
      }
      listenerMap.get(event).delete(handler);
    },
    _dispatch(event) {
      if (!listenerMap.has(event)) {
        return;
      }
      for (const handler of Array.from(listenerMap.get(event))) {
        handler();
      }
    },
  };
  const window = {
    scrollX: 10,
    scrollY: 20,
    innerWidth: 640,
    innerHeight: 480,
    requestAnimationFrame(callback) {
      callback();
    },
  };
  function factory() {
    const overlay = document.createElement('div');
    overlay.className = 'short-info-overlay';
    const closeButton = document.createElement('button');
    closeButton.className = 'short-info-close';
    const content = document.createElement('div');
    content.className = 'short-info-content';
    overlay.appendChild(closeButton);
    overlay.appendChild(content);
    return { overlay, closeButton, content };
  }
  const anchor = document.createElement('span');
  anchor.setBoundingRect({ left: 40, top: 50, width: 16, height: 16 });
  body.appendChild(anchor);
  return { document, window, factory, anchor, body };
}

test('render opens overlays and positions them relative to anchors', () => {
  const { document, window, factory, anchor, body } = createStubDom();
  const stack = createShortInfoOverlayStack({ document, window, factory });
  stack.render(anchor, '<strong>Node</strong>');
  const open = stack.getOpenOverlays();
  assert.equal(open.length, 1);
  const overlay = open[0].element;
  assert.equal(overlay.parentNode, body);
  assert.equal(overlay.style.position, 'absolute');
  const content = overlay.querySelector('.short-info-content');
  assert.ok(content);
  assert.equal(content.innerHTML, '<strong>Node</strong>');
  assert.equal(overlay.style.left, '50px');
  assert.equal(overlay.style.top, '70px');
});

test('request tokens track anchors independently', () => {
  const { document, window, factory, anchor } = createStubDom();
  const stack = createShortInfoOverlayStack({ document, window, factory });
  const token1 = stack.incrementRequestToken(anchor);
  const token2 = stack.incrementRequestToken(anchor);
  assert.equal(token2, token1 + 1);
  stack.render(anchor, 'Loading…');
  assert.equal(stack.isTokenCurrent(anchor, token2), true);
  stack.close(anchor);
  assert.equal(stack.isTokenCurrent(anchor, token2), false);
});

test('overlays stack and close independently', () => {
  const { document, window, factory, anchor, body } = createStubDom();
  const stack = createShortInfoOverlayStack({ document, window, factory });
  const secondAnchor = document.createElement('span');
  secondAnchor.setBoundingRect({ left: 200, top: 120 });
  body.appendChild(secondAnchor);
  stack.render(anchor, 'First');
  stack.render(secondAnchor, 'Second');
  const open = stack.getOpenOverlays();
  assert.equal(open.length, 2);
  assert.equal(stack.isOpen(anchor), true);
  assert.equal(stack.isOpen(secondAnchor), true);
  stack.close(anchor);
  assert.equal(stack.isOpen(anchor), false);
  assert.equal(stack.isOpen(secondAnchor), true);
  stack.closeAll();
  assert.equal(stack.getOpenOverlays().length, 0);
});

test('cleanupOrphans removes overlays whose anchors disappear', () => {
  const { document, window, factory, anchor } = createStubDom();
  const stack = createShortInfoOverlayStack({ document, window, factory });
  stack.render(anchor, 'Orphaned');
  anchor.remove();
  stack.cleanupOrphans();
  assert.equal(stack.getOpenOverlays().length, 0);
});

test('containsNode recognises overlay descendants', () => {
  const { document, window, factory, anchor } = createStubDom();
  const stack = createShortInfoOverlayStack({ document, window, factory });
  stack.render(anchor, 'Descendant');
  const [entry] = stack.getOpenOverlays();
  const content = entry.element.querySelector('.short-info-content');
  assert.ok(stack.containsNode(content));
  const stray = new StubElement('div');
  assert.equal(stack.containsNode(stray), false);
});

test('overlays migrate into and out of fullscreen hosts', () => {
  const { document, window, factory, anchor, body } = createStubDom();
  const fullscreenRoot = document.createElement('div');
  body.appendChild(fullscreenRoot);
  const stack = createShortInfoOverlayStack({ document, window, factory });
  stack.render(anchor, 'Fullscreen');
  const [entry] = stack.getOpenOverlays();
  assert.equal(entry.element.parentNode, body);
  assert.equal(entry.element.style.position, 'absolute');

  document.fullscreenElement = fullscreenRoot;
  document._dispatch('fullscreenchange');
  assert.equal(entry.element.parentNode, fullscreenRoot);
  assert.equal(entry.element.style.position, 'fixed');
  assert.equal(entry.element.style.left, '40px');
  assert.equal(entry.element.style.top, '50px');

  document.fullscreenElement = null;
  document._dispatch('fullscreenchange');
  assert.equal(entry.element.parentNode, body);
  assert.equal(entry.element.style.position, 'absolute');
  assert.equal(entry.element.style.left, '50px');
  assert.equal(entry.element.style.top, '70px');
});

test('rendered overlays do not swallow click events by default', () => {
  const { document, window, factory, anchor } = createStubDom();
  const stack = createShortInfoOverlayStack({ document, window, factory });
  stack.render(anchor, 'Event test');
  const [entry] = stack.getOpenOverlays();
  assert.ok(entry);
  assert.equal(entry.element.eventHandlers.has('click'), false);
});
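
Note: a minimal sketch of how the overlay stack exercised above might be wired up in real browser code, assuming the factory contract shown in these tests; fetchNodeHtml is a hypothetical helper, not part of the module.

// Sketch only: render an overlay per anchor and dismiss on outside clicks.
import { createShortInfoOverlayStack } from '../short-info-overlay-manager.js';

function overlayFactory() {
  const overlay = document.createElement('div');
  overlay.className = 'short-info-overlay';
  const closeButton = document.createElement('button');
  closeButton.className = 'short-info-close';
  const content = document.createElement('div');
  content.className = 'short-info-content';
  overlay.appendChild(closeButton);
  overlay.appendChild(content);
  return { overlay, closeButton, content };
}

const stack = createShortInfoOverlayStack({ document, window, factory: overlayFactory });

async function showNodeOverlay(anchor) {
  // Request tokens guard against out-of-order responses when the user clicks quickly.
  const token = stack.incrementRequestToken(anchor);
  stack.render(anchor, 'Loading…');
  const html = await fetchNodeHtml(anchor.dataset.nodeId); // hypothetical fetch helper
  if (stack.isTokenCurrent(anchor, token)) {
    stack.render(anchor, html);
  }
}

document.addEventListener('click', event => {
  // Close every open overlay when the click lands outside the stack.
  if (!stack.containsNode(event.target)) {
    stack.closeAll();
  }
});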
216
web/public/assets/js/app/__tests__/theme-background.test.js
Normal file
@@ -0,0 +1,216 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';
import { readFile } from 'node:fs/promises';
import vm from 'node:vm';

import { createDomEnvironment } from './dom-environment.js';

const themeModuleUrl = new URL('../../theme.js', import.meta.url);
const backgroundModuleUrl = new URL('../../background.js', import.meta.url);
const themeSource = await readFile(themeModuleUrl, 'utf8');
const backgroundSource = await readFile(backgroundModuleUrl, 'utf8');

/**
 * Evaluate a browser-oriented script within the provided DOM environment.
 *
 * @param {string} source Module source code to execute.
 * @param {URL} url Identifier for the executed script.
 * @param {ReturnType<typeof createDomEnvironment>} env Active DOM harness.
 * @returns {void}
 */
function executeInDom(source, url, env) {
  const context = vm.createContext({
    console,
    setTimeout,
    clearTimeout,
    setInterval,
    clearInterval
  });
  context.window = env.window;
  context.document = env.document;
  context.global = context;
  context.globalThis = context;
  context.window.window = context.window;
  context.window.document = context.document;
  context.window.globalThis = context;
  context.window.console = console;

  vm.runInContext(source, context, { filename: url.pathname, displayErrors: true });
}

test('theme and background modules behave correctly across scenarios', async t => {
  const env = createDomEnvironment({ readyState: 'complete', cookie: '' });
  try {
    const toggle = env.createElement('button', 'themeToggle');
    env.registerElement('themeToggle', toggle);
    let filterInvocations = 0;
    env.window.applyFiltersToAllTiles = () => {
      filterInvocations += 1;
    };

    executeInDom(themeSource, themeModuleUrl, env);
    executeInDom(backgroundSource, backgroundModuleUrl, env);

    const themeHelpers = env.window.__themeCookie;
    const themeHooks = themeHelpers.__testHooks;
    const backgroundHelpers = env.window.__potatoBackground;
    const backgroundHooks = backgroundHelpers.__testHooks;

    await t.test('initialises with a dark theme and persists cookies', () => {
      assert.equal(env.document.documentElement.getAttribute('data-theme'), 'dark');
      assert.equal(env.document.body.classList.contains('dark'), true);
      assert.equal(toggle.textContent, '☀️');
      themeHelpers.persistTheme('light');
      themeHelpers.setCookie('bare', '1');
      themeHooks.exerciseSetCookieGuard();
      themeHelpers.setCookie('flag', 'true', { Secure: true });
      const cookieString = env.getCookieString();
      assert.equal(themeHelpers.getCookie('flag'), 'true');
      assert.equal(themeHelpers.getCookie('missing'), null);
      assert.match(cookieString, /theme=light/);
      assert.match(cookieString, /; path=\//);
      assert.match(cookieString, /; SameSite=Lax/);
      assert.match(cookieString, /; Secure/);
    });

    await t.test('serializeCookieOptions covers boolean and string attributes', () => {
      const withAttributes = themeHooks.serializeCookieOptions({ Secure: true, HttpOnly: '1' });
      assert.equal(withAttributes.includes('; Secure'), true);
      assert.equal(withAttributes.includes('; HttpOnly=1'), true);
      const secureOnly = themeHooks.serializeCookieOptions({ Secure: true });
      assert.equal(secureOnly.trim(), '; Secure');
      assert.equal(themeHooks.formatCookieOption(['HttpOnly', '1']), '; HttpOnly=1');
      assert.equal(themeHooks.formatCookieOption(['Secure', true]), '; Secure');
      assert.equal(themeHooks.serializeCookieOptions({}), '');
      assert.equal(themeHooks.serializeCookieOptions(), '');
    });

    await t.test('re-bootstrap handles DOMContentLoaded flow and filter hooks', () => {
      env.document.readyState = 'loading';
      filterInvocations = 0;
      env.setCookieString('theme=light');
      themeHooks.bootstrap();
      env.triggerDOMContentLoaded();
      assert.equal(env.document.documentElement.getAttribute('data-theme'), 'light');
      assert.equal(env.document.body.classList.contains('dark'), false);
      assert.equal(toggle.textContent, '🌙');
      assert.equal(filterInvocations, 1);
      env.document.removeEventListener('DOMContentLoaded', themeHooks.handleReady);
    });

    await t.test('handleReady tolerates missing toggle button', () => {
      env.registerElement('themeToggle', null);
      themeHooks.handleReady();
      env.registerElement('themeToggle', toggle);
    });

    await t.test('applyTheme copes with absent DOM nodes', () => {
      const originalBody = env.document.body;
      const originalRoot = env.document.documentElement;
      env.document.body = null;
      env.document.documentElement = null;
      assert.equal(themeHooks.applyTheme('dark'), true);
      env.document.body = originalBody;
      env.document.documentElement = originalRoot;
      assert.equal(themeHooks.applyTheme('light'), false);
    });

    await t.test('background bootstrap waits for DOM readiness', () => {
      env.setComputedStyleImplementation(() => ({ getPropertyValue: () => ' rgb(15, 15, 15) ' }));
      env.document.readyState = 'loading';
      const previousColor = env.document.documentElement.style.backgroundColor;
      backgroundHooks.bootstrap();
      assert.equal(env.document.documentElement.style.backgroundColor, previousColor);
      env.triggerDOMContentLoaded();
      assert.equal(env.document.documentElement.style.backgroundColor.trim(), 'rgb(15, 15, 15)');
    });

    await t.test('background falls back to theme defaults when styles unavailable', () => {
      env.setComputedStyleImplementation(() => {
        throw new Error('no styles');
      });
      env.document.body.classList.add('dark');
      backgroundHelpers.applyBackground();
      assert.equal(env.document.documentElement.style.backgroundColor, '#0e1418');
      env.document.body.classList.remove('dark');
      backgroundHelpers.applyBackground();
      assert.equal(env.document.documentElement.style.backgroundColor, '#f6f3ee');
    });

    await t.test('background helper tolerates missing body elements', () => {
      const originalBody = env.document.body;
      env.document.body = null;
      backgroundHelpers.applyBackground();
      assert.equal(backgroundHelpers.resolveBackgroundColor(), null);
      env.document.body = originalBody;
    });

    await t.test('theme changes trigger background updates', () => {
      env.document.body.classList.remove('dark');
      themeHooks.setTheme('light');
      backgroundHooks.init();
      env.dispatchWindowEvent('themechange');
      assert.equal(env.document.documentElement.style.backgroundColor, '#f6f3ee');
    });

    env.window.removeEventListener('themechange', backgroundHelpers.applyBackground);
  } finally {
    env.cleanup();
  }
});

test('dom environment helpers mimic expected DOM behaviour', () => {
  const env = createDomEnvironment({ readyState: 'interactive', includeBody: false });
  try {
    const element = env.createElement('span');
    element.classList.add('foo');
    assert.equal(element.classList.contains('foo'), true);
    assert.equal(element.classList.toggle('foo'), false);
    assert.equal(element.classList.toggle('bar'), true);
    assert.equal(element.getAttribute('id'), null);
    element.setAttribute('data-test', 'ok');
    assert.equal(element.getAttribute('data-test'), 'ok');

    env.registerElement('sample', element);
    assert.equal(env.document.getElementById('sample'), element);
    assert.equal(env.document.querySelector('.missing'), null);

    let docEventFired = false;
    env.document.addEventListener('custom', () => {
      docEventFired = true;
    });
    env.document.dispatchEvent('custom');
    assert.equal(docEventFired, true);
    env.document.removeEventListener('custom');

    let winEventFired = false;
    env.window.addEventListener('global', () => {
      winEventFired = true;
    });
    env.window.dispatchEvent('global');
    assert.equal(winEventFired, true);
    env.window.removeEventListener('global');

    env.setCookieString('');
    env.document.cookie = 'foo=bar';
    assert.equal(env.getCookieString(), 'foo=bar');
  } finally {
    env.cleanup();
  }
});
194
web/public/assets/js/app/chat-format.js
Normal file
@@ -0,0 +1,194 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Extract channel metadata from a message payload for chat display.
 *
 * @param {Object} message Raw message payload from the API.
 * @returns {{ frequency: string|null, channelName: string|null }}
 *   Normalized metadata values.
 */
export function extractChatMessageMetadata(message) {
  if (!message || typeof message !== 'object') {
    return { frequency: null, channelName: null };
  }

  const frequency = normalizeFrequency(
    firstNonNull(
      message.region_frequency,
      message.regionFrequency,
      message.lora_freq,
      message.loraFreq,
      message.frequency
    )
  );

  const channelName = normalizeString(
    firstNonNull(message.channel_name, message.channelName)
  );

  return { frequency, channelName };
}

/**
 * Produce the formatted prefix for a chat message entry.
 *
 * Timestamp and frequency will each be wrapped in square brackets. Missing
 * metadata values result in empty brackets (with the frequency replaced by the
 * configured placeholder) to preserve the positional layout expected by
 * operators.
 *
 * @param {{
 *   timestamp: string,
 *   frequency: string|null
 * }} params Normalised and escaped display strings.
 * @returns {string} Prefix string suitable for HTML insertion.
 */
export function formatChatMessagePrefix({ timestamp, frequency }) {
  const ts = typeof timestamp === 'string' ? timestamp : '';
  const freq = normalizeFrequencySlot(frequency);
  return `[${ts}][${freq}]`;
}

/**
 * Render the channel tag that follows the short name in a chat message entry.
 *
 * Empty channel names remain blank within the brackets, mirroring the original
 * UI behaviour that reserves the slot without introducing placeholder text.
 *
 * @param {{ channelName: string|null }} params Normalised and escaped display strings.
 * @returns {string} Channel tag suitable for HTML insertion.
 */
export function formatChatChannelTag({ channelName }) {
  const channel = typeof channelName === 'string' ? channelName : channelName == null ? '' : String(channelName);
  return `[${channel}]`;
}

/**
 * Create the formatted prefix for node announcements in the chat log.
 *
 * Both the timestamp and the optional frequency will be wrapped in brackets,
 * mirroring the chat message display while omitting the channel indicator.
 *
 * @param {{ timestamp: string, frequency: string|null }} params Display strings.
 * @returns {string} Prefix string suitable for HTML insertion.
 */
export function formatNodeAnnouncementPrefix({ timestamp, frequency }) {
  const ts = typeof timestamp === 'string' ? timestamp : '';
  const freq = normalizeFrequencySlot(frequency);
  return `[${ts}][${freq}]`;
}

/**
 * Produce a consistently formatted frequency slot for chat prefixes.
 *
 * A missing or empty frequency is rendered as three HTML non-breaking spaces to
 * ensure the UI maintains its expected alignment while clearly indicating the
 * absence of data.
 *
 * @param {*} value Frequency value that has already been escaped for HTML.
 * @returns {string} Frequency slot suitable for prefix rendering.
 */
function normalizeFrequencySlot(value) {
  if (value == null) {
    return FREQUENCY_PLACEHOLDER;
  }
  if (typeof value === 'string') {
    return value.length > 0 ? value : FREQUENCY_PLACEHOLDER;
  }
  const strValue = String(value);
  return strValue.length > 0 ? strValue : FREQUENCY_PLACEHOLDER;
}

/**
 * HTML entity sequence inserted when a frequency is unavailable.
 * @type {string}
 */
const FREQUENCY_PLACEHOLDER = '&nbsp;&nbsp;&nbsp;';

/**
 * Return the first value in ``candidates`` that is not ``null`` or ``undefined``.
 *
 * @param {...*} candidates Candidate values.
 * @returns {*} First present value or ``null`` when missing.
 */
function firstNonNull(...candidates) {
  for (const value of candidates) {
    if (value !== null && value !== undefined) {
      return value;
    }
  }
  return null;
}

/**
 * Normalise potential channel name values to trimmed strings.
 *
 * @param {*} value Raw value.
 * @returns {string|null} Sanitised channel name.
 */
function normalizeString(value) {
  if (value == null) return null;
  if (typeof value === 'string') {
    const trimmed = value.trim();
    return trimmed.length > 0 ? trimmed : null;
  }
  if (typeof value === 'number') {
    if (!Number.isFinite(value)) return null;
    return String(value);
  }
  return null;
}

/**
 * Convert various frequency representations into clean strings.
 *
 * @param {*} value Raw frequency value.
 * @returns {string|null} Frequency in MHz as a string, when available.
 */
function normalizeFrequency(value) {
  if (value == null) return null;
  if (typeof value === 'number') {
    if (!Number.isFinite(value) || value <= 0) {
      return null;
    }
    return Number.isInteger(value) ? String(value) : String(Number(value.toFixed(3)));
  }
  if (typeof value === 'string') {
    const trimmed = value.trim();
    if (!trimmed) return null;
    const numericMatch = trimmed.match(/\d+(?:\.\d+)?/);
    if (numericMatch) {
      const parsed = Number(numericMatch[0]);
      if (Number.isFinite(parsed) && parsed > 0) {
        return Number.isInteger(parsed) ? String(Math.trunc(parsed)) : String(parsed);
      }
    }
    return trimmed;
  }
  return null;
}

export const __test__ = {
  firstNonNull,
  normalizeString,
  normalizeFrequency,
  formatChatMessagePrefix,
  formatNodeAnnouncementPrefix,
  normalizeFrequencySlot,
  FREQUENCY_PLACEHOLDER,
  formatChatChannelTag
};
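
Note: a minimal sketch of how these formatting helpers might be combined when building a chat row; the message field names used for short name and text are assumptions for the example, and the small escapeHtml helper is defined locally so the snippet is self-contained.

// Sketch only: assemble a chat line from the helpers exported above.
import { extractChatMessageMetadata, formatChatMessagePrefix, formatChatChannelTag } from './chat-format.js';

// Tiny HTML escaper for the example; the real app likely has its own.
const escapeHtml = value => String(value).replace(/[&<>"']/g, ch => (
  { '&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', "'": '&#39;' }[ch]
));

function renderChatRow(message, timestamp) {
  const { frequency, channelName } = extractChatMessageMetadata(message);
  const prefix = formatChatMessagePrefix({ timestamp, frequency }); // e.g. "[12:34:56][868]"
  const channelTag = formatChatChannelTag({ channelName });         // e.g. "[LongFast]" or "[]"
  return `${prefix} ${escapeHtml(message.short_name ?? '')} ${channelTag} ${escapeHtml(message.text ?? '')}`;
}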
45
web/public/assets/js/app/config.js
Normal file
@@ -0,0 +1,45 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * CSS selector used to locate the embedded configuration element.
 *
 * @type {string}
 */
const CONFIG_SELECTOR = '[data-app-config]';

/**
 * Read and parse the serialized application configuration from the DOM.
 *
 * @returns {Object<string, *>} Parsed configuration object or an empty object when unavailable.
 */
export function readAppConfig() {
  const el = document.querySelector(CONFIG_SELECTOR);
  if (!el) {
    return {};
  }
  const raw = el.getAttribute('data-app-config') || '';
  if (!raw) {
    return {};
  }
  try {
    const parsed = JSON.parse(raw);
    return typeof parsed === 'object' && parsed !== null ? parsed : {};
  } catch (err) {
    console.error('Failed to parse application configuration', err);
    return {};
  }
}
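
Note: a sketch of the markup readAppConfig() expects and how a caller might consume it; the host element and the configuration keys shown are placeholders, not values defined by this repository.

// Sketch only: the server template would emit an element matching CONFIG_SELECTOR, e.g.
//   <body data-app-config='{"mapCenter": [52.5, 13.4], "refreshMs": 30000}'>
// and client code reads it before bootstrapping:
import { readAppConfig } from './config.js';

const config = readAppConfig(); // {} when the attribute is missing or malformed JSON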
33
web/public/assets/js/app/index.js
Normal file
@@ -0,0 +1,33 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { readAppConfig } from './config.js';
import { initializeApp } from './main.js';
import { DEFAULT_CONFIG, mergeConfig } from './settings.js';

export { DEFAULT_CONFIG, mergeConfig } from './settings.js';

/**
 * Bootstraps the application once the DOM is ready by reading configuration
 * data and delegating to ``initializeApp``.
 *
 * @returns {void}
 */
document.addEventListener('DOMContentLoaded', () => {
  const rawConfig = readAppConfig();
  const config = mergeConfig(rawConfig);
  initializeApp(config);
});
3132
web/public/assets/js/app/main.js
Normal file
File diff suppressed because it is too large
178
web/public/assets/js/app/map-auto-fit-controller.js
Normal file
@@ -0,0 +1,178 @@
/**
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @typedef {[number, number]} LatLngTuple
 * @typedef {[LatLngTuple, LatLngTuple]} LatLngBoundsTuple
 * @typedef {{ paddingPx: number, maxZoom?: number }} FitOptionsSnapshot
 */

/**
 * Safely clone a Leaflet-compatible bounds tuple to avoid accidental mutation.
 *
 * @param {LatLngBoundsTuple} bounds - Bounds tuple to duplicate.
 * @returns {LatLngBoundsTuple} Deep copy of the provided bounds.
 */
function cloneBounds(bounds) {
  return [
    [bounds[0][0], bounds[0][1]],
    [bounds[1][0], bounds[1][1]]
  ];
}

/**
 * Determine whether the provided structure resembles a Leaflet bounds tuple.
 *
 * @param {unknown} value - Potential bounds input.
 * @returns {value is LatLngBoundsTuple} True when the input is structurally valid.
 */
function isValidBounds(value) {
  if (!Array.isArray(value) || value.length !== 2) return false;
  const [southWest, northEast] = value;
  if (!Array.isArray(southWest) || !Array.isArray(northEast)) return false;
  if (southWest.length !== 2 || northEast.length !== 2) return false;
  const numbers = [southWest[0], southWest[1], northEast[0], northEast[1]];
  return numbers.every(number => Number.isFinite(number));
}

/**
 * Create a controller for coordinating map auto-fit behaviour.
 *
 * @param {object} options - Controller configuration options.
 * @param {HTMLInputElement|null} [options.toggleEl] - Checkbox controlling auto-fit.
 * @param {Window|undefined} [options.windowObject] - Browser window instance.
 * @param {number} [options.defaultPaddingPx=32] - Padding fallback when none supplied.
 * @returns {{
 *   attachResizeListener(callback: (snapshot: { bounds: LatLngBoundsTuple, options: FitOptionsSnapshot } | null) => void): () => void,
 *   getLastFit(): { bounds: LatLngBoundsTuple, options: FitOptionsSnapshot } | null,
 *   handleUserInteraction(): boolean,
 *   isAutoFitEnabled(): boolean,
 *   recordFit(bounds: LatLngBoundsTuple, options?: { paddingPx?: number, maxZoom?: number }): void,
 *   runAutoFitOperation(fn: () => unknown): unknown
 * }} Map auto-fit controller instance.
 */
export function createMapAutoFitController({
  toggleEl = null,
  windowObject = typeof window !== 'undefined' ? window : undefined,
  defaultPaddingPx = 32
} = {}) {
  /** @type {LatLngBoundsTuple|null} */
  let lastBounds = null;
  /** @type {FitOptionsSnapshot} */
  let lastOptions = { paddingPx: defaultPaddingPx };
  let autoFitInProgress = false;

  /**
   * Record the most recent set of bounds used for auto-fitting.
   *
   * @param {LatLngBoundsTuple} bounds - Leaflet bounds tuple.
   * @param {{ paddingPx?: number, maxZoom?: number }} [options] - Fit options to persist.
   * @returns {void}
   */
  function recordFit(bounds, options = {}) {
    if (!isValidBounds(bounds)) return;
    const paddingPx = Number.isFinite(options.paddingPx) && options.paddingPx >= 0 ? options.paddingPx : defaultPaddingPx;
    const maxZoom = Number.isFinite(options.maxZoom) && options.maxZoom > 0 ? options.maxZoom : undefined;
    lastBounds = cloneBounds(bounds);
    lastOptions = { paddingPx };
    if (maxZoom !== undefined) {
      lastOptions.maxZoom = maxZoom;
    } else {
      delete lastOptions.maxZoom;
    }
  }

  /**
   * Return a snapshot of the most recently recorded fit bounds.
   *
   * @returns {{ bounds: LatLngBoundsTuple, options: FitOptionsSnapshot } | null} Snapshot or ``null`` when unavailable.
   */
  function getLastFit() {
    if (!lastBounds) return null;
    return { bounds: cloneBounds(lastBounds), options: { ...lastOptions } };
  }

  /**
   * Test whether auto-fit is currently enabled by the user.
   *
   * @returns {boolean} True when the toggle exists and is checked.
   */
  function isAutoFitEnabled() {
    return Boolean(toggleEl && toggleEl.checked);
  }

  /**
   * Execute a callback while marking auto-fit as in-progress.
   *
   * @template T
   * @param {() => T} fn - Operation to run while suppressing interaction side-effects.
   * @returns {T | undefined} Result of ``fn`` when provided.
   */
  function runAutoFitOperation(fn) {
    if (typeof fn !== 'function') return undefined;
    autoFitInProgress = true;
    try {
      return fn();
    } finally {
      autoFitInProgress = false;
    }
  }

  /**
   * Disable auto-fit in response to manual user interactions with the map.
   *
   * @returns {boolean} True when the toggle was modified.
   */
  function handleUserInteraction() {
    if (!toggleEl || !toggleEl.checked || autoFitInProgress) {
      return false;
    }
    toggleEl.checked = false;
    const event = new Event('change', { bubbles: true });
    toggleEl.dispatchEvent(event);
    return true;
  }

  /**
   * Attach resize listeners that notify the consumer when a refit may be required.
   *
   * @param {(snapshot: { bounds: LatLngBoundsTuple, options: FitOptionsSnapshot } | null) => void} callback - Resize handler.
   * @returns {() => void} Function that removes the registered listeners.
   */
  function attachResizeListener(callback) {
    if (!windowObject || typeof windowObject.addEventListener !== 'function' || typeof callback !== 'function') {
      return () => {};
    }
    const handler = () => {
      callback(getLastFit());
    };
    windowObject.addEventListener('resize', handler, { passive: true });
    windowObject.addEventListener('orientationchange', handler, { passive: true });
    return () => {
      windowObject.removeEventListener('resize', handler);
      windowObject.removeEventListener('orientationchange', handler);
    };
  }

  return {
    attachResizeListener,
    getLastFit,
    handleUserInteraction,
    isAutoFitEnabled,
    recordFit,
    runAutoFitOperation
  };
}
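
Note: a sketch of how this controller might cooperate with a Leaflet map; the map instance, marker bounds, and the #autoFitToggle checkbox are assumptions for the example rather than code from this diff.

// Sketch only: keep the map fitted to node markers until the user pans or zooms manually.
import { createMapAutoFitController } from './map-auto-fit-controller.js';

const controller = createMapAutoFitController({
  toggleEl: document.getElementById('autoFitToggle'), // hypothetical checkbox id
  windowObject: window,
});

function fitToMarkers(map, bounds) {
  if (!controller.isAutoFitEnabled()) return;
  // Run the fit inside runAutoFitOperation so the move it triggers is not
  // mistaken for a manual interaction.
  controller.runAutoFitOperation(() => {
    map.fitBounds(bounds, { padding: [32, 32] });
  });
  controller.recordFit(bounds, { paddingPx: 32 });
}

// Manual panning or zooming switches the toggle off; resizes re-apply the last fit.
function wireMap(map) {
  map.on('dragstart zoomstart', () => controller.handleUserInteraction());
  controller.attachResizeListener(snapshot => {
    if (snapshot && controller.isAutoFitEnabled()) {
      const pad = snapshot.options.paddingPx;
      map.fitBounds(snapshot.bounds, { padding: [pad, pad] });
    }
  });
}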
Some files were not shown because too many files have changed in this diff.