forked from iarv/potato-mesh
Compare commits
166 Commits
@@ -0,0 +1,76 @@
# Git
.git
.gitignore

# Documentation
README.md
CHANGELOG.md
*.md

# Docker files
docker-compose*.yml
.dockerignore

# Environment files
.env*
!.env.example

# Logs
*.log
logs/

# Runtime data
*.pid
*.seed
*.pid.lock

# Coverage directory used by tools like istanbul
coverage/

# nyc test coverage
.nyc_output

# Dependency directories
node_modules/
vendor/

# Optional npm cache directory
.npm

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variables file
.env

# IDE files
.vscode/
.idea/
*.swp
*.swo
*~

# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Test files
tests/
spec/
test_*
*_test.py
*_spec.rb

# Development files
ai_docs/
@@ -0,0 +1,76 @@
# PotatoMesh Environment Configuration
# Copy this file to .env and customize for your setup

# =============================================================================
# REQUIRED SETTINGS
# =============================================================================

# API authentication token (required for ingestor communication)
# Generate a secure token: openssl rand -hex 32
API_TOKEN=your-secure-api-token-here

# Meshtastic connection target (required for ingestor)
# Common serial paths:
# - Linux: /dev/ttyACM0, /dev/ttyUSB0
# - macOS: /dev/cu.usbserial-*
# - Windows (WSL): /dev/ttyS*
# You may also provide an IP:PORT pair (e.g. 192.168.1.20:4403) or a
# Bluetooth address (e.g. ED:4D:9E:95:CF:60).
CONNECTION=/dev/ttyACM0

# =============================================================================
# SITE CUSTOMIZATION
# =============================================================================

# Your mesh network name
SITE_NAME=My Meshtastic Network

# Default Meshtastic channel
CHANNEL=#LongFast

# Default frequency for your region
# Common frequencies: 868MHz (Europe), 915MHz (US), 433MHz (Worldwide)
FREQUENCY=915MHz

# Map center coordinates (latitude, longitude)
# Berlin, Germany: 52.502889, 13.404194
# Denver, Colorado: 39.7392, -104.9903
# London, UK: 51.5074, -0.1278
MAP_CENTER="38.761944,-27.090833"

# Maximum distance to show nodes (kilometers)
MAX_DISTANCE=42

# =============================================================================
# OPTIONAL INTEGRATIONS
# =============================================================================

# Community chat link or Matrix room for your community (optional)
# Matrix aliases (e.g. #meshtastic-berlin:matrix.org) will be linked via matrix.to automatically.
CONTACT_LINK='#potatomesh:dod.ngo'

# =============================================================================
# ADVANCED SETTINGS
# =============================================================================

# Debug mode (0=off, 1=on)
DEBUG=0

# Public domain name for this PotatoMesh instance
# Provide a hostname (with optional port) that resolves to the web service.
# Example: mesh.example.org or mesh.example.org:41447
INSTANCE_DOMAIN=mesh.example.org

# Docker image architecture (linux-amd64, linux-arm64, linux-armv7)
POTATOMESH_IMAGE_ARCH=linux-amd64

# Docker Compose networking profile
# Leave unset for Linux hosts (default host networking).
# Set to "bridge" on Docker Desktop (macOS/Windows) if host networking
# is unavailable.
# COMPOSE_PROFILES=bridge

# Meshtastic channel index (0=primary, 1=secondary, etc.)
CHANNEL_INDEX=0
@@ -0,0 +1,19 @@
# GitHub Actions Workflows

## Workflows

- **`docker.yml`** - Build and push Docker images to GHCR
- **`codeql.yml`** - Security scanning
- **`python.yml`** - Python ingestor pipeline
- **`ruby.yml`** - Ruby Sinatra app testing
- **`javascript.yml`** - Frontend test suite

## Usage

```bash
# Build locally
docker-compose build

# Deploy
docker-compose up -d
```
@@ -0,0 +1,174 @@
name: Build and Push Docker Images

on:
  push:
    tags: [ 'v*' ]
  workflow_dispatch:
    inputs:
      version:
        description: 'Version to publish (e.g., 1.0.0)'
        required: true
        default: '1.0.0'
      publish_all_variants:
        description: 'Publish all Docker image variants (latest tag)'
        type: boolean
        default: false

env:
  REGISTRY: ghcr.io
  IMAGE_PREFIX: l5yth/potato-mesh

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    if: (startsWith(github.ref, 'refs/tags/v') && github.event_name == 'push') || github.event_name == 'workflow_dispatch'
    environment: production
    permissions:
      contents: read
      packages: write

    strategy:
      matrix:
        service: [web, ingestor]
        architecture:
          - { name: linux-amd64, platform: linux/amd64, label: "Linux x86_64" }
          - { name: linux-arm64, platform: linux/arm64, label: "Linux ARM64" }
          - { name: linux-armv7, platform: linux/arm/v7, label: "Linux ARMv7" }

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up QEMU emulation
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract version from tag or input
        id: version
        run: |
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            VERSION="${{ github.event.inputs.version }}"
          else
            VERSION=${GITHUB_REF#refs/tags/v}
          fi
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo "Published version: $VERSION"

      - name: Build and push ${{ matrix.service }} for ${{ matrix.architecture.name }}
        uses: docker/build-push-action@v5
        with:
          context: .
          file: ./${{ matrix.service == 'web' && 'web/Dockerfile' || 'data/Dockerfile' }}
          target: production
          platforms: ${{ matrix.architecture.platform }}
          push: true
          tags: |
            ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service }}-${{ matrix.architecture.name }}:latest
            ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-${{ matrix.service }}-${{ matrix.architecture.name }}:${{ steps.version.outputs.version }}
          labels: |
            org.opencontainers.image.source=https://github.com/${{ github.repository }}
            org.opencontainers.image.description=PotatoMesh ${{ matrix.service == 'web' && 'Web Application' || 'Python Ingestor' }} for ${{ matrix.architecture.label }}
            org.opencontainers.image.licenses=Apache-2.0
            org.opencontainers.image.version=${{ steps.version.outputs.version }}
            org.opencontainers.image.created=${{ github.event.head_commit.timestamp }}
            org.opencontainers.image.revision=${{ github.sha }}
            org.opencontainers.image.title=PotatoMesh ${{ matrix.service == 'web' && 'Web' || 'Ingestor' }} (${{ matrix.architecture.label }})
            org.opencontainers.image.vendor=PotatoMesh
            org.opencontainers.image.architecture=${{ matrix.architecture.name }}
            org.opencontainers.image.os=linux
            org.opencontainers.image.arch=${{ matrix.architecture.name }}
          cache-from: type=gha,scope=${{ matrix.service }}-${{ matrix.architecture.name }}
          cache-to: type=gha,mode=max,scope=${{ matrix.service }}-${{ matrix.architecture.name }}

  test-images:
    runs-on: ubuntu-latest
    needs: build-and-push
    if: startsWith(github.ref, 'refs/tags/v') && github.event_name == 'push'

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Extract version from tag
        id: version
        run: |
          VERSION=${GITHUB_REF#refs/tags/v}
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Test web application (Linux AMD64)
        run: |
          docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-amd64:${{ steps.version.outputs.version }}
          docker run --rm -d --name web-test -p 41447:41447 \
            -e API_TOKEN=test-token \
            -e DEBUG=1 \
            ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-amd64:${{ steps.version.outputs.version }}
          sleep 10
          curl -f http://localhost:41447/ || exit 1
          docker stop web-test

      - name: Test ingestor (Linux AMD64)
        run: |
          docker pull ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:${{ steps.version.outputs.version }}
          docker run --rm --name ingestor-test \
            -e POTATOMESH_INSTANCE=http://localhost:41447 \
            -e API_TOKEN=test-token \
            -e CONNECTION=mock \
            -e DEBUG=1 \
            ${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:${{ steps.version.outputs.version }} &
          sleep 5
          docker stop ingestor-test || true

  publish-summary:
    runs-on: ubuntu-latest
    needs: [build-and-push, test-images]
    if: always() && startsWith(github.ref, 'refs/tags/v') && github.event_name == 'push'

    steps:
      - name: Extract version from tag
        id: version
        run: |
          VERSION=${GITHUB_REF#refs/tags/v}
          echo "version=$VERSION" >> $GITHUB_OUTPUT

      - name: Publish release summary
        run: |
          echo "## 🚀 PotatoMesh Images Published to GHCR" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Version:** ${{ steps.version.outputs.version }}" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Published Images:**" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          # Web images
          echo "### 🌐 Web Application" >> $GITHUB_STEP_SUMMARY
          echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-amd64:latest\` - Linux x86_64" >> $GITHUB_STEP_SUMMARY
          echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-arm64:latest\` - Linux ARM64" >> $GITHUB_STEP_SUMMARY
          echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-web-linux-armv7:latest\` - Linux ARMv7" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          # Ingestor images
          echo "### 📡 Ingestor Service" >> $GITHUB_STEP_SUMMARY
          echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-amd64:latest\` - Linux x86_64" >> $GITHUB_STEP_SUMMARY
          echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-arm64:latest\` - Linux ARM64" >> $GITHUB_STEP_SUMMARY
          echo "- \`${{ env.REGISTRY }}/${{ env.IMAGE_PREFIX }}-ingestor-linux-armv7:latest\` - Linux ARMv7" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
@@ -0,0 +1,43 @@
name: JavaScript

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]

permissions:
  contents: read

jobs:
  frontend:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: web
    steps:
      - uses: actions/checkout@v5
      - name: Set up Node.js 22
        uses: actions/setup-node@v4
        with:
          node-version: '22'
      - name: Install dependencies
        run: npm ci
      - name: Run JavaScript tests
        run: npm test
      - name: Upload coverage to Codecov
        if: always()
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: web/reports/javascript-coverage.json
          flags: frontend
          name: frontend
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
      - name: Upload test results to Codecov
        uses: codecov/test-results-action@v1
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: web/reports/javascript-junit.xml
          flags: frontend
@@ -10,21 +10,18 @@ permissions:
  contents: read

jobs:
  test:
  ingestor:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - name: Set up Python 3.13
        uses: actions/setup-python@v3
        uses: actions/setup-python@v5
        with:
          python-version: "3.13"
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install black pytest pytest-cov meshtastic
      - name: Lint with black
        run: |
          black --check ./
      - name: Test with pytest and coverage
        run: |
          mkdir -p reports
@@ -45,3 +42,6 @@ jobs:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: reports/python-junit.xml
          flags: python-ingestor
      - name: Lint with black
        run: |
          black --check ./
@@ -10,7 +10,7 @@ permissions:
  contents: read

jobs:
  test:
  sinatra:
    defaults:
      run:
        working-directory: ./web
@@ -29,8 +29,6 @@ jobs:
        working-directory: ./web
      - name: Set up dependencies
        run: bundle install
      - name: Run rufo
        run: bundle exec rufo --check .
      - name: Run tests
        run: |
          mkdir -p tmp/test-results
@@ -44,12 +42,14 @@ jobs:
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./web/tmp/test-results/rspec.xml
          flags: ruby-${{ matrix.ruby-version }}
          flags: sinatra-${{ matrix.ruby-version }}
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          fail_ci_if_error: false
          flags: ruby-${{ matrix.ruby-version }}
          flags: sinatra-${{ matrix.ruby-version }}
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
      - name: Run rufo
        run: bundle exec rufo --check .
+8 -1
@@ -11,7 +11,7 @@
/tmp/

# Used by dotenv library to load environment variables.
# .env
.env

# Ignore Byebug command history file.
.byebug_history
@@ -62,3 +62,10 @@ coverage/
coverage.xml
htmlcov/
reports/

# AI planning and documentation
ai_docs/
*.log

# Generated credentials for the instance
web/.config
+219
@@ -1,7 +1,226 @@
# CHANGELOG

## v0.5.1

* Recursively ingest federated instances by @l5yth in <https://github.com/l5yth/potato-mesh/pull/353>
* Remove federation timeout environment overrides by @l5yth in <https://github.com/l5yth/potato-mesh/pull/352>
* Close unrelated short info overlays when opening short info by @l5yth in <https://github.com/l5yth/potato-mesh/pull/351>
* Improve federation instance error diagnostics by @l5yth in <https://github.com/l5yth/potato-mesh/pull/350>
* Harden federation domain validation and tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/347>
* Handle malformed instance records gracefully by @l5yth in <https://github.com/l5yth/potato-mesh/pull/348>
* Fix ingestor device mounting for non-serial connections by @l5yth in <https://github.com/l5yth/potato-mesh/pull/346>
* Ensure Docker deployments persist keyfile and well-known assets by @l5yth in <https://github.com/l5yth/potato-mesh/pull/345>
* Add modem preset display to node overlay by @l5yth in <https://github.com/l5yth/potato-mesh/pull/340>
* Display message frequency and channel in chat log by @l5yth in <https://github.com/l5yth/potato-mesh/pull/339>
* Bump fallback version string to v0.5.1 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/338>
* Docs: update changelog for 0.5.0 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/337>
* Fix ingestor docker import path by @l5yth in <https://github.com/l5yth/potato-mesh/pull/336>

## v0.5.0

* Ensure node overlays appear above fullscreen map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/333>
* Adjust node table columns responsively by @l5yth in <https://github.com/l5yth/potato-mesh/pull/332>
* Add LoRa metadata fields to nodes and messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/331>
* Add channel metadata capture for message tagging by @l5yth in <https://github.com/l5yth/potato-mesh/pull/329>
* Capture radio metadata for ingestor payloads by @l5yth in <https://github.com/l5yth/potato-mesh/pull/327>
* Fix FrozenError when filtering node query results by @l5yth in <https://github.com/l5yth/potato-mesh/pull/324>
* Ensure frontend reports git-aware version strings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/321>
* Ensure web Docker image ships application sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/322>
* Refine stacked short info overlays on the map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/319>
* Refine environment configuration defaults by @l5yth in <https://github.com/l5yth/potato-mesh/pull/318>
* Fix legacy configuration migration to XDG directories by @l5yth in <https://github.com/l5yth/potato-mesh/pull/317>
* Adopt XDG base directories for app data and config by @l5yth in <https://github.com/l5yth/potato-mesh/pull/316>
* Refactor: streamline ingestor environment variables by @l5yth in <https://github.com/l5yth/potato-mesh/pull/314>
* Adjust map auto-fit padding and default zoom by @l5yth in <https://github.com/l5yth/potato-mesh/pull/315>
* Ensure APIs filter stale data and refresh node details from latest sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/312>
* Improve offline tile fallback initialization by @l5yth in <https://github.com/l5yth/potato-mesh/pull/307>
* Add fallback for offline tile rendering errors by @l5yth in <https://github.com/l5yth/potato-mesh/pull/306>
* Fix map auto-fit handling and add controller by @l5yth in <https://github.com/l5yth/potato-mesh/pull/311>
* Fix map initialization bounds and add coverage by @l5yth in <https://github.com/l5yth/potato-mesh/pull/305>
* Increase coverage for configuration and sanitizer helpers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/303>
* Add comprehensive theme and background front-end tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/302>
* Document sanitization and helper modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/301>
* Add in-repo Meshtastic protobuf stubs for tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/300>
* Handle CRL lookup failures during federation TLS by @l5yth in <https://github.com/l5yth/potato-mesh/pull/299>
* Ensure JavaScript workflow runs frontend tests by @l5yth in <https://github.com/l5yth/potato-mesh/pull/298>
* Unify structured logging across application and ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/296>
* Add Apache license headers to missing sources by @l5yth in <https://github.com/l5yth/potato-mesh/pull/297>
* Update workflows for ingestor, sinatra, and frontend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/295>
* Fix IPv6 instance domain canonicalization by @l5yth in <https://github.com/l5yth/potato-mesh/pull/294>
* Handle federation HTTPS CRL verification failures by @l5yth in <https://github.com/l5yth/potato-mesh/pull/293>
* Adjust federation announcement interval to eight hours by @l5yth in <https://github.com/l5yth/potato-mesh/pull/292>
* Restore modular app functionality by @l5yth in <https://github.com/l5yth/potato-mesh/pull/291>
* Refactor config and metadata helpers into PotatoMesh modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/290>
* Update default site configuration defaults by @l5yth in <https://github.com/l5yth/potato-mesh/pull/288>
* Add regression test for queue drain concurrency by @l5yth in <https://github.com/l5yth/potato-mesh/pull/287>
* Ensure Docker config directories are created for non-root user by @l5yth in <https://github.com/l5yth/potato-mesh/pull/286>
* Clarify numeric address requirement for network target parsing by @l5yth in <https://github.com/l5yth/potato-mesh/pull/285>
* Ensure mesh ingestor queue resets active flag when idle by @l5yth in <https://github.com/l5yth/potato-mesh/pull/284>
* Clarify BLE connection description in README by @l5yth in <https://github.com/l5yth/potato-mesh/pull/283>
* Configure web container for production mode by @l5yth in <https://github.com/l5yth/potato-mesh/pull/282>
* Normalize INSTANCE_DOMAIN configuration to require hostnames by @l5yth in <https://github.com/l5yth/potato-mesh/pull/280>
* Avoid blocking startup on federation announcements by @l5yth in <https://github.com/l5yth/potato-mesh/pull/281>
* Fix production Docker builds for web and ingestor images by @l5yth in <https://github.com/l5yth/potato-mesh/pull/279>
* Improve instance domain detection logic by @l5yth in <https://github.com/l5yth/potato-mesh/pull/278>
* Implement federation announcements and instances API by @l5yth in <https://github.com/l5yth/potato-mesh/pull/277>
* Fix federation signature handling and IP guard by @l5yth in <https://github.com/l5yth/potato-mesh/pull/276>
* Add persistent federation metadata endpoint by @l5yth in <https://github.com/l5yth/potato-mesh/pull/274>
* Add configurable instance domain with reverse DNS fallback by @l5yth in <https://github.com/l5yth/potato-mesh/pull/272>
* Document production deployment configuration by @l5yth in <https://github.com/l5yth/potato-mesh/pull/273>
* Add targeted API endpoints and expose version metadata by @l5yth in <https://github.com/l5yth/potato-mesh/pull/271>
* Prometheus metrics updates on startup and for position/telemetry by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/270>
* Add hourly reconnect handling for inactive mesh interface by @l5yth in <https://github.com/l5yth/potato-mesh/pull/267>
* Dockerfile fixes by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/268>
* Added prometheus /metrics endpoint by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/262>
* Add fullscreen toggle to map view by @l5yth in <https://github.com/l5yth/potato-mesh/pull/263>
* Relocate JS coverage export script into web directory by @l5yth in <https://github.com/l5yth/potato-mesh/pull/266>
* V0.4.0 version string in web UI by @nicjansma in <https://github.com/l5yth/potato-mesh/pull/265>
* Add energy saving cycle to ingestor daemon by @l5yth in <https://github.com/l5yth/potato-mesh/pull/256>
* Chore: restore apache headers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/260>
* Docs: add matrix to readme by @l5yth in <https://github.com/l5yth/potato-mesh/pull/259>
* Force dark theme default based on sanitized cookie by @l5yth in <https://github.com/l5yth/potato-mesh/pull/252>
* Document mesh ingestor modules with PDoc-style docstrings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/255>
* Handle missing node IDs in Meshtastic nodeinfo packets by @l5yth in <https://github.com/l5yth/potato-mesh/pull/251>
* Document Ruby helper methods with RDoc comments by @l5yth in <https://github.com/l5yth/potato-mesh/pull/254>
* Add JSDoc documentation across client scripts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/253>
* Fix mesh ingestor telemetry and neighbor handling by @l5yth in <https://github.com/l5yth/potato-mesh/pull/249>
* Refactor front-end assets into external modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/245>
* Add tests for helper utilities and asset routes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/243>
* Docs: add ingestor inline docstrings by @l5yth in <https://github.com/l5yth/potato-mesh/pull/244>
* Add comprehensive coverage tests for mesh ingestor by @l5yth in <https://github.com/l5yth/potato-mesh/pull/241>
* Add inline documentation to config helpers and frontend scripts by @l5yth in <https://github.com/l5yth/potato-mesh/pull/240>
* Update changelog by @l5yth in <https://github.com/l5yth/potato-mesh/pull/238>

## v0.4.0

* Reformat neighbor overlay layout by @l5yth in <https://github.com/l5yth/potato-mesh/pull/237>
* Add legend toggle for neighbor lines by @l5yth in <https://github.com/l5yth/potato-mesh/pull/236>
* Hide Air Util Tx column on mobile by @l5yth in <https://github.com/l5yth/potato-mesh/pull/235>
* Add overlay for clickable neighbor links on map by @l5yth in <https://github.com/l5yth/potato-mesh/pull/234>
* Hide humidity and pressure columns on mobile by @l5yth in <https://github.com/l5yth/potato-mesh/pull/232>
* Remove last position timestamp from map info overlay by @l5yth in <https://github.com/l5yth/potato-mesh/pull/233>
* Improve live node positions and expose precision metadata by @l5yth in <https://github.com/l5yth/potato-mesh/pull/231>
* Show neighbor short names in info overlays by @l5yth in <https://github.com/l5yth/potato-mesh/pull/228>
* Add telemetry environment metrics to node UI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/227>
* Reduce neighbor line opacity by @l5yth in <https://github.com/l5yth/potato-mesh/pull/226>
* Visualize neighbor connections on map canvas by @l5yth in <https://github.com/l5yth/potato-mesh/pull/224>
* Add clear control to filter input by @l5yth in <https://github.com/l5yth/potato-mesh/pull/225>
* Handle Bluetooth shutdown hangs gracefully by @l5yth in <https://github.com/l5yth/potato-mesh/pull/221>
* Adjust mesh priorities and receive topics by @l5yth in <https://github.com/l5yth/potato-mesh/pull/220>
* Add BLE and fallback mesh interface handling by @l5yth in <https://github.com/l5yth/potato-mesh/pull/219>
* Add neighbor info ingestion and API endpoints by @l5yth in <https://github.com/l5yth/potato-mesh/pull/218>
* Add debug logs for unknown node creation and last-heard updates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/214>
* Update node last seen when events are received by @l5yth in <https://github.com/l5yth/potato-mesh/pull/212>
* Improve debug logging for node and telemetry data by @l5yth in <https://github.com/l5yth/potato-mesh/pull/213>
* Normalize stored message debug output by @l5yth in <https://github.com/l5yth/potato-mesh/pull/211>
* Stop repeating ingestor node info snapshot and timestamp debug logs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/210>
* Add telemetry API and ingestion support by @l5yth in <https://github.com/l5yth/potato-mesh/pull/205>
* Add private mode to hide chat and message APIs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/204>
* Handle offline-ready map fallback by @l5yth in <https://github.com/l5yth/potato-mesh/pull/202>
* Add linux/armv7 container builds and configuration options by @l5yth in <https://github.com/l5yth/potato-mesh/pull/201>
* Update Docker documentation by @l5yth in <https://github.com/l5yth/potato-mesh/pull/200>
* Update node last seen when ingesting encrypted messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/198>
* Fix api in readme by @l5yth in <https://github.com/l5yth/potato-mesh/pull/197>

## v0.3.0

* Add connection recovery for TCP interface by @l5yth in <https://github.com/l5yth/potato-mesh/pull/186>
* Bump version to 0.3 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/191>
* Pgrade styles and fix interface issues by @l5yth in <https://github.com/l5yth/potato-mesh/pull/190>
* Some updates in the front by @dkorotkih2014-hub in <https://github.com/l5yth/potato-mesh/pull/188>
* Update last heard on node entry change by @l5yth in <https://github.com/l5yth/potato-mesh/pull/185>
* Populate chat metadata for unknown nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/182>
* Update role color theme to latest palette by @l5yth in <https://github.com/l5yth/potato-mesh/pull/183>
* Add placeholder nodes for unknown senders by @l5yth in <https://github.com/l5yth/potato-mesh/pull/181>
* Update role colors and ordering for firmware 2.7.10 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/180>
* Handle plain IP addresses in mesh TCP detection by @l5yth in <https://github.com/l5yth/potato-mesh/pull/154>
* Handle encrypted messages by @l5yth in <https://github.com/l5yth/potato-mesh/pull/173>
* Add fallback display names for unnamed nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/171>
* Ensure routers render above other node types by @l5yth in <https://github.com/l5yth/potato-mesh/pull/169>
* Move lint checks after tests in CI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/168>
* Handle proto values in nodeinfo payloads by @l5yth in <https://github.com/l5yth/potato-mesh/pull/167>
* Remove raw payload storage from database schema by @l5yth in <https://github.com/l5yth/potato-mesh/pull/166>
* Add POSITION_APP ingestion and API support by @l5yth in <https://github.com/l5yth/potato-mesh/pull/160>
* Add support for NODEINFO_APP packets by @l5yth in <https://github.com/l5yth/potato-mesh/pull/159>
* Derive SEO metadata from existing config values by @l5yth in <https://github.com/l5yth/potato-mesh/pull/153>
* Tests: create helper script to dump all mesh data from serial by @l5yth in <https://github.com/l5yth/potato-mesh/pull/152>
* Limit chat log to recent entries by @l5yth in <https://github.com/l5yth/potato-mesh/pull/151>
* Require time library before formatting ISO timestamps by @l5yth in <https://github.com/l5yth/potato-mesh/pull/149>
* Define docker compose network by @l5yth in <https://github.com/l5yth/potato-mesh/pull/148>
* Fix sqlite3 native extension on Alpine by @l5yth in <https://github.com/l5yth/potato-mesh/pull/146>
* Fix web app startup binding by @l5yth in <https://github.com/l5yth/potato-mesh/pull/147>
* Ensure sqlite3 builds from source on Alpine by @l5yth in <https://github.com/l5yth/potato-mesh/pull/145>
* Support mock serial interface in CI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/143>
* Fix Docker workflow matrix for supported platforms by @l5yth in <https://github.com/l5yth/potato-mesh/pull/142>
* Add clickable role filters to the map legend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/140>
* Rebuild chat log on each refresh by @l5yth in <https://github.com/l5yth/potato-mesh/pull/139>
* Fix: retain alpine runtime libs after removing build deps by @l5yth in <https://github.com/l5yth/potato-mesh/pull/138>
* Fix: support windows ingestor build by @l5yth in <https://github.com/l5yth/potato-mesh/pull/136>
* Fix: use supported ruby image by @l5yth in <https://github.com/l5yth/potato-mesh/pull/135>
* Feat: Add comprehensive Docker support by @trose in <https://github.com/l5yth/potato-mesh/pull/122>
* Chore: bump version to 0.2.1 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/134>
* Fix dark mode tile styling on new map tiles by @l5yth in <https://github.com/l5yth/potato-mesh/pull/132>
* Switch map tiles to OSM HOT and add theme filters by @l5yth in <https://github.com/l5yth/potato-mesh/pull/130>
* Add footer version display by @l5yth in <https://github.com/l5yth/potato-mesh/pull/128>
* Add responsive controls for map legend by @l5yth in <https://github.com/l5yth/potato-mesh/pull/129>
* Update changelog by @l5yth in <https://github.com/l5yth/potato-mesh/pull/119>

## v0.2.0

* Update readme for 0.2 by @l5yth in <https://github.com/l5yth/potato-mesh/pull/118>
* Add PotatoMesh logo to header and favicon by @l5yth in <https://github.com/l5yth/potato-mesh/pull/117>
* Harden API auth and request limits by @l5yth in <https://github.com/l5yth/potato-mesh/pull/116>
* Add client-side sorting to node table by @l5yth in <https://github.com/l5yth/potato-mesh/pull/114>
* Add short name overlay for node details by @l5yth in <https://github.com/l5yth/potato-mesh/pull/111>
* Adjust python ingestor interval to 60 seconds by @l5yth in <https://github.com/l5yth/potato-mesh/pull/112>
* Hide location columns on medium screens by @l5yth in <https://github.com/l5yth/potato-mesh/pull/109>
* Handle message updates based on sender info by @l5yth in <https://github.com/l5yth/potato-mesh/pull/108>
* Prioritize node posts in queued API updates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/107>
* Add auto-refresh toggle to UI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/105>
* Adjust Leaflet popup styling for dark mode by @l5yth in <https://github.com/l5yth/potato-mesh/pull/104>
* Add site info overlay by @l5yth in <https://github.com/l5yth/potato-mesh/pull/103>
* Add long name tooltip to short name badge by @l5yth in <https://github.com/l5yth/potato-mesh/pull/102>
* Ensure node numeric aliases are derived from canonical IDs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/101>
* Chore: clean up repository by @l5yth in <https://github.com/l5yth/potato-mesh/pull/96>
* Handle SQLite busy errors when upserting nodes by @l5yth in <https://github.com/l5yth/potato-mesh/pull/100>
* Configure Sinatra logging level from DEBUG flag by @l5yth in <https://github.com/l5yth/potato-mesh/pull/97>
* Add penetration tests for authentication and SQL injection by @l5yth in <https://github.com/l5yth/potato-mesh/pull/95>
* Document Python and Ruby source modules by @l5yth in <https://github.com/l5yth/potato-mesh/pull/94>
* Add tests covering mesh helper edge cases by @l5yth in <https://github.com/l5yth/potato-mesh/pull/93>
* Fix py code cov by @l5yth in <https://github.com/l5yth/potato-mesh/pull/92>
* Add Codecov reporting to Python CI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/91>
* Skip null identifiers when selecting packet fields by @l5yth in <https://github.com/l5yth/potato-mesh/pull/88>
* Create python yml ga by @l5yth in <https://github.com/l5yth/potato-mesh/pull/90>
* Add unit tests for mesh ingestor script by @l5yth in <https://github.com/l5yth/potato-mesh/pull/89>
* Add coverage for debug logging on messages without sender by @l5yth in <https://github.com/l5yth/potato-mesh/pull/86>
* Handle concurrent node snapshot updates by @l5yth in <https://github.com/l5yth/potato-mesh/pull/85>
* Fix ingestion mapping for message sender IDs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/84>
* Add coverage for API authentication and payload edge cases by @l5yth in <https://github.com/l5yth/potato-mesh/pull/83>
* Add JUnit test reporting to Ruby CI by @l5yth in <https://github.com/l5yth/potato-mesh/pull/82>
* Configure SimpleCov reporting for Codecov by @l5yth in <https://github.com/l5yth/potato-mesh/pull/81>
* Update codecov job by @l5yth in <https://github.com/l5yth/potato-mesh/pull/80>
* Fix readme badges by @l5yth in <https://github.com/l5yth/potato-mesh/pull/79>
* Add Codecov upload step to Ruby workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/78>
* Add Apache license headers to source files by @l5yth in <https://github.com/l5yth/potato-mesh/pull/77>
* Add integration specs for node and message APIs by @l5yth in <https://github.com/l5yth/potato-mesh/pull/76>
* Docs: update for 0.2.0 release by @l5yth in <https://github.com/l5yth/potato-mesh/pull/75>
* Create ruby workflow by @l5yth in <https://github.com/l5yth/potato-mesh/pull/74>
* Add RSpec smoke tests for app boot and database init by @l5yth in <https://github.com/l5yth/potato-mesh/pull/73>
* Align refresh controls with status text by @l5yth in <https://github.com/l5yth/potato-mesh/pull/72>
* Improve mobile layout by @l5yth in <https://github.com/l5yth/potato-mesh/pull/68>
* Normalize message sender IDs using node numbers by @l5yth in <https://github.com/l5yth/potato-mesh/pull/67>
* Style: condense node table by @l5yth in <https://github.com/l5yth/potato-mesh/pull/65>
* Log debug details for messages without sender by @l5yth in <https://github.com/l5yth/potato-mesh/pull/64>
* Fix nested dataclass serialization for node snapshots by @l5yth in <https://github.com/l5yth/potato-mesh/pull/63>
* Log node object on snapshot update failure by @l5yth in <https://github.com/l5yth/potato-mesh/pull/62>
* Initialize database on startup by @l5yth in <https://github.com/l5yth/potato-mesh/pull/61>
* Send mesh data to Potatomesh API by @l5yth in <https://github.com/l5yth/potato-mesh/pull/60>
* Convert boolean flags for SQLite binding by @l5yth in <https://github.com/l5yth/potato-mesh/pull/59>
* Use packet id as message primary key by @l5yth in <https://github.com/l5yth/potato-mesh/pull/58>
* Add message ingestion API and stricter auth by @l5yth in <https://github.com/l5yth/potato-mesh/pull/56>
* Feat: parameterize community info by @l5yth in <https://github.com/l5yth/potato-mesh/pull/55>
* Feat: add dark mode toggle by @l5yth in <https://github.com/l5yth/potato-mesh/pull/54>

## v0.1.0

* Show daily node count in title and header by @l5yth in <https://github.com/l5yth/potato-mesh/pull/49>
@@ -0,0 +1,97 @@
# PotatoMesh Docker Guide

PotatoMesh publishes ready-to-run container images to the GitHub Packages container
registry (GHCR). You do not need to clone the repository to deploy them—Compose
will pull the latest release images for you.

## Prerequisites

- Docker Engine 24+ or Docker Desktop with the Compose plugin
- Access to `/dev/ttyACM*` (or equivalent) if you plan to attach a Meshtastic
  device to the ingestor container
- An API token that authorises the ingestor to post to your PotatoMesh instance

## Images on GHCR

| Service  | Image                                                   |
|----------|---------------------------------------------------------|
| Web UI   | `ghcr.io/l5yth/potato-mesh-web-linux-amd64:latest`      |
| Ingestor | `ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:latest` |

Images are published for every tagged release. Replace `latest` with a
specific version tag if you prefer pinned deployments.

## Configure environment

Create a `.env` file alongside your Compose file and populate the variables you
need. At a minimum you must set `API_TOKEN` so the ingestor can authenticate
against the web API.

```env
API_TOKEN=replace-with-a-strong-token
SITE_NAME=PotatoMesh Demo
CONNECTION=/dev/ttyACM0
INSTANCE_DOMAIN=mesh.example.org
```

Additional environment variables are optional; a fuller example follows this list.

- `CHANNEL`, `FREQUENCY`, `MAP_CENTER`, `MAX_DISTANCE`, and `CONTACT_LINK`
  customise the UI.
- `POTATOMESH_INSTANCE` (defaults to `http://web:41447`) lets the ingestor post
  to a remote PotatoMesh instance if you do not run both services together.
- `CONNECTION` overrides the default serial device or network endpoint used by
  the ingestor.
- `CHANNEL_INDEX` selects the LoRa channel when using serial or Bluetooth
  connections.
- `INSTANCE_DOMAIN` pins the public hostname advertised by the web UI and API
  responses, bypassing reverse DNS detection when set.
- `DEBUG` enables verbose logging across the stack.
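As a sketch of how these optional settings fit together, the `.env` below extends the minimal example above; the values shown are illustrative defaults taken from this guide and `.env.example`, not requirements:

```env
API_TOKEN=replace-with-a-strong-token
CONNECTION=/dev/ttyACM0
SITE_NAME=PotatoMesh Demo
CHANNEL=#LongFast
FREQUENCY=915MHz
MAP_CENTER=38.761944,-27.090833
MAX_DISTANCE=42
CONTACT_LINK=#potatomesh:dod.ngo
CHANNEL_INDEX=0
INSTANCE_DOMAIN=mesh.example.org
DEBUG=0
```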

## Docker Compose file

Use the `docker-compose.yml` file provided in the repository (or download the
[raw file from GitHub](https://raw.githubusercontent.com/l5yth/potato-mesh/main/docker-compose.yml)).
It already references the published GHCR images, defines persistent volumes for
data, configuration, and logs, and includes optional bridge-profile services for
environments that require classic port mapping. Place this file in the same
directory as your `.env` file so Compose can pick up both.

The dedicated configuration volume binds to `/app/.config/potato-mesh` inside
the container. This path stores the instance private key and staged
`/.well-known/potato-mesh` documents. Because the volume persists independently
of container lifecycle events, generated credentials are not replaced on reboot
or re-deploy.

## Start the stack

From the directory containing the Compose file:

```bash
docker compose up -d
```

Docker automatically pulls the GHCR images when they are not present locally.
The dashboard becomes available at `http://127.0.0.1:41447`. Use the bridge
profile when you need to map the port explicitly:

```bash
COMPOSE_PROFILES=bridge docker compose up -d
```

## Updating

```bash
docker compose pull
docker compose up -d
```

## Troubleshooting

- **Serial device permissions (Linux/macOS):** grant access with `sudo chmod 666 /dev/ttyACM0` or add your user to the `dialout` group.
- **Port already in use:** identify the conflicting service with `sudo lsof -i :41447`.
- **Viewing logs:** `docker compose logs -f` tails output from both services.

For general Docker support, consult the [Docker Compose documentation](https://docs.docker.com/compose/).
+78
@@ -0,0 +1,78 @@
# NOTE: This Dockerfile is kept for backward compatibility. The canonical build
# instructions live in `web/Dockerfile`; keep the two files in sync.

# Main application builder stage
FROM ruby:3.3-alpine AS builder

# Ensure native extensions are built against musl libc rather than
# using glibc precompiled binaries (which fail on Alpine).
ENV BUNDLE_FORCE_RUBY_PLATFORM=true

# Install build dependencies and SQLite3
RUN apk add --no-cache \
    build-base \
    sqlite-dev \
    linux-headers \
    pkgconfig

# Set working directory
WORKDIR /app

# Copy Gemfile and install dependencies
COPY web/Gemfile web/Gemfile.lock* ./

# Install gems with SQLite3 support
RUN bundle config set --local force_ruby_platform true && \
    bundle config set --local without 'development test' && \
    bundle install --jobs=4 --retry=3

# Production stage
FROM ruby:3.3-alpine AS production

# Install runtime dependencies
RUN apk add --no-cache \
    sqlite \
    tzdata \
    curl

# Create non-root user
RUN addgroup -g 1000 -S potatomesh && \
    adduser -u 1000 -S potatomesh -G potatomesh

# Set working directory
WORKDIR /app

# Copy installed gems from builder stage
COPY --from=builder /usr/local/bundle /usr/local/bundle

# Copy application code (exclude Dockerfile from web directory)
COPY --chown=potatomesh:potatomesh web/app.rb web/app.sh web/Gemfile web/Gemfile.lock* web/spec/ ./
COPY --chown=potatomesh:potatomesh web/public ./public
COPY --chown=potatomesh:potatomesh web/views/ ./views/

# Copy SQL schema files from data directory
COPY --chown=potatomesh:potatomesh data/*.sql /data/

# Create data directory for SQLite database
RUN mkdir -p /app/data /app/.local/share/potato-mesh && \
    chown -R potatomesh:potatomesh /app/data /app/.local

# Switch to non-root user
USER potatomesh

# Expose port
EXPOSE 41447

# Default environment variables (can be overridden by host)
ENV APP_ENV=production \
    RACK_ENV=production \
    SITE_NAME="PotatoMesh Demo" \
    CHANNEL="#LongFast" \
    FREQUENCY="915MHz" \
    MAP_CENTER="38.761944,-27.090833" \
    MAX_DISTANCE=42 \
    CONTACT_LINK="#potatomesh:dod.ngo" \
    DEBUG=0

# Start the application
CMD ["ruby", "app.rb", "-p", "41447", "-o", "0.0.0.0"]
@@ -1,14 +1,15 @@
# 🥔 PotatoMesh

[](https://github.com/l5yth/potato-mesh/actions)
[](https://github.com/l5yth/potato-mesh/releases)
[](https://github.com/l5yth/potato-mesh/releases)
[](https://codecov.io/gh/l5yth/potato-mesh)
[](LICENSE)
[](https://github.com/l5yth/potato-mesh/issues)
[](https://matrix.to/#/#potatomesh:dod.ngo)

A simple Meshtastic-powered node dashboard for your local community. _No MQTT clutter, just local LoRa aether._

* Web app with chat window and map view showing nodes and messages.
* Web app with chat window and map view showing nodes, neighbors, telemetry, and messages.
* API to POST (authenticated) and to GET nodes and messages.
* Supplemental Python ingestor to feed the POST APIs of the Web app with data remotely.
* Shows new node notifications (first seen) in chat.
@@ -16,7 +17,7 @@ A simple Meshtastic-powered node dashboard for your local community. _No MQTT cl

Live demo for Berlin #MediumFast: [potatomesh.net](https://potatomesh.net)

## Web App

@@ -24,7 +25,7 @@ Requires Ruby for the Sinatra web app and SQLite3 for the app's database.

```bash
pacman -S ruby sqlite3
gem install sinatra sqlite3 rackup puma rspec rack-test rufo
gem install sinatra sqlite3 rackup puma rspec rack-test rufo prometheus-client
cd ./web
bundle install
```
@@ -46,29 +47,77 @@ Puma starting in single mode...

Check [127.0.0.1:41447](http://127.0.0.1:41447/) for the development preview
of the node map. Set `API_TOKEN`, which is required to authorize the API's POST endpoints.

### Production

When promoting the app to production, run the server with the minimum required
configuration to ensure secure access and proper routing:

```bash
RACK_ENV="production" \
APP_ENV="production" \
API_TOKEN="SuperSecureTokenReally" \
INSTANCE_DOMAIN="https://potatomesh.net" \
exec ruby app.rb -p 41447 -o 0.0.0.0
```

* `RACK_ENV` and `APP_ENV` must be set to `production` to enable optimized
  settings suited for live deployments.
* Bind the server to a production port and all interfaces (`-p 41447 -o 0.0.0.0`)
  so that clients can reach the dashboard over the network.
* Provide a strong `API_TOKEN` value to authorize POST requests against the API.
* Configure `INSTANCE_DOMAIN` with the public URL of your deployment so vanity
  links and generated metadata resolve correctly.

The web app can be configured with environment variables (defaults shown):

* `SITE_NAME` - title and header shown in the ui (default: "Meshtastic Berlin")
* `DEFAULT_CHANNEL` - default channel shown in the ui (default: "#MediumFast")
* `DEFAULT_FREQUENCY` - default channel shown in the ui (default: "868MHz")
* `MAP_CENTER_LAT` / `MAP_CENTER_LON` - default map center coordinates (default: `52.502889` / `13.404194`)
* `MAX_NODE_DISTANCE_KM` - hide nodes farther than this distance from the center (default: `137`)
* `MATRIX_ROOM` - matrix room id for a footer link (default: `#meshtastic-berlin:matrix.org`)
* `SITE_NAME` - title and header shown in the UI (default: "PotatoMesh Demo")
* `CHANNEL` - default channel shown in the UI (default: "#LongFast")
* `FREQUENCY` - default frequency shown in the UI (default: "915MHz")
* `MAP_CENTER` - default map center coordinates (default: `38.761944,-27.090833`)
* `MAX_DISTANCE` - hide nodes farther than this distance from the center (default: `42`)
* `CONTACT_LINK` - chat link or Matrix alias for footer and overlay (default: `#potatomesh:dod.ngo`)
* `PRIVATE` - set to `1` to hide the chat UI, disable message APIs, and exclude hidden clients (default: unset)
* `INSTANCE_DOMAIN` - public hostname (optionally with port) used for metadata, federation, and API links (default: auto-detected)

The application derives SEO-friendly document titles, descriptions, and social
preview tags from these existing configuration values and reuses the bundled
logo for Open Graph and Twitter cards.

Example:

```bash
SITE_NAME="Meshtastic Berlin" MAP_CENTER_LAT=52.502889 MAP_CENTER_LON=13.404194 MAX_NODE_DISTANCE_KM=137 MATRIX_ROOM="#meshtastic-berlin:matrix.org" ./app.sh
SITE_NAME="PotatoMesh Demo" MAP_CENTER=38.761944,-27.090833 MAX_DISTANCE=42 CONTACT_LINK="#potatomesh:dod.ngo" ./app.sh
```

### Configuration & Storage

PotatoMesh stores its runtime assets using the XDG base directory specification.
When XDG directories are not provided, the application falls back
to the repository root.

The key is written to `$XDG_CONFIG_HOME/potato-mesh/keyfile` and the
well-known document is staged in
`$XDG_CONFIG_HOME/potato-mesh/well-known/potato-mesh`.

The database can be found in `$XDG_DATA_HOME/potato-mesh`.
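As a sketch, assuming the standard `XDG_CONFIG_HOME`/`XDG_DATA_HOME` variables are honored as described above (the directories are illustrative, not required paths):

```bash
# Point both locations at a dedicated directory before starting the app.
XDG_CONFIG_HOME=/srv/potato-mesh/config \
XDG_DATA_HOME=/srv/potato-mesh/data \
ruby app.rb -p 41447 -o 0.0.0.0
# Keyfile:  /srv/potato-mesh/config/potato-mesh/keyfile
# Database: /srv/potato-mesh/data/potato-mesh/
```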

### API

The web app contains an API:

* GET `/api/nodes?limit=100` - returns the latest 100 nodes reported to the app
* GET `/api/messages?limit=100` - returns the latest 100 messages
* GET `/api/positions?limit=100` - returns the latest 100 position data
* GET `/api/messages?limit=100` - returns the latest 100 messages (disabled when `PRIVATE=1`)
* GET `/api/telemetry?limit=100` - returns the latest 100 telemetry data
* GET `/api/neighbors?limit=100` - returns the latest 100 neighbor tuples
* GET `/api/instances` - returns known potato-mesh instances in other locations
* GET `/metrics` - metrics for the prometheus endpoint
* GET `/version` - information about the potato-mesh instance
* POST `/api/nodes` - upserts nodes provided as JSON object mapping node ids to node data (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/messages` - appends messages provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/positions` - appends positions provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/messages` - appends messages provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`; disabled when `PRIVATE=1`)
* POST `/api/telemetry` - appends telemetry provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)
* POST `/api/neighbors` - appends neighbor tuples provided as a JSON object or array (requires `Authorization: Bearer <API_TOKEN>`)

The `API_TOKEN` environment variable must be set to a non-empty value and match the token supplied in the `Authorization` header for `POST` requests.
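As a minimal sketch of an authenticated POST: the endpoint, the bearer header, and the node ID/short name come from this README, while the exact field names in the JSON body are illustrative, since the payload schema is not documented here.

```bash
# Upsert a single node; adjust the body to the fields your instance expects.
curl -X POST http://127.0.0.1:41447/api/nodes \
  -H "Authorization: Bearer $API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"!849b7154": {"shortName": "7154", "longName": "7154"}}'
```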
|
||||
|
||||
@@ -80,8 +129,9 @@

accepts data through the API POST endpoints. The benefit is that multiple nodes across the
community can feed the dashboard with data. The web app handles messages and nodes
by ID, so there will be no duplication.

For convenience, the directory `./data` contains a Python ingestor. It connects to a
Meshtastic node via serial port or to a remote device that exposes the Meshtastic TCP
or Bluetooth (BLE) interfaces to gather nodes and messages seen by the node.

```bash
pacman -S python
```

@@ -97,19 +147,41 @@

to the configured potato-mesh instance.
Check out the `mesh.sh` ingestor script in the `./data` directory.

```bash
POTATOMESH_INSTANCE=http://127.0.0.1:41447 API_TOKEN=1eb140fd-cab4-40be-b862-41c607762246 CONNECTION=/dev/ttyACM0 DEBUG=1 ./mesh.sh
[2025-02-20T12:34:56.789012Z] [potato-mesh] [info] channel=0 context=daemon.main port='41447' target='http://127.0.0.1' Mesh daemon starting
[...]
[2025-02-20T12:34:57.012345Z] [potato-mesh] [debug] context=handlers.upsert_node node_id=!849b7154 short_name='7154' long_name='7154' Queued node upsert payload
[2025-02-20T12:34:57.456789Z] [potato-mesh] [debug] context=handlers.upsert_node node_id=!ba653ae8 short_name='3ae8' long_name='3ae8' Queued node upsert payload
[2025-02-20T12:34:58.001122Z] [potato-mesh] [debug] context=handlers.store_packet_dict channel=0 from_id='!9ee71c38' payload='Guten Morgen!' to_id='^all' Queued message payload
```

Run the script with `POTATOMESH_INSTANCE` and `API_TOKEN` to keep updating
node records and parsing new incoming messages. Enable debug output with `DEBUG=1` and
specify the connection target with `CONNECTION` (default `/dev/ttyACM0`), or set it to
an IP address (for example `192.168.1.20:4403`) to use the Meshtastic TCP
interface. `CONNECTION` also accepts Bluetooth device addresses (e.g.
`ED:4D:9E:95:CF:60`), in which case the script attempts a BLE connection if available.

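The same invocation covers all three transports; only `CONNECTION` changes (the token and addresses below are placeholders):

```bash
# Serial (default)
CONNECTION=/dev/ttyACM0 POTATOMESH_INSTANCE=http://127.0.0.1:41447 API_TOKEN=$TOKEN ./mesh.sh

# TCP to a node reachable over the network
CONNECTION=192.168.1.20:4403 POTATOMESH_INSTANCE=http://127.0.0.1:41447 API_TOKEN=$TOKEN ./mesh.sh

# Bluetooth (BLE) by device address
CONNECTION=ED:4D:9E:95:CF:60 POTATOMESH_INSTANCE=http://127.0.0.1:41447 API_TOKEN=$TOKEN ./mesh.sh
```
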
## Demos

Post your nodes here:

* <https://github.com/l5yth/potato-mesh/discussions/258>

## Docker

Docker images are published on GitHub for each release:

```bash
docker pull ghcr.io/l5yth/potato-mesh/web:latest
docker pull ghcr.io/l5yth/potato-mesh/ingestor:latest
```

See the [Docker guide](DOCKER.md) for more details and custom deployment instructions.

## License

Apache v2.0, Contact <COM0@l5y.tech>

Join our community chat to discuss the dashboard or ask for technical support:
[#potatomesh:dod.ngo](https://matrix.to/#/#potatomesh:dod.ngo)

Executable
+197
@@ -0,0 +1,197 @@
|
||||
#!/bin/bash
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# PotatoMesh Configuration Script
|
||||
# This script helps you configure your PotatoMesh instance with your local settings
|
||||
|
||||
set -e
|
||||
|
||||
echo "🥔 PotatoMesh Configuration"
|
||||
echo "=========================="
|
||||
echo ""
|
||||
|
||||
# Check if .env exists, if not create from .env.example
|
||||
if [ ! -f .env ]; then
|
||||
if [ -f .env.example ]; then
|
||||
echo "📋 Creating .env file from .env.example..."
|
||||
cp .env.example .env
|
||||
else
|
||||
echo "📋 Creating new .env file..."
|
||||
touch .env
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "🔧 Let's configure your PotatoMesh instance!"
|
||||
echo ""
|
||||
|
||||
# Function to read input with default
|
||||
read_with_default() {
|
||||
local prompt="$1"
|
||||
local default="$2"
|
||||
local var_name="$3"
|
||||
|
||||
if [ -n "$default" ]; then
|
||||
read -p "$prompt [$default]: " input
|
||||
input=${input:-$default}
|
||||
else
|
||||
read -p "$prompt: " input
|
||||
fi
|
||||
|
||||
eval "$var_name='$input'"
|
||||
}
|
||||
|
||||
# Function to update .env file
|
||||
update_env() {
|
||||
local key="$1"
|
||||
local value="$2"
|
||||
|
||||
if grep -q "^$key=" .env; then
|
||||
# Update existing value
|
||||
sed -i.bak "s/^$key=.*/$key=$value/" .env
|
||||
else
|
||||
# Add new value
|
||||
echo "$key=$value" >> .env
|
||||
fi
|
||||
}
|
||||
|
||||
# Get current values from .env if they exist
|
||||
SITE_NAME=$(grep "^SITE_NAME=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "PotatoMesh Demo")
|
||||
CHANNEL=$(grep "^CHANNEL=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "#LongFast")
|
||||
FREQUENCY=$(grep "^FREQUENCY=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "915MHz")
|
||||
MAP_CENTER=$(grep "^MAP_CENTER=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "38.761944,-27.090833")
|
||||
MAX_DISTANCE=$(grep "^MAX_DISTANCE=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "42")
|
||||
CONTACT_LINK=$(grep "^CONTACT_LINK=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "#potatomesh:dod.ngo")
|
||||
API_TOKEN=$(grep "^API_TOKEN=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
|
||||
POTATOMESH_IMAGE_ARCH=$(grep "^POTATOMESH_IMAGE_ARCH=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "linux-amd64")
|
||||
INSTANCE_DOMAIN=$(grep "^INSTANCE_DOMAIN=" .env 2>/dev/null | cut -d'=' -f2- | tr -d '"' || echo "")
|
||||
|
||||
echo "📍 Location Settings"
|
||||
echo "-------------------"
|
||||
read_with_default "Site Name (your mesh network name)" "$SITE_NAME" SITE_NAME
|
||||
read_with_default "Map Center (lat,lon)" "$MAP_CENTER" MAP_CENTER
|
||||
read_with_default "Max Distance (km)" "$MAX_DISTANCE" MAX_DISTANCE
|
||||
|
||||
echo ""
|
||||
echo "📡 Meshtastic Settings"
|
||||
echo "---------------------"
|
||||
read_with_default "Channel" "$CHANNEL" CHANNEL
|
||||
read_with_default "Frequency (868MHz, 915MHz, etc.)" "$FREQUENCY" FREQUENCY
|
||||
|
||||
echo ""
|
||||
echo "💬 Optional Settings"
|
||||
echo "-------------------"
|
||||
read_with_default "Chat link or Matrix room (optional)" "$CONTACT_LINK" CONTACT_LINK
|
||||
|
||||
echo ""
|
||||
echo "🛠 Docker Settings"
|
||||
echo "------------------"
|
||||
echo "Specify the Docker image architecture for your host (linux-amd64, linux-arm64, linux-armv7)."
|
||||
read_with_default "Docker image architecture" "$POTATOMESH_IMAGE_ARCH" POTATOMESH_IMAGE_ARCH
|
||||
|
||||
echo ""
|
||||
echo "🌐 Domain Settings"
|
||||
echo "------------------"
|
||||
echo "Provide the public hostname that clients should use to reach this PotatoMesh instance."
|
||||
echo "Leave blank to allow automatic detection via reverse DNS."
|
||||
read_with_default "Instance domain (e.g. mesh.example.org)" "$INSTANCE_DOMAIN" INSTANCE_DOMAIN
|
||||
|
||||
echo ""
|
||||
echo "🔐 Security Settings"
|
||||
echo "-------------------"
|
||||
echo "The API token is used for secure communication between the web app and ingestor."
|
||||
echo "You can provide your own custom token or let us generate a secure one for you."
|
||||
echo ""
|
||||
|
||||
if [ -z "$API_TOKEN" ]; then
|
||||
echo "No existing API token found. Generating a secure token..."
|
||||
API_TOKEN=$(openssl rand -hex 32 2>/dev/null || python3 -c "import secrets; print(secrets.token_hex(32))" 2>/dev/null || echo "your-secure-api-token-here")
|
||||
echo "✅ Generated secure API token: ${API_TOKEN:0:8}..."
|
||||
echo ""
|
||||
read -p "Use this generated token? (Y/n): " use_generated
|
||||
if [[ "$use_generated" =~ ^[Nn]$ ]]; then
|
||||
read -p "Enter your custom API token: " API_TOKEN
|
||||
fi
|
||||
else
|
||||
echo "Existing API token found: ${API_TOKEN:0:8}..."
|
||||
read -p "Keep existing token? (Y/n): " keep_existing
|
||||
if [[ "$keep_existing" =~ ^[Nn]$ ]]; then
|
||||
read -p "Enter new API token (or press Enter to generate): " new_token
|
||||
if [ -n "$new_token" ]; then
|
||||
API_TOKEN="$new_token"
|
||||
else
|
||||
echo "Generating new secure token..."
|
||||
API_TOKEN=$(openssl rand -hex 32 2>/dev/null || python3 -c "import secrets; print(secrets.token_hex(32))" 2>/dev/null || echo "your-secure-api-token-here")
|
||||
echo "✅ Generated new API token: ${API_TOKEN:0:8}..."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "📝 Updating .env file..."
|
||||
|
||||
# Update .env file
|
||||
update_env "SITE_NAME" "\"$SITE_NAME\""
|
||||
update_env "CHANNEL" "\"$CHANNEL\""
|
||||
update_env "FREQUENCY" "\"$FREQUENCY\""
|
||||
update_env "MAP_CENTER" "\"$MAP_CENTER\""
|
||||
update_env "MAX_DISTANCE" "$MAX_DISTANCE"
|
||||
update_env "CONTACT_LINK" "\"$CONTACT_LINK\""
|
||||
update_env "API_TOKEN" "$API_TOKEN"
|
||||
update_env "POTATOMESH_IMAGE_ARCH" "$POTATOMESH_IMAGE_ARCH"
|
||||
if [ -n "$INSTANCE_DOMAIN" ]; then
|
||||
update_env "INSTANCE_DOMAIN" "$INSTANCE_DOMAIN"
|
||||
else
|
||||
sed -i.bak '/^INSTANCE_DOMAIN=.*/d' .env
|
||||
fi
|
||||
|
||||
# Migrate legacy connection settings and ensure defaults exist
|
||||
if grep -q "^MESH_SERIAL=" .env; then
|
||||
legacy_connection=$(grep "^MESH_SERIAL=" .env | head -n1 | cut -d'=' -f2-)
|
||||
if [ -n "$legacy_connection" ] && ! grep -q "^CONNECTION=" .env; then
|
||||
echo "♻️ Migrating legacy MESH_SERIAL value to CONNECTION"
|
||||
update_env "CONNECTION" "$legacy_connection"
|
||||
fi
|
||||
sed -i.bak '/^MESH_SERIAL=.*/d' .env
|
||||
fi
|
||||
|
||||
if ! grep -q "^CONNECTION=" .env; then
|
||||
echo "CONNECTION=/dev/ttyACM0" >> .env
|
||||
fi
|
||||
|
||||
if ! grep -q "^DEBUG=" .env; then
|
||||
echo "DEBUG=0" >> .env
|
||||
fi
|
||||
|
||||
# Clean up backup file
|
||||
rm -f .env.bak
|
||||
|
||||
echo ""
|
||||
echo "✅ Configuration complete!"
|
||||
echo ""
|
||||
echo "📋 Your settings:"
|
||||
echo " Site Name: $SITE_NAME"
|
||||
echo " Map Center: $MAP_CENTER"
|
||||
echo " Max Distance: ${MAX_DISTANCE}km"
|
||||
echo " Channel: $CHANNEL"
|
||||
echo " Frequency: $FREQUENCY"
|
||||
echo " Chat: ${CONTACT_LINK:-'Not set'}"
|
||||
echo " API Token: ${API_TOKEN:0:8}..."
|
||||
echo " Docker Image Arch: $POTATOMESH_IMAGE_ARCH"
|
||||
echo " Instance Domain: ${INSTANCE_DOMAIN:-'Auto-detected'}"
|
||||
echo ""
|
||||
echo "🚀 You can now start PotatoMesh with:"
|
||||
echo " docker-compose up -d"
|
||||
echo ""
|
||||
echo "📖 For more configuration options, see the README.md"
|
||||
@@ -0,0 +1,70 @@
|
||||
# syntax=docker/dockerfile:1.6
|
||||
|
||||
ARG TARGETOS=linux
|
||||
ARG PYTHON_VERSION=3.12.6
|
||||
|
||||
# Linux production image
|
||||
FROM python:${PYTHON_VERSION}-alpine AS production-linux
|
||||
|
||||
ENV PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY data/requirements.txt ./
|
||||
RUN set -eux; \
|
||||
apk add --no-cache \
|
||||
tzdata \
|
||||
curl \
|
||||
libstdc++ \
|
||||
libgcc; \
|
||||
apk add --no-cache --virtual .build-deps \
|
||||
gcc \
|
||||
musl-dev \
|
||||
linux-headers \
|
||||
build-base; \
|
||||
python -m pip install --no-cache-dir -r requirements.txt; \
|
||||
apk del .build-deps
|
||||
|
||||
COPY data /app/data
|
||||
RUN addgroup -S potatomesh && \
|
||||
adduser -S potatomesh -G potatomesh && \
|
||||
adduser potatomesh dialout && \
|
||||
chown -R potatomesh:potatomesh /app
|
||||
|
||||
USER potatomesh
|
||||
|
||||
ENV CONNECTION=/dev/ttyACM0 \
|
||||
CHANNEL_INDEX=0 \
|
||||
DEBUG=0 \
|
||||
POTATOMESH_INSTANCE="" \
|
||||
API_TOKEN=""
|
||||
|
||||
CMD ["python", "-m", "data.mesh"]
|
||||
|
||||
# Windows production image
|
||||
FROM python:${PYTHON_VERSION}-windowsservercore-ltsc2022 AS production-windows
|
||||
|
||||
SHELL ["cmd", "/S", "/C"]
|
||||
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY data/requirements.txt ./
|
||||
RUN python -m pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
COPY data /app/data
|
||||
|
||||
USER ContainerUser
|
||||
|
||||
ENV CONNECTION=/dev/ttyACM0 \
|
||||
CHANNEL_INDEX=0 \
|
||||
DEBUG=0 \
|
||||
POTATOMESH_INSTANCE="" \
|
||||
API_TOKEN=""
|
||||
|
||||
CMD ["python", "-m", "data.mesh"]
|
||||
|
||||
FROM production-${TARGETOS} AS production
|
||||
@@ -0,0 +1,32 @@
|
||||
-- Copyright (C) 2025 l5yth
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
PRAGMA journal_mode=WAL;
|
||||
|
||||
CREATE TABLE IF NOT EXISTS instances (
|
||||
id TEXT PRIMARY KEY,
|
||||
domain TEXT NOT NULL,
|
||||
pubkey TEXT NOT NULL,
|
||||
name TEXT,
|
||||
version TEXT,
|
||||
channel TEXT,
|
||||
frequency TEXT,
|
||||
latitude REAL,
|
||||
longitude REAL,
|
||||
last_update_time INTEGER,
|
||||
is_private BOOLEAN NOT NULL DEFAULT 0,
|
||||
signature TEXT
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_instances_domain ON instances(domain);
|
||||
+25
-453
@@ -1,5 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -14,458 +13,31 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Mesh daemon helpers for synchronising Meshtastic data.
|
||||
|
||||
This module wraps the Meshtastic serial interface and exposes helper
|
||||
functions that serialise nodes and text messages to JSON before forwarding
|
||||
them to the accompanying web API. It also provides the long-running daemon
|
||||
entry point that performs these synchronisation tasks.
|
||||
"""
|
||||
|
||||
import dataclasses
|
||||
import heapq
|
||||
import itertools
|
||||
import json, os, time, threading, signal, urllib.request, urllib.error
|
||||
from collections.abc import Mapping
|
||||
|
||||
from meshtastic.serial_interface import SerialInterface
|
||||
from pubsub import pub
|
||||
from google.protobuf.json_format import MessageToDict
|
||||
from google.protobuf.message import Message as ProtoMessage
|
||||
|
||||
# --- Config (env overrides) ---------------------------------------------------
|
||||
PORT = os.environ.get("MESH_SERIAL", "/dev/ttyACM0")
|
||||
SNAPSHOT_SECS = int(os.environ.get("MESH_SNAPSHOT_SECS", "60"))
|
||||
CHANNEL_INDEX = int(os.environ.get("MESH_CHANNEL_INDEX", "0"))
|
||||
DEBUG = os.environ.get("DEBUG") == "1"
|
||||
INSTANCE = os.environ.get("POTATOMESH_INSTANCE", "").rstrip("/")
|
||||
API_TOKEN = os.environ.get("API_TOKEN", "")
|
||||
|
||||
|
||||
# --- POST queue ----------------------------------------------------------------
|
||||
_POST_QUEUE_LOCK = threading.Lock()
|
||||
_POST_QUEUE = []
|
||||
_POST_QUEUE_COUNTER = itertools.count()
|
||||
_POST_QUEUE_ACTIVE = False
|
||||
|
||||
_NODE_POST_PRIORITY = 0
|
||||
_MESSAGE_POST_PRIORITY = 10
|
||||
_DEFAULT_POST_PRIORITY = 50
|
||||
|
||||
|
||||
def _get(obj, key, default=None):
|
||||
"""Return a key or attribute value from ``obj``.
|
||||
|
||||
Args:
|
||||
obj: Mapping or object containing the desired value.
|
||||
key: Key or attribute name to look up.
|
||||
default: Value returned when the key is missing.
|
||||
|
||||
Returns:
|
||||
The resolved value if present, otherwise ``default``.
|
||||
"""
|
||||
if isinstance(obj, dict):
|
||||
return obj.get(key, default)
|
||||
return getattr(obj, key, default)
|
||||
|
||||
|
||||
# --- HTTP helpers -------------------------------------------------------------
|
||||
def _post_json(path: str, payload: dict):
|
||||
"""Send a JSON payload to the configured web API.
|
||||
|
||||
Args:
|
||||
path: API path relative to the configured ``INSTANCE``.
|
||||
payload: Mapping serialised to JSON for the request body.
|
||||
"""
|
||||
|
||||
if not INSTANCE:
|
||||
return
|
||||
url = f"{INSTANCE}{path}"
|
||||
data = json.dumps(payload).encode("utf-8")
|
||||
req = urllib.request.Request(
|
||||
url, data=data, headers={"Content-Type": "application/json"}
|
||||
)
|
||||
if API_TOKEN:
|
||||
req.add_header("Authorization", f"Bearer {API_TOKEN}")
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||
resp.read()
|
||||
except Exception as e:
|
||||
if DEBUG:
|
||||
print(f"[warn] POST {url} failed: {e}")
|
||||
|
||||
|
||||
def _enqueue_post_json(path: str, payload: dict, priority: int):
|
||||
"""Store a POST request in the priority queue."""
|
||||
|
||||
with _POST_QUEUE_LOCK:
|
||||
heapq.heappush(
|
||||
_POST_QUEUE, (priority, next(_POST_QUEUE_COUNTER), path, payload)
|
||||
)
|
||||
|
||||
|
||||
def _drain_post_queue():
|
||||
"""Process queued POST requests in priority order."""
|
||||
|
||||
global _POST_QUEUE_ACTIVE
|
||||
while True:
|
||||
with _POST_QUEUE_LOCK:
|
||||
if not _POST_QUEUE:
|
||||
_POST_QUEUE_ACTIVE = False
|
||||
return
|
||||
_priority, _idx, path, payload = heapq.heappop(_POST_QUEUE)
|
||||
_post_json(path, payload)
|
||||
|
||||
|
||||
def _queue_post_json(
|
||||
path: str, payload: dict, *, priority: int = _DEFAULT_POST_PRIORITY
|
||||
):
|
||||
"""Queue a POST request and start processing if idle."""
|
||||
|
||||
global _POST_QUEUE_ACTIVE
|
||||
_enqueue_post_json(path, payload, priority)
|
||||
with _POST_QUEUE_LOCK:
|
||||
if _POST_QUEUE_ACTIVE:
|
||||
return
|
||||
_POST_QUEUE_ACTIVE = True
|
||||
_drain_post_queue()
|
||||
|
||||
|
||||
def _clear_post_queue():
|
||||
"""Clear the pending POST queue (used by tests)."""
|
||||
|
||||
global _POST_QUEUE_ACTIVE
|
||||
with _POST_QUEUE_LOCK:
|
||||
_POST_QUEUE.clear()
|
||||
_POST_QUEUE_ACTIVE = False
|
||||
|
||||
|
||||
# --- Node upsert --------------------------------------------------------------
|
||||
def _node_to_dict(n) -> dict:
|
||||
"""Convert Meshtastic node or user structures into plain dictionaries.
|
||||
|
||||
Args:
|
||||
n: ``dict``, dataclass or protobuf message describing a node or user.
|
||||
|
||||
Returns:
|
||||
JSON serialisable representation of ``n``.
|
||||
"""
|
||||
|
||||
def _convert(value):
|
||||
"""Recursively convert dataclasses and protobuf messages."""
|
||||
if isinstance(value, dict):
|
||||
return {k: _convert(v) for k, v in value.items()}
|
||||
if isinstance(value, (list, tuple, set)):
|
||||
return [_convert(v) for v in value]
|
||||
if dataclasses.is_dataclass(value):
|
||||
return {k: _convert(getattr(value, k)) for k in value.__dataclass_fields__}
|
||||
if isinstance(value, ProtoMessage):
|
||||
return MessageToDict(
|
||||
value, preserving_proto_field_name=True, use_integers_for_enums=False
|
||||
)
|
||||
if isinstance(value, bytes):
|
||||
try:
|
||||
return value.decode()
|
||||
except Exception:
|
||||
return value.hex()
|
||||
if isinstance(value, (str, int, float, bool)) or value is None:
|
||||
return value
|
||||
try:
|
||||
return json.loads(json.dumps(value, default=str))
|
||||
except Exception:
|
||||
return str(value)
|
||||
|
||||
return _convert(n)
|
||||
|
||||
|
||||
def upsert_node(node_id, n):
|
||||
"""Forward a node snapshot to the web API.
|
||||
|
||||
Args:
|
||||
node_id: Unique identifier of the node in the mesh.
|
||||
n: Node object obtained from the Meshtastic serial interface.
|
||||
"""
|
||||
|
||||
ndict = _node_to_dict(n)
|
||||
_queue_post_json("/api/nodes", {node_id: ndict}, priority=_NODE_POST_PRIORITY)
|
||||
|
||||
if DEBUG:
|
||||
user = _get(ndict, "user") or {}
|
||||
short = _get(user, "shortName")
|
||||
print(f"[debug] upserted node {node_id} shortName={short!r}")
|
||||
|
||||
|
||||
# --- Message logging via PubSub -----------------------------------------------
|
||||
def _iso(ts: int | float) -> str:
|
||||
"""Return an ISO-8601 timestamp string for ``ts``.
|
||||
|
||||
Args:
|
||||
ts: POSIX timestamp as ``int`` or ``float``.
|
||||
|
||||
Returns:
|
||||
Timestamp formatted with a trailing ``Z`` to denote UTC.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
|
||||
return (
|
||||
datetime.datetime.fromtimestamp(int(ts), datetime.UTC)
|
||||
.isoformat()
|
||||
.replace("+00:00", "Z")
|
||||
)
|
||||
|
||||
|
||||
def _first(d, *names, default=None):
|
||||
"""Return the first non-empty key from ``names`` (supports nested lookups).
|
||||
|
||||
Keys that resolve to ``None`` or an empty string are skipped so callers can
|
||||
provide multiple potential field names without accidentally capturing an
|
||||
explicit ``null`` value.
|
||||
|
||||
Args:
|
||||
d: Mapping or object to query.
|
||||
*names: Candidate field names using dotted paths for nesting.
|
||||
default: Value returned when all candidates are missing.
|
||||
|
||||
Returns:
|
||||
The first matching value or ``default`` if none resolve to content.
|
||||
"""
|
||||
|
||||
def _mapping_get(obj, key):
|
||||
if isinstance(obj, Mapping) and key in obj:
|
||||
return True, obj[key]
|
||||
if hasattr(obj, "__getitem__"):
|
||||
try:
|
||||
return True, obj[key]
|
||||
except Exception:
|
||||
pass
|
||||
if hasattr(obj, key):
|
||||
return True, getattr(obj, key)
|
||||
return False, None
|
||||
|
||||
for name in names:
|
||||
cur = d
|
||||
ok = True
|
||||
for part in name.split("."):
|
||||
ok, cur = _mapping_get(cur, part)
|
||||
if not ok:
|
||||
break
|
||||
if ok:
|
||||
if cur is None:
|
||||
continue
|
||||
if isinstance(cur, str) and cur == "":
|
||||
continue
|
||||
return cur
|
||||
return default
|
||||
|
||||
|
||||
def _pkt_to_dict(packet) -> dict:
|
||||
"""Normalise a received packet into a JSON-friendly dictionary.
|
||||
|
||||
Args:
|
||||
packet: Protobuf ``MeshPacket`` or dictionary received from the daemon.
|
||||
|
||||
Returns:
|
||||
Packet data ready for JSON serialisation.
|
||||
"""
|
||||
if isinstance(packet, dict):
|
||||
return packet
|
||||
if isinstance(packet, ProtoMessage):
|
||||
return MessageToDict(
|
||||
packet, preserving_proto_field_name=True, use_integers_for_enums=False
|
||||
)
|
||||
# Last resort: try to read attributes
|
||||
try:
|
||||
return json.loads(json.dumps(packet, default=lambda o: str(o)))
|
||||
except Exception:
|
||||
return {"_unparsed": str(packet)}
|
||||
|
||||
|
||||
def store_packet_dict(p: dict):
|
||||
"""Persist text messages extracted from a decoded packet.
|
||||
|
||||
Only packets from the ``TEXT_MESSAGE_APP`` port are forwarded to the
|
||||
web API. Field lookups tolerate camelCase and snake_case variants for
|
||||
compatibility across Meshtastic releases.
|
||||
|
||||
Args:
|
||||
p: Packet dictionary produced by ``_pkt_to_dict``.
|
||||
"""
|
||||
dec = p.get("decoded") or {}
|
||||
text = _first(dec, "payload.text", "text", default=None)
|
||||
if not text:
|
||||
return # ignore non-text packets
|
||||
|
||||
# port filter: only keep packets from the TEXT_MESSAGE_APP port
|
||||
portnum_raw = _first(dec, "portnum", default=None)
|
||||
portnum = str(portnum_raw).upper() if portnum_raw is not None else None
|
||||
if portnum and portnum not in {"1", "TEXT_MESSAGE_APP"}:
|
||||
return # ignore non-text-message ports
|
||||
|
||||
# channel (prefer decoded.channel if present; else top-level)
|
||||
ch = _first(dec, "channel", default=None)
|
||||
if ch is None:
|
||||
ch = _first(p, "channel", default=0)
|
||||
try:
|
||||
ch = int(ch)
|
||||
except Exception:
|
||||
ch = 0
|
||||
|
||||
# timestamps & ids
|
||||
pkt_id = _first(p, "id", "packet_id", "packetId", default=None)
|
||||
if pkt_id is None:
|
||||
return # ignore packets without an id
|
||||
rx_time = int(_first(p, "rxTime", "rx_time", default=time.time()))
|
||||
from_id = _first(p, "fromId", "from_id", "from", default=None)
|
||||
to_id = _first(p, "toId", "to_id", "to", default=None)
|
||||
|
||||
if (from_id is None or str(from_id) == "") and DEBUG:
|
||||
try:
|
||||
raw = json.dumps(p, default=str)
|
||||
except Exception:
|
||||
raw = str(p)
|
||||
print(f"[debug] packet missing from_id: {raw}")
|
||||
|
||||
# link metrics
|
||||
snr = _first(p, "snr", "rx_snr", "rxSnr", default=None)
|
||||
rssi = _first(p, "rssi", "rx_rssi", "rxRssi", default=None)
|
||||
hop = _first(p, "hopLimit", "hop_limit", default=None)
|
||||
|
||||
msg = {
|
||||
"id": int(pkt_id),
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": _iso(rx_time),
|
||||
"from_id": from_id,
|
||||
"to_id": to_id,
|
||||
"channel": ch,
|
||||
"portnum": str(portnum) if portnum is not None else None,
|
||||
"text": text,
|
||||
"snr": float(snr) if snr is not None else None,
|
||||
"rssi": int(rssi) if rssi is not None else None,
|
||||
"hop_limit": int(hop) if hop is not None else None,
|
||||
}
|
||||
_queue_post_json("/api/messages", msg, priority=_MESSAGE_POST_PRIORITY)
|
||||
|
||||
if DEBUG:
|
||||
print(
|
||||
f"[debug] stored message from {from_id!r} to {to_id!r} ch={ch} text={text!r}"
|
||||
)
|
||||
|
||||
|
||||
# PubSub receive handler
|
||||
def on_receive(packet, interface):
|
||||
"""PubSub callback that stores inbound text messages.
|
||||
|
||||
Args:
|
||||
packet: Packet received from the Meshtastic interface.
|
||||
interface: Serial interface instance (unused).
|
||||
"""
|
||||
|
||||
p = None
|
||||
try:
|
||||
p = _pkt_to_dict(packet)
|
||||
store_packet_dict(p)
|
||||
except Exception as e:
|
||||
info = list(p.keys()) if isinstance(p, dict) else type(packet)
|
||||
print(f"[warn] failed to store packet: {e} | info: {info}")
|
||||
|
||||
|
||||
# --- Main ---------------------------------------------------------------------
|
||||
def _node_items_snapshot(nodes_obj, retries: int = 3):
|
||||
"""Return a snapshot list of ``(node_id, node)`` pairs.
|
||||
|
||||
The Meshtastic ``SerialInterface`` updates ``iface.nodes`` from another
|
||||
thread. When that happens during iteration Python raises ``RuntimeError``.
|
||||
To keep the daemon quiet we retry a few times and, if it keeps changing,
|
||||
bail out for this loop.
|
||||
|
||||
Args:
|
||||
nodes_obj: Container mapping node IDs to node objects.
|
||||
retries: Number of attempts performed before giving up.
|
||||
|
||||
Returns:
|
||||
Snapshot of node entries or ``None`` when retries were exhausted because
|
||||
the container kept mutating.
|
||||
"""
|
||||
|
||||
if not nodes_obj:
|
||||
return []
|
||||
|
||||
items_callable = getattr(nodes_obj, "items", None)
|
||||
if callable(items_callable):
|
||||
for _ in range(max(1, retries)):
|
||||
try:
|
||||
return list(items_callable())
|
||||
except RuntimeError as err:
|
||||
if "dictionary changed size during iteration" not in str(err):
|
||||
raise
|
||||
time.sleep(0)
|
||||
return None
|
||||
|
||||
if hasattr(nodes_obj, "__iter__") and hasattr(nodes_obj, "__getitem__"):
|
||||
for _ in range(max(1, retries)):
|
||||
try:
|
||||
keys = list(nodes_obj)
|
||||
return [(k, nodes_obj[k]) for k in keys]
|
||||
except RuntimeError as err:
|
||||
if "dictionary changed size during iteration" not in str(err):
|
||||
raise
|
||||
time.sleep(0)
|
||||
return None
|
||||
|
||||
return []
|
||||
|
||||
|
||||
def main():
|
||||
"""Run the mesh synchronisation daemon."""
|
||||
|
||||
# Subscribe to PubSub topics (reliable in current meshtastic)
|
||||
pub.subscribe(on_receive, "meshtastic.receive")
|
||||
|
||||
iface = SerialInterface(devPath=PORT)
|
||||
|
||||
stop = threading.Event()
|
||||
|
||||
def handle_sig(*_):
|
||||
"""Stop the daemon when a termination signal is received."""
|
||||
|
||||
stop.set()
|
||||
|
||||
signal.signal(signal.SIGINT, handle_sig)
|
||||
signal.signal(signal.SIGTERM, handle_sig)
|
||||
|
||||
target = INSTANCE or "(no POTATOMESH_INSTANCE)"
|
||||
print(
|
||||
f"Mesh daemon: nodes+messages → {target} | port={PORT} | channel={CHANNEL_INDEX}"
|
||||
)
|
||||
while not stop.is_set():
|
||||
try:
|
||||
nodes = getattr(iface, "nodes", {}) or {}
|
||||
node_items = _node_items_snapshot(nodes)
|
||||
if node_items is None:
|
||||
if DEBUG:
|
||||
print(
|
||||
"[debug] skipping node snapshot; nodes changed during iteration"
|
||||
)
|
||||
else:
|
||||
for node_id, n in node_items:
|
||||
try:
|
||||
upsert_node(node_id, n)
|
||||
except Exception as e:
|
||||
print(
|
||||
f"[warn] failed to update node snapshot for {node_id}: {e}"
|
||||
)
|
||||
if DEBUG:
|
||||
print(f"[debug] node object: {n!r}")
|
||||
except Exception as e:
|
||||
print(f"[warn] failed to update node snapshot: {e}")
|
||||
stop.wait(SNAPSHOT_SECS)
|
||||
|
||||
try:
|
||||
iface.close()
|
||||
except Exception:
|
||||
pass
|
||||
"""Backward-compatible entry point for the mesh ingestor daemon."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
from . import mesh_ingestor as _mesh_ingestor
|
||||
except ImportError:
|
||||
if __package__ in {None, ""}:
|
||||
package_dir = Path(__file__).resolve().parent
|
||||
project_root = str(package_dir.parent)
|
||||
if project_root not in sys.path:
|
||||
sys.path.insert(0, project_root)
|
||||
_mesh_ingestor = importlib.import_module("data.mesh_ingestor")
|
||||
else:
|
||||
raise
|
||||
|
||||
# Expose the refactored mesh ingestor module under the legacy name so existing
|
||||
# imports (``import data.mesh as mesh``) continue to work. Attribute access and
|
||||
# monkeypatching operate directly on the shared module instance.
|
||||
sys.modules[__name__] = _mesh_ingestor
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
_mesh_ingestor.main()
|
||||
|
||||
@@ -0,0 +1,129 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""High-level API for the potato-mesh ingestor."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import signal as signal # re-exported for compatibility
|
||||
import threading as threading # re-exported for compatibility
|
||||
import sys
|
||||
import types
|
||||
|
||||
from . import channels, config, daemon, handlers, interfaces, queue, serialization
|
||||
|
||||
__all__: list[str] = []
|
||||
|
||||
|
||||
def _reexport(module) -> None:
|
||||
names = getattr(module, "__all__", [])
|
||||
for name in names:
|
||||
globals()[name] = getattr(module, name)
|
||||
__all__.extend(names)
|
||||
|
||||
|
||||
def _export_constants() -> None:
|
||||
globals()["json"] = queue.json
|
||||
globals()["urllib"] = queue.urllib
|
||||
globals()["glob"] = interfaces.glob
|
||||
__all__.extend(["json", "urllib", "glob", "threading", "signal"])
|
||||
|
||||
|
||||
for _module in (channels, daemon, handlers, interfaces, queue, serialization):
|
||||
_reexport(_module)
|
||||
|
||||
_export_constants()
|
||||
|
||||
_CONFIG_ATTRS = {
|
||||
"CONNECTION",
|
||||
"SNAPSHOT_SECS",
|
||||
"CHANNEL_INDEX",
|
||||
"DEBUG",
|
||||
"INSTANCE",
|
||||
"API_TOKEN",
|
||||
"LORA_FREQ",
|
||||
"MODEM_PRESET",
|
||||
"_RECONNECT_INITIAL_DELAY_SECS",
|
||||
"_RECONNECT_MAX_DELAY_SECS",
|
||||
"_CLOSE_TIMEOUT_SECS",
|
||||
"_debug_log",
|
||||
}
|
||||
|
||||
# Legacy export maintained for backwards compatibility.
|
||||
_CONFIG_ATTRS.add("PORT")
|
||||
|
||||
_INTERFACE_ATTRS = {"BLEInterface", "SerialInterface", "TCPInterface"}
|
||||
|
||||
_QUEUE_ATTRS = set(queue.__all__)
|
||||
_HANDLER_ATTRS = set(handlers.__all__)
|
||||
_DAEMON_ATTRS = set(daemon.__all__)
|
||||
_SERIALIZATION_ATTRS = set(serialization.__all__)
|
||||
_INTERFACE_EXPORTS = set(interfaces.__all__)
|
||||
|
||||
__all__.extend(sorted(_CONFIG_ATTRS))
|
||||
__all__.extend(sorted(_INTERFACE_ATTRS))
|
||||
|
||||
|
||||
class _MeshIngestorModule(types.ModuleType):
|
||||
"""Module proxy that forwards config and interface state."""
|
||||
|
||||
def __getattr__(self, name: str): # type: ignore[override]
|
||||
"""Resolve attributes by delegating to the underlying submodules."""
|
||||
|
||||
if name in _CONFIG_ATTRS:
|
||||
return getattr(config, name)
|
||||
if name in _INTERFACE_ATTRS:
|
||||
return getattr(interfaces, name)
|
||||
if name in _INTERFACE_EXPORTS:
|
||||
return getattr(interfaces, name)
|
||||
raise AttributeError(name)
|
||||
|
||||
def __setattr__(self, name: str, value): # type: ignore[override]
|
||||
"""Propagate assignments to the appropriate submodule."""
|
||||
|
||||
if name in _CONFIG_ATTRS:
|
||||
setattr(config, name, value)
|
||||
super().__setattr__(name, value)
|
||||
return
|
||||
if name in _INTERFACE_ATTRS:
|
||||
setattr(interfaces, name, value)
|
||||
super().__setattr__(name, value)
|
||||
return
|
||||
handled = False
|
||||
if name in _INTERFACE_EXPORTS:
|
||||
setattr(interfaces, name, value)
|
||||
super().__setattr__(name, getattr(interfaces, name, value))
|
||||
handled = True
|
||||
if name in _QUEUE_ATTRS:
|
||||
setattr(queue, name, value)
|
||||
super().__setattr__(name, getattr(queue, name, value))
|
||||
handled = True
|
||||
if name in _HANDLER_ATTRS:
|
||||
setattr(handlers, name, value)
|
||||
super().__setattr__(name, getattr(handlers, name, value))
|
||||
handled = True
|
||||
if name in _DAEMON_ATTRS:
|
||||
setattr(daemon, name, value)
|
||||
super().__setattr__(name, getattr(daemon, name, value))
|
||||
handled = True
|
||||
if name in _SERIALIZATION_ATTRS:
|
||||
setattr(serialization, name, value)
|
||||
super().__setattr__(name, getattr(serialization, name, value))
|
||||
handled = True
|
||||
if handled:
|
||||
return
|
||||
super().__setattr__(name, value)
|
||||
|
||||
|
||||
sys.modules[__name__].__class__ = _MeshIngestorModule
|
||||
@@ -0,0 +1,238 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Helpers for capturing and exposing mesh channel metadata."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Any, Iterable, Iterator
|
||||
|
||||
from . import config
|
||||
|
||||
try: # pragma: no cover - optional dependency for enum introspection
|
||||
from meshtastic.protobuf import channel_pb2
|
||||
except Exception: # pragma: no cover - exercised in environments without protobufs
|
||||
channel_pb2 = None # type: ignore[assignment]
|
||||
|
||||
_ROLE_PRIMARY = 1
|
||||
_ROLE_SECONDARY = 2
|
||||
|
||||
if channel_pb2 is not None: # pragma: no branch - evaluated once at import time
|
||||
try:
|
||||
_ROLE_PRIMARY = int(channel_pb2.Channel.Role.PRIMARY)
|
||||
_ROLE_SECONDARY = int(channel_pb2.Channel.Role.SECONDARY)
|
||||
except Exception: # pragma: no cover - defensive, version specific
|
||||
_ROLE_PRIMARY = 1
|
||||
_ROLE_SECONDARY = 2
|
||||
|
||||
_CHANNEL_MAPPINGS: tuple[tuple[int, str], ...] = ()
|
||||
_CHANNEL_LOOKUP: dict[int, str] = {}
|
||||
|
||||
|
||||
def _iter_channel_objects(channels_obj: Any) -> Iterator[Any]:
|
||||
"""Yield channel descriptors from ``channels_obj``.
|
||||
|
||||
The real Meshtastic API exposes channels via protobuf containers that are
|
||||
list-like. This helper converts the container into a deterministic iterator
|
||||
while avoiding runtime errors if an unexpected type is supplied.
|
||||
"""
|
||||
|
||||
if channels_obj is None:
|
||||
return iter(())
|
||||
|
||||
if isinstance(channels_obj, dict):
|
||||
return iter(channels_obj.values())
|
||||
|
||||
if isinstance(channels_obj, Iterable):
|
||||
return iter(list(channels_obj))
|
||||
|
||||
length_fn = getattr(channels_obj, "__len__", None)
|
||||
getitem = getattr(channels_obj, "__getitem__", None)
|
||||
if callable(length_fn) and callable(getitem):
|
||||
try:
|
||||
length = int(length_fn())
|
||||
except Exception: # pragma: no cover - defensive only
|
||||
length = None
|
||||
if length is not None and length >= 0:
|
||||
snapshot = []
|
||||
for index in range(length):
|
||||
try:
|
||||
snapshot.append(getitem(index))
|
||||
except Exception: # pragma: no cover - best effort copy
|
||||
break
|
||||
return iter(snapshot)
|
||||
|
||||
return iter(())
|
||||
|
||||
|
||||
def _primary_channel_name() -> str | None:
|
||||
"""Return the fallback name to use for the primary channel when needed."""
|
||||
|
||||
preset = getattr(config, "MODEM_PRESET", None)
|
||||
if isinstance(preset, str) and preset.strip():
|
||||
return preset.strip()
|
||||
env_name = os.environ.get("CHANNEL", "").strip()
|
||||
if env_name:
|
||||
return env_name
|
||||
return None
|
||||
|
||||
|
||||
def _extract_channel_name(settings_obj: Any) -> str | None:
|
||||
"""Normalise the configured channel name extracted from ``settings_obj``."""
|
||||
|
||||
if settings_obj is None:
|
||||
return None
|
||||
|
||||
if isinstance(settings_obj, dict):
|
||||
candidate = settings_obj.get("name")
|
||||
else:
|
||||
candidate = getattr(settings_obj, "name", None)
|
||||
|
||||
if isinstance(candidate, str):
|
||||
candidate = candidate.strip()
|
||||
if candidate:
|
||||
return candidate
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def _normalize_role(role: Any) -> int | None:
|
||||
"""Convert a channel role descriptor into an integer value."""
|
||||
|
||||
if isinstance(role, int):
|
||||
return role
|
||||
if isinstance(role, str):
|
||||
value = role.strip().upper()
|
||||
if value == "PRIMARY":
|
||||
return _ROLE_PRIMARY
|
||||
if value == "SECONDARY":
|
||||
return _ROLE_SECONDARY
|
||||
try:
|
||||
return int(value)
|
||||
except ValueError:
|
||||
return None
|
||||
name_attr = getattr(role, "name", None)
|
||||
if isinstance(name_attr, str):
|
||||
return _normalize_role(name_attr)
|
||||
value_attr = getattr(role, "value", None)
|
||||
if isinstance(value_attr, int):
|
||||
return value_attr
|
||||
try:
|
||||
return int(role) # type: ignore[arg-type]
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def _channel_tuple(channel_obj: Any) -> tuple[int, str] | None:
|
||||
"""Return ``(index, name)`` for ``channel_obj`` when resolvable."""
|
||||
|
||||
role_value = _normalize_role(getattr(channel_obj, "role", None))
|
||||
if role_value == _ROLE_PRIMARY:
|
||||
channel_index = 0
|
||||
channel_name = _extract_channel_name(getattr(channel_obj, "settings", None))
|
||||
if channel_name is None:
|
||||
channel_name = _primary_channel_name()
|
||||
elif role_value == _ROLE_SECONDARY:
|
||||
raw_index = getattr(channel_obj, "index", None)
|
||||
try:
|
||||
channel_index = int(raw_index)
|
||||
except Exception:
|
||||
channel_index = None
|
||||
channel_name = _extract_channel_name(getattr(channel_obj, "settings", None))
|
||||
else:
|
||||
return None
|
||||
|
||||
if not isinstance(channel_index, int):
|
||||
return None
|
||||
|
||||
if not isinstance(channel_name, str) or not channel_name:
|
||||
return None
|
||||
|
||||
return channel_index, channel_name
|
||||
|
||||
|
||||
def capture_from_interface(iface: Any) -> None:
|
||||
"""Populate the channel cache by inspecting ``iface`` when possible."""
|
||||
|
||||
global _CHANNEL_MAPPINGS, _CHANNEL_LOOKUP
|
||||
|
||||
if iface is None or _CHANNEL_MAPPINGS:
|
||||
return
|
||||
|
||||
try:
|
||||
wait_for_config = getattr(iface, "waitForConfig", None)
|
||||
if callable(wait_for_config):
|
||||
wait_for_config()
|
||||
except Exception: # pragma: no cover - hardware dependent safeguard
|
||||
pass
|
||||
|
||||
local_node = getattr(iface, "localNode", None)
|
||||
channels_obj = getattr(local_node, "channels", None) if local_node else None
|
||||
|
||||
channel_entries: list[tuple[int, str]] = []
|
||||
seen_indices: set[int] = set()
|
||||
for candidate in _iter_channel_objects(channels_obj):
|
||||
result = _channel_tuple(candidate)
|
||||
if result is None:
|
||||
continue
|
||||
index, name = result
|
||||
if index in seen_indices:
|
||||
continue
|
||||
channel_entries.append((index, name))
|
||||
seen_indices.add(index)
|
||||
|
||||
if not channel_entries:
|
||||
return
|
||||
|
||||
_CHANNEL_MAPPINGS = tuple(channel_entries)
|
||||
_CHANNEL_LOOKUP = {index: name for index, name in _CHANNEL_MAPPINGS}
|
||||
|
||||
config._debug_log(
|
||||
"Captured channel metadata",
|
||||
context="channels.capture",
|
||||
severity="info",
|
||||
always=True,
|
||||
channels=_CHANNEL_MAPPINGS,
|
||||
)
|
||||
|
||||
|
||||
def channel_mappings() -> tuple[tuple[int, str], ...]:
|
||||
"""Return the cached ``(index, name)`` channel tuples."""
|
||||
|
||||
return _CHANNEL_MAPPINGS
|
||||
|
||||
|
||||
def channel_name(channel_index: int | None) -> str | None:
|
||||
"""Return the channel name for ``channel_index`` when known."""
|
||||
|
||||
if channel_index is None:
|
||||
return None
|
||||
return _CHANNEL_LOOKUP.get(int(channel_index))
|
||||
|
||||
|
||||
def _reset_channel_cache() -> None:
|
||||
"""Clear cached channel data. Intended for use in tests only."""
|
||||
|
||||
global _CHANNEL_MAPPINGS, _CHANNEL_LOOKUP
|
||||
_CHANNEL_MAPPINGS = ()
|
||||
_CHANNEL_LOOKUP = {}
|
||||
|
||||
|
||||
__all__ = [
|
||||
"capture_from_interface",
|
||||
"channel_mappings",
|
||||
"channel_name",
|
||||
"_reset_channel_cache",
|
||||
]
|
||||
@@ -0,0 +1,153 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Configuration helpers for the potato-mesh ingestor."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from types import ModuleType
|
||||
from typing import Any
|
||||
|
||||
DEFAULT_SNAPSHOT_SECS = 60
|
||||
"""Default interval, in seconds, between state snapshot uploads."""
|
||||
|
||||
DEFAULT_CHANNEL_INDEX = 0
|
||||
"""Default LoRa channel index used when none is specified."""
|
||||
|
||||
DEFAULT_RECONNECT_INITIAL_DELAY_SECS = 5.0
|
||||
"""Initial reconnection delay applied after connection loss."""
|
||||
|
||||
DEFAULT_RECONNECT_MAX_DELAY_SECS = 60.0
|
||||
"""Maximum reconnection backoff delay applied by the ingestor."""
|
||||
|
||||
DEFAULT_CLOSE_TIMEOUT_SECS = 5.0
|
||||
"""Grace period for interface shutdown routines to complete."""
|
||||
|
||||
DEFAULT_INACTIVITY_RECONNECT_SECS = float(60 * 60)
|
||||
"""Interval before forcing a reconnect when no packets are observed."""
|
||||
|
||||
DEFAULT_ENERGY_ONLINE_DURATION_SECS = 300.0
|
||||
"""Duration to stay online before entering a low-power sleep cycle."""
|
||||
|
||||
DEFAULT_ENERGY_SLEEP_SECS = float(6 * 60 * 60)
|
||||
"""Sleep duration used when energy saving mode is active."""
|
||||
|
||||
CONNECTION = os.environ.get("CONNECTION") or os.environ.get("MESH_SERIAL")
|
||||
"""Optional connection target for the mesh interface.
|
||||
|
||||
When unset, platform-specific defaults will be inferred by the interface
|
||||
implementations. The legacy :envvar:`MESH_SERIAL` environment variable is still
|
||||
accepted for backwards compatibility.
|
||||
"""
|
||||
|
||||
SNAPSHOT_SECS = DEFAULT_SNAPSHOT_SECS
|
||||
"""Interval, in seconds, between state snapshot uploads."""
|
||||
|
||||
CHANNEL_INDEX = int(os.environ.get("CHANNEL_INDEX", str(DEFAULT_CHANNEL_INDEX)))
|
||||
"""Index of the LoRa channel to select when connecting."""
|
||||
|
||||
DEBUG = os.environ.get("DEBUG") == "1"
|
||||
INSTANCE = os.environ.get("POTATOMESH_INSTANCE", "").rstrip("/")
|
||||
API_TOKEN = os.environ.get("API_TOKEN", "")
|
||||
ENERGY_SAVING = os.environ.get("ENERGY_SAVING") == "1"
|
||||
"""When ``True``, enables the ingestor's energy saving mode."""
|
||||
|
||||
LORA_FREQ: int | None = None
|
||||
"""Frequency of the local node's configured LoRa region in MHz."""
|
||||
|
||||
MODEM_PRESET: str | None = None
|
||||
"""CamelCase modem preset name reported by the local node."""
|
||||
|
||||
_RECONNECT_INITIAL_DELAY_SECS = DEFAULT_RECONNECT_INITIAL_DELAY_SECS
|
||||
_RECONNECT_MAX_DELAY_SECS = DEFAULT_RECONNECT_MAX_DELAY_SECS
|
||||
_CLOSE_TIMEOUT_SECS = DEFAULT_CLOSE_TIMEOUT_SECS
|
||||
_INACTIVITY_RECONNECT_SECS = DEFAULT_INACTIVITY_RECONNECT_SECS
|
||||
_ENERGY_ONLINE_DURATION_SECS = DEFAULT_ENERGY_ONLINE_DURATION_SECS
|
||||
_ENERGY_SLEEP_SECS = DEFAULT_ENERGY_SLEEP_SECS
|
||||
|
||||
# Backwards compatibility shim for legacy imports.
|
||||
PORT = CONNECTION
|
||||
|
||||
|
||||
def _debug_log(
|
||||
message: str,
|
||||
*,
|
||||
context: str | None = None,
|
||||
severity: str = "debug",
|
||||
always: bool = False,
|
||||
**metadata: Any,
|
||||
) -> None:
|
||||
"""Print ``message`` with a UTC timestamp when ``DEBUG`` is enabled.
|
||||
|
||||
Parameters:
|
||||
message: Text to display when debug logging is active.
|
||||
context: Optional logical component emitting the message.
|
||||
severity: Log level label to embed in the formatted output.
|
||||
always: When ``True``, bypasses the :data:`DEBUG` guard.
|
||||
**metadata: Additional structured log metadata.
|
||||
"""
|
||||
|
||||
normalized_severity = severity.lower()
|
||||
|
||||
if not DEBUG and not always and normalized_severity == "debug":
|
||||
return
|
||||
|
||||
timestamp = datetime.now(timezone.utc).isoformat(timespec="milliseconds")
|
||||
timestamp = timestamp.replace("+00:00", "Z")
|
||||
parts = [f"[{timestamp}]", "[potato-mesh]", f"[{normalized_severity}]"]
|
||||
if context:
|
||||
parts.append(f"context={context}")
|
||||
for key, value in sorted(metadata.items()):
|
||||
parts.append(f"{key}={value!r}")
|
||||
parts.append(message)
|
||||
print(" ".join(parts))
|
||||
|
||||
|
||||
__all__ = [
|
||||
"CONNECTION",
|
||||
"SNAPSHOT_SECS",
|
||||
"CHANNEL_INDEX",
|
||||
"DEBUG",
|
||||
"INSTANCE",
|
||||
"API_TOKEN",
|
||||
"ENERGY_SAVING",
|
||||
"LORA_FREQ",
|
||||
"MODEM_PRESET",
|
||||
"_RECONNECT_INITIAL_DELAY_SECS",
|
||||
"_RECONNECT_MAX_DELAY_SECS",
|
||||
"_CLOSE_TIMEOUT_SECS",
|
||||
"_INACTIVITY_RECONNECT_SECS",
|
||||
"_ENERGY_ONLINE_DURATION_SECS",
|
||||
"_ENERGY_SLEEP_SECS",
|
||||
"_debug_log",
|
||||
]
|
||||
|
||||
|
||||
class _ConfigModule(ModuleType):
|
||||
"""Module proxy that keeps connection aliases synchronised."""
|
||||
|
||||
def __setattr__(self, name: str, value: Any) -> None: # type: ignore[override]
|
||||
"""Propagate CONNECTION/PORT assignments to both attributes."""
|
||||
|
||||
if name in {"CONNECTION", "PORT"}:
|
||||
super().__setattr__("CONNECTION", value)
|
||||
super().__setattr__("PORT", value)
|
||||
return
|
||||
super().__setattr__(name, value)
|
||||
|
||||
|
||||
sys.modules[__name__].__class__ = _ConfigModule
|
||||
@@ -0,0 +1,519 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Runtime entry point for the mesh ingestor."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
import signal
|
||||
import threading
|
||||
import time
|
||||
|
||||
from pubsub import pub
|
||||
|
||||
from . import config, handlers, interfaces
|
||||
|
||||
_RECEIVE_TOPICS = (
|
||||
"meshtastic.receive",
|
||||
"meshtastic.receive.text",
|
||||
"meshtastic.receive.position",
|
||||
"meshtastic.receive.user",
|
||||
"meshtastic.receive.POSITION_APP",
|
||||
"meshtastic.receive.NODEINFO_APP",
|
||||
"meshtastic.receive.NEIGHBORINFO_APP",
|
||||
"meshtastic.receive.TEXT_MESSAGE_APP",
|
||||
"meshtastic.receive.TELEMETRY_APP",
|
||||
)
|
||||
|
||||
|
||||
def _event_wait_allows_default_timeout() -> bool:
|
||||
"""Return ``True`` when :meth:`threading.Event.wait` accepts ``timeout``.
|
||||
|
||||
The behaviour changed between Python versions; this helper shields the
|
||||
daemon from ``TypeError`` when the default timeout parameter is absent.
|
||||
"""
|
||||
|
||||
try:
|
||||
wait_signature = inspect.signature(threading.Event.wait)
|
||||
except (TypeError, ValueError): # pragma: no cover
|
||||
return True
|
||||
|
||||
parameters = list(wait_signature.parameters.values())
|
||||
if len(parameters) <= 1:
|
||||
return True
|
||||
|
||||
timeout_parameter = parameters[1]
|
||||
if timeout_parameter.kind in (
|
||||
inspect.Parameter.VAR_POSITIONAL,
|
||||
inspect.Parameter.VAR_KEYWORD,
|
||||
):
|
||||
return True
|
||||
|
||||
return timeout_parameter.default is not inspect._empty
|
||||
|
||||
|
||||
def _subscribe_receive_topics() -> list[str]:
|
||||
"""Subscribe the packet handler to all receive-related pubsub topics."""
|
||||
|
||||
subscribed = []
|
||||
for topic in _RECEIVE_TOPICS:
|
||||
try:
|
||||
pub.subscribe(handlers.on_receive, topic)
|
||||
subscribed.append(topic)
|
||||
except Exception as exc: # pragma: no cover
|
||||
config._debug_log(f"failed to subscribe to {topic!r}: {exc}")
|
||||
return subscribed
|
||||
|
||||
|
||||
def _node_items_snapshot(
|
||||
nodes_obj, retries: int = 3
|
||||
) -> list[tuple[str, object]] | None:
|
||||
"""Snapshot ``nodes_obj`` to avoid iteration errors during updates.
|
||||
|
||||
Parameters:
|
||||
nodes_obj: Meshtastic nodes mapping or iterable.
|
||||
retries: Number of attempts when encountering "dictionary changed"
|
||||
runtime errors.
|
||||
|
||||
Returns:
|
||||
A list of ``(node_id, node)`` tuples, ``None`` when retries are
|
||||
exhausted, or an empty list when no nodes exist.
|
||||
"""
|
||||
|
||||
if not nodes_obj:
|
||||
return []
|
||||
|
||||
items_callable = getattr(nodes_obj, "items", None)
|
||||
if callable(items_callable):
|
||||
for _ in range(max(1, retries)):
|
||||
try:
|
||||
return list(items_callable())
|
||||
except RuntimeError as err:
|
||||
if "dictionary changed size during iteration" not in str(err):
|
||||
raise
|
||||
time.sleep(0)
|
||||
return None
|
||||
|
||||
if hasattr(nodes_obj, "__iter__") and hasattr(nodes_obj, "__getitem__"):
|
||||
for _ in range(max(1, retries)):
|
||||
try:
|
||||
keys = list(nodes_obj)
|
||||
return [(key, nodes_obj[key]) for key in keys]
|
||||
except RuntimeError as err:
|
||||
if "dictionary changed size during iteration" not in str(err):
|
||||
raise
|
||||
time.sleep(0)
|
||||
return None
|
||||
|
||||
return []
|
||||
|
||||
|
||||
def _close_interface(iface_obj) -> None:
|
||||
"""Close ``iface_obj`` while respecting configured timeouts."""
|
||||
|
||||
if iface_obj is None:
|
||||
return
|
||||
|
||||
def _do_close() -> None:
|
||||
try:
|
||||
iface_obj.close()
|
||||
except Exception as exc: # pragma: no cover
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Error closing mesh interface",
|
||||
context="daemon.close",
|
||||
severity="warn",
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
|
||||
if config._CLOSE_TIMEOUT_SECS <= 0 or not _event_wait_allows_default_timeout():
|
||||
_do_close()
|
||||
return
|
||||
|
||||
close_thread = threading.Thread(target=_do_close, name="mesh-close", daemon=True)
|
||||
close_thread.start()
|
||||
close_thread.join(config._CLOSE_TIMEOUT_SECS)
|
||||
if close_thread.is_alive():
|
||||
config._debug_log(
|
||||
"Mesh interface close timed out",
|
||||
context="daemon.close",
|
||||
severity="warn",
|
||||
timeout_seconds=config._CLOSE_TIMEOUT_SECS,
|
||||
)
|
||||
|
||||
|
||||
def _is_ble_interface(iface_obj) -> bool:
|
||||
"""Return ``True`` when ``iface_obj`` appears to be a BLE interface."""
|
||||
|
||||
if iface_obj is None:
|
||||
return False
|
||||
iface_cls = getattr(iface_obj, "__class__", None)
|
||||
if iface_cls is None:
|
||||
return False
|
||||
module_name = getattr(iface_cls, "__module__", "") or ""
|
||||
return "ble_interface" in module_name
|
||||
|
||||
|
||||
def _connected_state(candidate) -> bool | None:
|
||||
"""Return the connection state advertised by ``candidate``.
|
||||
|
||||
Parameters:
|
||||
candidate: Attribute returned from ``iface.isConnected`` on a
|
||||
Meshtastic interface. The value may be a boolean, a callable that
|
||||
yields a boolean, or a :class:`threading.Event` instance.
|
||||
|
||||
Returns:
|
||||
``True`` when the interface is believed to be connected, ``False``
|
||||
when it appears disconnected, and ``None`` when the state cannot be
|
||||
determined from the provided attribute.
|
||||
"""
|
||||
|
||||
if candidate is None:
|
||||
return None
|
||||
|
||||
if isinstance(candidate, threading.Event):
|
||||
return candidate.is_set()
|
||||
|
||||
is_set_method = getattr(candidate, "is_set", None)
|
||||
if callable(is_set_method):
|
||||
try:
|
||||
return bool(is_set_method())
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
if callable(candidate):
|
||||
try:
|
||||
return bool(candidate())
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
try:
|
||||
return bool(candidate)
|
||||
except Exception: # pragma: no cover - defensive guard
|
||||
return None
|
||||
|
||||
|
||||
def main() -> None:
|
||||
"""Run the mesh ingestion daemon until interrupted."""
|
||||
|
||||
subscribed = _subscribe_receive_topics()
|
||||
if subscribed:
|
||||
config._debug_log(
|
||||
"Subscribed to receive topics",
|
||||
context="daemon.subscribe",
|
||||
severity="info",
|
||||
topics=subscribed,
|
||||
)
|
||||
|
||||
iface = None
|
||||
resolved_target = None
|
||||
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
|
||||
|
||||
stop = threading.Event()
|
||||
initial_snapshot_sent = False
|
||||
energy_session_deadline = None
|
||||
iface_connected_at: float | None = None
|
||||
last_seen_packet_monotonic = handlers.last_packet_monotonic()
|
||||
last_inactivity_reconnect: float | None = None
|
||||
inactivity_reconnect_secs = max(
|
||||
0.0, getattr(config, "_INACTIVITY_RECONNECT_SECS", 0.0)
|
||||
)
|
||||
|
||||
energy_saving_enabled = config.ENERGY_SAVING
|
||||
energy_online_secs = max(0.0, config._ENERGY_ONLINE_DURATION_SECS)
|
||||
energy_sleep_secs = max(0.0, config._ENERGY_SLEEP_SECS)
|
||||
|
||||
def _energy_sleep(reason: str) -> None:
|
||||
if not energy_saving_enabled or energy_sleep_secs <= 0:
|
||||
return
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
f"energy saving: {reason}; sleeping for {energy_sleep_secs:g}s"
|
||||
)
|
||||
stop.wait(energy_sleep_secs)
|
||||
|
||||
def handle_sigterm(*_args) -> None:
|
||||
stop.set()
|
||||
|
||||
def handle_sigint(signum, frame) -> None:
|
||||
if stop.is_set():
|
||||
signal.default_int_handler(signum, frame)
|
||||
return
|
||||
stop.set()
|
||||
|
||||
signal.signal(signal.SIGINT, handle_sigint)
|
||||
signal.signal(signal.SIGTERM, handle_sigterm)
|
||||
|
||||
target = config.INSTANCE or "(no POTATOMESH_INSTANCE)"
|
||||
configured_port = config.CONNECTION
|
||||
active_candidate = configured_port
|
||||
announced_target = False
|
||||
config._debug_log(
|
||||
"Mesh daemon starting",
|
||||
context="daemon.main",
|
||||
severity="info",
|
||||
target=target,
|
||||
port=configured_port or "auto",
|
||||
channel=config.CHANNEL_INDEX,
|
||||
)
|
||||
try:
|
||||
while not stop.is_set():
|
||||
if iface is None:
|
||||
try:
|
||||
if active_candidate:
|
||||
iface, resolved_target = interfaces._create_serial_interface(
|
||||
active_candidate
|
||||
)
|
||||
else:
|
||||
iface, resolved_target = interfaces._create_default_interface()
|
||||
active_candidate = resolved_target
|
||||
interfaces._ensure_radio_metadata(iface)
|
||||
interfaces._ensure_channel_metadata(iface)
|
||||
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
|
||||
initial_snapshot_sent = False
|
||||
if not announced_target and resolved_target:
|
||||
config._debug_log(
|
||||
"Using mesh interface",
|
||||
context="daemon.interface",
|
||||
severity="info",
|
||||
target=resolved_target,
|
||||
)
|
||||
announced_target = True
|
||||
if energy_saving_enabled and energy_online_secs > 0:
|
||||
energy_session_deadline = time.monotonic() + energy_online_secs
|
||||
else:
|
||||
energy_session_deadline = None
|
||||
iface_connected_at = time.monotonic()
|
||||
# Seed the inactivity tracking from the connection time so a
|
||||
# reconnect is given a full inactivity window even when the
|
||||
# handler still reports the previous packet timestamp.
|
||||
last_seen_packet_monotonic = iface_connected_at
|
||||
last_inactivity_reconnect = None
|
||||
except interfaces.NoAvailableMeshInterface as exc:
|
||||
config._debug_log(
|
||||
"No mesh interface available",
|
||||
context="daemon.interface",
|
||||
severity="error",
|
||||
error_message=str(exc),
|
||||
)
|
||||
_close_interface(iface)
|
||||
raise SystemExit(1) from exc
|
||||
except Exception as exc:
|
||||
candidate_desc = active_candidate or "auto"
|
||||
config._debug_log(
|
||||
"Failed to create mesh interface",
|
||||
context="daemon.interface",
|
||||
severity="warn",
|
||||
candidate=candidate_desc,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
if configured_port is None:
|
||||
active_candidate = None
|
||||
announced_target = False
|
||||
stop.wait(retry_delay)
|
||||
if config._RECONNECT_MAX_DELAY_SECS > 0:
|
||||
retry_delay = min(
|
||||
(
|
||||
retry_delay * 2
|
||||
if retry_delay
|
||||
else config._RECONNECT_INITIAL_DELAY_SECS
|
||||
),
|
||||
config._RECONNECT_MAX_DELAY_SECS,
|
||||
)
|
||||
continue
|
||||
|
||||
if energy_saving_enabled and iface is not None:
|
||||
if (
|
||||
energy_session_deadline is not None
|
||||
and time.monotonic() >= energy_session_deadline
|
||||
):
|
||||
config._debug_log(
|
||||
"Energy saving disconnect",
|
||||
context="daemon.energy",
|
||||
severity="info",
|
||||
)
|
||||
_close_interface(iface)
|
||||
iface = None
|
||||
announced_target = False
|
||||
initial_snapshot_sent = False
|
||||
energy_session_deadline = None
|
||||
_energy_sleep("disconnected after session")
|
||||
continue
|
||||
if (
|
||||
_is_ble_interface(iface)
|
||||
and getattr(iface, "client", object()) is None
|
||||
):
|
||||
config._debug_log(
|
||||
"Energy saving BLE disconnect",
|
||||
context="daemon.energy",
|
||||
severity="info",
|
||||
)
|
||||
_close_interface(iface)
|
||||
iface = None
|
||||
announced_target = False
|
||||
initial_snapshot_sent = False
|
||||
energy_session_deadline = None
|
||||
_energy_sleep("BLE client disconnected")
|
||||
continue
|
||||
|
||||
if not initial_snapshot_sent:
|
||||
try:
|
||||
nodes = getattr(iface, "nodes", {}) or {}
|
||||
node_items = _node_items_snapshot(nodes)
|
||||
if node_items is None:
|
||||
config._debug_log(
|
||||
"Skipping node snapshot due to concurrent modification",
|
||||
context="daemon.snapshot",
|
||||
)
|
||||
else:
|
||||
processed_snapshot_item = False
|
||||
for node_id, node in node_items:
|
||||
processed_snapshot_item = True
|
||||
try:
|
||||
handlers.upsert_node(node_id, node)
|
||||
except Exception as exc:
|
||||
config._debug_log(
|
||||
"Failed to update node snapshot",
|
||||
context="daemon.snapshot",
|
||||
severity="warn",
|
||||
node_id=node_id,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Snapshot node payload",
|
||||
context="daemon.snapshot",
|
||||
node=node,
|
||||
)
|
||||
if processed_snapshot_item:
|
||||
initial_snapshot_sent = True
|
||||
except Exception as exc:
|
||||
config._debug_log(
|
||||
"Snapshot refresh failed",
|
||||
context="daemon.snapshot",
|
||||
severity="warn",
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
_close_interface(iface)
|
||||
iface = None
|
||||
stop.wait(retry_delay)
|
||||
if config._RECONNECT_MAX_DELAY_SECS > 0:
|
||||
retry_delay = min(
|
||||
(
|
||||
retry_delay * 2
|
||||
if retry_delay
|
||||
else config._RECONNECT_INITIAL_DELAY_SECS
|
||||
),
|
||||
config._RECONNECT_MAX_DELAY_SECS,
|
||||
)
|
||||
continue
|
||||
|
||||
if iface is not None and inactivity_reconnect_secs > 0:
|
||||
now_monotonic = time.monotonic()
|
||||
iface_activity = handlers.last_packet_monotonic()
|
||||
if (
|
||||
iface_activity is not None
|
||||
and iface_connected_at is not None
|
||||
and iface_activity < iface_connected_at
|
||||
):
|
||||
iface_activity = iface_connected_at
|
||||
if iface_activity is not None and (
|
||||
last_seen_packet_monotonic is None
|
||||
or iface_activity > last_seen_packet_monotonic
|
||||
):
|
||||
last_seen_packet_monotonic = iface_activity
|
||||
last_inactivity_reconnect = None
|
||||
|
||||
latest_activity = iface_activity
|
||||
if latest_activity is None and iface_connected_at is not None:
|
||||
latest_activity = iface_connected_at
|
||||
if latest_activity is None:
|
||||
latest_activity = now_monotonic
|
||||
|
||||
inactivity_elapsed = now_monotonic - latest_activity
|
||||
|
||||
connected_attr = getattr(iface, "isConnected", None)
|
||||
believed_disconnected = False
|
||||
connected_state = _connected_state(connected_attr)
|
||||
if connected_state is None:
|
||||
if callable(connected_attr):
|
||||
try:
|
||||
believed_disconnected = not bool(connected_attr())
|
||||
except Exception:
|
||||
believed_disconnected = False
|
||||
elif connected_attr is not None:
|
||||
try:
|
||||
believed_disconnected = not bool(connected_attr)
|
||||
except Exception: # pragma: no cover - defensive guard
|
||||
believed_disconnected = False
|
||||
else:
|
||||
believed_disconnected = not connected_state
|
||||
|
||||
should_reconnect = believed_disconnected or (
|
||||
inactivity_elapsed >= inactivity_reconnect_secs
|
||||
)
|
||||
|
||||
if should_reconnect:
|
||||
if (
|
||||
last_inactivity_reconnect is None
|
||||
or now_monotonic - last_inactivity_reconnect
|
||||
>= inactivity_reconnect_secs
|
||||
):
|
||||
reason = (
|
||||
"disconnected"
|
||||
if believed_disconnected
|
||||
else f"no data for {inactivity_elapsed:.0f}s"
|
||||
)
|
||||
config._debug_log(
|
||||
"Mesh interface inactivity detected",
|
||||
context="daemon.interface",
|
||||
severity="warn",
|
||||
reason=reason,
|
||||
)
|
||||
last_inactivity_reconnect = now_monotonic
|
||||
_close_interface(iface)
|
||||
iface = None
|
||||
announced_target = False
|
||||
initial_snapshot_sent = False
|
||||
energy_session_deadline = None
|
||||
iface_connected_at = None
|
||||
continue
|
||||
|
||||
retry_delay = max(0.0, config._RECONNECT_INITIAL_DELAY_SECS)
|
||||
stop.wait(config.SNAPSHOT_SECS)
|
||||
except KeyboardInterrupt: # pragma: no cover - interactive only
|
||||
config._debug_log(
|
||||
"Received KeyboardInterrupt; shutting down",
|
||||
context="daemon.main",
|
||||
severity="info",
|
||||
)
|
||||
stop.set()
|
||||
finally:
|
||||
_close_interface(iface)
|
||||
|
||||
|
||||
__all__ = [
|
||||
"_RECEIVE_TOPICS",
|
||||
"_event_wait_allows_default_timeout",
|
||||
"_node_items_snapshot",
|
||||
"_subscribe_receive_topics",
|
||||
"_is_ble_interface",
|
||||
"_connected_state",
|
||||
"main",
|
||||
]
|
||||
@@ -0,0 +1,985 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Packet handlers that serialise data and push it to the HTTP queue."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import json
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
|
||||
from . import channels, config, queue
|
||||
from .serialization import (
|
||||
_canonical_node_id,
|
||||
_coerce_float,
|
||||
_coerce_int,
|
||||
_decode_nodeinfo_payload,
|
||||
_extract_payload_bytes,
|
||||
_first,
|
||||
_get,
|
||||
_iso,
|
||||
_merge_mappings,
|
||||
_node_num_from_id,
|
||||
_node_to_dict,
|
||||
_nodeinfo_metrics_dict,
|
||||
_nodeinfo_position_dict,
|
||||
_nodeinfo_user_dict,
|
||||
_pkt_to_dict,
|
||||
upsert_payload,
|
||||
)
|
||||
|
||||
|
||||
def _radio_metadata_fields() -> dict[str, object]:
|
||||
"""Return the shared radio metadata fields for payload enrichment."""
|
||||
|
||||
metadata: dict[str, object] = {}
|
||||
freq = getattr(config, "LORA_FREQ", None)
|
||||
if freq is not None:
|
||||
metadata["lora_freq"] = freq
|
||||
preset = getattr(config, "MODEM_PRESET", None)
|
||||
if preset is not None:
|
||||
metadata["modem_preset"] = preset
|
||||
return metadata
|
||||
|
||||
|
||||
def _apply_radio_metadata(payload: dict) -> dict:
|
||||
"""Augment ``payload`` with radio metadata when available."""
|
||||
|
||||
metadata = _radio_metadata_fields()
|
||||
if metadata:
|
||||
payload.update(metadata)
|
||||
return payload
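# Illustrative sketch: once ``config.LORA_FREQ`` and ``config.MODEM_PRESET``
# have been captured from the radio (868 and "LongFast" are example values,
# not defaults), every payload passed through the helper above gains the same
# two keys; when neither value is known the payload is returned untouched.
def _apply_radio_metadata_example() -> dict:
    payload = {"id": 1, "rx_time": 0}
    enriched = _apply_radio_metadata(payload)
    # With metadata captured: {"id": 1, "rx_time": 0,
    #                          "lora_freq": 868, "modem_preset": "LongFast"}
    # Without metadata:       {"id": 1, "rx_time": 0}
    return enriched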
def _is_encrypted_flag(value) -> bool:
|
||||
"""Return ``True`` when ``value`` represents an encrypted payload."""
|
||||
|
||||
if isinstance(value, bool):
|
||||
return value
|
||||
if isinstance(value, (int, float)):
|
||||
return value != 0
|
||||
if isinstance(value, str):
|
||||
normalized = value.strip().lower()
|
||||
if normalized in {"", "0", "false", "no"}:
|
||||
return False
|
||||
return True
|
||||
return bool(value)
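# Illustrative sketch: the string normalisation above lets API payloads mark
# plaintext packets with "0", "false" or "no", while any other non-empty value
# (including raw ciphertext bytes) still counts as encrypted.
def _is_encrypted_flag_examples() -> None:
    assert _is_encrypted_flag(True) is True
    assert _is_encrypted_flag(0) is False
    assert _is_encrypted_flag("false") is False
    assert _is_encrypted_flag("yes") is True
    assert _is_encrypted_flag(b"\x01\x02") is True  # ciphertext bytes are truthy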
def _apply_radio_metadata_to_nodes(payload: dict) -> dict:
|
||||
"""Attach radio metadata to each node entry stored in ``payload``."""
|
||||
|
||||
metadata = _radio_metadata_fields()
|
||||
if not metadata:
|
||||
return payload
|
||||
for value in payload.values():
|
||||
if isinstance(value, dict):
|
||||
value.update(metadata)
|
||||
return payload
|
||||
|
||||
|
||||
def upsert_node(node_id, node) -> None:
|
||||
"""Schedule an upsert for a single node.
|
||||
|
||||
Parameters:
|
||||
node_id: Canonical identifier for the node in the ``!xxxxxxxx`` format.
|
||||
node: Node object or mapping to serialise for the API payload.
|
||||
|
||||
Returns:
|
||||
``None``. The payload is forwarded to the shared HTTP queue.
|
||||
"""
|
||||
|
||||
payload = _apply_radio_metadata_to_nodes(upsert_payload(node_id, node))
|
||||
_queue_post_json("/api/nodes", payload, priority=queue._NODE_POST_PRIORITY)
|
||||
|
||||
if config.DEBUG:
|
||||
user = _get(payload[node_id], "user") or {}
|
||||
short = _get(user, "shortName")
|
||||
long = _get(user, "longName")
|
||||
config._debug_log(
|
||||
"Queued node upsert payload",
|
||||
context="handlers.upsert_node",
|
||||
node_id=node_id,
|
||||
short_name=short,
|
||||
long_name=long,
|
||||
)
|
||||
|
||||
|
||||
def store_position_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist a decoded position packet.
|
||||
|
||||
Parameters:
|
||||
packet: Raw packet metadata emitted by Meshtastic.
|
||||
decoded: Decoded payload extracted from ``packet['decoded']``.
|
||||
|
||||
Returns:
|
||||
``None``. The formatted position data is queued for HTTP submission.
|
||||
"""
|
||||
|
||||
node_ref = _first(packet, "fromId", "from_id", "from", default=None)
|
||||
if node_ref is None:
|
||||
node_ref = _first(decoded, "num", default=None)
|
||||
node_id = _canonical_node_id(node_ref)
|
||||
if node_id is None:
|
||||
return
|
||||
|
||||
node_num = _coerce_int(_first(decoded, "num", default=None))
|
||||
if node_num is None:
|
||||
node_num = _node_num_from_id(node_id)
|
||||
|
||||
pkt_id = _coerce_int(_first(packet, "id", "packet_id", "packetId", default=None))
|
||||
if pkt_id is None:
|
||||
return
|
||||
|
||||
rx_time = _coerce_int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
if rx_time is None:
|
||||
rx_time = int(time.time())
|
||||
|
||||
to_id = _first(packet, "toId", "to_id", "to", default=None)
|
||||
to_id = to_id if to_id not in {"", None} else None
|
||||
|
||||
position_section = decoded.get("position") if isinstance(decoded, Mapping) else None
|
||||
if not isinstance(position_section, Mapping):
|
||||
position_section = {}
|
||||
|
||||
latitude = _coerce_float(
|
||||
_first(position_section, "latitude", "raw.latitude", default=None)
|
||||
)
|
||||
if latitude is None:
|
||||
lat_i = _coerce_int(
|
||||
_first(
|
||||
position_section,
|
||||
"latitudeI",
|
||||
"latitude_i",
|
||||
"raw.latitude_i",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
if lat_i is not None:
|
||||
latitude = lat_i / 1e7
|
||||
|
||||
longitude = _coerce_float(
|
||||
_first(position_section, "longitude", "raw.longitude", default=None)
|
||||
)
|
||||
if longitude is None:
|
||||
lon_i = _coerce_int(
|
||||
_first(
|
||||
position_section,
|
||||
"longitudeI",
|
||||
"longitude_i",
|
||||
"raw.longitude_i",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
if lon_i is not None:
|
||||
longitude = lon_i / 1e7
|
||||
|
||||
altitude = _coerce_float(
|
||||
_first(position_section, "altitude", "raw.altitude", default=None)
|
||||
)
|
||||
position_time = _coerce_int(
|
||||
_first(position_section, "time", "raw.time", default=None)
|
||||
)
|
||||
location_source = _first(
|
||||
position_section,
|
||||
"locationSource",
|
||||
"location_source",
|
||||
"raw.location_source",
|
||||
default=None,
|
||||
)
|
||||
location_source = (
|
||||
str(location_source).strip() if location_source not in {None, ""} else None
|
||||
)
|
||||
|
||||
precision_bits = _coerce_int(
|
||||
_first(
|
||||
position_section,
|
||||
"precisionBits",
|
||||
"precision_bits",
|
||||
"raw.precision_bits",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
sats_in_view = _coerce_int(
|
||||
_first(
|
||||
position_section,
|
||||
"satsInView",
|
||||
"sats_in_view",
|
||||
"raw.sats_in_view",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
pdop = _coerce_float(
|
||||
_first(position_section, "PDOP", "pdop", "raw.PDOP", "raw.pdop", default=None)
|
||||
)
|
||||
ground_speed = _coerce_float(
|
||||
_first(
|
||||
position_section,
|
||||
"groundSpeed",
|
||||
"ground_speed",
|
||||
"raw.ground_speed",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
ground_track = _coerce_float(
|
||||
_first(
|
||||
position_section,
|
||||
"groundTrack",
|
||||
"ground_track",
|
||||
"raw.ground_track",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
snr = _coerce_float(_first(packet, "snr", "rx_snr", "rxSnr", default=None))
|
||||
rssi = _coerce_int(_first(packet, "rssi", "rx_rssi", "rxRssi", default=None))
|
||||
hop_limit = _coerce_int(_first(packet, "hopLimit", "hop_limit", default=None))
|
||||
bitfield = _coerce_int(_first(decoded, "bitfield", default=None))
|
||||
|
||||
payload_bytes = _extract_payload_bytes(decoded)
|
||||
payload_b64 = base64_payload(payload_bytes)
|
||||
|
||||
raw_section = decoded.get("raw") if isinstance(decoded, Mapping) else None
|
||||
raw_payload = _node_to_dict(raw_section) if raw_section else None
|
||||
if raw_payload is None and position_section:
|
||||
raw_position = (
|
||||
position_section.get("raw")
|
||||
if isinstance(position_section, Mapping)
|
||||
else None
|
||||
)
|
||||
if raw_position:
|
||||
raw_payload = _node_to_dict(raw_position)
|
||||
|
||||
position_payload = {
|
||||
"id": pkt_id,
|
||||
"node_id": node_id or node_ref,
|
||||
"node_num": node_num,
|
||||
"num": node_num,
|
||||
"from_id": node_id,
|
||||
"to_id": to_id,
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": _iso(rx_time),
|
||||
"latitude": latitude,
|
||||
"longitude": longitude,
|
||||
"altitude": altitude,
|
||||
"position_time": position_time,
|
||||
"location_source": location_source,
|
||||
"precision_bits": precision_bits,
|
||||
"sats_in_view": sats_in_view,
|
||||
"pdop": pdop,
|
||||
"ground_speed": ground_speed,
|
||||
"ground_track": ground_track,
|
||||
"snr": snr,
|
||||
"rssi": rssi,
|
||||
"hop_limit": hop_limit,
|
||||
"bitfield": bitfield,
|
||||
"payload_b64": payload_b64,
|
||||
}
|
||||
if raw_payload:
|
||||
position_payload["raw"] = raw_payload
|
||||
|
||||
_queue_post_json(
|
||||
"/api/positions",
|
||||
_apply_radio_metadata(position_payload),
|
||||
priority=queue._POSITION_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Queued position payload",
|
||||
context="handlers.store_position",
|
||||
node_id=node_id,
|
||||
latitude=latitude,
|
||||
longitude=longitude,
|
||||
position_time=position_time,
|
||||
)
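# Illustrative sketch of the coordinate handling above: Meshtastic encodes
# positions as integer degrees scaled by 1e7, so a ``latitude_i`` of
# 525200000 decodes to 52.52 degrees when no float field is present.
def _position_scaling_example() -> None:
    lat_i = 525200000
    assert lat_i / 1e7 == 52.52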
def base64_payload(payload_bytes: bytes | None) -> str | None:
|
||||
"""Encode raw payload bytes for JSON transport.
|
||||
|
||||
Parameters:
|
||||
payload_bytes: Optional payload to encode. ``None`` is returned when
|
||||
the payload is empty or missing.
|
||||
|
||||
Returns:
|
||||
The Base64 encoded payload string or ``None`` when no payload exists.
|
||||
"""
|
||||
|
||||
if not payload_bytes:
|
||||
return None
|
||||
return base64.b64encode(payload_bytes).decode("ascii")
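# Illustrative usage sketch for the helper above: payload bytes round-trip
# through Base64 for JSON transport, while empty or missing payloads collapse
# to ``None`` so the key can be omitted downstream.
def _base64_payload_examples() -> None:
    assert base64_payload(None) is None
    assert base64_payload(b"") is None
    encoded = base64_payload(b"potato")
    assert encoded == "cG90YXRv"
    assert base64.b64decode(encoded) == b"potato"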
def store_telemetry_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist telemetry metrics extracted from a packet.
|
||||
|
||||
Parameters:
|
||||
packet: Packet metadata received from the radio interface.
|
||||
decoded: Meshtastic-decoded view containing telemetry structures.
|
||||
|
||||
Returns:
|
||||
``None``. The telemetry payload is added to the HTTP queue.
|
||||
"""
|
||||
|
||||
telemetry_section = (
|
||||
decoded.get("telemetry") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if not isinstance(telemetry_section, Mapping):
|
||||
return
|
||||
|
||||
pkt_id = _coerce_int(_first(packet, "id", "packet_id", "packetId", default=None))
|
||||
if pkt_id is None:
|
||||
return
|
||||
|
||||
raw_from = _first(packet, "fromId", "from_id", "from", default=None)
|
||||
node_id = _canonical_node_id(raw_from)
|
||||
node_num = _coerce_int(_first(decoded, "num", "node_num", default=None))
|
||||
if node_num is None:
|
||||
node_num = _node_num_from_id(node_id or raw_from)
|
||||
|
||||
to_id = _first(packet, "toId", "to_id", "to", default=None)
|
||||
|
||||
raw_rx_time = _first(packet, "rxTime", "rx_time", default=time.time())
|
||||
try:
|
||||
rx_time = int(raw_rx_time)
|
||||
except (TypeError, ValueError):
|
||||
rx_time = int(time.time())
|
||||
rx_iso = _iso(rx_time)
|
||||
|
||||
telemetry_time = _coerce_int(_first(telemetry_section, "time", default=None))
|
||||
|
||||
channel = _coerce_int(_first(decoded, "channel", default=None))
|
||||
if channel is None:
|
||||
channel = _coerce_int(_first(packet, "channel", default=None))
|
||||
if channel is None:
|
||||
channel = 0
|
||||
|
||||
portnum = _first(decoded, "portnum", default=None)
|
||||
portnum = str(portnum) if portnum not in {None, ""} else None
|
||||
|
||||
bitfield = _coerce_int(_first(decoded, "bitfield", default=None))
|
||||
|
||||
snr = _coerce_float(_first(packet, "snr", "rx_snr", "rxSnr", default=None))
|
||||
rssi = _coerce_int(_first(packet, "rssi", "rx_rssi", "rxRssi", default=None))
|
||||
hop_limit = _coerce_int(_first(packet, "hopLimit", "hop_limit", default=None))
|
||||
|
||||
payload_bytes = _extract_payload_bytes(decoded)
|
||||
payload_b64 = base64_payload(payload_bytes) or ""
|
||||
|
||||
battery_level = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"batteryLevel",
|
||||
"battery_level",
|
||||
"deviceMetrics.batteryLevel",
|
||||
"environmentMetrics.battery_level",
|
||||
"deviceMetrics.battery_level",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
voltage = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"voltage",
|
||||
"environmentMetrics.voltage",
|
||||
"deviceMetrics.voltage",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
channel_utilization = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"channelUtilization",
|
||||
"channel_utilization",
|
||||
"deviceMetrics.channelUtilization",
|
||||
"deviceMetrics.channel_utilization",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
air_util_tx = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"airUtilTx",
|
||||
"air_util_tx",
|
||||
"deviceMetrics.airUtilTx",
|
||||
"deviceMetrics.air_util_tx",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
uptime_seconds = _coerce_int(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"uptimeSeconds",
|
||||
"uptime_seconds",
|
||||
"deviceMetrics.uptimeSeconds",
|
||||
"deviceMetrics.uptime_seconds",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
temperature = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"temperature",
|
||||
"environmentMetrics.temperature",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
relative_humidity = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"relativeHumidity",
|
||||
"relative_humidity",
|
||||
"environmentMetrics.relativeHumidity",
|
||||
"environmentMetrics.relative_humidity",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
barometric_pressure = _coerce_float(
|
||||
_first(
|
||||
telemetry_section,
|
||||
"barometricPressure",
|
||||
"barometric_pressure",
|
||||
"environmentMetrics.barometricPressure",
|
||||
"environmentMetrics.barometric_pressure",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
telemetry_payload = {
|
||||
"id": pkt_id,
|
||||
"node_id": node_id,
|
||||
"node_num": node_num,
|
||||
"from_id": node_id or raw_from,
|
||||
"to_id": to_id,
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": rx_iso,
|
||||
"telemetry_time": telemetry_time,
|
||||
"channel": channel,
|
||||
"portnum": portnum,
|
||||
"bitfield": bitfield,
|
||||
"snr": snr,
|
||||
"rssi": rssi,
|
||||
"hop_limit": hop_limit,
|
||||
"payload_b64": payload_b64,
|
||||
}
|
||||
|
||||
if battery_level is not None:
|
||||
telemetry_payload["battery_level"] = battery_level
|
||||
if voltage is not None:
|
||||
telemetry_payload["voltage"] = voltage
|
||||
if channel_utilization is not None:
|
||||
telemetry_payload["channel_utilization"] = channel_utilization
|
||||
if air_util_tx is not None:
|
||||
telemetry_payload["air_util_tx"] = air_util_tx
|
||||
if uptime_seconds is not None:
|
||||
telemetry_payload["uptime_seconds"] = uptime_seconds
|
||||
if temperature is not None:
|
||||
telemetry_payload["temperature"] = temperature
|
||||
if relative_humidity is not None:
|
||||
telemetry_payload["relative_humidity"] = relative_humidity
|
||||
if barometric_pressure is not None:
|
||||
telemetry_payload["barometric_pressure"] = barometric_pressure
|
||||
|
||||
_queue_post_json(
|
||||
"/api/telemetry",
|
||||
_apply_radio_metadata(telemetry_payload),
|
||||
priority=queue._TELEMETRY_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Queued telemetry payload",
|
||||
context="handlers.store_telemetry",
|
||||
node_id=node_id,
|
||||
battery_level=battery_level,
|
||||
voltage=voltage,
|
||||
)
|
||||
|
||||
|
||||
def store_nodeinfo_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist node information updates.
|
||||
|
||||
Parameters:
|
||||
packet: Raw packet metadata describing the update.
|
||||
decoded: Decoded payload that may include ``user`` and ``position``
|
||||
sections.
|
||||
|
||||
Returns:
|
||||
``None``. The node payload is merged into the API queue.
|
||||
"""
|
||||
|
||||
payload_bytes = _extract_payload_bytes(decoded)
|
||||
node_info = _decode_nodeinfo_payload(payload_bytes)
|
||||
decoded_user = decoded.get("user")
|
||||
user_dict = _nodeinfo_user_dict(node_info, decoded_user)
|
||||
|
||||
node_info_fields = set()
|
||||
if node_info:
|
||||
node_info_fields = {field_desc.name for field_desc, _ in node_info.ListFields()}
|
||||
|
||||
node_id = None
|
||||
if isinstance(user_dict, Mapping):
|
||||
node_id = _canonical_node_id(user_dict.get("id"))
|
||||
|
||||
if node_id is None:
|
||||
node_id = _canonical_node_id(
|
||||
_first(packet, "fromId", "from_id", "from", default=None)
|
||||
)
|
||||
|
||||
if node_id is None:
|
||||
return
|
||||
|
||||
node_payload: dict = {}
|
||||
if user_dict:
|
||||
node_payload["user"] = user_dict
|
||||
|
||||
node_num = None
|
||||
if node_info and "num" in node_info_fields:
|
||||
try:
|
||||
node_num = int(node_info.num)
|
||||
except (TypeError, ValueError):
|
||||
node_num = None
|
||||
if node_num is None:
|
||||
decoded_num = decoded.get("num")
|
||||
if decoded_num is not None:
|
||||
try:
|
||||
node_num = int(decoded_num)
|
||||
except (TypeError, ValueError):
|
||||
try:
|
||||
node_num = int(str(decoded_num).strip(), 0)
|
||||
except Exception:
|
||||
node_num = None
|
||||
if node_num is None:
|
||||
node_num = _node_num_from_id(node_id)
|
||||
if node_num is not None:
|
||||
node_payload["num"] = node_num
|
||||
|
||||
rx_time = int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
last_heard = None
|
||||
if node_info and "last_heard" in node_info_fields:
|
||||
try:
|
||||
last_heard = int(node_info.last_heard)
|
||||
except (TypeError, ValueError):
|
||||
last_heard = None
|
||||
if last_heard is None:
|
||||
decoded_last_heard = decoded.get("lastHeard")
|
||||
if decoded_last_heard is not None:
|
||||
try:
|
||||
last_heard = int(decoded_last_heard)
|
||||
except (TypeError, ValueError):
|
||||
last_heard = None
|
||||
if last_heard is None or last_heard < rx_time:
|
||||
last_heard = rx_time
|
||||
node_payload["lastHeard"] = last_heard
|
||||
|
||||
snr = None
|
||||
if node_info and "snr" in node_info_fields:
|
||||
try:
|
||||
snr = float(node_info.snr)
|
||||
except (TypeError, ValueError):
|
||||
snr = None
|
||||
if snr is None:
|
||||
snr = _first(packet, "snr", "rx_snr", "rxSnr", default=None)
|
||||
if snr is not None:
|
||||
try:
|
||||
snr = float(snr)
|
||||
except (TypeError, ValueError):
|
||||
snr = None
|
||||
if snr is not None:
|
||||
node_payload["snr"] = snr
|
||||
|
||||
hops = None
|
||||
if node_info and "hops_away" in node_info_fields:
|
||||
try:
|
||||
hops = int(node_info.hops_away)
|
||||
except (TypeError, ValueError):
|
||||
hops = None
|
||||
if hops is None:
|
||||
hops = decoded.get("hopsAway")
|
||||
if hops is not None:
|
||||
try:
|
||||
hops = int(hops)
|
||||
except (TypeError, ValueError):
|
||||
hops = None
|
||||
if hops is not None:
|
||||
node_payload["hopsAway"] = hops
|
||||
|
||||
if node_info and "channel" in node_info_fields:
|
||||
try:
|
||||
node_payload["channel"] = int(node_info.channel)
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
|
||||
if node_info and "via_mqtt" in node_info_fields:
|
||||
node_payload["viaMqtt"] = bool(node_info.via_mqtt)
|
||||
|
||||
if node_info and "is_favorite" in node_info_fields:
|
||||
node_payload["isFavorite"] = bool(node_info.is_favorite)
|
||||
elif "isFavorite" in decoded:
|
||||
node_payload["isFavorite"] = bool(decoded.get("isFavorite"))
|
||||
|
||||
if node_info and "is_ignored" in node_info_fields:
|
||||
node_payload["isIgnored"] = bool(node_info.is_ignored)
|
||||
if node_info and "is_key_manually_verified" in node_info_fields:
|
||||
node_payload["isKeyManuallyVerified"] = bool(node_info.is_key_manually_verified)
|
||||
|
||||
metrics = _nodeinfo_metrics_dict(node_info)
|
||||
decoded_metrics = decoded.get("deviceMetrics")
|
||||
if isinstance(decoded_metrics, Mapping):
|
||||
metrics = _merge_mappings(metrics, _node_to_dict(decoded_metrics))
|
||||
if metrics:
|
||||
node_payload["deviceMetrics"] = metrics
|
||||
|
||||
position = _nodeinfo_position_dict(node_info)
|
||||
decoded_position = decoded.get("position")
|
||||
if isinstance(decoded_position, Mapping):
|
||||
position = _merge_mappings(position, _node_to_dict(decoded_position))
|
||||
if position:
|
||||
node_payload["position"] = position
|
||||
|
||||
hop_limit = _first(packet, "hopLimit", "hop_limit", default=None)
|
||||
if hop_limit is not None and "hopLimit" not in node_payload:
|
||||
try:
|
||||
node_payload["hopLimit"] = int(hop_limit)
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
|
||||
_queue_post_json(
|
||||
"/api/nodes",
|
||||
_apply_radio_metadata_to_nodes({node_id: node_payload}),
|
||||
priority=queue._NODE_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
short = None
|
||||
long_name = None
|
||||
if isinstance(user_dict, Mapping):
|
||||
short = user_dict.get("shortName")
|
||||
long_name = user_dict.get("longName")
|
||||
config._debug_log(
|
||||
"Queued nodeinfo payload",
|
||||
context="handlers.store_nodeinfo",
|
||||
node_id=node_id,
|
||||
short_name=short,
|
||||
long_name=long_name,
|
||||
)
|
||||
|
||||
|
||||
def store_neighborinfo_packet(packet: Mapping, decoded: Mapping) -> None:
|
||||
"""Persist neighbour information gathered from a packet.
|
||||
|
||||
Parameters:
|
||||
packet: Raw Meshtastic packet metadata.
|
||||
decoded: Decoded view containing the neighbour information section.
|
||||
|
||||
Returns:
|
||||
``None``. The neighbour snapshot is queued for submission.
|
||||
"""
|
||||
|
||||
neighbor_section = (
|
||||
decoded.get("neighborinfo") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if not isinstance(neighbor_section, Mapping):
|
||||
return
|
||||
|
||||
node_ref = _first(
|
||||
neighbor_section,
|
||||
"nodeId",
|
||||
"node_id",
|
||||
default=_first(packet, "fromId", "from_id", "from", default=None),
|
||||
)
|
||||
node_id = _canonical_node_id(node_ref)
|
||||
if node_id is None:
|
||||
return
|
||||
|
||||
node_num = _coerce_int(_first(neighbor_section, "nodeId", "node_id", default=None))
|
||||
if node_num is None:
|
||||
node_num = _node_num_from_id(node_id)
|
||||
|
||||
node_broadcast_interval = _coerce_int(
|
||||
_first(
|
||||
neighbor_section,
|
||||
"nodeBroadcastIntervalSecs",
|
||||
"node_broadcast_interval_secs",
|
||||
default=None,
|
||||
)
|
||||
)
|
||||
|
||||
last_sent_by_ref = _first(
|
||||
neighbor_section,
|
||||
"lastSentById",
|
||||
"last_sent_by_id",
|
||||
default=None,
|
||||
)
|
||||
last_sent_by_id = _canonical_node_id(last_sent_by_ref)
|
||||
|
||||
rx_time = _coerce_int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
if rx_time is None:
|
||||
rx_time = int(time.time())
|
||||
|
||||
neighbors_payload = neighbor_section.get("neighbors")
|
||||
neighbors_iterable = (
|
||||
neighbors_payload if isinstance(neighbors_payload, list) else []
|
||||
)
|
||||
|
||||
neighbor_entries: list[dict] = []
|
||||
for entry in neighbors_iterable:
|
||||
if not isinstance(entry, Mapping):
|
||||
continue
|
||||
neighbor_ref = _first(entry, "nodeId", "node_id", default=None)
|
||||
neighbor_id = _canonical_node_id(neighbor_ref)
|
||||
if neighbor_id is None:
|
||||
continue
|
||||
neighbor_num = _coerce_int(_first(entry, "nodeId", "node_id", default=None))
|
||||
if neighbor_num is None:
|
||||
neighbor_num = _node_num_from_id(neighbor_id)
|
||||
snr = _coerce_float(_first(entry, "snr", default=None))
|
||||
entry_rx_time = _coerce_int(_first(entry, "rxTime", "rx_time", default=None))
|
||||
if entry_rx_time is None:
|
||||
entry_rx_time = rx_time
|
||||
neighbor_entries.append(
|
||||
{
|
||||
"neighbor_id": neighbor_id,
|
||||
"neighbor_num": neighbor_num,
|
||||
"snr": snr,
|
||||
"rx_time": entry_rx_time,
|
||||
"rx_iso": _iso(entry_rx_time),
|
||||
}
|
||||
)
|
||||
|
||||
payload = {
|
||||
"node_id": node_id,
|
||||
"node_num": node_num,
|
||||
"neighbors": neighbor_entries,
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": _iso(rx_time),
|
||||
}
|
||||
|
||||
if node_broadcast_interval is not None:
|
||||
payload["node_broadcast_interval_secs"] = node_broadcast_interval
|
||||
if last_sent_by_id is not None:
|
||||
payload["last_sent_by_id"] = last_sent_by_id
|
||||
|
||||
_queue_post_json(
|
||||
"/api/neighbors",
|
||||
_apply_radio_metadata(payload),
|
||||
priority=queue._NEIGHBOR_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
config._debug_log(
|
||||
"Queued neighborinfo payload",
|
||||
context="handlers.store_neighborinfo",
|
||||
node_id=node_id,
|
||||
neighbors=len(neighbor_entries),
|
||||
)
|
||||
|
||||
|
||||
def store_packet_dict(packet: Mapping) -> None:
|
||||
"""Route a decoded packet to the appropriate storage handler.
|
||||
|
||||
Parameters:
|
||||
packet: Packet dictionary emitted by the mesh interface.
|
||||
|
||||
Returns:
|
||||
``None``. Side-effects depend on the specific handler invoked.
|
||||
"""
|
||||
|
||||
decoded = packet.get("decoded") or {}
|
||||
|
||||
portnum_raw = _first(decoded, "portnum", default=None)
|
||||
portnum = str(portnum_raw).upper() if portnum_raw is not None else None
|
||||
portnum_int = _coerce_int(portnum_raw)
|
||||
|
||||
telemetry_section = (
|
||||
decoded.get("telemetry") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if (
|
||||
portnum == "TELEMETRY_APP"
|
||||
or portnum_int == 65
|
||||
or isinstance(telemetry_section, Mapping)
|
||||
):
|
||||
store_telemetry_packet(packet, decoded)
|
||||
return
|
||||
|
||||
if portnum in {"5", "NODEINFO_APP"}:
|
||||
store_nodeinfo_packet(packet, decoded)
|
||||
return
|
||||
|
||||
if portnum in {"4", "POSITION_APP"}:
|
||||
store_position_packet(packet, decoded)
|
||||
return
|
||||
|
||||
neighborinfo_section = (
|
||||
decoded.get("neighborinfo") if isinstance(decoded, Mapping) else None
|
||||
)
|
||||
if portnum == "NEIGHBORINFO_APP" or isinstance(neighborinfo_section, Mapping):
|
||||
store_neighborinfo_packet(packet, decoded)
|
||||
return
|
||||
|
||||
text = _first(decoded, "payload.text", "text", default=None)
|
||||
encrypted = _first(decoded, "payload.encrypted", "encrypted", default=None)
|
||||
if encrypted is None:
|
||||
encrypted = _first(packet, "encrypted", default=None)
|
||||
if not text and not encrypted:
|
||||
return
|
||||
|
||||
if portnum and portnum not in {"1", "TEXT_MESSAGE_APP"}:
|
||||
return
|
||||
|
||||
channel = _first(decoded, "channel", default=None)
|
||||
if channel is None:
|
||||
channel = _first(packet, "channel", default=0)
|
||||
try:
|
||||
channel = int(channel)
|
||||
except Exception:
|
||||
channel = 0
|
||||
|
||||
pkt_id = _first(packet, "id", "packet_id", "packetId", default=None)
|
||||
if pkt_id is None:
|
||||
return
|
||||
rx_time = int(_first(packet, "rxTime", "rx_time", default=time.time()))
|
||||
from_id = _first(packet, "fromId", "from_id", "from", default=None)
|
||||
to_id = _first(packet, "toId", "to_id", "to", default=None)
|
||||
|
||||
if (from_id is None or str(from_id) == "") and config.DEBUG:
|
||||
try:
|
||||
raw = json.dumps(packet, default=str)
|
||||
except Exception:
|
||||
raw = str(packet)
|
||||
config._debug_log(
|
||||
"Packet missing from_id",
|
||||
context="handlers.store_packet_dict",
|
||||
packet=raw,
|
||||
)
|
||||
|
||||
snr = _first(packet, "snr", "rx_snr", "rxSnr", default=None)
|
||||
rssi = _first(packet, "rssi", "rx_rssi", "rxRssi", default=None)
|
||||
hop = _first(packet, "hopLimit", "hop_limit", default=None)
|
||||
|
||||
encrypted_flag = _is_encrypted_flag(encrypted)
|
||||
|
||||
message_payload = {
|
||||
"id": int(pkt_id),
|
||||
"rx_time": rx_time,
|
||||
"rx_iso": _iso(rx_time),
|
||||
"from_id": from_id,
|
||||
"to_id": to_id,
|
||||
"channel": channel,
|
||||
"portnum": str(portnum) if portnum is not None else None,
|
||||
"text": text,
|
||||
"encrypted": encrypted,
|
||||
"snr": float(snr) if snr is not None else None,
|
||||
"rssi": int(rssi) if rssi is not None else None,
|
||||
"hop_limit": int(hop) if hop is not None else None,
|
||||
}
|
||||
|
||||
channel_name_value = None
|
||||
if not encrypted_flag:
|
||||
channel_name_value = channels.channel_name(channel)
|
||||
if channel_name_value:
|
||||
message_payload["channel_name"] = channel_name_value
|
||||
_queue_post_json(
|
||||
"/api/messages",
|
||||
_apply_radio_metadata(message_payload),
|
||||
priority=queue._MESSAGE_POST_PRIORITY,
|
||||
)
|
||||
|
||||
if config.DEBUG:
|
||||
from_label = _canonical_node_id(from_id) or from_id
|
||||
to_label = _canonical_node_id(to_id) or to_id
|
||||
payload_desc = "Encrypted" if text is None and encrypted else text
|
||||
log_kwargs = {
|
||||
"context": "handlers.store_packet_dict",
|
||||
"from_id": from_label,
|
||||
"to_id": to_label,
|
||||
"channel": channel,
|
||||
"channel_display": channel_name_value or channel,
|
||||
"payload": payload_desc,
|
||||
}
|
||||
if channel_name_value:
|
||||
log_kwargs["channel_name"] = channel_name_value
|
||||
config._debug_log("Queued message payload", **log_kwargs)
_last_packet_monotonic: float | None = None
|
||||
|
||||
|
||||
def last_packet_monotonic() -> float | None:
|
||||
"""Return the monotonic timestamp of the most recent packet."""
|
||||
|
||||
return _last_packet_monotonic
|
||||
|
||||
|
||||
def _mark_packet_seen() -> None:
|
||||
"""Record that a packet has been processed."""
|
||||
|
||||
global _last_packet_monotonic
|
||||
_last_packet_monotonic = time.monotonic()
|
||||
|
||||
|
||||
def on_receive(packet, interface) -> None:
|
||||
"""Callback registered with Meshtastic to capture incoming packets.
|
||||
|
||||
Parameters:
|
||||
packet: Packet payload supplied by the Meshtastic pubsub topic.
|
||||
interface: Interface instance that produced the packet. Only used for
|
||||
compatibility with Meshtastic's callback signature.
|
||||
|
||||
Returns:
|
||||
``None``. Packets are serialised and enqueued asynchronously.
|
||||
"""
|
||||
|
||||
if isinstance(packet, dict):
|
||||
if packet.get("_potatomesh_seen"):
|
||||
return
|
||||
packet["_potatomesh_seen"] = True
|
||||
|
||||
_mark_packet_seen()
|
||||
|
||||
packet_dict = None
|
||||
try:
|
||||
packet_dict = _pkt_to_dict(packet)
|
||||
store_packet_dict(packet_dict)
|
||||
except Exception as exc:
|
||||
info = (
|
||||
list(packet_dict.keys()) if isinstance(packet_dict, dict) else type(packet)
|
||||
)
|
||||
config._debug_log(
|
||||
"Failed to store packet",
|
||||
context="handlers.on_receive",
|
||||
severity="warn",
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
packet_info=info,
|
||||
)
|
||||
|
||||
|
||||
__all__ = [
|
||||
"_queue_post_json",
|
||||
"last_packet_monotonic",
|
||||
"on_receive",
|
||||
"store_neighborinfo_packet",
|
||||
"store_nodeinfo_packet",
|
||||
"store_packet_dict",
|
||||
"store_position_packet",
|
||||
"store_telemetry_packet",
|
||||
"upsert_node",
|
||||
]
|
||||
|
||||
_queue_post_json = queue._queue_post_json
|
||||
@@ -0,0 +1,615 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Mesh interface discovery helpers for interacting with Meshtastic hardware."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import glob
|
||||
import ipaddress
|
||||
import re
|
||||
import urllib.parse
|
||||
from collections.abc import Mapping
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from meshtastic.serial_interface import SerialInterface
|
||||
from meshtastic.tcp_interface import TCPInterface
|
||||
|
||||
from . import channels, config, serialization
|
||||
|
||||
if TYPE_CHECKING: # pragma: no cover - import only used for type checking
|
||||
from meshtastic.ble_interface import BLEInterface as _BLEInterface
|
||||
|
||||
BLEInterface = None
|
||||
|
||||
|
||||
def _patch_meshtastic_nodeinfo_handler() -> None:
|
||||
"""Ensure Meshtastic nodeinfo packets always include an ``id`` field."""
|
||||
|
||||
try:
|
||||
import meshtastic # type: ignore
|
||||
except Exception: # pragma: no cover - dependency optional in tests
|
||||
return
|
||||
|
||||
original = getattr(meshtastic, "_onNodeInfoReceive", None)
|
||||
if not callable(original):
|
||||
return
|
||||
if getattr(original, "_potato_mesh_safe_wrapper", False):
|
||||
return
|
||||
|
||||
def _safe_on_node_info_receive(iface, packet): # type: ignore[override]
|
||||
candidate_mapping: Mapping | None = None
|
||||
if isinstance(packet, Mapping):
|
||||
candidate_mapping = packet
|
||||
elif hasattr(packet, "__dict__") and isinstance(packet.__dict__, Mapping):
|
||||
candidate_mapping = packet.__dict__
|
||||
|
||||
node_id = None
|
||||
if candidate_mapping is not None:
|
||||
node_id = serialization._canonical_node_id(candidate_mapping.get("id"))
|
||||
if node_id is None:
|
||||
user_section = candidate_mapping.get("user")
|
||||
if isinstance(user_section, Mapping):
|
||||
node_id = serialization._canonical_node_id(user_section.get("id"))
|
||||
if node_id is None:
|
||||
for key in ("fromId", "from_id", "from", "num", "nodeId", "node_id"):
|
||||
node_id = serialization._canonical_node_id(
|
||||
candidate_mapping.get(key)
|
||||
)
|
||||
if node_id:
|
||||
break
|
||||
|
||||
if node_id:
|
||||
if not isinstance(candidate_mapping, dict):
|
||||
try:
|
||||
candidate_mapping = dict(candidate_mapping)
|
||||
except Exception:
|
||||
candidate_mapping = {
|
||||
k: candidate_mapping[k] for k in candidate_mapping
|
||||
}
|
||||
if candidate_mapping.get("id") != node_id:
|
||||
candidate_mapping["id"] = node_id
|
||||
packet = candidate_mapping
|
||||
|
||||
try:
|
||||
return original(iface, packet)
|
||||
except KeyError as exc: # pragma: no cover - defensive only
|
||||
if exc.args and exc.args[0] == "id":
|
||||
return None
|
||||
raise
|
||||
|
||||
_safe_on_node_info_receive._potato_mesh_safe_wrapper = True # type: ignore[attr-defined]
|
||||
meshtastic._onNodeInfoReceive = _safe_on_node_info_receive
|
||||
|
||||
|
||||
_patch_meshtastic_nodeinfo_handler()
|
||||
|
||||
|
||||
def _patch_meshtastic_ble_receive_loop() -> None:
|
||||
"""Prevent ``UnboundLocalError`` crashes in Meshtastic's BLE reader."""
|
||||
|
||||
try:
|
||||
from meshtastic import ble_interface as _ble_interface_module # type: ignore
|
||||
except Exception: # pragma: no cover - dependency optional in tests
|
||||
return
|
||||
|
||||
ble_class = getattr(_ble_interface_module, "BLEInterface", None)
|
||||
if ble_class is None:
|
||||
return
|
||||
|
||||
original = getattr(ble_class, "_receiveFromRadioImpl", None)
|
||||
if not callable(original):
|
||||
return
|
||||
if getattr(original, "_potato_mesh_safe_wrapper", False):
|
||||
return
|
||||
|
||||
FROMRADIO_UUID = getattr(_ble_interface_module, "FROMRADIO_UUID", None)
|
||||
BleakDBusError = getattr(_ble_interface_module, "BleakDBusError", ())
|
||||
BleakError = getattr(_ble_interface_module, "BleakError", ())
|
||||
logger = getattr(_ble_interface_module, "logger", None)
|
||||
time = getattr(_ble_interface_module, "time", None)
|
||||
|
||||
if not FROMRADIO_UUID or logger is None or time is None:
|
||||
return
|
||||
|
||||
def _safe_receive_from_radio(self): # type: ignore[override]
|
||||
while self._want_receive:
|
||||
if self.should_read:
|
||||
self.should_read = False
|
||||
retries: int = 0
|
||||
while self._want_receive:
|
||||
if self.client is None:
|
||||
logger.debug("BLE client is None, shutting down")
|
||||
self._want_receive = False
|
||||
continue
|
||||
|
||||
payload: bytes = b""
|
||||
try:
|
||||
payload = bytes(self.client.read_gatt_char(FROMRADIO_UUID))
|
||||
except BleakDBusError as exc:
|
||||
logger.debug("Device disconnected, shutting down %s", exc)
|
||||
self._want_receive = False
|
||||
payload = b""
|
||||
except BleakError as exc:
|
||||
if "Not connected" in str(exc):
|
||||
logger.debug("Device disconnected, shutting down %s", exc)
|
||||
self._want_receive = False
|
||||
payload = b""
|
||||
else:
|
||||
raise ble_class.BLEError("Error reading BLE") from exc
|
||||
|
||||
if not payload:
|
||||
if not self._want_receive:
|
||||
break
|
||||
if retries < 5:
|
||||
time.sleep(0.1)
|
||||
retries += 1
|
||||
continue
|
||||
break
|
||||
|
||||
logger.debug("FROMRADIO read: %s", payload.hex())
|
||||
self._handleFromRadio(payload)
|
||||
else:
|
||||
time.sleep(0.01)
|
||||
|
||||
_safe_receive_from_radio._potato_mesh_safe_wrapper = True # type: ignore[attr-defined]
|
||||
ble_class._receiveFromRadioImpl = _safe_receive_from_radio
|
||||
|
||||
|
||||
_patch_meshtastic_ble_receive_loop()
|
||||
|
||||
|
||||
def _has_field(message: Any, field_name: str) -> bool:
|
||||
"""Return ``True`` when ``message`` advertises ``field_name`` via ``HasField``."""
|
||||
|
||||
if message is None:
|
||||
return False
|
||||
has_field = getattr(message, "HasField", None)
|
||||
if callable(has_field):
|
||||
try:
|
||||
return bool(has_field(field_name))
|
||||
except Exception: # pragma: no cover - defensive guard
|
||||
return False
|
||||
return hasattr(message, field_name)
|
||||
|
||||
|
||||
def _enum_name_from_field(message: Any, field_name: str, value: Any) -> str | None:
|
||||
"""Return the enum name for ``value`` using ``message`` descriptors."""
|
||||
|
||||
descriptor = getattr(message, "DESCRIPTOR", None)
|
||||
if descriptor is None:
|
||||
return None
|
||||
fields_by_name = getattr(descriptor, "fields_by_name", {})
|
||||
field_desc = fields_by_name.get(field_name)
|
||||
if field_desc is None:
|
||||
return None
|
||||
enum_type = getattr(field_desc, "enum_type", None)
|
||||
if enum_type is None:
|
||||
return None
|
||||
enum_values = getattr(enum_type, "values_by_number", {})
|
||||
enum_value = enum_values.get(value)
|
||||
if enum_value is None:
|
||||
return None
|
||||
return getattr(enum_value, "name", None)
|
||||
|
||||
|
||||
def _resolve_lora_message(local_config: Any) -> Any | None:
|
||||
"""Return the LoRa configuration sub-message from ``local_config``."""
|
||||
|
||||
if local_config is None:
|
||||
return None
|
||||
if _has_field(local_config, "lora"):
|
||||
candidate = getattr(local_config, "lora", None)
|
||||
if candidate is not None:
|
||||
return candidate
|
||||
radio_section = getattr(local_config, "radio", None)
|
||||
if radio_section is not None:
|
||||
if _has_field(radio_section, "lora"):
|
||||
return getattr(radio_section, "lora", None)
|
||||
if hasattr(radio_section, "lora"):
|
||||
return getattr(radio_section, "lora")
|
||||
if hasattr(local_config, "lora"):
|
||||
return getattr(local_config, "lora")
|
||||
return None
|
||||
|
||||
|
||||
def _region_frequency(lora_message: Any) -> int | None:
|
||||
"""Derive the LoRa region frequency in MHz from ``lora_message``."""
|
||||
|
||||
if lora_message is None:
|
||||
return None
|
||||
region_value = getattr(lora_message, "region", None)
|
||||
if region_value is None:
|
||||
return None
|
||||
enum_name = _enum_name_from_field(lora_message, "region", region_value)
|
||||
if enum_name:
|
||||
digits = re.findall(r"\d+", enum_name)
|
||||
for token in digits:
|
||||
try:
|
||||
freq = int(token)
|
||||
except ValueError: # pragma: no cover - regex guarantees digits
|
||||
continue
|
||||
if freq >= 100:
|
||||
return freq
|
||||
for token in reversed(digits):
|
||||
try:
|
||||
return int(token)
|
||||
except ValueError: # pragma: no cover - defensive only
|
||||
continue
|
||||
if isinstance(region_value, int) and region_value >= 100:
|
||||
return region_value
|
||||
return None
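# Illustrative sketch with hypothetical stand-in objects instead of real
# protobuf messages: without a DESCRIPTOR the enum-name lookup above is
# skipped, so a bare integer region value >= 100 is returned directly as the
# frequency in MHz, while small enum codes cannot be mapped.
def _region_frequency_example() -> None:
    class _BareFrequency:  # hypothetical, for illustration only
        region = 868

    class _BareEnumCode:  # hypothetical, for illustration only
        region = 3

    assert _region_frequency(_BareFrequency()) == 868
    assert _region_frequency(_BareEnumCode()) is None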
def _camelcase_enum_name(name: str | None) -> str | None:
    """Convert ``name`` from ``SCREAMING_SNAKE`` to ``CamelCase``."""

    if not name:
        return None
    parts = re.split(r"[^0-9A-Za-z]+", name.strip())
    camel_parts = [part.capitalize() for part in parts if part]
    if not camel_parts:
        return None
    return "".join(camel_parts)
def _modem_preset(lora_message: Any) -> str | None:
|
||||
"""Return the CamelCase modem preset configured on ``lora_message``."""
|
||||
|
||||
if lora_message is None:
|
||||
return None
|
||||
descriptor = getattr(lora_message, "DESCRIPTOR", None)
|
||||
fields_by_name = getattr(descriptor, "fields_by_name", {}) if descriptor else {}
|
||||
if "modem_preset" in fields_by_name:
|
||||
preset_field = "modem_preset"
|
||||
elif "preset" in fields_by_name:
|
||||
preset_field = "preset"
|
||||
elif hasattr(lora_message, "modem_preset"):
|
||||
preset_field = "modem_preset"
|
||||
elif hasattr(lora_message, "preset"):
|
||||
preset_field = "preset"
|
||||
else:
|
||||
return None
|
||||
|
||||
preset_value = getattr(lora_message, preset_field, None)
|
||||
if preset_value is None:
|
||||
return None
|
||||
enum_name = _enum_name_from_field(lora_message, preset_field, preset_value)
|
||||
if isinstance(enum_name, str) and enum_name:
|
||||
return _camelcase_enum_name(enum_name)
|
||||
if isinstance(preset_value, str) and preset_value:
|
||||
return _camelcase_enum_name(preset_value)
|
||||
return None
|
||||
|
||||
|
||||
def _ensure_radio_metadata(iface: Any) -> None:
|
||||
"""Populate cached LoRa metadata by inspecting ``iface`` when available."""
|
||||
|
||||
if iface is None:
|
||||
return
|
||||
|
||||
try:
|
||||
wait_for_config = getattr(iface, "waitForConfig", None)
|
||||
if callable(wait_for_config):
|
||||
wait_for_config()
|
||||
except Exception: # pragma: no cover - hardware dependent guard
|
||||
pass
|
||||
|
||||
local_node = getattr(iface, "localNode", None)
|
||||
local_config = getattr(local_node, "localConfig", None) if local_node else None
|
||||
lora_message = _resolve_lora_message(local_config)
|
||||
if lora_message is None:
|
||||
return
|
||||
|
||||
frequency = _region_frequency(lora_message)
|
||||
preset = _modem_preset(lora_message)
|
||||
|
||||
updated = False
|
||||
if frequency is not None and getattr(config, "LORA_FREQ", None) is None:
|
||||
config.LORA_FREQ = frequency
|
||||
updated = True
|
||||
if preset is not None and getattr(config, "MODEM_PRESET", None) is None:
|
||||
config.MODEM_PRESET = preset
|
||||
updated = True
|
||||
|
||||
if updated:
|
||||
config._debug_log(
|
||||
"Captured LoRa radio metadata",
|
||||
context="interfaces.ensure_radio_metadata",
|
||||
severity="info",
|
||||
always=True,
|
||||
lora_freq=frequency,
|
||||
modem_preset=preset,
|
||||
)
|
||||
|
||||
|
||||
def _ensure_channel_metadata(iface: Any) -> None:
|
||||
"""Capture channel metadata by inspecting ``iface`` once per runtime."""
|
||||
|
||||
if iface is None:
|
||||
return
|
||||
|
||||
try:
|
||||
channels.capture_from_interface(iface)
|
||||
except Exception as exc: # pragma: no cover - defensive instrumentation
|
||||
config._debug_log(
|
||||
"Failed to capture channel metadata",
|
||||
context="interfaces.ensure_channel_metadata",
|
||||
severity="warn",
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
|
||||
|
||||
_DEFAULT_TCP_PORT = 4403
|
||||
_DEFAULT_TCP_TARGET = "http://127.0.0.1"
|
||||
|
||||
_DEFAULT_SERIAL_PATTERNS = (
|
||||
"/dev/ttyACM*",
|
||||
"/dev/ttyUSB*",
|
||||
"/dev/tty.usbmodem*",
|
||||
"/dev/tty.usbserial*",
|
||||
"/dev/cu.usbmodem*",
|
||||
"/dev/cu.usbserial*",
|
||||
)
|
||||
|
||||
_BLE_ADDRESS_RE = re.compile(r"^(?:[0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}$")
|
||||
|
||||
|
||||
class _DummySerialInterface:
|
||||
"""In-memory replacement for ``meshtastic.serial_interface.SerialInterface``."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.nodes: dict = {}
|
||||
|
||||
def close(self) -> None: # pragma: no cover - nothing to close
|
||||
pass
|
||||
|
||||
|
||||
def _parse_ble_target(value: str) -> str | None:
|
||||
"""Return an uppercase BLE MAC address when ``value`` matches the format.
|
||||
|
||||
Parameters:
|
||||
value: User-provided target string.
|
||||
|
||||
Returns:
|
||||
The normalised MAC address or ``None`` when validation fails.
|
||||
"""
|
||||
|
||||
if not value:
|
||||
return None
|
||||
value = value.strip()
|
||||
if not value:
|
||||
return None
|
||||
if _BLE_ADDRESS_RE.fullmatch(value):
|
||||
return value.upper()
|
||||
return None
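# Illustrative sketch: only colon-separated MAC addresses are treated as BLE
# targets, so serial device paths and host names fall through to the other
# connection parsers.
def _parse_ble_target_examples() -> None:
    assert _parse_ble_target("aa:bb:cc:dd:ee:ff") == "AA:BB:CC:DD:EE:FF"
    assert _parse_ble_target("/dev/ttyACM0") is None
    assert _parse_ble_target("") is None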
def _parse_network_target(value: str) -> tuple[str, int] | None:
|
||||
"""Return ``(host, port)`` when ``value`` is a numeric IP address string.
|
||||
|
||||
Only literal IPv4 or IPv6 addresses are accepted, optionally paired with a
|
||||
port or scheme. Callers that start from hostnames should resolve them to an
|
||||
address before invoking this helper.
|
||||
|
||||
Parameters:
|
||||
value: Numeric IP literal or URL describing the TCP interface.
|
||||
|
||||
Returns:
|
||||
A ``(host, port)`` tuple or ``None`` when parsing fails.
|
||||
"""
|
||||
|
||||
if not value:
|
||||
return None
|
||||
|
||||
value = value.strip()
|
||||
if not value:
|
||||
return None
|
||||
|
||||
def _validated_result(host: str | None, port: int | None) -> tuple[str, int] | None:
|
||||
if not host:
|
||||
return None
|
||||
try:
|
||||
ipaddress.ip_address(host)
|
||||
except ValueError:
|
||||
return None
|
||||
return host, port or _DEFAULT_TCP_PORT
|
||||
|
||||
parsed_values = []
|
||||
if "://" in value:
|
||||
parsed_values.append(urllib.parse.urlparse(value, scheme="tcp"))
|
||||
parsed_values.append(urllib.parse.urlparse(f"//{value}", scheme="tcp"))
|
||||
|
||||
for parsed in parsed_values:
|
||||
try:
|
||||
port = parsed.port
|
||||
except ValueError:
|
||||
port = None
|
||||
result = _validated_result(parsed.hostname, port)
|
||||
if result:
|
||||
return result
|
||||
|
||||
if value.count(":") == 1 and not value.startswith("["):
|
||||
host, _, port_text = value.partition(":")
|
||||
try:
|
||||
port = int(port_text) if port_text else None
|
||||
except ValueError:
|
||||
port = None
|
||||
result = _validated_result(host, port)
|
||||
if result:
|
||||
return result
|
||||
|
||||
return _validated_result(value, None)
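# Illustrative sketch: numeric IP literals are accepted with an optional
# scheme or port and default to the Meshtastic TCP port defined above, while
# bare hostnames are rejected by design.
def _parse_network_target_examples() -> None:
    assert _parse_network_target("192.168.1.20") == ("192.168.1.20", 4403)
    assert _parse_network_target("tcp://10.0.0.5:4403") == ("10.0.0.5", 4403)
    assert _parse_network_target("meshtastic.local") is None  # hostname, not an IP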
def _load_ble_interface():
|
||||
"""Return :class:`meshtastic.ble_interface.BLEInterface` when available.
|
||||
|
||||
Returns:
|
||||
The resolved BLE interface class.
|
||||
|
||||
Raises:
|
||||
RuntimeError: If the BLE dependencies are not installed.
|
||||
"""
|
||||
|
||||
global BLEInterface
|
||||
if BLEInterface is not None:
|
||||
return BLEInterface
|
||||
|
||||
try:
|
||||
from meshtastic.ble_interface import BLEInterface as _resolved_interface
|
||||
except ImportError as exc: # pragma: no cover - exercised in non-BLE envs
|
||||
raise RuntimeError(
|
||||
"BLE interface requested but the Meshtastic BLE dependencies are not installed. "
|
||||
"Install the 'meshtastic[ble]' extra to enable BLE support."
|
||||
) from exc
|
||||
BLEInterface = _resolved_interface
|
||||
try:
|
||||
import sys
|
||||
|
||||
for module_name in ("data.mesh_ingestor", "data.mesh"):
|
||||
mesh_module = sys.modules.get(module_name)
|
||||
if mesh_module is not None:
|
||||
setattr(mesh_module, "BLEInterface", BLEInterface)
|
||||
except Exception: # pragma: no cover - defensive only
|
||||
pass
|
||||
return _resolved_interface
|
||||
|
||||
|
||||
def _create_serial_interface(port: str) -> tuple[object, str]:
|
||||
"""Return an appropriate mesh interface for ``port``.
|
||||
|
||||
Parameters:
|
||||
port: User-supplied port string which may represent serial, BLE or TCP.
|
||||
|
||||
Returns:
|
||||
``(interface, resolved_target)`` describing the created interface.
|
||||
"""
|
||||
|
||||
port_value = (port or "").strip()
|
||||
if port_value.lower() in {"", "mock", "none", "null", "disabled"}:
|
||||
config._debug_log(
|
||||
"Using dummy serial interface",
|
||||
context="interfaces.serial",
|
||||
port=port_value,
|
||||
)
|
||||
return _DummySerialInterface(), "mock"
|
||||
ble_target = _parse_ble_target(port_value)
|
||||
if ble_target:
|
||||
config._debug_log(
|
||||
"Using BLE interface",
|
||||
context="interfaces.ble",
|
||||
address=ble_target,
|
||||
)
|
||||
return _load_ble_interface()(address=ble_target), ble_target
|
||||
network_target = _parse_network_target(port_value)
|
||||
if network_target:
|
||||
host, tcp_port = network_target
|
||||
config._debug_log(
|
||||
"Using TCP interface",
|
||||
context="interfaces.tcp",
|
||||
host=host,
|
||||
port=tcp_port,
|
||||
)
|
||||
return (
|
||||
TCPInterface(hostname=host, portNumber=tcp_port),
|
||||
f"tcp://{host}:{tcp_port}",
|
||||
)
|
||||
config._debug_log(
|
||||
"Using serial interface",
|
||||
context="interfaces.serial",
|
||||
port=port_value,
|
||||
)
|
||||
return SerialInterface(devPath=port_value), port_value
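To summarise the dispatch order as a sketch (illustrative targets; BLE acceptance assumes _BLE_ADDRESS_RE matches standard MACs and the meshtastic[ble] extra is installed):
#   _create_serial_interface("mock")               -> (_DummySerialInterface(), "mock")
#   _create_serial_interface("AA:BB:CC:DD:EE:FF")  -> (BLEInterface(...), "AA:BB:CC:DD:EE:FF")
#   _create_serial_interface("192.168.1.50:4403")  -> (TCPInterface(...), "tcp://192.168.1.50:4403")
#   _create_serial_interface("/dev/ttyUSB0")       -> (SerialInterface(...), "/dev/ttyUSB0")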
|
||||
|
||||
|
||||
class NoAvailableMeshInterface(RuntimeError):
|
||||
"""Raised when no default mesh interface can be created."""
|
||||
|
||||
|
||||
def _default_serial_targets() -> list[str]:
|
||||
"""Return candidate serial device paths for auto-discovery."""
|
||||
|
||||
candidates: list[str] = []
|
||||
seen: set[str] = set()
|
||||
for pattern in _DEFAULT_SERIAL_PATTERNS:
|
||||
for path in sorted(glob.glob(pattern)):
|
||||
if path not in seen:
|
||||
candidates.append(path)
|
||||
seen.add(path)
|
||||
if "/dev/ttyACM0" not in seen:
|
||||
candidates.append("/dev/ttyACM0")
|
||||
return candidates
|
||||
|
||||
|
||||
def _create_default_interface() -> tuple[object, str]:
|
||||
"""Attempt to create the default mesh interface, raising on failure.
|
||||
|
||||
Returns:
|
||||
``(interface, resolved_target)`` for the discovered connection.
|
||||
|
||||
Raises:
|
||||
NoAvailableMeshInterface: When no usable connection can be created.
|
||||
"""
|
||||
|
||||
errors: list[tuple[str, Exception]] = []
|
||||
for candidate in _default_serial_targets():
|
||||
try:
|
||||
return _create_serial_interface(candidate)
|
||||
except Exception as exc: # pragma: no cover - hardware dependent
|
||||
errors.append((candidate, exc))
|
||||
config._debug_log(
|
||||
"Failed to open serial candidate",
|
||||
context="interfaces.auto_discovery",
|
||||
target=candidate,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
try:
|
||||
return _create_serial_interface(_DEFAULT_TCP_TARGET)
|
||||
except Exception as exc: # pragma: no cover - network dependent
|
||||
errors.append((_DEFAULT_TCP_TARGET, exc))
|
||||
config._debug_log(
|
||||
"Failed to open TCP fallback",
|
||||
context="interfaces.auto_discovery",
|
||||
target=_DEFAULT_TCP_TARGET,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
if errors:
|
||||
summary = "; ".join(f"{target}: {error}" for target, error in errors)
|
||||
raise NoAvailableMeshInterface(
|
||||
f"no mesh interface available ({summary})"
|
||||
) from errors[-1][1]
|
||||
raise NoAvailableMeshInterface("no mesh interface available")
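A minimal sketch of how a caller might drive auto-discovery; the data.interfaces import path is an assumption inferred from the context strings above:
from data import interfaces  # assumed module path, for illustration only

try:
    iface, target = interfaces._create_default_interface()
    print(f"connected via {target}")
except interfaces.NoAvailableMeshInterface as exc:
    print(f"no usable mesh interface: {exc}")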
|
||||
|
||||
|
||||
__all__ = [
|
||||
"BLEInterface",
|
||||
"NoAvailableMeshInterface",
|
||||
"_ensure_channel_metadata",
|
||||
"_ensure_radio_metadata",
|
||||
"_DummySerialInterface",
|
||||
"_DEFAULT_TCP_PORT",
|
||||
"_DEFAULT_TCP_TARGET",
|
||||
"_create_default_interface",
|
||||
"_create_serial_interface",
|
||||
"_default_serial_targets",
|
||||
"_load_ble_interface",
|
||||
"_parse_ble_target",
|
||||
"_parse_network_target",
|
||||
"SerialInterface",
|
||||
"TCPInterface",
|
||||
]
|
||||
@@ -0,0 +1,210 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Priority queue for POST operations."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import heapq
|
||||
import itertools
|
||||
import json
|
||||
import threading
|
||||
import urllib.request
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Callable, Iterator
|
||||
|
||||
from . import config
|
||||
|
||||
_MESSAGE_POST_PRIORITY = 10
|
||||
_NEIGHBOR_POST_PRIORITY = 20
|
||||
_POSITION_POST_PRIORITY = 30
|
||||
_TELEMETRY_POST_PRIORITY = 40
|
||||
_NODE_POST_PRIORITY = 50
|
||||
_DEFAULT_POST_PRIORITY = 90
|
||||
|
||||
|
||||
@dataclass
|
||||
class QueueState:
|
||||
"""Mutable state for the HTTP POST priority queue."""
|
||||
|
||||
lock: threading.Lock = field(default_factory=threading.Lock)
|
||||
queue: list[tuple[int, int, str, dict]] = field(default_factory=list)
|
||||
counter: Iterator[int] = field(default_factory=itertools.count)
|
||||
active: bool = False
|
||||
|
||||
|
||||
STATE = QueueState()
|
||||
|
||||
|
||||
def _post_json(
|
||||
path: str,
|
||||
payload: dict,
|
||||
*,
|
||||
instance: str | None = None,
|
||||
api_token: str | None = None,
|
||||
) -> None:
|
||||
"""Send a JSON payload to the configured web API.
|
||||
|
||||
Parameters:
|
||||
path: API path relative to the configured instance root.
|
||||
payload: JSON-serialisable body to transmit.
|
||||
instance: Optional override for :data:`config.INSTANCE`.
|
||||
api_token: Optional override for :data:`config.API_TOKEN`.
|
||||
"""
|
||||
|
||||
if instance is None:
|
||||
instance = config.INSTANCE
|
||||
if api_token is None:
|
||||
api_token = config.API_TOKEN
|
||||
|
||||
if not instance:
|
||||
return
|
||||
url = f"{instance}{path}"
|
||||
data = json.dumps(payload).encode("utf-8")
|
||||
|
||||
# Send full browser-like headers to avoid Cloudflare blocking requests to instances behind a Cloudflare proxy
|
||||
headers = {
|
||||
"Content-Type": "application/json",
|
||||
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
|
||||
"Accept": "application/json",
|
||||
"Accept-Language": "en-US,en;q=0.9",
|
||||
"Origin": f"{instance}",
|
||||
"Referer": f"{instance}",
|
||||
}
|
||||
if api_token:
|
||||
headers["Authorization"] = f"Bearer {api_token}"
|
||||
|
||||
req = urllib.request.Request(
|
||||
url,
|
||||
data=data,
|
||||
headers=headers,
|
||||
)
|
||||
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||
resp.read()
|
||||
except Exception as exc: # pragma: no cover - exercised in production
|
||||
config._debug_log(
|
||||
"POST request failed",
|
||||
context="queue.post_json",
|
||||
severity="warn",
|
||||
url=url,
|
||||
error_class=exc.__class__.__name__,
|
||||
error_message=str(exc),
|
||||
)
|
||||
|
||||
|
||||
def _enqueue_post_json(
|
||||
path: str,
|
||||
payload: dict,
|
||||
priority: int,
|
||||
*,
|
||||
state: QueueState = STATE,
|
||||
) -> None:
|
||||
"""Store a POST request in the priority queue.
|
||||
|
||||
Parameters:
|
||||
path: API path for the queued request.
|
||||
payload: JSON-serialisable body.
|
||||
priority: Lower values execute first.
|
||||
state: Shared queue state, injectable for testing.
|
||||
"""
|
||||
|
||||
with state.lock:
|
||||
counter = next(state.counter)
|
||||
heapq.heappush(state.queue, (priority, counter, path, payload))
|
||||
|
||||
|
||||
def _drain_post_queue(
|
||||
state: QueueState = STATE, send: Callable[[str, dict], None] | None = None
|
||||
) -> None:
|
||||
"""Process queued POST requests in priority order.
|
||||
|
||||
Parameters:
|
||||
state: Queue container holding pending items.
|
||||
send: Optional callable used to transmit requests.
|
||||
"""
|
||||
|
||||
if send is None:
|
||||
send = _post_json
|
||||
|
||||
try:
|
||||
while True:
|
||||
with state.lock:
|
||||
if not state.queue:
|
||||
state.active = False
|
||||
return
|
||||
_priority, _idx, path, payload = heapq.heappop(state.queue)
|
||||
send(path, payload)
|
||||
finally:
|
||||
with state.lock:
|
||||
state.active = False
|
||||
|
||||
|
||||
def _queue_post_json(
|
||||
path: str,
|
||||
payload: dict,
|
||||
*,
|
||||
priority: int = _DEFAULT_POST_PRIORITY,
|
||||
state: QueueState = STATE,
|
||||
send: Callable[[str, dict], None] | None = None,
|
||||
) -> None:
|
||||
"""Queue a POST request and start processing if idle.
|
||||
|
||||
Parameters:
|
||||
path: API path for the request.
|
||||
payload: JSON payload to send.
|
||||
priority: Scheduling priority where lower values run first.
|
||||
state: Queue container used to store pending requests.
|
||||
send: Optional transport override, primarily for tests.
|
||||
"""
|
||||
|
||||
if send is None:
|
||||
send = _post_json
|
||||
|
||||
_enqueue_post_json(path, payload, priority, state=state)
|
||||
with state.lock:
|
||||
if state.active:
|
||||
return
|
||||
state.active = True
|
||||
_drain_post_queue(state, send=send)
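A small sketch of the intended ordering, using an injected send callable so nothing leaves the process (the state object and API paths below are illustrative only):
# Sketch: lower priority values drain first; ties keep insertion order.
sent: list[str] = []
sketch_state = QueueState()
_enqueue_post_json("/api/nodes", {}, _NODE_POST_PRIORITY, state=sketch_state)
_enqueue_post_json("/api/messages", {}, _MESSAGE_POST_PRIORITY, state=sketch_state)
_drain_post_queue(sketch_state, send=lambda path, payload: sent.append(path))
# sent == ["/api/messages", "/api/nodes"]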
|
||||
|
||||
|
||||
def _clear_post_queue(state: QueueState = STATE) -> None:
|
||||
"""Clear the pending POST queue.
|
||||
|
||||
Parameters:
|
||||
state: Queue state to reset. Defaults to the global queue.
|
||||
"""
|
||||
|
||||
with state.lock:
|
||||
state.queue.clear()
|
||||
state.active = False
|
||||
|
||||
|
||||
__all__ = [
|
||||
"STATE",
|
||||
"QueueState",
|
||||
"_DEFAULT_POST_PRIORITY",
|
||||
"_MESSAGE_POST_PRIORITY",
|
||||
"_NEIGHBOR_POST_PRIORITY",
|
||||
"_NODE_POST_PRIORITY",
|
||||
"_POSITION_POST_PRIORITY",
|
||||
"_TELEMETRY_POST_PRIORITY",
|
||||
"_clear_post_queue",
|
||||
"_drain_post_queue",
|
||||
"_enqueue_post_json",
|
||||
"_post_json",
|
||||
"_queue_post_json",
|
||||
]
|
||||
@@ -0,0 +1,613 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Utilities for converting Meshtastic structures into JSON-friendly forms.
|
||||
|
||||
The helpers normalise loosely structured Meshtastic packets so they can be
|
||||
forwarded to the web application using predictable field names and types.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import dataclasses
|
||||
import json
|
||||
import math
|
||||
import time
|
||||
from collections.abc import Mapping
|
||||
|
||||
from google.protobuf.json_format import MessageToDict
|
||||
from google.protobuf.message import DecodeError
|
||||
from google.protobuf.message import Message as ProtoMessage
|
||||
|
||||
|
||||
def _get(obj, key, default=None):
|
||||
"""Return ``obj[key]`` or ``getattr(obj, key)`` when available.
|
||||
|
||||
Parameters:
|
||||
obj: Mapping or object supplying attributes.
|
||||
key: Name of the attribute or mapping key to retrieve.
|
||||
default: Fallback value when ``key`` is not present.
|
||||
|
||||
Returns:
|
||||
The resolved value or ``default`` if the lookup fails.
|
||||
"""
|
||||
|
||||
if isinstance(obj, dict):
|
||||
return obj.get(key, default)
|
||||
return getattr(obj, key, default)
|
||||
|
||||
|
||||
def _node_to_dict(n) -> dict:
|
||||
"""Convert ``n`` into a JSON-serialisable mapping.
|
||||
|
||||
Parameters:
|
||||
n: Arbitrary data structure, commonly a protobuf message, dataclass or
|
||||
nested containers produced by Meshtastic.
|
||||
|
||||
Returns:
|
||||
A plain dictionary containing recursively converted values.
|
||||
"""
|
||||
|
||||
def _convert(value):
|
||||
if isinstance(value, dict):
|
||||
return {k: _convert(v) for k, v in value.items()}
|
||||
if isinstance(value, (list, tuple, set)):
|
||||
return [_convert(v) for v in value]
|
||||
if dataclasses.is_dataclass(value):
|
||||
return {k: _convert(getattr(value, k)) for k in value.__dataclass_fields__}
|
||||
if isinstance(value, ProtoMessage):
|
||||
try:
|
||||
return MessageToDict(
|
||||
value,
|
||||
preserving_proto_field_name=True,
|
||||
use_integers_for_enums=False,
|
||||
)
|
||||
except Exception:
|
||||
if hasattr(value, "to_dict"):
|
||||
try:
|
||||
return value.to_dict()
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
return json.loads(json.dumps(value, default=str))
|
||||
except Exception:
|
||||
return str(value)
|
||||
if isinstance(value, bytes):
|
||||
try:
|
||||
return value.decode()
|
||||
except Exception:
|
||||
return value.hex()
|
||||
if isinstance(value, (str, int, float, bool)) or value is None:
|
||||
return value
|
||||
try:
|
||||
return json.loads(json.dumps(value, default=str))
|
||||
except Exception:
|
||||
return str(value)
|
||||
|
||||
return _convert(n)
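For instance, a sketch with made-up values showing how nested containers, dataclasses and bytes collapse to JSON-friendly types:
import dataclasses

@dataclasses.dataclass
class _FakeUser:  # hypothetical input shape, for illustration only
    short_name: str
    macaddr: bytes

_node_to_dict({"user": _FakeUser("BerF", b"\xaa\xbb"), "hops": (1, 2)})
# -> {"user": {"short_name": "BerF", "macaddr": "aabb"}, "hops": [1, 2]}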
|
||||
|
||||
|
||||
def upsert_payload(node_id, node) -> dict:
|
||||
"""Return the payload expected by ``/api/nodes`` upsert requests.
|
||||
|
||||
Parameters:
|
||||
node_id: Canonical node identifier.
|
||||
node: Node representation to convert with :func:`_node_to_dict`.
|
||||
|
||||
Returns:
|
||||
A mapping keyed by ``node_id`` describing the node.
|
||||
"""
|
||||
|
||||
ndict = _node_to_dict(node)
|
||||
return {node_id: ndict}
|
||||
|
||||
|
||||
def _iso(ts: int | float) -> str:
|
||||
"""Convert ``ts`` into an ISO-8601 timestamp in UTC."""
|
||||
|
||||
import datetime
|
||||
|
||||
return (
|
||||
datetime.datetime.fromtimestamp(int(ts), datetime.UTC)
|
||||
.isoformat()
|
||||
.replace("+00:00", "Z")
|
||||
)
|
||||
|
||||
|
||||
def _first(d, *names, default=None):
|
||||
"""Return the first matching attribute or key from ``d``.
|
||||
|
||||
Parameters:
|
||||
d: Mapping or object providing nested attributes.
|
||||
*names: Candidate names, optionally using ``dot.separated`` notation
|
||||
for nested lookups.
|
||||
default: Value returned when no candidates succeed.
|
||||
|
||||
Returns:
|
||||
The first non-empty value encountered or ``default``.
|
||||
"""
|
||||
|
||||
def _mapping_get(obj, key):
|
||||
if isinstance(obj, Mapping) and key in obj:
|
||||
return True, obj[key]
|
||||
if hasattr(obj, "__getitem__"):
|
||||
try:
|
||||
return True, obj[key]
|
||||
except Exception:
|
||||
pass
|
||||
if hasattr(obj, key):
|
||||
return True, getattr(obj, key)
|
||||
return False, None
|
||||
|
||||
for name in names:
|
||||
cur = d
|
||||
ok = True
|
||||
for part in name.split("."):
|
||||
ok, cur = _mapping_get(cur, part)
|
||||
if not ok:
|
||||
break
|
||||
if ok:
|
||||
if cur is None:
|
||||
continue
|
||||
if isinstance(cur, str) and cur == "":
|
||||
continue
|
||||
return cur
|
||||
return default
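An illustration of the dotted lookup against a hypothetical packet fragment:
pkt = {"decoded": {"user": {"shortName": "BerF", "longName": ""}}}
_first(pkt, "decoded.user.longName", "decoded.user.shortName", default="?")
# -> "BerF"   (the empty longName is skipped, so the next candidate wins)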
|
||||
|
||||
|
||||
def _coerce_int(value):
|
||||
"""Best-effort conversion of ``value`` to an integer.
|
||||
|
||||
Parameters:
|
||||
value: Any type supported by Meshtastic payloads.
|
||||
|
||||
Returns:
|
||||
An integer or ``None`` when conversion is not possible.
|
||||
"""
|
||||
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, bool):
|
||||
return int(value)
|
||||
if isinstance(value, int):
|
||||
return value
|
||||
if isinstance(value, float):
|
||||
return int(value) if math.isfinite(value) else None
|
||||
if isinstance(value, (str, bytes, bytearray)):
|
||||
text = value.decode() if isinstance(value, (bytes, bytearray)) else value
|
||||
stripped = text.strip()
|
||||
if not stripped:
|
||||
return None
|
||||
try:
|
||||
if stripped.lower().startswith("0x"):
|
||||
return int(stripped, 16)
|
||||
return int(stripped, 10)
|
||||
except ValueError:
|
||||
try:
|
||||
return int(float(stripped))
|
||||
except ValueError:
|
||||
return None
|
||||
try:
|
||||
return int(value)
|
||||
except (TypeError, ValueError):
|
||||
return None
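Behaviour sketch with representative inputs:
#   _coerce_int("0x1F")       -> 31    (hex strings are honoured)
#   _coerce_int(" 42.7 ")     -> 42    (falls back to int(float(...)))
#   _coerce_int(b"7")         -> 7
#   _coerce_int("n/a")        -> None
#   _coerce_int(float("nan")) -> None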
|
||||
|
||||
|
||||
def _coerce_float(value):
|
||||
"""Best-effort conversion of ``value`` to a float.
|
||||
|
||||
Parameters:
|
||||
value: Any type supported by Meshtastic payloads.
|
||||
|
||||
Returns:
|
||||
A float or ``None`` when conversion fails or results in ``NaN``.
|
||||
"""
|
||||
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, bool):
|
||||
return float(value)
|
||||
if isinstance(value, (int, float)):
|
||||
result = float(value)
|
||||
return result if math.isfinite(result) else None
|
||||
if isinstance(value, (str, bytes, bytearray)):
|
||||
text = value.decode() if isinstance(value, (bytes, bytearray)) else value
|
||||
stripped = text.strip()
|
||||
if not stripped:
|
||||
return None
|
||||
try:
|
||||
result = float(stripped)
|
||||
except ValueError:
|
||||
return None
|
||||
return result if math.isfinite(result) else None
|
||||
try:
|
||||
result = float(value)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
return result if math.isfinite(result) else None
|
||||
|
||||
|
||||
def _pkt_to_dict(packet) -> dict:
|
||||
"""Normalise a packet into a plain dictionary.
|
||||
|
||||
Parameters:
|
||||
packet: Packet object or mapping emitted by Meshtastic.
|
||||
|
||||
Returns:
|
||||
A dictionary representation suitable for downstream processing.
|
||||
"""
|
||||
|
||||
if isinstance(packet, dict):
|
||||
return packet
|
||||
if isinstance(packet, ProtoMessage):
|
||||
try:
|
||||
return MessageToDict(
|
||||
packet, preserving_proto_field_name=True, use_integers_for_enums=False
|
||||
)
|
||||
except Exception:
|
||||
if hasattr(packet, "to_dict"):
|
||||
try:
|
||||
return packet.to_dict()
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
return json.loads(json.dumps(packet, default=lambda o: str(o)))
|
||||
except Exception:
|
||||
return {"_unparsed": str(packet)}
|
||||
|
||||
|
||||
def _canonical_node_id(value) -> str | None:
|
||||
"""Convert node identifiers into the canonical ``!xxxxxxxx`` format.
|
||||
|
||||
Parameters:
|
||||
value: Input identifier which may be an int, float or string.
|
||||
|
||||
Returns:
|
||||
The canonical identifier or ``None`` if conversion fails.
|
||||
"""
|
||||
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, (int, float)):
|
||||
try:
|
||||
num = int(value)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
if num < 0:
|
||||
return None
|
||||
return f"!{num & 0xFFFFFFFF:08x}"
|
||||
if not isinstance(value, str):
|
||||
return None
|
||||
|
||||
trimmed = value.strip()
|
||||
if not trimmed:
|
||||
return None
|
||||
if trimmed.startswith("^"):
|
||||
return trimmed
|
||||
if trimmed.startswith("!"):
|
||||
body = trimmed[1:]
|
||||
elif trimmed.lower().startswith("0x"):
|
||||
body = trimmed[2:]
|
||||
elif trimmed.isdigit():
|
||||
try:
|
||||
return f"!{int(trimmed, 10) & 0xFFFFFFFF:08x}"
|
||||
except ValueError:
|
||||
return None
|
||||
else:
|
||||
body = trimmed
|
||||
|
||||
if not body:
|
||||
return None
|
||||
try:
|
||||
return f"!{int(body, 16) & 0xFFFFFFFF:08x}"
|
||||
except ValueError:
|
||||
return None
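Some representative conversions, as a sketch:
#   _canonical_node_id(424309585)    -> "!194a7351"
#   _canonical_node_id("0x194A7351") -> "!194a7351"
#   _canonical_node_id("194a7351")   -> "!194a7351"
#   _canonical_node_id("^all")       -> "^all"   (broadcast-style ids pass through)
#   _canonical_node_id("not-a-node") -> None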
|
||||
|
||||
|
||||
def _node_num_from_id(node_id) -> int | None:
|
||||
"""Extract the numeric node ID from a canonical identifier.
|
||||
|
||||
Parameters:
|
||||
node_id: Identifier value accepted by :func:`_canonical_node_id`.
|
||||
|
||||
Returns:
|
||||
The numeric node ID or ``None`` when parsing fails.
|
||||
"""
|
||||
|
||||
if node_id is None:
|
||||
return None
|
||||
if isinstance(node_id, (int, float)):
|
||||
try:
|
||||
num = int(node_id)
|
||||
except (TypeError, ValueError):
|
||||
return None
|
||||
return num if num >= 0 else None
|
||||
if not isinstance(node_id, str):
|
||||
return None
|
||||
|
||||
trimmed = node_id.strip()
|
||||
if not trimmed:
|
||||
return None
|
||||
if trimmed.startswith("!"):
|
||||
trimmed = trimmed[1:]
|
||||
if trimmed.lower().startswith("0x"):
|
||||
trimmed = trimmed[2:]
|
||||
try:
|
||||
return int(trimmed, 16)
|
||||
except ValueError:
|
||||
try:
|
||||
return int(trimmed, 10)
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def _merge_mappings(base, extra):
|
||||
"""Merge two mapping-like objects recursively.
|
||||
|
||||
Parameters:
|
||||
base: Existing mapping or mapping-like structure.
|
||||
extra: Mapping or compatible object whose entries should overlay
|
||||
``base``.
|
||||
|
||||
Returns:
|
||||
A new dictionary containing the merged values.
|
||||
"""
|
||||
|
||||
base_dict: dict
|
||||
if isinstance(base, Mapping):
|
||||
base_dict = dict(base)
|
||||
elif base:
|
||||
converted_base = _node_to_dict(base)
|
||||
base_dict = dict(converted_base) if isinstance(converted_base, Mapping) else {}
|
||||
else:
|
||||
base_dict = {}
|
||||
|
||||
if not isinstance(extra, Mapping):
|
||||
converted_extra = _node_to_dict(extra)
|
||||
if not isinstance(converted_extra, Mapping):
|
||||
return base_dict
|
||||
extra = converted_extra
|
||||
|
||||
for key, value in extra.items():
|
||||
if isinstance(value, Mapping):
|
||||
existing = base_dict.get(key)
|
||||
base_dict[key] = _merge_mappings(existing, value)
|
||||
else:
|
||||
base_dict[key] = _node_to_dict(value)
|
||||
return base_dict
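A short sketch of the recursive overlay (hypothetical values):
base = {"user": {"shortName": "BerF"}, "hopsAway": 2}
extra = {"user": {"longName": "Berlin Fernsehturm"}, "hopsAway": 1}
_merge_mappings(base, extra)
# -> {"user": {"shortName": "BerF", "longName": "Berlin Fernsehturm"}, "hopsAway": 1}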
|
||||
|
||||
|
||||
def _extract_payload_bytes(decoded_section: Mapping) -> bytes | None:
|
||||
"""Return raw payload bytes from ``decoded_section`` when available.
|
||||
|
||||
Parameters:
|
||||
decoded_section: Mapping that may include a ``payload`` entry.
|
||||
|
||||
Returns:
|
||||
Raw payload bytes or ``None`` when the payload is missing or invalid.
|
||||
"""
|
||||
|
||||
if not isinstance(decoded_section, Mapping):
|
||||
return None
|
||||
payload = decoded_section.get("payload")
|
||||
if isinstance(payload, Mapping):
|
||||
data = payload.get("__bytes_b64__") or payload.get("bytes")
|
||||
if isinstance(data, str):
|
||||
try:
|
||||
return base64.b64decode(data)
|
||||
except Exception:
|
||||
return None
|
||||
if isinstance(payload, (bytes, bytearray)):
|
||||
return bytes(payload)
|
||||
if isinstance(payload, str):
|
||||
try:
|
||||
return base64.b64decode(payload)
|
||||
except Exception:
|
||||
return None
|
||||
return None
|
||||
|
||||
|
||||
def _decode_nodeinfo_payload(payload_bytes):
|
||||
"""Decode ``NodeInfo`` protobuf payloads from raw bytes.
|
||||
|
||||
Parameters:
|
||||
payload_bytes: Serialized protobuf data from a NODEINFO packet.
|
||||
|
||||
Returns:
|
||||
A :class:`meshtastic.protobuf.mesh_pb2.NodeInfo` instance or ``None``.
|
||||
"""
|
||||
|
||||
if not payload_bytes:
|
||||
return None
|
||||
try:
|
||||
from meshtastic.protobuf import mesh_pb2
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
node_info = mesh_pb2.NodeInfo()
|
||||
try:
|
||||
node_info.ParseFromString(payload_bytes)
|
||||
return node_info
|
||||
except DecodeError:
|
||||
try:
|
||||
user_msg = mesh_pb2.User()
|
||||
user_msg.ParseFromString(payload_bytes)
|
||||
except DecodeError:
|
||||
return None
|
||||
node_info = mesh_pb2.NodeInfo()
|
||||
node_info.user.CopyFrom(user_msg)
|
||||
return node_info
|
||||
|
||||
|
||||
def _nodeinfo_metrics_dict(node_info) -> dict | None:
|
||||
"""Extract device metric fields from a NodeInfo message.
|
||||
|
||||
Parameters:
|
||||
node_info: Parsed NodeInfo protobuf message.
|
||||
|
||||
Returns:
|
||||
A dictionary containing selected metric fields, or ``None`` when no
|
||||
metrics are present.
|
||||
"""
|
||||
|
||||
if not node_info:
|
||||
return None
|
||||
metrics_field_names = {f[0].name for f in node_info.ListFields()}
|
||||
if "device_metrics" not in metrics_field_names:
|
||||
return None
|
||||
metrics = {}
|
||||
for field_desc, value in node_info.device_metrics.ListFields():
|
||||
name = field_desc.name
|
||||
if name == "battery_level":
|
||||
metrics["batteryLevel"] = float(value)
|
||||
elif name == "voltage":
|
||||
metrics["voltage"] = float(value)
|
||||
elif name == "channel_utilization":
|
||||
metrics["channelUtilization"] = float(value)
|
||||
elif name == "air_util_tx":
|
||||
metrics["airUtilTx"] = float(value)
|
||||
elif name == "uptime_seconds":
|
||||
metrics["uptimeSeconds"] = int(value)
|
||||
elif name == "humidity":
|
||||
metrics["humidity"] = float(value)
|
||||
elif name == "temperature":
|
||||
metrics["temperature"] = float(value)
|
||||
elif name == "barometric_pressure":
|
||||
metrics["barometricPressure"] = float(value)
|
||||
return metrics or None
|
||||
|
||||
|
||||
def _nodeinfo_position_dict(node_info) -> dict | None:
|
||||
"""Return a dictionary view of positional data from NodeInfo.
|
||||
|
||||
Parameters:
|
||||
node_info: Parsed NodeInfo protobuf message.
|
||||
|
||||
Returns:
|
||||
A dictionary of positional fields or ``None`` if no data exists.
|
||||
"""
|
||||
|
||||
if not node_info:
|
||||
return None
|
||||
fields = {f[0].name for f in node_info.ListFields()}
|
||||
if "position" not in fields:
|
||||
return None
|
||||
|
||||
result = {}
|
||||
latitude_i = None
|
||||
longitude_i = None
|
||||
|
||||
for field_desc, value in node_info.position.ListFields():
|
||||
name = field_desc.name
|
||||
if name == "latitude_i":
|
||||
latitude_i = int(value)
|
||||
result["latitudeI"] = latitude_i
|
||||
elif name == "longitude_i":
|
||||
longitude_i = int(value)
|
||||
result["longitudeI"] = longitude_i
|
||||
elif name == "latitude":
|
||||
result["latitude"] = float(value)
|
||||
elif name == "longitude":
|
||||
result["longitude"] = float(value)
|
||||
elif name == "altitude":
|
||||
result["altitude"] = int(value)
|
||||
elif name == "time":
|
||||
result["time"] = int(value)
|
||||
elif name == "ground_speed":
|
||||
result["groundSpeed"] = float(value)
|
||||
elif name == "ground_track":
|
||||
result["groundTrack"] = float(value)
|
||||
elif name == "precision_bits":
|
||||
result["precisionBits"] = int(value)
|
||||
elif name == "location_source":
|
||||
# Preserve the raw enum value to allow downstream formatting.
|
||||
result["locationSource"] = int(value)
|
||||
|
||||
if "latitude" not in result and latitude_i is not None:
|
||||
result["latitude"] = latitude_i / 1e7
|
||||
if "longitude" not in result and longitude_i is not None:
|
||||
result["longitude"] = longitude_i / 1e7
|
||||
|
||||
return result or None
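The integer fields use Meshtastic's scaled-degree convention, so as a sketch:
#   latitude_i  = 525200000  ->  latitude  52.52    (degrees = latitude_i / 1e7)
#   longitude_i = 133950000  ->  longitude 13.395
# These derived values are only filled in when the packet did not already carry
# floating-point latitude/longitude fields.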
|
||||
|
||||
|
||||
def _nodeinfo_user_dict(node_info, decoded_user):
|
||||
"""Combine protobuf and decoded user information into a mapping.
|
||||
|
||||
Parameters:
|
||||
node_info: Parsed NodeInfo protobuf message that may contain a ``user``
|
||||
field.
|
||||
decoded_user: Mapping or protobuf message representing decoded user
|
||||
data from the packet payload.
|
||||
|
||||
Returns:
|
||||
A merged mapping of user information or ``None`` when no data exists.
|
||||
"""
|
||||
|
||||
user_dict = None
|
||||
if node_info:
|
||||
field_names = {f[0].name for f in node_info.ListFields()}
|
||||
if "user" in field_names:
|
||||
try:
|
||||
user_dict = MessageToDict(
|
||||
node_info.user,
|
||||
preserving_proto_field_name=False,
|
||||
use_integers_for_enums=False,
|
||||
)
|
||||
except Exception:
|
||||
user_dict = None
|
||||
|
||||
if isinstance(decoded_user, ProtoMessage):
|
||||
try:
|
||||
decoded_user = MessageToDict(
|
||||
decoded_user,
|
||||
preserving_proto_field_name=False,
|
||||
use_integers_for_enums=False,
|
||||
)
|
||||
except Exception:
|
||||
decoded_user = _node_to_dict(decoded_user)
|
||||
|
||||
if isinstance(decoded_user, Mapping):
|
||||
user_dict = _merge_mappings(user_dict, decoded_user)
|
||||
|
||||
if isinstance(user_dict, Mapping):
|
||||
canonical = _canonical_node_id(user_dict.get("id"))
|
||||
if canonical:
|
||||
user_dict = dict(user_dict)
|
||||
user_dict["id"] = canonical
|
||||
return user_dict
|
||||
|
||||
|
||||
__all__ = [
|
||||
"_canonical_node_id",
|
||||
"_coerce_float",
|
||||
"_coerce_int",
|
||||
"_decode_nodeinfo_payload",
|
||||
"_extract_payload_bytes",
|
||||
"_first",
|
||||
"_get",
|
||||
"_iso",
|
||||
"_merge_mappings",
|
||||
"_node_num_from_id",
|
||||
"_node_to_dict",
|
||||
"_nodeinfo_metrics_dict",
|
||||
"_nodeinfo_position_dict",
|
||||
"_nodeinfo_user_dict",
|
||||
"_pkt_to_dict",
|
||||
"DecodeError",
|
||||
"MessageToDict",
|
||||
"ProtoMessage",
|
||||
"upsert_payload",
|
||||
]
|
||||
+4
-1
@@ -21,10 +21,13 @@ CREATE TABLE IF NOT EXISTS messages (
|
||||
channel INTEGER,
|
||||
portnum TEXT,
|
||||
text TEXT,
|
||||
encrypted TEXT,
|
||||
snr REAL,
|
||||
rssi INTEGER,
|
||||
hop_limit INTEGER,
|
||||
raw_json TEXT,
|
||||
lora_freq INTEGER,
|
||||
modem_preset TEXT,
|
||||
channel_name TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_rx_time ON messages(rx_time);
|
||||
|
||||
@@ -0,0 +1,4 @@
-- Add support for encrypted messages to the existing schema.
BEGIN;
ALTER TABLE messages ADD COLUMN encrypted TEXT;
COMMIT;
@@ -0,0 +1,22 @@
|
||||
-- Copyright (C) 2025 l5yth
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
--
|
||||
-- Extend the nodes and messages tables with LoRa metadata columns.
|
||||
BEGIN;
|
||||
ALTER TABLE nodes ADD COLUMN lora_freq INTEGER;
|
||||
ALTER TABLE nodes ADD COLUMN modem_preset TEXT;
|
||||
ALTER TABLE messages ADD COLUMN lora_freq INTEGER;
|
||||
ALTER TABLE messages ADD COLUMN modem_preset TEXT;
|
||||
ALTER TABLE messages ADD COLUMN channel_name TEXT;
|
||||
COMMIT;
|
||||
@@ -0,0 +1,26 @@
|
||||
-- Copyright (C) 2025 l5yth
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
CREATE TABLE IF NOT EXISTS neighbors (
|
||||
node_id TEXT NOT NULL,
|
||||
neighbor_id TEXT NOT NULL,
|
||||
snr REAL,
|
||||
rx_time INTEGER NOT NULL,
|
||||
PRIMARY KEY (node_id, neighbor_id),
|
||||
FOREIGN KEY (node_id) REFERENCES nodes(node_id) ON DELETE CASCADE,
|
||||
FOREIGN KEY (neighbor_id) REFERENCES nodes(node_id) ON DELETE CASCADE
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_neighbors_rx_time ON neighbors(rx_time);
|
||||
CREATE INDEX IF NOT EXISTS idx_neighbors_neighbor_id ON neighbors(neighbor_id);
|
||||
+4
-1
@@ -36,9 +36,12 @@ CREATE TABLE IF NOT EXISTS nodes (
|
||||
uptime_seconds INTEGER,
|
||||
position_time INTEGER,
|
||||
location_source TEXT,
|
||||
precision_bits INTEGER,
|
||||
latitude REAL,
|
||||
longitude REAL,
|
||||
altitude REAL,
|
||||
lora_freq INTEGER,
|
||||
modem_preset TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_nodes_last_heard ON nodes(last_heard);
|
||||
|
||||
@@ -0,0 +1,40 @@
|
||||
-- Copyright (C) 2025 l5yth
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
CREATE TABLE IF NOT EXISTS positions (
|
||||
id INTEGER PRIMARY KEY,
|
||||
node_id TEXT,
|
||||
node_num INTEGER,
|
||||
rx_time INTEGER NOT NULL,
|
||||
rx_iso TEXT NOT NULL,
|
||||
position_time INTEGER,
|
||||
to_id TEXT,
|
||||
latitude REAL,
|
||||
longitude REAL,
|
||||
altitude REAL,
|
||||
location_source TEXT,
|
||||
precision_bits INTEGER,
|
||||
sats_in_view INTEGER,
|
||||
pdop REAL,
|
||||
ground_speed REAL,
|
||||
ground_track REAL,
|
||||
snr REAL,
|
||||
rssi INTEGER,
|
||||
hop_limit INTEGER,
|
||||
bitfield INTEGER,
|
||||
payload_b64 TEXT
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_positions_rx_time ON positions(rx_time);
|
||||
CREATE INDEX IF NOT EXISTS idx_positions_node_id ON positions(node_id);
|
||||
@@ -0,0 +1,8 @@
# Production dependencies
meshtastic>=2.5.0
protobuf>=5.27.2

# Development dependencies (optional)
black>=24.8.0
pytest>=8.3.0
pytest-cov>=5.0.0
@@ -0,0 +1,43 @@
|
||||
-- Copyright (C) 2025 l5yth
|
||||
--
|
||||
-- Licensed under the Apache License, Version 2.0 (the "License");
|
||||
-- you may not use this file except in compliance with the License.
|
||||
-- You may obtain a copy of the License at
|
||||
--
|
||||
-- http://www.apache.org/licenses/LICENSE-2.0
|
||||
--
|
||||
-- Unless required by applicable law or agreed to in writing, software
|
||||
-- distributed under the License is distributed on an "AS IS" BASIS,
|
||||
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
-- See the License for the specific language governing permissions and
|
||||
-- limitations under the License.
|
||||
|
||||
CREATE TABLE IF NOT EXISTS telemetry (
|
||||
id INTEGER PRIMARY KEY,
|
||||
node_id TEXT,
|
||||
node_num INTEGER,
|
||||
from_id TEXT,
|
||||
to_id TEXT,
|
||||
rx_time INTEGER NOT NULL,
|
||||
rx_iso TEXT NOT NULL,
|
||||
telemetry_time INTEGER,
|
||||
channel INTEGER,
|
||||
portnum TEXT,
|
||||
hop_limit INTEGER,
|
||||
snr REAL,
|
||||
rssi INTEGER,
|
||||
bitfield INTEGER,
|
||||
payload_b64 TEXT,
|
||||
battery_level REAL,
|
||||
voltage REAL,
|
||||
channel_utilization REAL,
|
||||
air_util_tx REAL,
|
||||
uptime_seconds INTEGER,
|
||||
temperature REAL,
|
||||
relative_humidity REAL,
|
||||
barometric_pressure REAL
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS idx_telemetry_rx_time ON telemetry(rx_time);
|
||||
CREATE INDEX IF NOT EXISTS idx_telemetry_node_id ON telemetry(node_id);
|
||||
CREATE INDEX IF NOT EXISTS idx_telemetry_time ON telemetry(telemetry_time);
|
||||
@@ -0,0 +1,42 @@
|
||||
# Development overrides for docker-compose.yml
|
||||
services:
|
||||
web:
|
||||
environment:
|
||||
DEBUG: 1
|
||||
volumes:
|
||||
- ./web:/app
|
||||
- ./data:/app/.local/share/potato-mesh
|
||||
- ./.config/potato-mesh:/app/.config/potato-mesh
|
||||
- /app/vendor/bundle
|
||||
|
||||
web-bridge:
|
||||
environment:
|
||||
DEBUG: 1
|
||||
volumes:
|
||||
- ./web:/app
|
||||
- ./data:/app/.local/share/potato-mesh
|
||||
- ./.config/potato-mesh:/app/.config/potato-mesh
|
||||
- /app/vendor/bundle
|
||||
ports:
|
||||
- "41447:41447"
|
||||
- "9292:9292"
|
||||
|
||||
ingestor:
|
||||
environment:
|
||||
DEBUG: 1
|
||||
volumes:
|
||||
- ./data:/app
|
||||
- ./data:/app/.local/share/potato-mesh
|
||||
- ./.config/potato-mesh:/app/.config/potato-mesh
|
||||
- /app/.local
|
||||
- /dev:/dev
|
||||
|
||||
ingestor-bridge:
|
||||
environment:
|
||||
DEBUG: 1
|
||||
volumes:
|
||||
- ./data:/app
|
||||
- ./data:/app/.local/share/potato-mesh
|
||||
- ./.config/potato-mesh:/app/.config/potato-mesh
|
||||
- /app/.local
|
||||
- /dev:/dev
|
||||
@@ -0,0 +1,37 @@
|
||||
# Production overrides for docker-compose.yml
|
||||
services:
|
||||
web:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: web/Dockerfile
|
||||
target: production
|
||||
environment:
|
||||
DEBUG: 0
|
||||
restart: always
|
||||
|
||||
web-bridge:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: web/Dockerfile
|
||||
target: production
|
||||
environment:
|
||||
DEBUG: 0
|
||||
restart: always
|
||||
|
||||
ingestor:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: data/Dockerfile
|
||||
target: production
|
||||
environment:
|
||||
DEBUG: 0
|
||||
restart: always
|
||||
|
||||
ingestor-bridge:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: data/Dockerfile
|
||||
target: production
|
||||
environment:
|
||||
DEBUG: 0
|
||||
restart: always
|
||||
@@ -0,0 +1,102 @@
|
||||
x-web-base: &web-base
|
||||
image: ghcr.io/l5yth/potato-mesh-web-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:latest
|
||||
environment:
|
||||
APP_ENV: ${APP_ENV:-production}
|
||||
RACK_ENV: ${RACK_ENV:-production}
|
||||
SITE_NAME: ${SITE_NAME:-PotatoMesh Demo}
|
||||
CHANNEL: ${CHANNEL:-#LongFast}
|
||||
FREQUENCY: ${FREQUENCY:-915MHz}
|
||||
MAP_CENTER: ${MAP_CENTER:-38.761944,-27.090833}
|
||||
MAX_DISTANCE: ${MAX_DISTANCE:-42}
|
||||
CONTACT_LINK: ${CONTACT_LINK:-#potatomesh:dod.ngo}
|
||||
API_TOKEN: ${API_TOKEN}
|
||||
INSTANCE_DOMAIN: ${INSTANCE_DOMAIN}
|
||||
DEBUG: ${DEBUG:-0}
|
||||
command: ["ruby", "app.rb", "-p", "41447", "-o", "0.0.0.0"]
|
||||
volumes:
|
||||
- potatomesh_data:/app/.local/share/potato-mesh
|
||||
- potatomesh_config:/app/.config/potato-mesh
|
||||
- potatomesh_logs:/app/logs
|
||||
restart: unless-stopped
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 512M
|
||||
cpus: '0.5'
|
||||
reservations:
|
||||
memory: 256M
|
||||
cpus: '0.25'
|
||||
|
||||
x-ingestor-base: &ingestor-base
|
||||
image: ghcr.io/l5yth/potato-mesh-ingestor-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:latest
|
||||
environment:
|
||||
CONNECTION: ${CONNECTION:-/dev/ttyACM0}
|
||||
CHANNEL_INDEX: ${CHANNEL_INDEX:-0}
|
||||
POTATOMESH_INSTANCE: ${POTATOMESH_INSTANCE:-http://web:41447}
|
||||
API_TOKEN: ${API_TOKEN}
|
||||
INSTANCE_DOMAIN: ${INSTANCE_DOMAIN}
|
||||
DEBUG: ${DEBUG:-0}
|
||||
volumes:
|
||||
- potatomesh_data:/app/.local/share/potato-mesh
|
||||
- potatomesh_config:/app/.config/potato-mesh
|
||||
- potatomesh_logs:/app/logs
|
||||
- /dev:/dev
|
||||
device_cgroup_rules:
|
||||
- 'c 166:* rwm' # ttyACM devices
|
||||
- 'c 188:* rwm' # ttyUSB devices
|
||||
- 'c 4:* rwm' # ttyS devices
|
||||
privileged: false
|
||||
restart: unless-stopped
|
||||
deploy:
|
||||
resources:
|
||||
limits:
|
||||
memory: 256M
|
||||
cpus: '0.25'
|
||||
reservations:
|
||||
memory: 128M
|
||||
cpus: '0.1'
|
||||
|
||||
services:
|
||||
web:
|
||||
<<: *web-base
|
||||
network_mode: host
|
||||
|
||||
ingestor:
|
||||
<<: *ingestor-base
|
||||
network_mode: host
|
||||
depends_on:
|
||||
- web
|
||||
extra_hosts:
|
||||
- "web:127.0.0.1"
|
||||
|
||||
web-bridge:
|
||||
<<: *web-base
|
||||
container_name: potatomesh-web-bridge
|
||||
networks:
|
||||
- potatomesh-network
|
||||
ports:
|
||||
- "41447:41447"
|
||||
profiles:
|
||||
- bridge
|
||||
|
||||
ingestor-bridge:
|
||||
<<: *ingestor-base
|
||||
container_name: potatomesh-ingestor-bridge
|
||||
networks:
|
||||
- potatomesh-network
|
||||
depends_on:
|
||||
- web-bridge
|
||||
profiles:
|
||||
- bridge
|
||||
|
||||
volumes:
|
||||
potatomesh_data:
|
||||
driver: local
|
||||
potatomesh_config:
|
||||
driver: local
|
||||
potatomesh_logs:
|
||||
driver: local
|
||||
|
||||
networks:
|
||||
potatomesh-network:
|
||||
driver: bridge
|
||||
Binary file not shown. (new image, 952 KiB)
Binary file not shown. (new image, 907 KiB)
+30
-2
@@ -14,6 +14,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Interactive debugging helpers for live Meshtastic sessions."""
|
||||
|
||||
import time, json, base64, threading
|
||||
from pubsub import pub # comes with meshtastic
|
||||
from meshtastic.serial_interface import SerialInterface
|
||||
@@ -28,7 +30,14 @@ stop = threading.Event()
|
||||
|
||||
|
||||
def to_jsonable(obj):
|
||||
"""Recursively convert protobuf/bytes/etc. into JSON-serializable structures."""
|
||||
"""Recursively convert complex objects into JSON-serialisable structures.
|
||||
|
||||
Parameters:
|
||||
obj: Any Meshtastic-related payload or protobuf message.
|
||||
|
||||
Returns:
|
||||
A structure composed of standard Python types.
|
||||
"""
|
||||
if obj is None:
|
||||
return None
|
||||
if isinstance(obj, ProtoMessage):
|
||||
@@ -49,7 +58,14 @@ def to_jsonable(obj):
|
||||
|
||||
|
||||
def extract_text(d):
|
||||
"""Best-effort pull of decoded text from a dict produced by to_jsonable()."""
|
||||
"""Best-effort pull of decoded text from :func:`to_jsonable` output.
|
||||
|
||||
Parameters:
|
||||
d: Mapping derived from :func:`to_jsonable`.
|
||||
|
||||
Returns:
|
||||
The decoded text when available, otherwise ``None``.
|
||||
"""
|
||||
dec = d.get("decoded") or {}
|
||||
# Text packets usually at decoded.payload.text
|
||||
payload = dec.get("payload") or {}
|
||||
@@ -62,6 +78,12 @@ def extract_text(d):
|
||||
|
||||
|
||||
def on_receive(packet, interface):
|
||||
"""Display human-readable output for each received packet.
|
||||
|
||||
Parameters:
|
||||
packet: Packet instance supplied by Meshtastic.
|
||||
interface: Interface that produced the packet.
|
||||
"""
|
||||
global packet_count, last_rx_ts
|
||||
packet_count += 1
|
||||
last_rx_ts = time.time()
|
||||
@@ -86,14 +108,20 @@ def on_receive(packet, interface):
|
||||
|
||||
|
||||
def on_connected(interface, *args, **kwargs):
|
||||
"""Log when a connection is established."""
|
||||
|
||||
print("[info] connection established")
|
||||
|
||||
|
||||
def on_disconnected(interface, *args, **kwargs):
|
||||
"""Log when the interface disconnects."""
|
||||
|
||||
print("[info] disconnected")
|
||||
|
||||
|
||||
def main():
|
||||
"""Run the interactive debugging loop."""
|
||||
|
||||
print(f"Opening Meshtastic on {PORT} …")
|
||||
|
||||
# Use PubSub topics (reliable in current meshtastic)
|
||||
|
||||
+127
@@ -0,0 +1,127 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Utility script to dump Meshtastic traffic for offline analysis."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from meshtastic.mesh_interface import MeshInterface
|
||||
from meshtastic.serial_interface import SerialInterface
|
||||
from pubsub import pub
|
||||
|
||||
CONNECTION = os.environ.get("CONNECTION") or os.environ.get(
|
||||
"MESH_SERIAL", "/dev/ttyACM0"
|
||||
)
|
||||
"""Connection target opened to capture Meshtastic traffic."""
|
||||
OUT = os.environ.get("MESH_DUMP_FILE", "meshtastic-dump.ndjson")
|
||||
|
||||
# line-buffered append so you can tail -f safely
|
||||
f = open(OUT, "a", buffering=1, encoding="utf-8")
|
||||
|
||||
|
||||
def now() -> str:
|
||||
"""Return the current UTC timestamp in ISO 8601 format."""
|
||||
|
||||
return datetime.now(timezone.utc).isoformat()
|
||||
|
||||
|
||||
def write(kind: str, payload: dict) -> None:
|
||||
"""Append a JSON record to the dump file.
|
||||
|
||||
Parameters:
|
||||
kind: Logical record type such as ``"packet"`` or ``"node"``.
|
||||
payload: Serializable payload containing the record body.
|
||||
"""
|
||||
|
||||
rec = {"ts": now(), "kind": kind, **payload}
|
||||
f.write(json.dumps(rec, ensure_ascii=False, default=str) + "\n")
|
||||
|
||||
|
||||
# Connect to the node
|
||||
iface: MeshInterface = SerialInterface(CONNECTION)
|
||||
|
||||
|
||||
# Packet callback: every RF/Mesh packet the node receives/decodes lands here
|
||||
def on_packet(packet, iface):
|
||||
"""Write packet metadata whenever the radio receives a frame.
|
||||
|
||||
Parameters:
|
||||
packet: Meshtastic packet object or dictionary.
|
||||
iface: Interface instance delivering the packet.
|
||||
"""
|
||||
|
||||
# 'packet' already includes decoded fields when available (portnum, payload, position, telemetry, etc.)
|
||||
write("packet", {"packet": packet})
|
||||
|
||||
|
||||
# Node callback: topology/metadata updates (nodeinfo, hops, lastHeard, etc.)
|
||||
def on_node(node, iface):
|
||||
"""Write node metadata updates produced by Meshtastic.
|
||||
|
||||
Parameters:
|
||||
node: Meshtastic node object or mapping.
|
||||
iface: Interface instance emitting the update.
|
||||
"""
|
||||
|
||||
write("node", {"node": node})
|
||||
|
||||
|
||||
iface.onReceive = on_packet
|
||||
pub.subscribe(on_node, "meshtastic.node")
|
||||
|
||||
# Write a little header so you know what you captured
|
||||
try:
|
||||
my = getattr(iface, "myInfo", None)
|
||||
write(
|
||||
"meta",
|
||||
{
|
||||
"event": "started",
|
||||
"port": CONNECTION,
|
||||
"my_node_num": getattr(my, "my_node_num", None) if my else None,
|
||||
},
|
||||
)
|
||||
except Exception as e:
|
||||
write("meta", {"event": "started", "port": CONNECTION, "error": str(e)})
|
||||
|
||||
|
||||
# Keep the process alive until Ctrl-C
|
||||
def _stop(signum, frame):
|
||||
"""Handle termination signals by flushing buffers and exiting."""
|
||||
|
||||
write("meta", {"event": "stopping"})
|
||||
try:
|
||||
try:
|
||||
pub.unsubscribe(on_node, "meshtastic.node")
|
||||
except Exception:
|
||||
pass
|
||||
iface.close()
|
||||
finally:
|
||||
f.close()
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
signal.signal(signal.SIGINT, _stop)
|
||||
signal.signal(signal.SIGTERM, _stop)
|
||||
|
||||
# Simple sleep loop; avoids busy-wait
|
||||
while True:
|
||||
time.sleep(1)
|
||||
@@ -0,0 +1,239 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""Minimal Meshtastic protobuf stubs for isolated unit testing."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import types
|
||||
from typing import Any, Callable, Dict, Tuple
|
||||
|
||||
|
||||
def _enum_value(name: str, mapping: Dict[str, int]) -> int:
|
||||
normalized = name.upper()
|
||||
if normalized not in mapping:
|
||||
raise KeyError(f"Unknown enum value: {name}")
|
||||
return mapping[normalized]
|
||||
|
||||
|
||||
def build(message_base, decode_error) -> Tuple[types.ModuleType, types.ModuleType]:
|
||||
"""Return ``(config_pb2, mesh_pb2)`` stubs built from protobuf shims."""
|
||||
|
||||
class _ProtoMessage(message_base):
|
||||
"""Base class implementing JSON round-tripping for protobuf stubs."""
|
||||
|
||||
_FIELD_ALIASES: Dict[str, str] = {}
|
||||
_FIELD_FACTORIES: Dict[str, Callable[[], "_ProtoMessage"]] = {}
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
object.__setattr__(self, "_fields", {})
|
||||
|
||||
def __setattr__(
|
||||
self, name: str, value: Any
|
||||
) -> None: # noqa: D401 - behaviour documented on base class
|
||||
object.__setattr__(self, name, value)
|
||||
if not name.startswith("_"):
|
||||
self._fields[name] = value
|
||||
|
||||
def __getattr__(self, name: str) -> Any:
|
||||
factories = getattr(self, "_FIELD_FACTORIES", {})
|
||||
if name in factories:
|
||||
value = factories[name]()
|
||||
self.__setattr__(name, value)
|
||||
return value
|
||||
raise AttributeError(name)
|
||||
|
||||
def _alias_for(self, name: str) -> str:
|
||||
return self._FIELD_ALIASES.get(name, name)
|
||||
|
||||
def _name_for(self, alias: str) -> str:
|
||||
reverse = getattr(self, "_FIELD_ALIASES", {})
|
||||
for key, candidate in reverse.items():
|
||||
if candidate == alias:
|
||||
return key
|
||||
return alias
|
||||
|
||||
def _to_dict(self) -> Dict[str, Any]:
|
||||
result: Dict[str, Any] = {}
|
||||
for name, value in self._fields.items():
|
||||
alias = self._alias_for(name)
|
||||
if isinstance(value, _ProtoMessage):
|
||||
result[alias] = value._to_dict()
|
||||
elif isinstance(value, list):
|
||||
result[alias] = [
|
||||
item._to_dict() if isinstance(item, _ProtoMessage) else item
|
||||
for item in value
|
||||
]
|
||||
else:
|
||||
result[alias] = value
|
||||
return result
|
||||
|
||||
def SerializeToString(self) -> bytes:
|
||||
"""Encode the message contents as a JSON byte string."""
|
||||
|
||||
return json.dumps(self._to_dict(), sort_keys=True).encode("utf-8")
|
||||
|
||||
def ParseFromString(self, payload: bytes) -> None:
|
||||
"""Populate the message from a JSON byte string."""
|
||||
|
||||
try:
|
||||
data = json.loads(payload.decode("utf-8"))
|
||||
except Exception as exc: # pragma: no cover - defensive guard
|
||||
raise decode_error(str(exc)) from exc
|
||||
self._load_from_dict(data)
|
||||
|
||||
def _load_from_dict(self, data: Dict[str, Any]) -> None:
|
||||
factories = getattr(self, "_FIELD_FACTORIES", {})
|
||||
for alias, value in data.items():
|
||||
name = self._name_for(alias)
|
||||
if name in factories and isinstance(value, dict):
|
||||
nested = getattr(self, name, None)
|
||||
if not isinstance(nested, _ProtoMessage):
|
||||
nested = factories[name]()
|
||||
object.__setattr__(self, name, nested)
|
||||
nested._load_from_dict(value)
|
||||
self._fields[name] = nested
|
||||
else:
|
||||
setattr(self, name, value)
|
||||
|
||||
def to_dict(self) -> Dict[str, Any]:
|
||||
"""Return a JSON-compatible representation of the message."""
|
||||
|
||||
return self._to_dict()
|
||||
|
||||
def ListFields(self):
|
||||
"""Mimic protobuf ``ListFields`` for the subset of tests used."""
|
||||
|
||||
from types import SimpleNamespace
|
||||
|
||||
entries = []
|
||||
for name, value in self._fields.items():
|
||||
descriptor = SimpleNamespace(name=name)
|
||||
entries.append((descriptor, value))
|
||||
return entries
|
||||
|
||||
def CopyFrom(self, other: "_ProtoMessage") -> None:
|
||||
"""Populate this message with values from ``other``."""
|
||||
|
||||
if not isinstance(other, _ProtoMessage):
|
||||
raise TypeError("CopyFrom expects another protobuf message")
|
||||
self._fields.clear()
|
||||
for name, value in other._fields.items():
|
||||
if isinstance(value, _ProtoMessage):
|
||||
copied = type(value)()
|
||||
copied.CopyFrom(value)
|
||||
setattr(self, name, copied)
|
||||
elif isinstance(value, list):
|
||||
converted = []
|
||||
for item in value:
|
||||
if isinstance(item, _ProtoMessage):
|
||||
nested = type(item)()
|
||||
nested.CopyFrom(item)
|
||||
converted.append(nested)
|
||||
else:
|
||||
converted.append(item)
|
||||
setattr(self, name, converted)
|
||||
else:
|
||||
setattr(self, name, value)
|
||||
|
||||
class _DeviceMetrics(_ProtoMessage):
|
||||
_FIELD_ALIASES = {
|
||||
"battery_level": "batteryLevel",
|
||||
"voltage": "voltage",
|
||||
"channel_utilization": "channelUtilization",
|
||||
"air_util_tx": "airUtilTx",
|
||||
"uptime_seconds": "uptimeSeconds",
|
||||
}
|
||||
|
||||
class _Position(_ProtoMessage):
|
||||
_FIELD_ALIASES = {
|
||||
"latitude_i": "latitudeI",
|
||||
"longitude_i": "longitudeI",
|
||||
"location_source": "locationSource",
|
||||
}
|
||||
|
||||
class LocSource:
|
||||
_VALUES = {
|
||||
"LOC_UNSET": 0,
|
||||
"LOC_INTERNAL": 1,
|
||||
"LOC_EXTERNAL": 2,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def Value(cls, name: str) -> int:
|
||||
return _enum_value(name, cls._VALUES)
|
||||
|
||||
class _User(_ProtoMessage):
|
||||
_FIELD_ALIASES = {
|
||||
"short_name": "shortName",
|
||||
"long_name": "longName",
|
||||
"hw_model": "hwModel",
|
||||
}
|
||||
|
||||
class _NodeInfo(_ProtoMessage):
|
||||
_FIELD_ALIASES = {
|
||||
"last_heard": "lastHeard",
|
||||
"is_favorite": "isFavorite",
|
||||
"hops_away": "hopsAway",
|
||||
}
|
||||
_FIELD_FACTORIES = {
|
||||
"user": _User,
|
||||
"device_metrics": _DeviceMetrics,
|
||||
"position": _Position,
|
||||
}
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
|
||||
class _HardwareModel:
|
||||
_VALUES = {
|
||||
"UNKNOWN": 0,
|
||||
"TBEAM": 1,
|
||||
"HELTEC": 2,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def Value(cls, name: str) -> int:
|
||||
return _enum_value(name, cls._VALUES)
|
||||
|
||||
mesh_pb2 = types.ModuleType("mesh_pb2")
|
||||
mesh_pb2.NodeInfo = _NodeInfo
|
||||
mesh_pb2.User = _User
|
||||
mesh_pb2.Position = _Position
|
||||
mesh_pb2.DeviceMetrics = _DeviceMetrics
|
||||
mesh_pb2.HardwareModel = _HardwareModel
|
||||
|
||||
class _RoleEnum:
|
||||
_VALUES = {
|
||||
"UNKNOWN": 0,
|
||||
"CLIENT": 1,
|
||||
"REPEATER": 2,
|
||||
"ROUTER": 3,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def Value(cls, name: str) -> int:
|
||||
return _enum_value(name, cls._VALUES)
|
||||
|
||||
class _DeviceConfig:
|
||||
Role = _RoleEnum
|
||||
|
||||
class _Config:
|
||||
DeviceConfig = _DeviceConfig
|
||||
|
||||
config_pb2 = types.ModuleType("config_pb2")
|
||||
config_pb2.Config = _Config
|
||||
|
||||
return config_pb2, mesh_pb2
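A minimal sketch of how the factory might be wired into a test; the shim classes below are hypothetical stand-ins for the real protobuf base types:
class _FakeMessageBase:  # stand-in for google.protobuf.message.Message
    pass

class _FakeDecodeError(Exception):  # stand-in for google.protobuf.message.DecodeError
    pass

config_pb2, mesh_pb2 = build(_FakeMessageBase, _FakeDecodeError)
info = mesh_pb2.NodeInfo()
info.user.long_name = "Example Node"             # nested User created lazily via _FIELD_FACTORIES
clone = mesh_pb2.NodeInfo()
clone.ParseFromString(info.SerializeToString())  # JSON round-trip instead of the real wire format
assert clone.user.long_name == "Example Node"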
|
||||
+6
-100
@@ -11,9 +11,11 @@
|
||||
"rssi": -121,
|
||||
"hop_limit": 1,
|
||||
"snr": -13.25,
|
||||
"lora_freq": 915,
|
||||
"modem_preset": "LONG_FAST",
|
||||
"channel_name": "SpecChannel",
|
||||
"node": {
|
||||
"snr": -13.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!bba83318",
|
||||
"num": 3148362520,
|
||||
"short_name": "BerF",
|
||||
@@ -51,9 +53,11 @@
|
||||
"rssi": -117,
|
||||
"hop_limit": 3,
|
||||
"snr": -12.0,
|
||||
"lora_freq": 868,
|
||||
"modem_preset": "MEDIUM_SLOW",
|
||||
"channel_name": "SpecChannel",
|
||||
"node": {
|
||||
"snr": -12.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!43b6e530",
|
||||
"num": 1136059696,
|
||||
"short_name": "FFSR",
|
||||
@@ -93,7 +97,6 @@
|
||||
"snr": -13.5,
|
||||
"node": {
|
||||
"snr": 11.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!d42e18e8",
|
||||
"num": 3559790824,
|
||||
"short_name": "RRun",
|
||||
@@ -133,7 +136,6 @@
|
||||
"snr": -13.0,
|
||||
"node": {
|
||||
"snr": 11.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!d42e18e8",
|
||||
"num": 3559790824,
|
||||
"short_name": "RRun",
|
||||
@@ -173,7 +175,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!194a7351",
|
||||
"num": 424309585,
|
||||
"short_name": "l5y7",
|
||||
@@ -213,7 +214,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": 11.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!4ed36bd0",
|
||||
"num": 1322478544,
|
||||
"short_name": "RDM",
|
||||
@@ -253,7 +253,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!194a7351",
|
||||
"num": 424309585,
|
||||
"short_name": "l5y7",
|
||||
@@ -293,7 +292,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!194a7351",
|
||||
"num": 424309585,
|
||||
"short_name": "l5y7",
|
||||
@@ -333,7 +331,6 @@
|
||||
"snr": 12.0,
|
||||
"node": {
|
||||
"snr": 12.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!b03c97a4",
|
||||
"num": 2956760996,
|
||||
"short_name": "BLN1",
|
||||
@@ -373,7 +370,6 @@
|
||||
"snr": -15.0,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9eeb25ec",
|
||||
"num": 2666210796,
|
||||
"short_name": "25ec",
|
||||
@@ -413,7 +409,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": 11.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!f9b0938c",
|
||||
"num": 4189098892,
|
||||
"short_name": "Ed-1",
|
||||
@@ -453,7 +448,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": 10.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!6c73bf84",
|
||||
"num": 1819524996,
|
||||
"short_name": "ts1",
|
||||
@@ -493,7 +487,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -533,7 +526,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!194a7351",
|
||||
"num": 424309585,
|
||||
"short_name": "l5y7",
|
||||
@@ -573,7 +565,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!6cf821fb",
|
||||
"num": 1828200955,
|
||||
"short_name": "OKP1",
|
||||
@@ -613,7 +604,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!6cf821fb",
|
||||
"num": 1828200955,
|
||||
"short_name": "OKP1",
|
||||
@@ -653,7 +643,6 @@
|
||||
"snr": 10.5,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -693,7 +682,6 @@
|
||||
"snr": 10.25,
|
||||
"node": {
|
||||
"snr": 10.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!db2b23f4",
|
||||
"num": 3677037556,
|
||||
"short_name": "Eagl",
|
||||
@@ -733,7 +721,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!6cf821fb",
|
||||
"num": 1828200955,
|
||||
"short_name": "OKP1",
|
||||
@@ -773,7 +760,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -813,7 +799,6 @@
|
||||
"snr": -11.75,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!177cfa26",
|
||||
"num": 394066470,
|
||||
"short_name": "lun1",
|
||||
@@ -853,7 +838,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": 10.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9ea0c780",
|
||||
"num": 2661336960,
|
||||
"short_name": "nguE",
|
||||
@@ -893,7 +877,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -933,7 +916,6 @@
|
||||
"snr": 11.5,
|
||||
"node": {
|
||||
"snr": 11.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!e80cda12",
|
||||
"num": 3893156370,
|
||||
"short_name": "mowW",
|
||||
@@ -973,7 +955,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!da635e24",
|
||||
"num": 3663945252,
|
||||
"short_name": "LAN",
|
||||
@@ -1013,7 +994,6 @@
|
||||
"snr": 11.5,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -1053,7 +1033,6 @@
|
||||
"snr": 11.5,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!da635e24",
|
||||
"num": 3663945252,
|
||||
"short_name": "LAN",
|
||||
@@ -1093,7 +1072,6 @@
|
||||
"snr": -11.75,
|
||||
"node": {
|
||||
"snr": -9.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!a0cb1608",
|
||||
"num": 2697664008,
|
||||
"short_name": "KBV5",
|
||||
@@ -1133,7 +1111,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": 10.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!bcf10936",
|
||||
"num": 3169913142,
|
||||
"short_name": "0936",
|
||||
@@ -1173,7 +1150,6 @@
|
||||
"snr": 11.75,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!194a7351",
|
||||
"num": 424309585,
|
||||
"short_name": "l5y7",
|
||||
@@ -1213,7 +1189,6 @@
|
||||
"snr": -13.25,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!a0cc6904",
|
||||
"num": 2697750788,
|
||||
"short_name": "Kdû",
|
||||
@@ -1253,7 +1228,6 @@
|
||||
"snr": 10.5,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!da635e24",
|
||||
"num": 3663945252,
|
||||
"short_name": "LAN",
|
||||
@@ -1293,7 +1267,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9eeb25ec",
|
||||
"num": 2666210796,
|
||||
"short_name": "25ec",
|
||||
@@ -1333,7 +1306,6 @@
|
||||
"snr": -14.0,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!a0cc6904",
|
||||
"num": 2697750788,
|
||||
"short_name": "Kdû",
|
||||
@@ -1373,7 +1345,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9eeb25ec",
|
||||
"num": 2666210796,
|
||||
"short_name": "25ec",
|
||||
@@ -1413,7 +1384,6 @@
|
||||
"snr": 11.5,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9eeb25ec",
|
||||
"num": 2666210796,
|
||||
"short_name": "25ec",
|
||||
@@ -1453,7 +1423,6 @@
|
||||
"snr": 11.75,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9eeb25ec",
|
||||
"num": 2666210796,
|
||||
"short_name": "25ec",
|
||||
@@ -1493,7 +1462,6 @@
|
||||
"snr": 11.75,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!194a7351",
|
||||
"num": 424309585,
|
||||
"short_name": "l5y7",
|
||||
@@ -1533,7 +1501,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!03b9ca11",
|
||||
"num": 62507537,
|
||||
"short_name": "ca11",
|
||||
@@ -1573,7 +1540,6 @@
|
||||
"snr": 7.5,
|
||||
"node": {
|
||||
"snr": 10.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!db2b23f4",
|
||||
"num": 3677037556,
|
||||
"short_name": "Eagl",
|
||||
@@ -1613,7 +1579,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!194a7351",
|
||||
"num": 424309585,
|
||||
"short_name": "l5y7",
|
||||
@@ -1653,7 +1618,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": 10.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!db2b23f4",
|
||||
"num": 3677037556,
|
||||
"short_name": "Eagl",
|
||||
@@ -1693,7 +1657,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -1733,7 +1696,6 @@
|
||||
"snr": 10.0,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!da635e24",
|
||||
"num": 3663945252,
|
||||
"short_name": "LAN",
|
||||
@@ -1773,7 +1735,6 @@
|
||||
"snr": 10.5,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -1813,7 +1774,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!a0cc6904",
|
||||
"num": 2697750788,
|
||||
"short_name": "Kdû",
|
||||
@@ -1853,7 +1813,6 @@
|
||||
"snr": -12.25,
|
||||
"node": {
|
||||
"snr": -12.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!2f945044",
|
||||
"num": 798249028,
|
||||
"short_name": "BND",
|
||||
@@ -1893,7 +1852,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -1933,7 +1891,6 @@
|
||||
"snr": 10.5,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9ee71c38",
|
||||
"num": 2665946168,
|
||||
"short_name": "1c38",
|
||||
@@ -1973,7 +1930,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -2013,7 +1969,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!194a7351",
|
||||
"num": 424309585,
|
||||
"short_name": "l5y7",
|
||||
@@ -2053,7 +2008,6 @@
|
||||
"snr": 10.5,
|
||||
"node": {
|
||||
"snr": -6.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!7c5b0920",
|
||||
"num": 2086340896,
|
||||
"short_name": "FFTB",
|
||||
@@ -2093,7 +2047,6 @@
|
||||
"snr": 10.25,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!194a7351",
|
||||
"num": 424309585,
|
||||
"short_name": "l5y7",
|
||||
@@ -2133,7 +2086,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": 10.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9ea0c780",
|
||||
"num": 2661336960,
|
||||
"short_name": "nguE",
|
||||
@@ -2173,7 +2125,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": -12.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!0910c922",
|
||||
"num": 152095010,
|
||||
"short_name": "c922",
|
||||
@@ -2213,7 +2164,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -2253,7 +2203,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 11.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!9ee71430",
|
||||
"num": 2665944112,
|
||||
"short_name": "FiSp",
|
||||
@@ -2293,7 +2242,6 @@
|
||||
"snr": 11.5,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!194a7351",
|
||||
"num": 424309585,
|
||||
"short_name": "l5y7",
|
||||
@@ -2333,7 +2281,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": 10.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!bcf10936",
|
||||
"num": 3169913142,
|
||||
"short_name": "0936",
|
||||
@@ -2373,7 +2320,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 11.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!16ced364",
|
||||
"num": 382653284,
|
||||
"short_name": "Pat",
|
||||
@@ -2413,7 +2359,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9ee71c38",
|
||||
"num": 2665946168,
|
||||
"short_name": "1c38",
|
||||
@@ -2453,7 +2398,6 @@
|
||||
"snr": 10.5,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9ee71c38",
|
||||
"num": 2665946168,
|
||||
"short_name": "1c38",
|
||||
@@ -2493,7 +2437,6 @@
|
||||
"snr": 10.25,
|
||||
"node": {
|
||||
"snr": 10.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!a3deea53",
|
||||
"num": 2749295187,
|
||||
"short_name": "🐸",
|
||||
@@ -2533,7 +2476,6 @@
|
||||
"snr": 9.0,
|
||||
"node": {
|
||||
"snr": 10.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9ea0c780",
|
||||
"num": 2661336960,
|
||||
"short_name": "nguE",
|
||||
@@ -2573,7 +2515,6 @@
|
||||
"snr": 11.5,
|
||||
"node": {
|
||||
"snr": -13.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!bba83318",
|
||||
"num": 3148362520,
|
||||
"short_name": "BerF",
|
||||
@@ -2613,7 +2554,6 @@
|
||||
"snr": 9.25,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9ee71c38",
|
||||
"num": 2665946168,
|
||||
"short_name": "1c38",
|
||||
@@ -2653,7 +2593,6 @@
|
||||
"snr": 10.25,
|
||||
"node": {
|
||||
"snr": 11.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!e80cda12",
|
||||
"num": 3893156370,
|
||||
"short_name": "mowW",
|
||||
@@ -2693,7 +2632,6 @@
|
||||
"snr": -5.0,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!a0cc6904",
|
||||
"num": 2697750788,
|
||||
"short_name": "Kdû",
|
||||
@@ -2733,7 +2671,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 11.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!e80cda12",
|
||||
"num": 3893156370,
|
||||
"short_name": "mowW",
|
||||
@@ -2773,7 +2710,6 @@
|
||||
"snr": 0.75,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!da635e24",
|
||||
"num": 3663945252,
|
||||
"short_name": "LAN",
|
||||
@@ -2813,7 +2749,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -2853,7 +2788,6 @@
|
||||
"snr": 11.5,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -2893,7 +2827,6 @@
|
||||
"snr": 10.0,
|
||||
"node": {
|
||||
"snr": 11.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!16ced364",
|
||||
"num": 382653284,
|
||||
"short_name": "Pat",
|
||||
@@ -2933,7 +2866,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": -9.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!a0cb1608",
|
||||
"num": 2697664008,
|
||||
"short_name": "KBV5",
|
||||
@@ -2973,7 +2905,6 @@
|
||||
"snr": 9.5,
|
||||
"node": {
|
||||
"snr": -9.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!a0cb1608",
|
||||
"num": 2697664008,
|
||||
"short_name": "KBV5",
|
||||
@@ -3013,7 +2944,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!da635e24",
|
||||
"num": 3663945252,
|
||||
"short_name": "LAN",
|
||||
@@ -3053,7 +2983,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": -12.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!43b6e530",
|
||||
"num": 1136059696,
|
||||
"short_name": "FFSR",
|
||||
@@ -3093,7 +3022,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 11.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!e80cda12",
|
||||
"num": 3893156370,
|
||||
"short_name": "mowW",
|
||||
@@ -3133,7 +3061,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!da635e24",
|
||||
"num": 3663945252,
|
||||
"short_name": "LAN",
|
||||
@@ -3173,7 +3100,6 @@
|
||||
"snr": 10.25,
|
||||
"node": {
|
||||
"snr": 11.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!16ced364",
|
||||
"num": 382653284,
|
||||
"short_name": "Pat",
|
||||
@@ -3213,7 +3139,6 @@
|
||||
"snr": 10.5,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!da635e24",
|
||||
"num": 3663945252,
|
||||
"short_name": "LAN",
|
||||
@@ -3253,7 +3178,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!194a7351",
|
||||
"num": 424309585,
|
||||
"short_name": "l5y7",
|
||||
@@ -3293,7 +3217,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 11.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!abbdf3f7",
|
||||
"num": 2881352695,
|
||||
"short_name": "f3f7",
|
||||
@@ -3333,7 +3256,6 @@
|
||||
"snr": 10.5,
|
||||
"node": {
|
||||
"snr": 10.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!c0c32348",
|
||||
"num": 3234014024,
|
||||
"short_name": "CooP",
|
||||
@@ -3373,7 +3295,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 11.25,
|
||||
"raw_json": null,
|
||||
"node_id": "!16ced364",
|
||||
"num": 382653284,
|
||||
"short_name": "Pat",
|
||||
@@ -3413,7 +3334,6 @@
|
||||
"snr": 10.5,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -3453,7 +3373,6 @@
|
||||
"snr": -12.5,
|
||||
"node": {
|
||||
"snr": -9.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!a0cb1608",
|
||||
"num": 2697664008,
|
||||
"short_name": "KBV5",
|
||||
@@ -3493,7 +3412,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!da635e24",
|
||||
"num": 3663945252,
|
||||
"short_name": "LAN",
|
||||
@@ -3533,7 +3451,6 @@
|
||||
"snr": -8.75,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -3573,7 +3490,6 @@
|
||||
"snr": 10.25,
|
||||
"node": {
|
||||
"snr": 10.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!5d823fb1",
|
||||
"num": 1568817073,
|
||||
"short_name": "3fb1",
|
||||
@@ -3613,7 +3529,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": -12.0,
|
||||
"raw_json": null,
|
||||
"node_id": "!43b6e530",
|
||||
"num": 1136059696,
|
||||
"short_name": "FFSR",
|
||||
@@ -3653,7 +3568,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 10.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!849a8ba4",
|
||||
"num": 2224720804,
|
||||
"short_name": "MGN1",
|
||||
@@ -3693,7 +3607,6 @@
|
||||
"snr": -13.25,
|
||||
"node": {
|
||||
"snr": 10.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!849a8ba4",
|
||||
"num": 2224720804,
|
||||
"short_name": "MGN1",
|
||||
@@ -3733,7 +3646,6 @@
|
||||
"snr": 10.75,
|
||||
"node": {
|
||||
"snr": 10.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9c93a2df",
|
||||
"num": 2626921183,
|
||||
"short_name": "xaRa",
|
||||
@@ -3773,7 +3685,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9ee71c38",
|
||||
"num": 2665946168,
|
||||
"short_name": "1c38",
|
||||
@@ -3813,7 +3724,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 11.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!9ee71c38",
|
||||
"num": 2665946168,
|
||||
"short_name": "1c38",
|
||||
@@ -3853,7 +3763,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 10.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!5d823fb1",
|
||||
"num": 1568817073,
|
||||
"short_name": "3fb1",
|
||||
@@ -3893,7 +3802,6 @@
|
||||
"snr": 11.0,
|
||||
"node": {
|
||||
"snr": 10.5,
|
||||
"raw_json": null,
|
||||
"node_id": "!6c73bf84",
|
||||
"num": 1819524996,
|
||||
"short_name": "ts1",
|
||||
@@ -3933,7 +3841,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": null,
|
||||
"raw_json": null,
|
||||
"node_id": null,
|
||||
"num": null,
|
||||
"short_name": null,
|
||||
@@ -3973,7 +3880,6 @@
|
||||
"snr": 11.25,
|
||||
"node": {
|
||||
"snr": 10.75,
|
||||
"raw_json": null,
|
||||
"node_id": "!194a7351",
|
||||
"num": 424309585,
|
||||
"short_name": "l5y7",
|
||||
|
||||
@@ -0,0 +1,20 @@
[
  {
    "node_id": "!7c5b0920",
    "rx_time": 1758884186,
    "node_broadcast_interval_secs": 1800,
    "last_sent_by": "!9e99f8c0",
    "neighbors": [
      { "node_id": "!2b22accc", "snr": -6.5, "rx_time": 1758884106 },
      { "node_id": "!43ba26d0", "snr": -5.0, "rx_time": 1758884120 },
      { "node_id": "!69ba6f71", "snr": -13.0, "rx_time": 1758884135 },
      { "node_id": "!fa848384", "snr": -14.75, "rx_time": 1758884150 },
      { "node_id": "!da6a35b4", "snr": -6.5, "rx_time": 1758884165 }
    ]
  },
  {
    "node_id": "!cafebabe",
    "rx_time": 1758883200,
    "neighbors": []
  }
]
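The fixture above exercises neighbour-info handling: each entry carries the reporting node, its advertised broadcast interval, and per-neighbour SNR samples. A minimal Ruby sketch that loads a file shaped like this and summarises it (the fixture path is an assumption, not part of the repository layout shown here):

require "json"

# Load a neighbour-info fixture shaped like the sample above.
entries = JSON.parse(File.read("spec/fixtures/neighbors.json"))

entries.each do |entry|
  neighbors = entry.fetch("neighbors", [])
  if neighbors.empty?
    puts "#{entry["node_id"]}: no neighbours reported"
    next
  end

  # Pick the strongest link by SNR as a quick sanity check.
  best = neighbors.max_by { |n| n["snr"] }
  puts "#{entry["node_id"]}: #{neighbors.size} neighbours, best SNR #{best["snr"]} dB (#{best["node_id"]})"
end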
+567
-207
File diff suppressed because it is too large
@@ -0,0 +1,84 @@
[
  {
    "id": 1256091342,
    "node_id": "!9e95cf60",
    "from_id": "!9e95cf60",
    "to_id": "^all",
    "rx_time": 1758024300,
    "rx_iso": "2025-09-16T12:05:00Z",
    "telemetry_time": 1758024300,
    "channel": 0,
    "portnum": "TELEMETRY_APP",
    "battery_level": 101,
    "bitfield": 1,
    "payload_b64": "DTVr0mgSFQhlFQIrh0AdJb8YPyXYFSA9KJTPEg==",
    "device_metrics": {
      "batteryLevel": 101,
      "voltage": 4.224,
      "channelUtilization": 0.59666663,
      "airUtilTx": 0.03908333,
      "uptimeSeconds": 305044
    },
    "raw": {
      "device_metrics": {
        "battery_level": 101,
        "voltage": 4.224,
        "channel_utilization": 0.59666663,
        "air_util_tx": 0.03908333,
        "uptime_seconds": 305044
      }
    }
  },
  {
    "id": 2817720548,
    "node_id": "!2a2a2a2a",
    "from_id": "!2a2a2a2a",
    "to_id": "^all",
    "rx_time": 1758024400,
    "rx_iso": "2025-09-16T12:06:40Z",
    "telemetry_time": 1758024390,
    "channel": 0,
    "portnum": "TELEMETRY_APP",
    "bitfield": 1,
    "environment_metrics": {
      "temperature": 21.98,
      "relativeHumidity": 39.475586,
      "barometricPressure": 1017.8353
    },
    "raw": {
      "environment_metrics": {
        "temperature": 21.98,
        "relative_humidity": 39.475586,
        "barometric_pressure": 1017.8353
      }
    }
  },
  {
    "id": 345678901,
    "node_id": "!1234abcd",
    "from_id": "!1234abcd",
    "node_num": 305441741,
    "to_id": "^all",
    "rx_time": 1758024500,
    "rx_iso": "2025-09-16T12:08:20Z",
    "telemetry_time": 1758024450,
    "channel": 1,
    "portnum": "TELEMETRY_APP",
    "payload_b64": "AAEC",
    "device_metrics": {
      "battery_level": 58.5,
      "voltage": 3.92,
      "channel_utilization": 0.284,
      "air_util_tx": 0.051,
      "uptime_seconds": 86400
    },
    "local_stats": {
      "numPacketsTx": 1280,
      "numPacketsRx": 1425,
      "numClients": 6,
      "numNodes": 18,
      "freeHeap": 21344,
      "heapLowWater": 19876
    }
  }
]
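Each telemetry fixture entry mirrors what the data daemon submits: flattened metrics, a `raw` copy with snake_case keys, and, where available, the original encoded packet as `payload_b64`. A hedged Ruby sketch for inspecting such an entry (the fixture path is an assumption; `payload_b64` is only decoded back to bytes here, since parsing it further would need the Meshtastic protobuf definitions, which are not part of this excerpt):

require "json"
require "base64"

entry = JSON.parse(File.read("spec/fixtures/telemetry.json")).first  # path is an assumption

# Device and environment metrics are optional and independent of each other.
if (metrics = entry["device_metrics"])
  puts "battery: #{metrics["batteryLevel"]}%  voltage: #{metrics["voltage"]} V"
end
if (env = entry["environment_metrics"])
  puts "temperature: #{env["temperature"]} °C"
end

# payload_b64 carries the raw encoded telemetry packet; decode to bytes only.
raw_bytes = Base64.decode64(entry["payload_b64"]) if entry["payload_b64"]
puts "raw payload: #{raw_bytes.bytesize} bytes" if raw_bytes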
+1844
-37
File diff suppressed because it is too large
@@ -0,0 +1,86 @@
# syntax=docker/dockerfile:1.6

# Main application builder stage
FROM ruby:3.3-alpine AS builder

# Ensure native extensions are built against musl libc rather than
# using glibc precompiled binaries (which fail on Alpine).
ENV BUNDLE_FORCE_RUBY_PLATFORM=true

# Install build dependencies and SQLite3
RUN apk add --no-cache \
    build-base \
    sqlite-dev \
    linux-headers \
    pkgconfig

# Set working directory
WORKDIR /app

# Copy Gemfile and install dependencies
COPY web/Gemfile web/Gemfile.lock* ./

# Install gems with SQLite3 support
RUN bundle config set --local force_ruby_platform true && \
    bundle config set --local without 'development test' && \
    bundle install --jobs=4 --retry=3

# Production stage
FROM ruby:3.3-alpine AS production

# Install runtime dependencies
RUN apk add --no-cache \
    sqlite \
    tzdata \
    curl

# Create non-root user
RUN addgroup -g 1000 -S potatomesh && \
    adduser -u 1000 -S potatomesh -G potatomesh

# Set working directory
WORKDIR /app

# Copy installed gems from builder stage
COPY --from=builder /usr/local/bundle /usr/local/bundle

# Copy application code (excluding the Dockerfile which is not required at runtime)
COPY --chown=potatomesh:potatomesh web/app.rb ./
COPY --chown=potatomesh:potatomesh web/app.sh ./
COPY --chown=potatomesh:potatomesh web/Gemfile ./
COPY --chown=potatomesh:potatomesh web/Gemfile.lock* ./
COPY --chown=potatomesh:potatomesh web/lib ./lib
COPY --chown=potatomesh:potatomesh web/spec ./spec
COPY --chown=potatomesh:potatomesh web/public ./public
COPY --chown=potatomesh:potatomesh web/views ./views
COPY --chown=potatomesh:potatomesh web/scripts ./scripts

# Copy SQL schema files from data directory
COPY --chown=potatomesh:potatomesh data/*.sql /data/

# Create data and configuration directories with correct ownership
RUN mkdir -p /app/.local/share/potato-mesh \
    && mkdir -p /app/.config/potato-mesh/well-known \
    && chown -R potatomesh:potatomesh /app/.local/share /app/.config

# Switch to non-root user
USER potatomesh

# Expose port
EXPOSE 41447

# Default environment variables (can be overridden by host)
ENV RACK_ENV=production \
    APP_ENV=production \
    XDG_DATA_HOME=/app/.local/share \
    XDG_CONFIG_HOME=/app/.config \
    SITE_NAME="PotatoMesh Demo" \
    CHANNEL="#LongFast" \
    FREQUENCY="915MHz" \
    MAP_CENTER="38.761944,-27.090833" \
    MAX_DISTANCE=42 \
    CONTACT_LINK="#potatomesh:dod.ngo" \
    DEBUG=0

# Start the application
CMD ["ruby", "app.rb", "-p", "41447", "-o", "0.0.0.0"]
@@ -18,6 +18,7 @@ gem "sinatra", "~> 4.0"
gem "sqlite3", "~> 1.7"
gem "rackup", "~> 2.2"
gem "puma", "~> 7.0"
gem "prometheus-client"

group :test do
  gem "rspec", "~> 3.12"
+2
-537
@@ -1,5 +1,3 @@
|
||||
# Copyright (C) 2025 l5yth
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
@@ -14,539 +12,6 @@
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
# Main Sinatra application exposing the Meshtastic node and message archive.
|
||||
# The daemon in +data/mesh.py+ pushes updates into the SQLite database that
|
||||
# this web process reads from, providing JSON APIs and a rendered HTML index
|
||||
# page for human visitors.
|
||||
require "sinatra"
|
||||
require "json"
|
||||
require "sqlite3"
|
||||
require "fileutils"
|
||||
require "logger"
|
||||
require "rack/utils"
|
||||
require_relative "lib/potato_mesh/application"
|
||||
|
||||
DB_PATH = ENV.fetch("MESH_DB", File.join(__dir__, "../data/mesh.db"))
|
||||
DB_BUSY_TIMEOUT_MS = ENV.fetch("DB_BUSY_TIMEOUT_MS", "5000").to_i
|
||||
DB_BUSY_MAX_RETRIES = ENV.fetch("DB_BUSY_MAX_RETRIES", "5").to_i
|
||||
DB_BUSY_RETRY_DELAY = ENV.fetch("DB_BUSY_RETRY_DELAY", "0.05").to_f
|
||||
WEEK_SECONDS = 7 * 24 * 60 * 60
|
||||
DEFAULT_MAX_JSON_BODY_BYTES = 1_048_576
|
||||
MAX_JSON_BODY_BYTES = begin
|
||||
raw = ENV.fetch("MAX_JSON_BODY_BYTES", DEFAULT_MAX_JSON_BODY_BYTES.to_s)
|
||||
value = Integer(raw, 10)
|
||||
value.positive? ? value : DEFAULT_MAX_JSON_BODY_BYTES
|
||||
rescue ArgumentError
|
||||
DEFAULT_MAX_JSON_BODY_BYTES
|
||||
end
|
||||
|
||||
set :public_folder, File.join(__dir__, "public")
|
||||
set :views, File.join(__dir__, "views")
|
||||
|
||||
SITE_NAME = ENV.fetch("SITE_NAME", "Meshtastic Berlin")
|
||||
DEFAULT_CHANNEL = ENV.fetch("DEFAULT_CHANNEL", "#MediumFast")
|
||||
DEFAULT_FREQUENCY = ENV.fetch("DEFAULT_FREQUENCY", "868MHz")
|
||||
MAP_CENTER_LAT = ENV.fetch("MAP_CENTER_LAT", "52.502889").to_f
|
||||
MAP_CENTER_LON = ENV.fetch("MAP_CENTER_LON", "13.404194").to_f
|
||||
MAX_NODE_DISTANCE_KM = ENV.fetch("MAX_NODE_DISTANCE_KM", "137").to_f
|
||||
MATRIX_ROOM = ENV.fetch("MATRIX_ROOM", "#meshtastic-berlin:matrix.org")
|
||||
DEBUG = ENV["DEBUG"] == "1"
|
||||
|
||||
class << Sinatra::Application
|
||||
def apply_logger_level!
|
||||
logger = settings.logger
|
||||
return unless logger
|
||||
|
||||
logger.level = DEBUG ? Logger::DEBUG : Logger::WARN
|
||||
end
|
||||
end
|
||||
|
||||
Sinatra::Application.configure do
|
||||
app_logger = Logger.new($stdout)
|
||||
set :logger, app_logger
|
||||
use Rack::CommonLogger, app_logger
|
||||
Sinatra::Application.apply_logger_level!
|
||||
end
|
||||
|
||||
# Open the SQLite database with a configured busy timeout.
|
||||
#
|
||||
# @param readonly [Boolean] whether to open the database in read-only mode.
|
||||
# @return [SQLite3::Database]
|
||||
def open_database(readonly: false)
|
||||
SQLite3::Database.new(DB_PATH, readonly: readonly).tap do |db|
|
||||
db.busy_timeout = DB_BUSY_TIMEOUT_MS
|
||||
end
|
||||
end
|
||||
|
||||
# Execute the provided block, retrying when SQLite reports the database is
|
||||
# temporarily locked.
|
||||
#
|
||||
# @param max_retries [Integer] maximum number of retries after the initial
|
||||
# attempt.
|
||||
# @param base_delay [Float] base delay in seconds for linear backoff between
|
||||
# retries.
|
||||
# @yieldreturn [Object] result of the block once it succeeds.
|
||||
def with_busy_retry(max_retries: DB_BUSY_MAX_RETRIES, base_delay: DB_BUSY_RETRY_DELAY)
|
||||
attempts = 0
|
||||
begin
|
||||
yield
|
||||
rescue SQLite3::BusyException
|
||||
attempts += 1
|
||||
raise if attempts > max_retries
|
||||
sleep(base_delay * attempts)
|
||||
retry
|
||||
end
|
||||
end
|
||||
|
||||
# Checks whether the SQLite database already contains the required tables.
|
||||
#
|
||||
# @return [Boolean] true when both +nodes+ and +messages+ tables exist.
|
||||
def db_schema_present?
|
||||
return false unless File.exist?(DB_PATH)
|
||||
db = open_database(readonly: true)
|
||||
tables = db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages')").flatten
|
||||
tables.include?("nodes") && tables.include?("messages")
|
||||
rescue SQLite3::Exception
|
||||
false
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
# Create the SQLite database and seed it with the node and message schemas.
|
||||
#
|
||||
# @return [void]
|
||||
def init_db
|
||||
FileUtils.mkdir_p(File.dirname(DB_PATH))
|
||||
db = open_database
|
||||
%w[nodes messages].each do |schema|
|
||||
sql_file = File.expand_path("../data/#{schema}.sql", __dir__)
|
||||
db.execute_batch(File.read(sql_file))
|
||||
end
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
init_db unless db_schema_present?
|
||||
|
||||
# Retrieve recently heard nodes ordered by their last contact time.
|
||||
#
|
||||
# @param limit [Integer] maximum number of rows returned.
|
||||
# @return [Array<Hash>] collection of node records formatted for the API.
|
||||
def query_nodes(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
now = Time.now.to_i
|
||||
min_last_heard = now - WEEK_SECONDS
|
||||
rows = db.execute <<~SQL, [min_last_heard, limit]
|
||||
SELECT node_id, short_name, long_name, hw_model, role, snr,
|
||||
battery_level, voltage, last_heard, first_heard,
|
||||
uptime_seconds, channel_utilization, air_util_tx,
|
||||
position_time, latitude, longitude, altitude
|
||||
FROM nodes
|
||||
WHERE last_heard >= ?
|
||||
ORDER BY last_heard DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
rows.each do |r|
|
||||
r["role"] ||= "CLIENT"
|
||||
lh = r["last_heard"]&.to_i
|
||||
pt = r["position_time"]&.to_i
|
||||
lh = now if lh && lh > now
|
||||
pt = nil if pt && pt > now
|
||||
r["last_heard"] = lh
|
||||
r["position_time"] = pt
|
||||
r["last_seen_iso"] = Time.at(lh).utc.iso8601 if lh
|
||||
r["pos_time_iso"] = Time.at(pt).utc.iso8601 if pt
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
# GET /api/nodes
|
||||
#
|
||||
# Returns a JSON array of the most recently heard nodes.
|
||||
get "/api/nodes" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_nodes(limit).to_json
|
||||
end
|
||||
|
||||
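For reference, this endpoint can be exercised with a plain Net::HTTP client; the host and port below assume a locally running instance and are not taken from this diff:

require "net/http"
require "json"
require "uri"

uri = URI("http://127.0.0.1:41447/api/nodes?limit=50")  # local instance assumed
nodes = JSON.parse(Net::HTTP.get(uri))
nodes.each do |node|
  puts "#{node["node_id"]} #{node["short_name"]} last heard #{node["last_seen_iso"]}"
end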
# Retrieve recent text messages joined with related node information.
|
||||
#
|
||||
# @param limit [Integer] maximum number of rows returned.
|
||||
# @return [Array<Hash>] collection of message rows suitable for serialisation.
|
||||
def query_messages(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
rows = db.execute <<~SQL, [limit]
|
||||
SELECT m.*, n.*, m.snr AS msg_snr
|
||||
FROM messages m
|
||||
LEFT JOIN nodes n ON (
|
||||
m.from_id = n.node_id OR (
|
||||
CAST(m.from_id AS TEXT) <> '' AND
|
||||
CAST(m.from_id AS TEXT) GLOB '[0-9]*' AND
|
||||
CAST(m.from_id AS INTEGER) = n.num
|
||||
)
|
||||
)
|
||||
ORDER BY m.rx_time DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
msg_fields = %w[id rx_time rx_iso from_id to_id channel portnum text msg_snr rssi hop_limit]
|
||||
rows.each do |r|
|
||||
if DEBUG && (r["from_id"].nil? || r["from_id"].to_s.empty?)
|
||||
raw = db.execute("SELECT * FROM messages WHERE id = ?", [r["id"]]).first
|
||||
Kernel.warn "[debug] messages row before join: #{raw.inspect}"
|
||||
Kernel.warn "[debug] row after join: #{r.inspect}"
|
||||
end
|
||||
node = {}
|
||||
r.keys.each do |k|
|
||||
next if msg_fields.include?(k)
|
||||
node[k] = r.delete(k)
|
||||
end
|
||||
r["snr"] = r.delete("msg_snr")
|
||||
if r["from_id"] && (node["node_id"].nil? || node["node_id"].to_s.empty?)
|
||||
lookup_keys = []
|
||||
canonical = normalize_node_id(db, r["from_id"])
|
||||
lookup_keys << canonical if canonical
|
||||
raw_ref = r["from_id"].to_s.strip
|
||||
lookup_keys << raw_ref unless raw_ref.empty?
|
||||
lookup_keys << raw_ref.to_i if raw_ref.match?(/\A[0-9]+\z/)
|
||||
fallback = nil
|
||||
lookup_keys.uniq.each do |ref|
|
||||
sql = ref.is_a?(Integer) ? "SELECT * FROM nodes WHERE num = ?" : "SELECT * FROM nodes WHERE node_id = ?"
|
||||
fallback = db.get_first_row(sql, [ref])
|
||||
break if fallback
|
||||
end
|
||||
if fallback
|
||||
fallback.each do |key, value|
|
||||
next unless key.is_a?(String)
|
||||
next if msg_fields.include?(key)
|
||||
node[key] = value if node[key].nil?
|
||||
end
|
||||
end
|
||||
end
|
||||
node["role"] = "CLIENT" if node.key?("role") && (node["role"].nil? || node["role"].to_s.empty?)
|
||||
r["node"] = node
|
||||
if DEBUG && (r["from_id"].nil? || r["from_id"].to_s.empty?)
|
||||
Kernel.warn "[debug] row after processing: #{r.inspect}"
|
||||
end
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
# GET /api/messages
|
||||
#
|
||||
# Returns a JSON array of stored text messages including node metadata.
|
||||
get "/api/messages" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_messages(limit).to_json
|
||||
end
|
||||
|
||||
# Determine the numeric node reference for a canonical node identifier.
|
||||
#
|
||||
# The Meshtastic protobuf encodes the node ID as a hexadecimal string prefixed
|
||||
# with an exclamation mark (for example ``!4ed36bd0``). Many payloads also
|
||||
# include a decimal ``num`` alias, but some integrations omit it. When the
|
||||
# alias is missing we can reconstruct it from the canonical identifier so that
|
||||
# later joins using ``nodes.num`` continue to work.
|
||||
#
|
||||
# @param node_id [String, nil] canonical node identifier (e.g. ``!4ed36bd0``).
|
||||
# @param payload [Hash] raw node payload provided by the data daemon.
|
||||
# @return [Integer, nil] numeric node reference if it can be determined.
|
||||
def resolve_node_num(node_id, payload)
|
||||
raw = payload["num"]
|
||||
|
||||
case raw
|
||||
when Integer
|
||||
return raw
|
||||
when Numeric
|
||||
return raw.to_i
|
||||
when String
|
||||
trimmed = raw.strip
|
||||
return nil if trimmed.empty?
|
||||
return Integer(trimmed, 10) if trimmed.match?(/\A[0-9]+\z/)
|
||||
return Integer(trimmed.delete_prefix("0x").delete_prefix("0X"), 16) if trimmed.match?(/\A0[xX][0-9A-Fa-f]+\z/)
|
||||
if trimmed.match?(/\A[0-9A-Fa-f]+\z/)
|
||||
canonical = node_id.is_a?(String) ? node_id.strip : ""
|
||||
return Integer(trimmed, 16) if canonical.match?(/\A!?[0-9A-Fa-f]+\z/)
|
||||
end
|
||||
end
|
||||
|
||||
return nil unless node_id.is_a?(String)
|
||||
|
||||
hex = node_id.strip
|
||||
return nil if hex.empty?
|
||||
hex = hex.delete_prefix("!")
|
||||
return nil unless hex.match?(/\A[0-9A-Fa-f]+\z/)
|
||||
|
||||
Integer(hex, 16)
|
||||
rescue ArgumentError
|
||||
nil
|
||||
end
|
||||
|
||||
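A short worked example may help: the canonical ID is simply the node number rendered as hexadecimal with a `!` prefix, so `!4ed36bd0` and decimal 1322478544 name the same node (consistent with the fixtures above). The calls below are illustrative only, not part of the source:

# Hypothetical IRB session for resolve_node_num:
resolve_node_num("!4ed36bd0", {})                         # => 1322478544 (0x4ed36bd0)
resolve_node_num("!4ed36bd0", { "num" => "0x4ed36bd0" })  # => 1322478544 (hex alias accepted)
resolve_node_num(nil, { "num" => 424309585 })             # => 424309585  (decimal alias wins)
resolve_node_num("not-a-node", {})                        # => nil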
# Insert or update a node row with the most recent metrics.
|
||||
#
|
||||
# @param db [SQLite3::Database] open database handle.
|
||||
# @param node_id [String] primary identifier for the node.
|
||||
# @param n [Hash] node payload provided by the data daemon.
|
||||
def upsert_node(db, node_id, n)
|
||||
user = n["user"] || {}
|
||||
met = n["deviceMetrics"] || {}
|
||||
pos = n["position"] || {}
|
||||
role = user["role"] || "CLIENT"
|
||||
lh = n["lastHeard"]
|
||||
pt = pos["time"]
|
||||
now = Time.now.to_i
|
||||
pt = nil if pt && pt > now
|
||||
lh = now if lh && lh > now
|
||||
lh = pt if pt && (!lh || lh < pt)
|
||||
bool = ->(v) {
|
||||
case v
|
||||
when true then 1
|
||||
when false then 0
|
||||
else v
|
||||
end
|
||||
}
|
||||
node_num = resolve_node_num(node_id, n)
|
||||
|
||||
row = [
|
||||
node_id,
|
||||
node_num,
|
||||
user["shortName"],
|
||||
user["longName"],
|
||||
user["macaddr"],
|
||||
user["hwModel"] || n["hwModel"],
|
||||
role,
|
||||
user["publicKey"],
|
||||
bool.call(user["isUnmessagable"]),
|
||||
bool.call(n["isFavorite"]),
|
||||
n["hopsAway"],
|
||||
n["snr"],
|
||||
lh,
|
||||
lh,
|
||||
met["batteryLevel"],
|
||||
met["voltage"],
|
||||
met["channelUtilization"],
|
||||
met["airUtilTx"],
|
||||
met["uptimeSeconds"],
|
||||
pt,
|
||||
pos["locationSource"],
|
||||
pos["latitude"],
|
||||
pos["longitude"],
|
||||
pos["altitude"],
|
||||
]
|
||||
with_busy_retry do
|
||||
db.execute <<~SQL, row
|
||||
INSERT INTO nodes(node_id,num,short_name,long_name,macaddr,hw_model,role,public_key,is_unmessagable,is_favorite,
|
||||
hops_away,snr,last_heard,first_heard,battery_level,voltage,channel_utilization,air_util_tx,uptime_seconds,
|
||||
position_time,location_source,latitude,longitude,altitude)
|
||||
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
|
||||
ON CONFLICT(node_id) DO UPDATE SET
|
||||
num=excluded.num, short_name=excluded.short_name, long_name=excluded.long_name, macaddr=excluded.macaddr,
|
||||
hw_model=excluded.hw_model, role=excluded.role, public_key=excluded.public_key, is_unmessagable=excluded.is_unmessagable,
|
||||
is_favorite=excluded.is_favorite, hops_away=excluded.hops_away, snr=excluded.snr, last_heard=excluded.last_heard,
|
||||
battery_level=excluded.battery_level, voltage=excluded.voltage, channel_utilization=excluded.channel_utilization,
|
||||
air_util_tx=excluded.air_util_tx, uptime_seconds=excluded.uptime_seconds, position_time=excluded.position_time,
|
||||
location_source=excluded.location_source, latitude=excluded.latitude, longitude=excluded.longitude,
|
||||
altitude=excluded.altitude
|
||||
WHERE COALESCE(excluded.last_heard,0) >= COALESCE(nodes.last_heard,0)
|
||||
SQL
|
||||
end
|
||||
end
|
||||
|
||||
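The payload keys consumed here are the camelCase protobuf field names (the same aliases the Python test stubs earlier in this diff map to), so a minimal upsert might look like the sketch below; the values are illustrative:

# Illustrative node payload as produced by the data daemon (camelCase keys).
node_payload = {
  "num" => 1322478544,
  "lastHeard" => 1758024300,
  "snr" => 11.25,
  "user" => { "shortName" => "RDM", "longName" => "Radom Repeater", "hwModel" => "TBEAM", "role" => "CLIENT" },
  "deviceMetrics" => { "batteryLevel" => 87, "voltage" => 4.01 },
  "position" => { "time" => 1758024200, "latitude" => 52.5, "longitude" => 13.4 },
}

db = open_database
upsert_node(db, "!4ed36bd0", node_payload)
db.close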
# Ensure the request includes the expected bearer token.
|
||||
#
|
||||
# @return [void]
|
||||
# @raise [Sinatra::Halt] when authentication fails.
|
||||
def require_token!
|
||||
token = ENV["API_TOKEN"]
|
||||
provided = request.env["HTTP_AUTHORIZATION"].to_s.sub(/^Bearer\s+/i, "")
|
||||
halt 403, { error: "Forbidden" }.to_json unless token && !token.empty? && secure_token_match?(token, provided)
|
||||
end
|
||||
|
||||
# Perform a constant-time comparison between two strings, returning false on
|
||||
# length mismatches or invalid input.
|
||||
#
|
||||
# @param expected [String]
|
||||
# @param provided [String]
|
||||
# @return [Boolean]
|
||||
def secure_token_match?(expected, provided)
|
||||
return false unless expected.is_a?(String) && provided.is_a?(String)
|
||||
|
||||
expected_bytes = expected.b
|
||||
provided_bytes = provided.b
|
||||
return false unless expected_bytes.bytesize == provided_bytes.bytesize
|
||||
Rack::Utils.secure_compare(expected_bytes, provided_bytes)
|
||||
rescue Rack::Utils::SecurityError
|
||||
false
|
||||
end
|
||||
|
||||
# Read the request body enforcing a maximum allowed size.
|
||||
#
|
||||
# @param limit [Integer, nil] optional override for the number of bytes.
|
||||
# @return [String]
|
||||
def read_json_body(limit: nil)
|
||||
max_bytes = limit || MAX_JSON_BODY_BYTES
|
||||
max_bytes = max_bytes.to_i
|
||||
max_bytes = MAX_JSON_BODY_BYTES if max_bytes <= 0
|
||||
|
||||
body = request.body.read(max_bytes + 1)
|
||||
body = "" if body.nil?
|
||||
halt 413, { error: "payload too large" }.to_json if body.bytesize > max_bytes
|
||||
|
||||
body
|
||||
ensure
|
||||
request.body.rewind if request.body.respond_to?(:rewind)
|
||||
end
|
||||
|
||||
# Determine whether the canonical node identifier should replace the provided
|
||||
# sender reference for a message payload.
|
||||
#
|
||||
# @param message [Object] raw request payload element.
|
||||
# @return [Boolean]
|
||||
def prefer_canonical_sender?(message)
|
||||
message.is_a?(Hash) && message.key?("packet_id") && !message.key?("id")
|
||||
end
|
||||
|
||||
# Insert a text message if it does not already exist.
|
||||
#
|
||||
# @param db [SQLite3::Database] open database handle.
|
||||
# @param m [Hash] message payload provided by the data daemon.
|
||||
def insert_message(db, m)
|
||||
msg_id = m["id"] || m["packet_id"]
|
||||
return unless msg_id
|
||||
rx_time = m["rx_time"]&.to_i || Time.now.to_i
|
||||
rx_iso = m["rx_iso"] || Time.at(rx_time).utc.iso8601
|
||||
raw_from_id = m["from_id"]
|
||||
if raw_from_id.nil? || raw_from_id.to_s.strip.empty?
|
||||
alt_from = m["from"]
|
||||
raw_from_id = alt_from unless alt_from.nil? || alt_from.to_s.strip.empty?
|
||||
end
|
||||
trimmed_from_id = raw_from_id.nil? ? nil : raw_from_id.to_s.strip
|
||||
trimmed_from_id = nil if trimmed_from_id&.empty?
|
||||
canonical_from_id = normalize_node_id(db, raw_from_id)
|
||||
use_canonical = canonical_from_id && (trimmed_from_id.nil? || prefer_canonical_sender?(m))
|
||||
from_id = if use_canonical
|
||||
canonical_from_id.to_s.strip
|
||||
else
|
||||
trimmed_from_id
|
||||
end
|
||||
from_id = nil if from_id&.empty?
|
||||
row = [
|
||||
msg_id,
|
||||
rx_time,
|
||||
rx_iso,
|
||||
from_id,
|
||||
m["to_id"],
|
||||
m["channel"],
|
||||
m["portnum"],
|
||||
m["text"],
|
||||
m["snr"],
|
||||
m["rssi"],
|
||||
m["hop_limit"],
|
||||
]
|
||||
with_busy_retry do
|
||||
existing = db.get_first_row("SELECT from_id FROM messages WHERE id = ?", [msg_id])
|
||||
if existing
|
||||
if from_id
|
||||
existing_from = existing.is_a?(Hash) ? existing["from_id"] : existing[0]
|
||||
existing_from_str = existing_from&.to_s
|
||||
should_update = existing_from_str.nil? || existing_from_str.strip.empty?
|
||||
should_update ||= existing_from != from_id
|
||||
db.execute("UPDATE messages SET from_id = ? WHERE id = ?", [from_id, msg_id]) if should_update
|
||||
end
|
||||
else
|
||||
begin
|
||||
db.execute <<~SQL, row
|
||||
INSERT INTO messages(id,rx_time,rx_iso,from_id,to_id,channel,portnum,text,snr,rssi,hop_limit)
|
||||
VALUES (?,?,?,?,?,?,?,?,?,?,?)
|
||||
SQL
|
||||
rescue SQLite3::ConstraintException
|
||||
db.execute("UPDATE messages SET from_id = ? WHERE id = ?", [from_id, msg_id]) if from_id
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
# Resolve a node reference to the canonical node ID when possible.
|
||||
#
|
||||
# @param db [SQLite3::Database] open database handle.
|
||||
# @param node_ref [Object] raw node identifier or numeric reference.
|
||||
# @return [String, nil] canonical node ID or nil if it cannot be resolved.
|
||||
def normalize_node_id(db, node_ref)
|
||||
return nil if node_ref.nil?
|
||||
ref_str = node_ref.to_s.strip
|
||||
return nil if ref_str.empty?
|
||||
|
||||
node_id = db.get_first_value("SELECT node_id FROM nodes WHERE node_id = ?", [ref_str])
|
||||
return node_id if node_id
|
||||
|
||||
begin
|
||||
ref_num = Integer(ref_str, 10)
|
||||
rescue ArgumentError
|
||||
return nil
|
||||
end
|
||||
|
||||
db.get_first_value("SELECT node_id FROM nodes WHERE num = ?", [ref_num])
|
||||
end
|
||||
|
||||
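In other words, the helper accepts either the canonical ID or the decimal `num` and returns the canonical form when the node is known. Assuming the nodes table already contains `!194a7351` (num 424309585, as in the fixtures above), the behaviour would be:

# Hypothetical lookups against a populated database handle `db`:
normalize_node_id(db, "!194a7351")   # => "!194a7351" (exact node_id match)
normalize_node_id(db, "424309585")   # => "!194a7351" (resolved via nodes.num)
normalize_node_id(db, 424309585)     # => "!194a7351" (numeric input is stringified first)
normalize_node_id(db, "unknown")     # => nil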
# POST /api/nodes
|
||||
#
|
||||
# Upserts one or more nodes provided as a JSON object keyed by node ID.
|
||||
post "/api/nodes" do
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
halt 400, { error: "too many nodes" }.to_json if data.is_a?(Hash) && data.size > 1000
|
||||
db = open_database
|
||||
data.each do |node_id, node|
|
||||
upsert_node(db, node_id, node)
|
||||
end
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
# POST /api/messages
|
||||
#
|
||||
# Accepts an array or object describing text messages and stores each entry.
|
||||
post "/api/messages" do
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
messages = data.is_a?(Array) ? data : [data]
|
||||
halt 400, { error: "too many messages" }.to_json if messages.size > 1000
|
||||
db = open_database
|
||||
messages.each do |msg|
|
||||
insert_message(db, msg)
|
||||
end
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
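Both ingest routes expect the shared bearer token checked by `require_token!`. A hedged client sketch (host, port, and the message values are placeholders; only the endpoint and auth scheme come from the code above):

require "net/http"
require "json"
require "uri"

uri = URI("http://127.0.0.1:41447/api/messages")  # local instance assumed
message = {
  "id" => 123456789,
  "rx_time" => Time.now.to_i,
  "from_id" => "!4ed36bd0",
  "to_id" => "^all",
  "channel" => 0,
  "portnum" => "TEXT_MESSAGE_APP",
  "text" => "hello mesh",
}

request = Net::HTTP::Post.new(uri, "Content-Type" => "application/json")
request["Authorization"] = "Bearer #{ENV.fetch("API_TOKEN")}"
request.body = [message].to_json

response = Net::HTTP.start(uri.hostname, uri.port) { |http| http.request(request) }
puts response.code  # 200 on success, 403 when the token does not match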
# GET /
|
||||
#
|
||||
# Renders the main site with configuration-driven defaults for the template.
|
||||
get "/" do
|
||||
erb :index, locals: {
|
||||
site_name: SITE_NAME,
|
||||
default_channel: DEFAULT_CHANNEL,
|
||||
default_frequency: DEFAULT_FREQUENCY,
|
||||
map_center_lat: MAP_CENTER_LAT,
|
||||
map_center_lon: MAP_CENTER_LON,
|
||||
max_node_distance_km: MAX_NODE_DISTANCE_KM,
|
||||
matrix_room: MATRIX_ROOM,
|
||||
}
|
||||
end
|
||||
PotatoMesh::Application.run! if $PROGRAM_NAME == __FILE__
|
||||
|
||||
+2
-1
@@ -17,4 +17,5 @@
set -euo pipefail

bundle install
exec ruby app.rb -p 41447 -o 127.0.0.1

exec ruby app.rb -p 41447 -o 0.0.0.0
@@ -0,0 +1,178 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "sinatra/base"
|
||||
require "json"
|
||||
require "sqlite3"
|
||||
require "fileutils"
|
||||
require "logger"
|
||||
require "rack/utils"
|
||||
require "open3"
|
||||
require "resolv"
|
||||
require "socket"
|
||||
require "time"
|
||||
require "openssl"
|
||||
require "base64"
|
||||
require "prometheus/client"
|
||||
require "prometheus/client/formats/text"
|
||||
require "prometheus/middleware/collector"
|
||||
require "prometheus/middleware/exporter"
|
||||
require "net/http"
|
||||
require "uri"
|
||||
require "ipaddr"
|
||||
require "set"
|
||||
require "digest"
|
||||
|
||||
require_relative "config"
|
||||
require_relative "sanitizer"
|
||||
require_relative "meta"
|
||||
require_relative "logging"
|
||||
require_relative "application/helpers"
|
||||
require_relative "application/errors"
|
||||
require_relative "application/database"
|
||||
require_relative "application/networking"
|
||||
require_relative "application/identity"
|
||||
require_relative "application/federation"
|
||||
require_relative "application/prometheus"
|
||||
require_relative "application/queries"
|
||||
require_relative "application/data_processing"
|
||||
require_relative "application/filesystem"
|
||||
require_relative "application/instances"
|
||||
require_relative "application/routes/api"
|
||||
require_relative "application/routes/ingest"
|
||||
require_relative "application/routes/root"
|
||||
|
||||
module PotatoMesh
|
||||
class Application < Sinatra::Base
|
||||
extend App::Helpers
|
||||
extend App::Database
|
||||
extend App::Networking
|
||||
extend App::Identity
|
||||
extend App::Federation
|
||||
extend App::Instances
|
||||
extend App::Prometheus
|
||||
extend App::Queries
|
||||
extend App::DataProcessing
|
||||
extend App::Filesystem
|
||||
|
||||
helpers App::Helpers
|
||||
include App::Database
|
||||
include App::Networking
|
||||
include App::Identity
|
||||
include App::Federation
|
||||
include App::Instances
|
||||
include App::Prometheus
|
||||
include App::Queries
|
||||
include App::DataProcessing
|
||||
include App::Filesystem
|
||||
|
||||
register App::Routes::Api
|
||||
register App::Routes::Ingest
|
||||
register App::Routes::Root
|
||||
|
||||
DEFAULT_PORT = 41_447
|
||||
DEFAULT_BIND_ADDRESS = "0.0.0.0"
|
||||
|
||||
APP_VERSION = determine_app_version
|
||||
INSTANCE_PRIVATE_KEY, INSTANCE_KEY_GENERATED = load_or_generate_instance_private_key
|
||||
INSTANCE_PUBLIC_KEY_PEM = INSTANCE_PRIVATE_KEY.public_key.export
|
||||
SELF_INSTANCE_ID = Digest::SHA256.hexdigest(INSTANCE_PUBLIC_KEY_PEM)
|
||||
INSTANCE_DOMAIN, INSTANCE_DOMAIN_SOURCE = determine_instance_domain
|
||||
|
||||
# Adjust the runtime logger severity to match the DEBUG flag.
|
||||
#
|
||||
# @return [void]
|
||||
def self.apply_logger_level!
|
||||
logger = settings.logger
|
||||
return unless logger
|
||||
|
||||
logger.level = PotatoMesh::Config.debug? ? Logger::DEBUG : Logger::WARN
|
||||
end
|
||||
|
||||
# Determine the port the application should listen on.
|
||||
#
|
||||
# @param default_port [Integer] fallback port when ENV['PORT'] is absent or invalid.
|
||||
# @return [Integer] port number for the HTTP server.
|
||||
def self.resolve_port(default_port: DEFAULT_PORT)
|
||||
default_port
|
||||
end
|
||||
|
||||
configure do
|
||||
set :public_folder, File.expand_path("../../public", __dir__)
|
||||
set :views, File.expand_path("../../views", __dir__)
|
||||
set :federation_thread, nil
|
||||
set :port, resolve_port
|
||||
set :bind, DEFAULT_BIND_ADDRESS
|
||||
|
||||
app_logger = PotatoMesh::Logging.build_logger($stdout)
|
||||
set :logger, app_logger
|
||||
use Rack::CommonLogger, app_logger
|
||||
use Rack::Deflater
|
||||
use ::Prometheus::Middleware::Collector
|
||||
use ::Prometheus::Middleware::Exporter
|
||||
|
||||
apply_logger_level!
|
||||
|
||||
perform_initial_filesystem_setup!
|
||||
cleanup_legacy_well_known_artifacts
|
||||
init_db unless db_schema_present?
|
||||
ensure_schema_upgrades
|
||||
|
||||
log_instance_domain_resolution
|
||||
log_instance_public_key
|
||||
refresh_well_known_document_if_stale
|
||||
ensure_self_instance_record!
|
||||
update_all_prometheus_metrics_from_nodes
|
||||
|
||||
if federation_announcements_active?
|
||||
start_initial_federation_announcement!
|
||||
start_federation_announcer!
|
||||
elsif federation_enabled?
|
||||
debug_log(
|
||||
"Federation announcements disabled",
|
||||
context: "federation",
|
||||
reason: "test environment",
|
||||
)
|
||||
else
|
||||
debug_log(
|
||||
"Federation announcements disabled",
|
||||
context: "federation",
|
||||
reason: "configuration",
|
||||
)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if defined?(Sinatra::Application) && Sinatra::Application != PotatoMesh::Application
|
||||
Sinatra.send(:remove_const, :Application)
|
||||
end
|
||||
Sinatra::Application = PotatoMesh::Application unless defined?(Sinatra::Application)
|
||||
|
||||
APP_VERSION = PotatoMesh::Application::APP_VERSION unless defined?(APP_VERSION)
|
||||
SELF_INSTANCE_ID = PotatoMesh::Application::SELF_INSTANCE_ID unless defined?(SELF_INSTANCE_ID)
|
||||
|
||||
[
|
||||
PotatoMesh::App::Helpers,
|
||||
PotatoMesh::App::Database,
|
||||
PotatoMesh::App::Networking,
|
||||
PotatoMesh::App::Identity,
|
||||
PotatoMesh::App::Federation,
|
||||
PotatoMesh::App::Instances,
|
||||
PotatoMesh::App::Prometheus,
|
||||
PotatoMesh::App::Queries,
|
||||
PotatoMesh::App::DataProcessing,
|
||||
].each do |mod|
|
||||
Object.include(mod) unless Object < mod
|
||||
end
|
||||
File diff suppressed because it is too large
@@ -0,0 +1,134 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# frozen_string_literal: true

module PotatoMesh
  module App
    module Database
      # Open a connection to the application database applying common pragmas.
      #
      # @param readonly [Boolean] whether to open the database in read-only mode.
      # @return [SQLite3::Database] configured database handle.
      def open_database(readonly: false)
        SQLite3::Database.new(PotatoMesh::Config.db_path, readonly: readonly).tap do |db|
          db.busy_timeout = PotatoMesh::Config.db_busy_timeout_ms
          db.execute("PRAGMA foreign_keys = ON")
        end
      end

      # Execute the provided block and retry when SQLite reports a busy error.
      #
      # @param max_retries [Integer] maximum number of retries when locked.
      # @param base_delay [Float] incremental back-off delay between retries.
      # @yield Executes the database operation.
      # @return [Object] result of the block.
      def with_busy_retry(
        max_retries: PotatoMesh::Config.db_busy_max_retries,
        base_delay: PotatoMesh::Config.db_busy_retry_delay
      )
        attempts = 0
        begin
          yield
        rescue SQLite3::BusyException
          attempts += 1
          raise if attempts > max_retries

          sleep(base_delay * attempts)
          retry
        end
      end
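A usage sketch for the retry helper: wrap any write that might raise SQLite3::BusyException while another process holds the database. The table and values are illustrative only:

      # Example (illustrative): retried with linear back-off
      # (base_delay, 2 * base_delay, ...) while the database is locked.
      db = open_database
      with_busy_retry do
        db.execute("UPDATE nodes SET last_heard = ? WHERE node_id = ?", [Time.now.to_i, "!4ed36bd0"])
      end
      db.close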

      # Determine whether the database schema has already been provisioned.
      #
      # @return [Boolean] true when all required tables exist.
      def db_schema_present?
        return false unless File.exist?(PotatoMesh::Config.db_path)

        db = open_database(readonly: true)
        required = %w[nodes messages positions telemetry neighbors instances]
        tables =
          db.execute(
            "SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes','messages','positions','telemetry','neighbors','instances')",
          ).flatten
        (required - tables).empty?
      rescue SQLite3::Exception
        false
      ensure
        db&.close
      end

      # Create the database schema using the bundled SQL files.
      #
      # @return [void]
      def init_db
        FileUtils.mkdir_p(File.dirname(PotatoMesh::Config.db_path))
        db = open_database
        %w[nodes messages positions telemetry neighbors instances].each do |schema|
          sql_file = File.expand_path("../../../../data/#{schema}.sql", __dir__)
          db.execute_batch(File.read(sql_file))
        end
      ensure
        db&.close
      end

      # Apply any schema migrations required for older installations.
      #
      # @return [void]
      def ensure_schema_upgrades
        db = open_database
        node_columns = db.execute("PRAGMA table_info(nodes)").map { |row| row[1] }
        unless node_columns.include?("precision_bits")
          db.execute("ALTER TABLE nodes ADD COLUMN precision_bits INTEGER")
          node_columns << "precision_bits"
        end

        unless node_columns.include?("lora_freq")
          db.execute("ALTER TABLE nodes ADD COLUMN lora_freq INTEGER")
        end

        unless node_columns.include?("modem_preset")
          db.execute("ALTER TABLE nodes ADD COLUMN modem_preset TEXT")
        end

        message_columns = db.execute("PRAGMA table_info(messages)").map { |row| row[1] }

        unless message_columns.include?("lora_freq")
          db.execute("ALTER TABLE messages ADD COLUMN lora_freq INTEGER")
        end

        unless message_columns.include?("modem_preset")
          db.execute("ALTER TABLE messages ADD COLUMN modem_preset TEXT")
        end

        unless message_columns.include?("channel_name")
          db.execute("ALTER TABLE messages ADD COLUMN channel_name TEXT")
        end

        tables = db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='instances'").flatten
        if tables.empty?
          sql_file = File.expand_path("../../../../data/instances.sql", __dir__)
          db.execute_batch(File.read(sql_file))
        end
      rescue SQLite3::SQLException, Errno::ENOENT => e
        warn_log(
          "Failed to apply schema upgrade",
          context: "database.schema",
          error_class: e.class.name,
          error_message: e.message,
        )
      ensure
        db&.close
      end
    end
  end
end
@@ -0,0 +1,20 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# frozen_string_literal: true

module PotatoMesh
  module App
    # Raised when a remote instance fails to provide valid federation data.
    class InstanceFetchError < StandardError; end
  end
end
@@ -0,0 +1,870 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Federation
|
||||
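# Return the sanitised domain configured for the local instance.
#
# @return [String] canonical instance domain.
# @raise [RuntimeError] when INSTANCE_DOMAIN cannot be determined.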
def self_instance_domain
|
||||
sanitized = sanitize_instance_domain(app_constant(:INSTANCE_DOMAIN))
|
||||
return sanitized if sanitized
|
||||
|
||||
raise "INSTANCE_DOMAIN could not be determined"
|
||||
end
|
||||
|
||||
# Determine whether the local instance should persist its own record.
|
||||
#
|
||||
# @param domain [String, nil] candidate domain for the running instance.
|
||||
# @return [Array(Boolean, String), Array(Boolean, nil)] tuple containing a decision flag and an optional reason (nil when registration is allowed).

|
||||
def self_instance_registration_decision(domain)
|
||||
source = app_constant(:INSTANCE_DOMAIN_SOURCE)
|
||||
return [false, "INSTANCE_DOMAIN source is #{source}"] unless source == :environment
|
||||
|
||||
sanitized = sanitize_instance_domain(domain)
|
||||
return [false, "INSTANCE_DOMAIN missing or invalid"] unless sanitized
|
||||
|
||||
ip = ip_from_domain(sanitized)
|
||||
if ip && restricted_ip_address?(ip)
|
||||
return [false, "INSTANCE_DOMAIN resolves to restricted IP"]
|
||||
end
|
||||
|
||||
[true, nil]
|
||||
end
|
||||
|
||||
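# Build the attribute hash describing the local instance record.
#
# @return [Hash] attributes used for persistence and federation announcements.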
def self_instance_attributes
|
||||
domain = self_instance_domain
|
||||
last_update = latest_node_update_timestamp || Time.now.to_i
|
||||
{
|
||||
id: app_constant(:SELF_INSTANCE_ID),
|
||||
domain: domain,
|
||||
pubkey: app_constant(:INSTANCE_PUBLIC_KEY_PEM),
|
||||
name: sanitized_site_name,
|
||||
version: app_constant(:APP_VERSION),
|
||||
channel: sanitized_channel,
|
||||
frequency: sanitized_frequency,
|
||||
latitude: PotatoMesh::Config.map_center_lat,
|
||||
longitude: PotatoMesh::Config.map_center_lon,
|
||||
last_update_time: last_update,
|
||||
is_private: private_mode?,
|
||||
}
|
||||
end
|
||||
|
||||
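# Sign the canonical payload derived from the supplied attributes.
#
# @param attributes [Hash] instance attributes to sign.
# @return [String] Base64 encoded RSA-SHA256 signature.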
def sign_instance_attributes(attributes)
|
||||
payload = canonical_instance_payload(attributes)
|
||||
Base64.strict_encode64(
|
||||
app_constant(:INSTANCE_PRIVATE_KEY).sign(OpenSSL::Digest::SHA256.new, payload),
|
||||
)
|
||||
end
|
||||
|
||||
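# Assemble the announcement body sent to remote instances.
#
# @param attributes [Hash] local instance attributes.
# @param signature [String] Base64 encoded signature of the canonical payload.
# @return [Hash] announcement payload with nil values removed.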
def instance_announcement_payload(attributes, signature)
|
||||
payload = {
|
||||
"id" => attributes[:id],
|
||||
"domain" => attributes[:domain],
|
||||
"pubkey" => attributes[:pubkey],
|
||||
"name" => attributes[:name],
|
||||
"version" => attributes[:version],
|
||||
"channel" => attributes[:channel],
|
||||
"frequency" => attributes[:frequency],
|
||||
"latitude" => attributes[:latitude],
|
||||
"longitude" => attributes[:longitude],
|
||||
"lastUpdateTime" => attributes[:last_update_time],
|
||||
"isPrivate" => attributes[:is_private],
|
||||
"signature" => signature,
|
||||
}
|
||||
payload.reject { |_, value| value.nil? }
|
||||
end
|
||||
|
||||
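# Persist the local instance record when registration is permitted.
#
# @return [Array(Hash, String)] attributes and signature for the local instance.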
def ensure_self_instance_record!
|
||||
attributes = self_instance_attributes
|
||||
signature = sign_instance_attributes(attributes)
|
||||
db = nil
|
||||
allowed, reason = self_instance_registration_decision(attributes[:domain])
|
||||
if allowed
|
||||
db = open_database
|
||||
upsert_instance_record(db, attributes, signature)
|
||||
debug_log(
|
||||
"Registered self instance record",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
instance_id: attributes[:id],
|
||||
)
|
||||
else
|
||||
debug_log(
|
||||
"Skipped self instance registration",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
reason: reason,
|
||||
)
|
||||
end
|
||||
[attributes, signature]
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
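# Collect the unique remote domains that should receive announcements,
# combining configured seed domains with domains stored in the instances table.
#
# @param self_domain [String, nil] domain of the running instance, excluded from the result.
# @return [Array<String>] ordered list of sanitised target domains.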
def federation_target_domains(self_domain)
|
||||
normalized_self = sanitize_instance_domain(self_domain)&.downcase
|
||||
ordered = []
|
||||
seen = Set.new
|
||||
|
||||
PotatoMesh::Config.federation_seed_domains.each do |seed|
|
||||
sanitized = sanitize_instance_domain(seed)&.downcase
|
||||
next unless sanitized
|
||||
next if normalized_self && sanitized == normalized_self
|
||||
next if seen.include?(sanitized)
|
||||
|
||||
ordered << sanitized
|
||||
seen << sanitized
|
||||
end
|
||||
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = false
|
||||
rows = with_busy_retry {
|
||||
db.execute("SELECT domain FROM instances WHERE domain IS NOT NULL AND TRIM(domain) != ''")
|
||||
}
|
||||
rows.flatten.compact.each do |raw_domain|
|
||||
sanitized = sanitize_instance_domain(raw_domain)&.downcase
|
||||
next unless sanitized
|
||||
next if normalized_self && sanitized == normalized_self
|
||||
next if seen.include?(sanitized)
|
||||
|
||||
ordered << sanitized
|
||||
seen << sanitized
|
||||
end
|
||||
ordered
|
||||
rescue SQLite3::Exception
|
||||
fallback = PotatoMesh::Config.federation_seed_domains.filter_map do |seed|
|
||||
candidate = sanitize_instance_domain(seed)&.downcase
|
||||
next if normalized_self && candidate == normalized_self
|
||||
|
||||
candidate
|
||||
end
|
||||
fallback.uniq
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
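# Deliver the announcement payload to a single remote domain, preferring HTTPS
# and falling back to HTTP when the HTTPS connection is refused.
#
# @param domain [String] remote domain to contact.
# @param payload_json [String] JSON encoded announcement body.
# @return [Boolean] true when any candidate endpoint accepted the announcement.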
def announce_instance_to_domain(domain, payload_json)
|
||||
return false unless domain && !domain.empty?
|
||||
|
||||
https_failures = []
|
||||
|
||||
instance_uri_candidates(domain, "/api/instances").each do |uri|
|
||||
begin
|
||||
http = build_remote_http_client(uri)
|
||||
response = http.start do |connection|
|
||||
request = Net::HTTP::Post.new(uri)
|
||||
request["Content-Type"] = "application/json"
|
||||
request.body = payload_json
|
||||
connection.request(request)
|
||||
end
|
||||
if response.is_a?(Net::HTTPSuccess)
|
||||
debug_log(
|
||||
"Published federation announcement",
|
||||
context: "federation.announce",
|
||||
target: uri.to_s,
|
||||
status: response.code,
|
||||
)
|
||||
return true
|
||||
end
|
||||
debug_log(
|
||||
"Federation announcement failed",
|
||||
context: "federation.announce",
|
||||
target: uri.to_s,
|
||||
status: response.code,
|
||||
)
|
||||
rescue StandardError => e
|
||||
metadata = {
|
||||
context: "federation.announce",
|
||||
target: uri.to_s,
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
}
|
||||
|
||||
if uri.scheme == "https" && https_connection_refused?(e)
|
||||
debug_log(
|
||||
"HTTPS federation announcement failed, retrying with HTTP",
|
||||
**metadata,
|
||||
)
|
||||
https_failures << metadata
|
||||
next
|
||||
end
|
||||
|
||||
warn_log(
|
||||
"Federation announcement raised exception",
|
||||
**metadata,
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
https_failures.each do |metadata|
|
||||
warn_log(
|
||||
"Federation announcement raised exception",
|
||||
**metadata,
|
||||
)
|
||||
end
|
||||
|
||||
false
|
||||
end
|
||||
|
||||
# Determine whether an HTTPS announcement failure should fall back to HTTP.
|
||||
#
|
||||
# @param error [StandardError] failure raised while attempting HTTPS.
|
||||
# @return [Boolean] true when the error corresponds to a refused TCP connection.
|
||||
def https_connection_refused?(error)
|
||||
current = error
|
||||
while current
|
||||
return true if current.is_a?(Errno::ECONNREFUSED)
|
||||
|
||||
current = current.respond_to?(:cause) ? current.cause : nil
|
||||
end
|
||||
|
||||
false
|
||||
end
|
||||
|
||||
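# Announce the local instance to every known federation target.
#
# @return [void]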
def announce_instance_to_all_domains
|
||||
return unless federation_enabled?
|
||||
|
||||
attributes, signature = ensure_self_instance_record!
|
||||
payload_json = JSON.generate(instance_announcement_payload(attributes, signature))
|
||||
domains = federation_target_domains(attributes[:domain])
|
||||
domains.each do |domain|
|
||||
announce_instance_to_domain(domain, payload_json)
|
||||
end
|
||||
unless domains.empty?
|
||||
debug_log(
|
||||
"Federation announcement cycle complete",
|
||||
context: "federation.announce",
|
||||
targets: domains,
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
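# Start the recurring background thread that re-announces the local instance
# at the configured interval, reusing an existing live thread when present.
#
# @return [Thread] running announcer thread.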
def start_federation_announcer!
|
||||
existing = settings.federation_thread
|
||||
return existing if existing&.alive?
|
||||
|
||||
thread = Thread.new do
|
||||
loop do
|
||||
sleep PotatoMesh::Config.federation_announcement_interval
|
||||
begin
|
||||
announce_instance_to_all_domains
|
||||
rescue StandardError => e
|
||||
warn_log(
|
||||
"Federation announcement loop error",
|
||||
context: "federation.announce",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
end
|
||||
end
|
||||
end
|
||||
thread.name = "potato-mesh-federation" if thread.respond_to?(:name=)
|
||||
set(:federation_thread, thread)
|
||||
thread
|
||||
end
|
||||
|
||||
# Launch a background thread responsible for the first federation broadcast.
|
||||
#
|
||||
# @return [Thread, nil] the thread handling the initial announcement.
|
||||
def start_initial_federation_announcement!
|
||||
existing = settings.respond_to?(:initial_federation_thread) ? settings.initial_federation_thread : nil
|
||||
return existing if existing&.alive?
|
||||
|
||||
thread = Thread.new do
|
||||
begin
|
||||
delay = PotatoMesh::Config.initial_federation_delay_seconds
|
||||
Kernel.sleep(delay) if delay.positive?
|
||||
announce_instance_to_all_domains
|
||||
rescue StandardError => e
|
||||
warn_log(
|
||||
"Initial federation announcement failed",
|
||||
context: "federation.announce",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
ensure
|
||||
set(:initial_federation_thread, nil)
|
||||
end
|
||||
end
|
||||
thread.name = "potato-mesh-federation-initial" if thread.respond_to?(:name=)
|
||||
thread.report_on_exception = false if thread.respond_to?(:report_on_exception=)
|
||||
set(:initial_federation_thread, thread)
|
||||
thread
|
||||
end
|
||||
|
||||
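# Serialise instance attributes into the canonical JSON string used for
# signing and verification.
#
# @param attributes [Hash] instance attributes.
# @return [String] JSON payload covering only the populated attributes.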
def canonical_instance_payload(attributes)
|
||||
data = {}
|
||||
data["id"] = attributes[:id] if attributes[:id]
|
||||
data["domain"] = attributes[:domain] if attributes[:domain]
|
||||
data["pubkey"] = attributes[:pubkey] if attributes[:pubkey]
|
||||
data["name"] = attributes[:name] if attributes[:name]
|
||||
data["version"] = attributes[:version] if attributes[:version]
|
||||
data["channel"] = attributes[:channel] if attributes[:channel]
|
||||
data["frequency"] = attributes[:frequency] if attributes[:frequency]
|
||||
data["latitude"] = attributes[:latitude] unless attributes[:latitude].nil?
|
||||
data["longitude"] = attributes[:longitude] unless attributes[:longitude].nil?
|
||||
data["lastUpdateTime"] = attributes[:last_update_time] unless attributes[:last_update_time].nil?
|
||||
data["isPrivate"] = attributes[:is_private] unless attributes[:is_private].nil?
|
||||
|
||||
JSON.generate(data, sort_keys: true)
|
||||
end
|
||||
|
||||
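# Verify a remote instance signature against its canonical payload.
#
# @param attributes [Hash] attributes reconstructed from the remote payload.
# @param signature [String, nil] Base64 encoded signature.
# @param public_key_pem [String, nil] PEM encoded RSA public key.
# @return [Boolean] true when the signature is valid.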
def verify_instance_signature(attributes, signature, public_key_pem)
|
||||
return false unless signature && public_key_pem
|
||||
|
||||
canonical = canonical_instance_payload(attributes)
|
||||
signature_bytes = Base64.strict_decode64(signature)
|
||||
key = OpenSSL::PKey::RSA.new(public_key_pem)
|
||||
key.verify(OpenSSL::Digest::SHA256.new, signature_bytes, canonical)
|
||||
rescue ArgumentError, OpenSSL::PKey::PKeyError
|
||||
false
|
||||
end
|
||||
|
||||
def instance_uri_candidates(domain, path)
|
||||
base = domain
|
||||
[
|
||||
URI.parse("https://#{base}#{path}"),
|
||||
URI.parse("http://#{base}#{path}"),
|
||||
]
|
||||
rescue URI::InvalidURIError
|
||||
[]
|
||||
end
|
||||
|
||||
def perform_instance_http_request(uri)
|
||||
http = build_remote_http_client(uri)
|
||||
http.start do |connection|
|
||||
response = connection.request(Net::HTTP::Get.new(uri))
|
||||
case response
|
||||
when Net::HTTPSuccess
|
||||
response.body
|
||||
else
|
||||
raise InstanceFetchError, "unexpected response #{response.code}"
|
||||
end
|
||||
end
|
||||
rescue StandardError => e
|
||||
raise_instance_fetch_error(e)
|
||||
end
|
||||
|
||||
# Build a human readable error message for a failed instance request.
|
||||
#
|
||||
# @param error [StandardError] failure raised while performing the request.
|
||||
# @return [String] description including the error class when necessary.
|
||||
def instance_fetch_error_message(error)
|
||||
message = error.message.to_s.strip
|
||||
class_name = error.class.name || error.class.to_s
|
||||
return class_name if message.empty?
|
||||
|
||||
message.include?(class_name) ? message : "#{class_name}: #{message}"
|
||||
end
|
||||
|
||||
# Raise an InstanceFetchError that preserves the original context.
|
||||
#
|
||||
# @param error [StandardError] failure raised while performing the request.
|
||||
# @return [void]
|
||||
def raise_instance_fetch_error(error)
|
||||
message = instance_fetch_error_message(error)
|
||||
wrapped = InstanceFetchError.new(message)
|
||||
wrapped.set_backtrace(error.backtrace)
|
||||
raise wrapped
|
||||
end
|
||||
|
||||
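# Fetch and parse a JSON document from a remote domain, trying HTTPS first
# and then HTTP.
#
# @param domain [String] remote domain to query.
# @param path [String] request path, for example "/api/instances".
# @return [Array(Object, URI::Generic), Array(nil, Array<String>)] parsed payload
#   and the successful URI, or nil and the collected error messages.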
def fetch_instance_json(domain, path)
|
||||
errors = []
|
||||
instance_uri_candidates(domain, path).each do |uri|
|
||||
begin
|
||||
body = perform_instance_http_request(uri)
|
||||
return [JSON.parse(body), uri] if body
|
||||
rescue JSON::ParserError => e
|
||||
errors << "#{uri}: invalid JSON (#{e.message})"
|
||||
rescue InstanceFetchError => e
|
||||
errors << "#{uri}: #{e.message}"
|
||||
end
|
||||
end
|
||||
[nil, errors]
|
||||
end
|
||||
|
||||
# Parse a remote federation instance payload into canonical attributes.
|
||||
#
|
||||
# @param payload [Hash] JSON object describing a remote instance.
|
||||
# @return [Array(Hash, String, nil), Array(nil, nil, String)] three-element tuple containing the
|
||||
#   attribute hash, signature, and failure reason; the reason is nil when the payload is valid.
|
||||
def remote_instance_attributes_from_payload(payload)
|
||||
unless payload.is_a?(Hash)
|
||||
return [nil, nil, "instance payload is not an object"]
|
||||
end
|
||||
|
||||
id = string_or_nil(payload["id"])
|
||||
return [nil, nil, "missing instance id"] unless id
|
||||
|
||||
domain = sanitize_instance_domain(payload["domain"])
|
||||
return [nil, nil, "missing instance domain"] unless domain
|
||||
|
||||
pubkey = sanitize_public_key_pem(payload["pubkey"])
|
||||
return [nil, nil, "missing instance public key"] unless pubkey
|
||||
|
||||
signature = string_or_nil(payload["signature"])
|
||||
return [nil, nil, "missing instance signature"] unless signature
|
||||
|
||||
private_value = if payload.key?("isPrivate")
|
||||
payload["isPrivate"]
|
||||
else
|
||||
payload["is_private"]
|
||||
end
|
||||
private_flag = coerce_boolean(private_value)
|
||||
if private_flag.nil?
|
||||
numeric_flag = coerce_integer(private_value)
|
||||
private_flag = !numeric_flag.to_i.zero? if numeric_flag
|
||||
end
|
||||
|
||||
attributes = {
|
||||
id: id,
|
||||
domain: domain,
|
||||
pubkey: pubkey,
|
||||
name: string_or_nil(payload["name"]),
|
||||
version: string_or_nil(payload["version"]),
|
||||
channel: string_or_nil(payload["channel"]),
|
||||
frequency: string_or_nil(payload["frequency"]),
|
||||
latitude: coerce_float(payload["latitude"]),
|
||||
longitude: coerce_float(payload["longitude"]),
|
||||
last_update_time: coerce_integer(payload["lastUpdateTime"]),
|
||||
is_private: private_flag,
|
||||
}
|
||||
|
||||
[attributes, signature, nil]
|
||||
rescue StandardError => e
|
||||
[nil, nil, e.message]
|
||||
end
|
||||
|
||||
# Recursively ingest federation records exposed by the supplied domain.
|
||||
#
|
||||
# @param db [SQLite3::Database] open database connection used for writes.
|
||||
# @param domain [String] remote domain to crawl for federation records.
|
||||
# @param visited [Set<String>] domains processed during this crawl.
|
||||
# @param per_response_limit [Integer, nil] maximum entries processed per response.
|
||||
# @param overall_limit [Integer, nil] maximum unique domains visited.
|
||||
# @return [Set<String>] updated set of visited domains.
|
||||
def ingest_known_instances_from!(
|
||||
db,
|
||||
domain,
|
||||
visited: nil,
|
||||
per_response_limit: nil,
|
||||
overall_limit: nil
|
||||
)
|
||||
sanitized = sanitize_instance_domain(domain)
|
||||
return visited || Set.new unless sanitized
|
||||
|
||||
visited ||= Set.new
|
||||
|
||||
overall_limit ||= PotatoMesh::Config.federation_max_domains_per_crawl
|
||||
per_response_limit ||= PotatoMesh::Config.federation_max_instances_per_response
|
||||
|
||||
if overall_limit && overall_limit.positive? && visited.size >= overall_limit
|
||||
debug_log(
|
||||
"Skipped remote instance crawl due to crawl limit",
|
||||
context: "federation.instances",
|
||||
domain: sanitized,
|
||||
limit: overall_limit,
|
||||
)
|
||||
return visited
|
||||
end
|
||||
|
||||
return visited if visited.include?(sanitized)
|
||||
|
||||
visited << sanitized
|
||||
|
||||
payload, metadata = fetch_instance_json(sanitized, "/api/instances")
|
||||
unless payload.is_a?(Array)
|
||||
warn_log(
|
||||
"Failed to load remote federation instances",
|
||||
context: "federation.instances",
|
||||
domain: sanitized,
|
||||
reason: Array(metadata).map(&:to_s).join("; "),
|
||||
)
|
||||
return visited
|
||||
end
|
||||
|
||||
processed_entries = 0
|
||||
payload.each do |entry|
|
||||
if per_response_limit && per_response_limit.positive? && processed_entries >= per_response_limit
|
||||
debug_log(
|
||||
"Skipped remote instance entry due to response limit",
|
||||
context: "federation.instances",
|
||||
domain: sanitized,
|
||||
limit: per_response_limit,
|
||||
)
|
||||
break
|
||||
end
|
||||
|
||||
if overall_limit && overall_limit.positive? && visited.size >= overall_limit
|
||||
debug_log(
|
||||
"Skipped remote instance entry due to crawl limit",
|
||||
context: "federation.instances",
|
||||
domain: sanitized,
|
||||
limit: overall_limit,
|
||||
)
|
||||
break
|
||||
end
|
||||
|
||||
processed_entries += 1
|
||||
attributes, signature, reason = remote_instance_attributes_from_payload(entry)
|
||||
unless attributes && signature
|
||||
warn_log(
|
||||
"Discarded remote instance entry",
|
||||
context: "federation.instances",
|
||||
domain: sanitized,
|
||||
reason: reason || "invalid payload",
|
||||
)
|
||||
next
|
||||
end
|
||||
|
||||
if attributes[:is_private]
|
||||
debug_log(
|
||||
"Skipped private remote instance",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
)
|
||||
next
|
||||
end
|
||||
|
||||
unless verify_instance_signature(attributes, signature, attributes[:pubkey])
|
||||
warn_log(
|
||||
"Discarded remote instance entry",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
reason: "invalid signature",
|
||||
)
|
||||
next
|
||||
end
|
||||
|
||||
attributes[:is_private] = false if attributes[:is_private].nil?
|
||||
|
||||
remote_nodes, node_metadata = fetch_instance_json(attributes[:domain], "/api/nodes")
|
||||
unless remote_nodes
|
||||
warn_log(
|
||||
"Failed to load remote node data",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
reason: Array(node_metadata).map(&:to_s).join("; "),
|
||||
)
|
||||
next
|
||||
end
|
||||
|
||||
fresh, freshness_reason = validate_remote_nodes(remote_nodes)
|
||||
unless fresh
|
||||
warn_log(
|
||||
"Discarded remote instance entry",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
reason: freshness_reason || "stale node data",
|
||||
)
|
||||
next
|
||||
end
|
||||
|
||||
begin
|
||||
upsert_instance_record(db, attributes, signature)
|
||||
ingest_known_instances_from!(
|
||||
db,
|
||||
attributes[:domain],
|
||||
visited: visited,
|
||||
per_response_limit: per_response_limit,
|
||||
overall_limit: overall_limit,
|
||||
)
|
||||
rescue ArgumentError => e
|
||||
warn_log(
|
||||
"Failed to persist remote instance",
|
||||
context: "federation.instances",
|
||||
domain: attributes[:domain],
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
visited
|
||||
end
|
||||
|
||||
# Resolve the host component of a remote URI and ensure the destination is
|
||||
# safe for federation HTTP requests.
|
||||
#
|
||||
# The method performs a DNS lookup using Addrinfo to capture every
|
||||
# available address for the supplied URI host. The resulting addresses are
|
||||
# converted to {IPAddr} objects for consistent inspection via
|
||||
# {restricted_ip_address?}. When all resolved addresses fall within
|
||||
# restricted ranges, the method raises an ArgumentError so callers can
|
||||
# abort the federation request before contacting the remote endpoint.
|
||||
#
|
||||
# @param uri [URI::Generic] remote endpoint candidate.
|
||||
# @return [Array<IPAddr>] list of resolved, unrestricted IP addresses.
|
||||
# @raise [ArgumentError] when +uri.host+ is blank or resolves solely to
|
||||
# restricted addresses.
|
||||
def resolve_remote_ip_addresses(uri)
|
||||
host = uri&.host
|
||||
raise ArgumentError, "URI missing host" unless host
|
||||
|
||||
addrinfo_records = Addrinfo.getaddrinfo(host, nil, Socket::AF_UNSPEC, Socket::SOCK_STREAM)
|
||||
addresses = addrinfo_records.filter_map do |addr|
|
||||
begin
|
||||
IPAddr.new(addr.ip_address)
|
||||
rescue IPAddr::InvalidAddressError
|
||||
nil
|
||||
end
|
||||
end
|
||||
unique_addresses = addresses.uniq { |ip| [ip.family, ip.to_s] }
|
||||
unrestricted_addresses = unique_addresses.reject { |ip| restricted_ip_address?(ip) }
|
||||
|
||||
if unique_addresses.any? && unrestricted_addresses.empty?
|
||||
raise ArgumentError, "restricted domain"
|
||||
end
|
||||
|
||||
unrestricted_addresses
|
||||
end
|
||||
|
||||
# Build an HTTP client configured for communication with a remote instance.
|
||||
#
|
||||
# @param uri [URI::Generic] target URI describing the remote endpoint.
|
||||
# @return [Net::HTTP] HTTP client ready to execute the request.
|
||||
def build_remote_http_client(uri)
|
||||
remote_addresses = resolve_remote_ip_addresses(uri)
|
||||
http = Net::HTTP.new(uri.host, uri.port)
|
||||
if http.respond_to?(:ipaddr=) && remote_addresses.any?
|
||||
http.ipaddr = remote_addresses.first.to_s
|
||||
end
|
||||
http.open_timeout = PotatoMesh::Config.remote_instance_http_timeout
|
||||
http.read_timeout = PotatoMesh::Config.remote_instance_read_timeout
|
||||
http.use_ssl = uri.scheme == "https"
|
||||
return http unless http.use_ssl?
|
||||
|
||||
http.verify_mode = OpenSSL::SSL::VERIFY_PEER
|
||||
http.min_version = :TLS1_2 if http.respond_to?(:min_version=)
|
||||
store = remote_instance_cert_store
|
||||
http.cert_store = store if store
|
||||
callback = remote_instance_verify_callback
|
||||
http.verify_callback = callback if callback
|
||||
http
|
||||
end
|
||||
|
||||
# Construct a certificate store that disables strict CRL enforcement.
|
||||
#
|
||||
# OpenSSL may fail remote requests when certificate revocation lists are
|
||||
# unavailable from the issuing authority. The returned store mirrors the
|
||||
# default system trust store while clearing CRL-related flags so that
|
||||
# federation announcements can still succeed when CRLs cannot be fetched.
|
||||
#
|
||||
# @return [OpenSSL::X509::Store, nil] configured store or nil when setup fails.
|
||||
def remote_instance_cert_store
|
||||
return @remote_instance_cert_store if defined?(@remote_instance_cert_store) && @remote_instance_cert_store
|
||||
|
||||
store = OpenSSL::X509::Store.new
|
||||
store.set_default_paths
|
||||
store.flags = 0 if store.respond_to?(:flags=)
|
||||
@remote_instance_cert_store = store
|
||||
rescue OpenSSL::X509::StoreError => e
|
||||
debug_log(
|
||||
"Failed to initialize certificate store for federation HTTP: #{e.message}",
|
||||
)
|
||||
@remote_instance_cert_store = nil
|
||||
end
|
||||
|
||||
# Build a TLS verification callback that tolerates CRL availability failures.
|
||||
#
|
||||
# Some certificate authorities publish CRL endpoints that may occasionally be
|
||||
# unreachable. When OpenSSL cannot download the CRL it raises the
|
||||
# V_ERR_UNABLE_TO_GET_CRL error which would otherwise cause HTTPS federation
|
||||
# announcements to abort. The generated callback accepts those specific
|
||||
# failures while preserving strict verification for all other errors.
|
||||
#
|
||||
# @return [Proc, nil] verification callback or nil when creation fails.
|
||||
def remote_instance_verify_callback
|
||||
if defined?(@remote_instance_verify_callback) && @remote_instance_verify_callback
|
||||
return @remote_instance_verify_callback
|
||||
end
|
||||
|
||||
callback = lambda do |preverify_ok, store_context|
|
||||
return true if preverify_ok
|
||||
|
||||
if store_context && crl_unavailable_error?(store_context.error)
|
||||
debug_log(
|
||||
"Ignoring TLS CRL retrieval failure during federation request",
|
||||
context: "federation.announce",
|
||||
)
|
||||
true
|
||||
else
|
||||
false
|
||||
end
|
||||
end
|
||||
|
||||
@remote_instance_verify_callback = callback
|
||||
rescue StandardError => e
|
||||
debug_log(
|
||||
"Failed to initialize federation TLS verify callback: #{e.message}",
|
||||
context: "federation.announce",
|
||||
)
|
||||
@remote_instance_verify_callback = nil
|
||||
end
|
||||
|
||||
# Determine whether the supplied OpenSSL verification error corresponds to a
|
||||
# missing certificate revocation list.
|
||||
#
|
||||
# @param error_code [Integer, nil] OpenSSL verification error value.
|
||||
# @return [Boolean] true when the error should be ignored.
|
||||
def crl_unavailable_error?(error_code)
|
||||
allowed_errors = [OpenSSL::X509::V_ERR_UNABLE_TO_GET_CRL]
|
||||
if defined?(OpenSSL::X509::V_ERR_UNABLE_TO_GET_CRL_ISSUER)
|
||||
allowed_errors << OpenSSL::X509::V_ERR_UNABLE_TO_GET_CRL_ISSUER
|
||||
end
|
||||
allowed_errors.include?(error_code)
|
||||
end
|
||||
|
||||
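# Validate the signed well-known document served by a remote instance,
# checking the advertised domain, public key, algorithm, and signature.
#
# @param document [Object] parsed JSON document.
# @param domain [String] domain the document is expected to describe.
# @param pubkey [String] PEM encoded public key the document must match.
# @return [Array(Boolean, String), Array(Boolean, nil)] validity flag and optional failure reason.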
def validate_well_known_document(document, domain, pubkey)
|
||||
unless document.is_a?(Hash)
|
||||
return [false, "document is not an object"]
|
||||
end
|
||||
|
||||
remote_pubkey = sanitize_public_key_pem(document["publicKey"])
|
||||
return [false, "public key missing"] unless remote_pubkey
|
||||
return [false, "public key mismatch"] unless remote_pubkey == pubkey
|
||||
|
||||
remote_domain = string_or_nil(document["domain"])
|
||||
return [false, "domain missing"] unless remote_domain
|
||||
return [false, "domain mismatch"] unless remote_domain.casecmp?(domain)
|
||||
|
||||
algorithm = string_or_nil(document["signatureAlgorithm"])
|
||||
unless algorithm&.casecmp?(PotatoMesh::Config.instance_signature_algorithm)
|
||||
return [false, "unsupported signature algorithm"]
|
||||
end
|
||||
|
||||
signed_payload_b64 = string_or_nil(document["signedPayload"])
|
||||
signature_b64 = string_or_nil(document["signature"])
|
||||
return [false, "missing signed payload"] unless signed_payload_b64
|
||||
return [false, "missing signature"] unless signature_b64
|
||||
|
||||
signed_payload = Base64.strict_decode64(signed_payload_b64)
|
||||
signature = Base64.strict_decode64(signature_b64)
|
||||
key = OpenSSL::PKey::RSA.new(remote_pubkey)
|
||||
unless key.verify(OpenSSL::Digest::SHA256.new, signature, signed_payload)
|
||||
return [false, "invalid well-known signature"]
|
||||
end
|
||||
|
||||
payload = JSON.parse(signed_payload)
|
||||
unless payload.is_a?(Hash)
|
||||
return [false, "signed payload is not an object"]
|
||||
end
|
||||
|
||||
payload_domain = string_or_nil(payload["domain"])
|
||||
payload_pubkey = sanitize_public_key_pem(payload["publicKey"])
|
||||
return [false, "signed payload domain mismatch"] unless payload_domain&.casecmp?(domain)
|
||||
return [false, "signed payload public key mismatch"] unless payload_pubkey == pubkey
|
||||
|
||||
[true, nil]
|
||||
rescue ArgumentError, OpenSSL::PKey::PKeyError => e
|
||||
[false, e.message]
|
||||
rescue JSON::ParserError => e
|
||||
[false, "signed payload JSON error: #{e.message}"]
|
||||
end
|
||||
|
||||
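# Check that a remote node listing is large and recent enough to trust.
#
# @param nodes [Object] parsed response from the remote /api/nodes endpoint.
# @return [Array(Boolean, String), Array(Boolean, nil)] validity flag and optional failure reason.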
def validate_remote_nodes(nodes)
|
||||
unless nodes.is_a?(Array)
|
||||
return [false, "node response is not an array"]
|
||||
end
|
||||
|
||||
if nodes.length < PotatoMesh::Config.remote_instance_min_node_count
|
||||
return [false, "insufficient nodes"]
|
||||
end
|
||||
|
||||
latest = nodes.filter_map do |node|
|
||||
next unless node.is_a?(Hash)
|
||||
|
||||
last_heard_values = []
|
||||
last_heard_values << coerce_integer(node["last_heard"])
|
||||
last_heard_values << coerce_integer(node["lastHeard"])
|
||||
last_heard_values.compact.max
|
||||
end.compact.max
|
||||
|
||||
return [false, "missing last_heard data"] unless latest
|
||||
|
||||
cutoff = Time.now.to_i - PotatoMesh::Config.remote_instance_max_node_age
|
||||
return [false, "node data is stale"] if latest < cutoff
|
||||
|
||||
[true, nil]
|
||||
end
|
||||
|
||||
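# Insert or update an instance record keyed by its identifier, removing any
# existing record that claims the same domain under a different identifier.
#
# @param db [SQLite3::Database] open database connection.
# @param attributes [Hash] sanitised instance attributes.
# @param signature [String] Base64 encoded signature stored alongside the record.
# @return [void]
# @raise [ArgumentError] when the domain is invalid or is a restricted IP address.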
def upsert_instance_record(db, attributes, signature)
|
||||
sanitized_domain = sanitize_instance_domain(attributes[:domain])
|
||||
raise ArgumentError, "invalid domain" unless sanitized_domain
|
||||
|
||||
ip = ip_from_domain(sanitized_domain)
|
||||
if ip && restricted_ip_address?(ip)
|
||||
raise ArgumentError, "restricted domain"
|
||||
end
|
||||
|
||||
normalized_domain = sanitized_domain
|
||||
existing_id = with_busy_retry do
|
||||
db.get_first_value(
|
||||
"SELECT id FROM instances WHERE domain = ?",
|
||||
normalized_domain,
|
||||
)
|
||||
end
|
||||
if existing_id && existing_id != attributes[:id]
|
||||
with_busy_retry do
|
||||
db.execute("DELETE FROM instances WHERE id = ?", existing_id)
|
||||
end
|
||||
debug_log(
|
||||
"Removed conflicting instance by domain",
|
||||
context: "federation.instances",
|
||||
domain: normalized_domain,
|
||||
replaced_id: existing_id,
|
||||
incoming_id: attributes[:id],
|
||||
)
|
||||
end
|
||||
|
||||
sql = <<~SQL
|
||||
INSERT INTO instances (
|
||||
id, domain, pubkey, name, version, channel, frequency,
|
||||
latitude, longitude, last_update_time, is_private, signature
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
ON CONFLICT(id) DO UPDATE SET
|
||||
domain=excluded.domain,
|
||||
pubkey=excluded.pubkey,
|
||||
name=excluded.name,
|
||||
version=excluded.version,
|
||||
channel=excluded.channel,
|
||||
frequency=excluded.frequency,
|
||||
latitude=excluded.latitude,
|
||||
longitude=excluded.longitude,
|
||||
last_update_time=excluded.last_update_time,
|
||||
is_private=excluded.is_private,
|
||||
signature=excluded.signature
|
||||
SQL
|
||||
|
||||
params = [
|
||||
attributes[:id],
|
||||
normalized_domain,
|
||||
attributes[:pubkey],
|
||||
attributes[:name],
|
||||
attributes[:version],
|
||||
attributes[:channel],
|
||||
attributes[:frequency],
|
||||
attributes[:latitude],
|
||||
attributes[:longitude],
|
||||
attributes[:last_update_time],
|
||||
attributes[:is_private] ? 1 : 0,
|
||||
signature,
|
||||
]
|
||||
|
||||
with_busy_retry do
|
||||
db.execute(sql, params)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -0,0 +1,121 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
require "fileutils"
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
# Filesystem helpers responsible for migrating legacy assets to XDG compliant
|
||||
# directories and preparing runtime storage locations.
|
||||
module Filesystem
|
||||
# Execute all filesystem migrations required before the application boots.
|
||||
#
|
||||
# @return [void]
|
||||
def perform_initial_filesystem_setup!
|
||||
migrate_legacy_database!
|
||||
migrate_legacy_keyfile!
|
||||
migrate_legacy_well_known_assets!
|
||||
end
|
||||
|
||||
private
|
||||
|
||||
# Copy the legacy database file into the configured XDG data directory.
|
||||
#
|
||||
# @return [void]
|
||||
def migrate_legacy_database!
|
||||
return unless default_database_destination?
|
||||
|
||||
migrate_legacy_file(
|
||||
PotatoMesh::Config.legacy_db_path,
|
||||
PotatoMesh::Config.db_path,
|
||||
chmod: 0o600,
|
||||
context: "filesystem.db",
|
||||
)
|
||||
end
|
||||
|
||||
# Copy the legacy keyfile into the configured XDG configuration directory.
|
||||
#
|
||||
# @return [void]
|
||||
def migrate_legacy_keyfile!
|
||||
PotatoMesh::Config.legacy_keyfile_candidates.each do |candidate|
|
||||
migrate_legacy_file(
|
||||
candidate,
|
||||
PotatoMesh::Config.keyfile_path,
|
||||
chmod: 0o600,
|
||||
context: "filesystem.keys",
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
# Copy the legacy well-known document into the configured XDG directory.
|
||||
#
|
||||
# @return [void]
|
||||
def migrate_legacy_well_known_assets!
|
||||
destination = File.join(
|
||||
PotatoMesh::Config.well_known_storage_root,
|
||||
File.basename(PotatoMesh::Config.well_known_relative_path),
|
||||
)
|
||||
|
||||
PotatoMesh::Config.legacy_well_known_candidates.each do |candidate|
|
||||
migrate_legacy_file(
|
||||
candidate,
|
||||
destination,
|
||||
chmod: 0o644,
|
||||
context: "filesystem.well_known",
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
# Migrate a legacy file if it exists and the destination has not been created yet.
|
||||
#
|
||||
# @param source_path [String] absolute path to the legacy file.
|
||||
# @param destination_path [String] absolute path to the new file location.
|
||||
# @param chmod [Integer, nil] optional permission bits applied to the destination file.
|
||||
# @param context [String] logging context describing the migration target.
|
||||
# @return [void]
|
||||
def migrate_legacy_file(source_path, destination_path, chmod:, context:)
|
||||
return if source_path == destination_path
|
||||
return unless File.exist?(source_path)
|
||||
return if File.exist?(destination_path)
|
||||
|
||||
FileUtils.mkdir_p(File.dirname(destination_path))
|
||||
FileUtils.cp(source_path, destination_path)
|
||||
File.chmod(chmod, destination_path) if chmod
|
||||
|
||||
debug_log(
|
||||
"Migrated legacy file to XDG directory",
|
||||
context: context,
|
||||
source: source_path,
|
||||
destination: destination_path,
|
||||
)
|
||||
rescue SystemCallError => e
|
||||
warn_log(
|
||||
"Failed to migrate legacy file",
|
||||
context: context,
|
||||
source: source_path,
|
||||
destination: destination_path,
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
end
|
||||
|
||||
# Determine whether the database destination matches the configured default.
|
||||
#
|
||||
# @return [Boolean] true when the destination should receive migrated data.
|
||||
def default_database_destination?
|
||||
PotatoMesh::Config.db_path == PotatoMesh::Config.default_db_path
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -0,0 +1,351 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
# Shared view and controller helper methods. Each helper is documented with
|
||||
# its intended consumers to ensure consistent behaviour across the Sinatra
|
||||
# application.
|
||||
module Helpers
|
||||
# Fetch an application level constant exposed by {PotatoMesh::Application}.
|
||||
#
|
||||
# @param name [Symbol] constant identifier to retrieve.
|
||||
# @return [Object] constant value stored on the application class.
|
||||
def app_constant(name)
|
||||
PotatoMesh::Application.const_get(name)
|
||||
end
|
||||
|
||||
# Retrieve the configured Prometheus report identifiers as an array.
|
||||
#
|
||||
# @return [Array<String>] list of report IDs used on the metrics page.
|
||||
def prom_report_ids
|
||||
PotatoMesh::Config.prom_report_id_list
|
||||
end
|
||||
|
||||
# Read a text configuration value with a fallback.
|
||||
#
|
||||
# @param key [String] environment variable key.
|
||||
# @param default [String] fallback value when unset.
|
||||
# @return [String] sanitised configuration string.
|
||||
def fetch_config_string(key, default)
|
||||
PotatoMesh::Config.fetch_string(key, default)
|
||||
end
|
||||
|
||||
# Proxy for {PotatoMesh::Sanitizer.string_or_nil}.
|
||||
#
|
||||
# @param value [Object] value to sanitise.
|
||||
# @return [String, nil] cleaned string or nil.
|
||||
def string_or_nil(value)
|
||||
PotatoMesh::Sanitizer.string_or_nil(value)
|
||||
end
|
||||
|
||||
# Proxy for {PotatoMesh::Sanitizer.sanitize_instance_domain}.
|
||||
#
|
||||
# @param value [Object] candidate domain string.
|
||||
# @param downcase [Boolean] whether to force lowercase normalisation.
|
||||
# @return [String, nil] canonical domain or nil.
|
||||
def sanitize_instance_domain(value, downcase: true)
|
||||
PotatoMesh::Sanitizer.sanitize_instance_domain(value, downcase: downcase)
|
||||
end
|
||||
|
||||
# Proxy for {PotatoMesh::Sanitizer.instance_domain_host}.
|
||||
#
|
||||
# @param domain [String] domain literal.
|
||||
# @return [String, nil] host portion of the domain.
|
||||
def instance_domain_host(domain)
|
||||
PotatoMesh::Sanitizer.instance_domain_host(domain)
|
||||
end
|
||||
|
||||
# Proxy for {PotatoMesh::Sanitizer.ip_from_domain}.
|
||||
#
|
||||
# @param domain [String] domain literal.
|
||||
# @return [IPAddr, nil] parsed address object.
|
||||
def ip_from_domain(domain)
|
||||
PotatoMesh::Sanitizer.ip_from_domain(domain)
|
||||
end
|
||||
|
||||
# Proxy for {PotatoMesh::Sanitizer.sanitized_string}.
|
||||
#
|
||||
# @param value [Object] arbitrary input.
|
||||
# @return [String] trimmed string representation.
|
||||
def sanitized_string(value)
|
||||
PotatoMesh::Sanitizer.sanitized_string(value)
|
||||
end
|
||||
|
||||
# Retrieve the site name presented to users.
|
||||
#
|
||||
# @return [String] sanitised site label.
|
||||
def sanitized_site_name
|
||||
PotatoMesh::Sanitizer.sanitized_site_name
|
||||
end
|
||||
|
||||
# Retrieve the configured channel.
|
||||
#
|
||||
# @return [String] sanitised channel identifier.
|
||||
def sanitized_channel
|
||||
PotatoMesh::Sanitizer.sanitized_channel
|
||||
end
|
||||
|
||||
# Retrieve the configured frequency descriptor.
|
||||
#
|
||||
# @return [String] sanitised frequency text.
|
||||
def sanitized_frequency
|
||||
PotatoMesh::Sanitizer.sanitized_frequency
|
||||
end
|
||||
|
||||
# Build the configuration hash exposed to the frontend application.
|
||||
#
|
||||
# @return [Hash] JSON serialisable configuration payload.
|
||||
def frontend_app_config
|
||||
{
|
||||
refreshIntervalSeconds: PotatoMesh::Config.refresh_interval_seconds,
|
||||
refreshMs: PotatoMesh::Config.refresh_interval_seconds * 1000,
|
||||
chatEnabled: !private_mode?,
|
||||
channel: sanitized_channel,
|
||||
frequency: sanitized_frequency,
|
||||
contactLink: sanitized_contact_link,
|
||||
contactLinkUrl: sanitized_contact_link_url,
|
||||
mapCenter: {
|
||||
lat: PotatoMesh::Config.map_center_lat,
|
||||
lon: PotatoMesh::Config.map_center_lon,
|
||||
},
|
||||
maxDistanceKm: PotatoMesh::Config.max_distance_km,
|
||||
tileFilters: PotatoMesh::Config.tile_filters,
|
||||
instanceDomain: app_constant(:INSTANCE_DOMAIN),
|
||||
}
|
||||
end
|
||||
|
||||
# Retrieve the configured contact link or nil when unset.
|
||||
#
|
||||
# @return [String, nil] contact link identifier.
|
||||
def sanitized_contact_link
|
||||
PotatoMesh::Sanitizer.sanitized_contact_link
|
||||
end
|
||||
|
||||
# Retrieve the hyperlink derived from the configured contact link.
|
||||
#
|
||||
# @return [String, nil] hyperlink pointing to the community chat.
|
||||
def sanitized_contact_link_url
|
||||
PotatoMesh::Sanitizer.sanitized_contact_link_url
|
||||
end
|
||||
|
||||
# Retrieve the configured maximum node distance in kilometres.
|
||||
#
|
||||
# @return [Numeric, nil] maximum distance or nil if disabled.
|
||||
def sanitized_max_distance_km
|
||||
PotatoMesh::Sanitizer.sanitized_max_distance_km
|
||||
end
|
||||
|
||||
# Format a kilometre value for human readable output.
|
||||
#
|
||||
# @param distance [Numeric] distance in kilometres.
|
||||
# @return [String] formatted distance value.
|
||||
def formatted_distance_km(distance)
|
||||
PotatoMesh::Meta.formatted_distance_km(distance)
|
||||
end
|
||||
|
||||
# Generate the meta description used in SEO tags.
|
||||
#
|
||||
# @return [String] combined descriptive sentence.
|
||||
def meta_description
|
||||
PotatoMesh::Meta.description(private_mode: private_mode?)
|
||||
end
|
||||
|
||||
# Generate the structured meta configuration for the UI.
|
||||
#
|
||||
# @return [Hash] frozen configuration metadata.
|
||||
def meta_configuration
|
||||
PotatoMesh::Meta.configuration(private_mode: private_mode?)
|
||||
end
|
||||
|
||||
# Coerce an arbitrary value into an integer when possible.
|
||||
#
|
||||
# @param value [Object] user supplied value.
|
||||
# @return [Integer, nil] parsed integer or nil when invalid.
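# @example Illustrative coercions
#   coerce_integer("42")    # => 42
#   coerce_integer("0x1f")  # => 31
#   coerce_integer("3.9")   # => 3
#   coerce_integer("n/a")   # => nil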
|
||||
def coerce_integer(value)
|
||||
case value
|
||||
when Integer
|
||||
value
|
||||
when Float
|
||||
value.finite? ? value.to_i : nil
|
||||
when Numeric
|
||||
value.to_i
|
||||
when String
|
||||
trimmed = value.strip
|
||||
return nil if trimmed.empty?
|
||||
return trimmed.to_i(16) if trimmed.match?(/\A0[xX][0-9A-Fa-f]+\z/)
|
||||
return trimmed.to_i(10) if trimmed.match?(/\A-?\d+\z/)
|
||||
begin
|
||||
float_val = Float(trimmed)
|
||||
float_val.finite? ? float_val.to_i : nil
|
||||
rescue ArgumentError
|
||||
nil
|
||||
end
|
||||
else
|
||||
nil
|
||||
end
|
||||
end
|
||||
|
||||
# Coerce an arbitrary value into a floating point number when possible.
|
||||
#
|
||||
# @param value [Object] user supplied value.
|
||||
# @return [Float, nil] parsed float or nil when invalid.
|
||||
def coerce_float(value)
|
||||
case value
|
||||
when Float
|
||||
value.finite? ? value : nil
|
||||
when Integer
|
||||
value.to_f
|
||||
when Numeric
|
||||
value.to_f
|
||||
when String
|
||||
trimmed = value.strip
|
||||
return nil if trimmed.empty?
|
||||
begin
|
||||
float_val = Float(trimmed)
|
||||
float_val.finite? ? float_val : nil
|
||||
rescue ArgumentError
|
||||
nil
|
||||
end
|
||||
else
|
||||
nil
|
||||
end
|
||||
end
|
||||
|
||||
# Coerce an arbitrary value into a boolean according to common truthy
|
||||
# conventions.
|
||||
#
|
||||
# @param value [Object] user supplied value.
|
||||
# @return [Boolean, nil] boolean interpretation or nil when unknown.
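# @example Illustrative coercions
#   coerce_boolean("yes")   # => true
#   coerce_boolean(0)       # => false
#   coerce_boolean("maybe") # => nil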
|
||||
def coerce_boolean(value)
|
||||
case value
|
||||
when true, false
|
||||
value
|
||||
when String
|
||||
trimmed = value.strip.downcase
|
||||
return true if %w[true 1 yes y].include?(trimmed)
|
||||
return false if %w[false 0 no n].include?(trimmed)
|
||||
nil
|
||||
when Numeric
|
||||
!value.to_i.zero?
|
||||
else
|
||||
nil
|
||||
end
|
||||
end
|
||||
|
||||
# Normalise PEM encoded public key content into LF line endings.
|
||||
#
|
||||
# @param value [String, #to_s, nil] raw PEM content.
|
||||
# @return [String, nil] cleaned PEM string or nil when blank.
|
||||
def sanitize_public_key_pem(value)
|
||||
return nil if value.nil?
|
||||
|
||||
pem = value.is_a?(String) ? value : value.to_s
|
||||
pem = pem.gsub(/\r\n?/, "\n")
|
||||
return nil if pem.strip.empty?
|
||||
|
||||
pem
|
||||
end
|
||||
|
||||
# Recursively coerce hash keys to strings and normalise nested arrays.
|
||||
#
|
||||
# @param value [Object] JSON compatible value.
|
||||
# @return [Object] structure with canonical string keys.
|
||||
def normalize_json_value(value)
|
||||
case value
|
||||
when Hash
|
||||
value.each_with_object({}) do |(key, val), memo|
|
||||
memo[key.to_s] = normalize_json_value(val)
|
||||
end
|
||||
when Array
|
||||
value.map { |element| normalize_json_value(element) }
|
||||
else
|
||||
value
|
||||
end
|
||||
end
|
||||
|
||||
# Parse JSON payloads or hashes into normalised hashes with string keys.
|
||||
#
|
||||
# @param value [Hash, String, nil] raw JSON object or string representation.
|
||||
# @return [Hash, nil] canonicalised hash or nil when parsing fails.
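# @example Illustrative normalisation
#   normalize_json_object('{"a": 1}')          # => { "a" => 1 }
#   normalize_json_object({ foo: { bar: 2 } }) # => { "foo" => { "bar" => 2 } }
#   normalize_json_object("not json")          # => nil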
|
||||
def normalize_json_object(value)
|
||||
case value
|
||||
when Hash
|
||||
normalize_json_value(value)
|
||||
when String
|
||||
trimmed = value.strip
|
||||
return nil if trimmed.empty?
|
||||
begin
|
||||
parsed = JSON.parse(trimmed)
|
||||
rescue JSON::ParserError
|
||||
return nil
|
||||
end
|
||||
parsed.is_a?(Hash) ? normalize_json_value(parsed) : nil
|
||||
else
|
||||
nil
|
||||
end
|
||||
end
|
||||
|
||||
# Emit a structured debug log entry tagged with the calling context.
|
||||
#
|
||||
# @param message [String] text to emit.
|
||||
# @param context [String] logical source of the message.
|
||||
# @param metadata [Hash] additional structured key/value data.
|
||||
# @return [void]
|
||||
def debug_log(message, context: "app", **metadata)
|
||||
logger = PotatoMesh::Logging.logger_for(self)
|
||||
PotatoMesh::Logging.log(logger, :debug, message, context: context, **metadata)
|
||||
end
|
||||
|
||||
# Emit a structured warning log entry tagged with the calling context.
|
||||
#
|
||||
# @param message [String] text to emit.
|
||||
# @param context [String] logical source of the message.
|
||||
# @param metadata [Hash] additional structured key/value data.
|
||||
# @return [void]
|
||||
def warn_log(message, context: "app", **metadata)
|
||||
logger = PotatoMesh::Logging.logger_for(self)
|
||||
PotatoMesh::Logging.log(logger, :warn, message, context: context, **metadata)
|
||||
end
|
||||
|
||||
# Indicate whether private mode has been requested.
|
||||
#
|
||||
# @return [Boolean] true when PRIVATE=1.
|
||||
def private_mode?
|
||||
ENV["PRIVATE"] == "1"
|
||||
end
|
||||
|
||||
# Identify whether the Rack environment corresponds to the test suite.
|
||||
#
|
||||
# @return [Boolean] true when RACK_ENV is "test".
|
||||
def test_environment?
|
||||
ENV["RACK_ENV"] == "test"
|
||||
end
|
||||
|
||||
# Determine whether federation features should be active.
|
||||
#
|
||||
# @return [Boolean] true when federation configuration allows it.
|
||||
def federation_enabled?
|
||||
ENV.fetch("FEDERATION", "1") != "0" && !private_mode?
|
||||
end
|
||||
|
||||
# Determine whether federation announcements should run asynchronously.
|
||||
#
|
||||
# @return [Boolean] true when announcements are enabled.
|
||||
def federation_announcements_active?
|
||||
federation_enabled? && !test_environment?
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -0,0 +1,288 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Identity
|
||||
# Resolve the current application version string using git metadata when available.
|
||||
#
|
||||
# @return [String] semantic version compatible identifier.
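# @example Hypothetical `git describe` outputs and the derived version
#   "v1.2.3-0-gabc1234" # => "v1.2.3"
#   "v1.2.3-5-gabc1234" # => "v1.2.3+5-abc1234"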
|
||||
def determine_app_version
|
||||
repo_root = locate_git_repo_root(File.expand_path("../../..", __dir__))
|
||||
return PotatoMesh::Config.version_fallback unless repo_root
|
||||
|
||||
stdout, status = Open3.capture2("git", "-C", repo_root, "describe", "--tags", "--long", "--abbrev=7")
|
||||
return PotatoMesh::Config.version_fallback unless status.success?
|
||||
|
||||
raw = stdout.strip
|
||||
return PotatoMesh::Config.version_fallback if raw.empty?
|
||||
|
||||
match = /\A(?<tag>.+)-(?<count>\d+)-g(?<hash>[0-9a-f]+)\z/.match(raw)
|
||||
return raw unless match
|
||||
|
||||
tag = match[:tag]
|
||||
count = match[:count].to_i
|
||||
hash = match[:hash]
|
||||
return tag if count.zero?
|
||||
|
||||
"#{tag}+#{count}-#{hash}"
|
||||
rescue StandardError
|
||||
PotatoMesh::Config.version_fallback
|
||||
end
|
||||
|
||||
# Discover the root directory of the git repository containing the
|
||||
# application by traversing parent directories until a ``.git`` entry is
|
||||
# located. This supports both traditional repositories where ``.git`` is a
|
||||
# directory and worktree checkouts where it is a plain file.
|
||||
#
|
||||
# @param start_dir [String] absolute path where the search should begin.
|
||||
# @return [String, nil] absolute path to the repository root when found,
|
||||
# otherwise ``nil``.
|
||||
def locate_git_repo_root(start_dir)
|
||||
current = File.expand_path(start_dir)
|
||||
loop do
|
||||
git_entry = File.join(current, ".git")
|
||||
return current if File.exist?(git_entry)
|
||||
|
||||
parent = File.dirname(current)
|
||||
break if parent == current
|
||||
|
||||
current = parent
|
||||
end
|
||||
|
||||
nil
|
||||
end
|
||||
|
||||
# Load the persisted instance private key or generate a new one when absent.
|
||||
#
|
||||
# @return [Array<OpenSSL::PKey::RSA, Boolean>] tuple of key and generation flag.
|
||||
def load_or_generate_instance_private_key
|
||||
keyfile_path = PotatoMesh::Config.keyfile_path
|
||||
migrate_legacy_keyfile_for_identity!(keyfile_path)
|
||||
FileUtils.mkdir_p(File.dirname(keyfile_path))
|
||||
if File.exist?(keyfile_path)
|
||||
contents = File.binread(keyfile_path)
|
||||
return [OpenSSL::PKey.read(contents), false]
|
||||
end
|
||||
|
||||
key = OpenSSL::PKey::RSA.new(2048)
|
||||
File.open(keyfile_path, File::WRONLY | File::CREAT | File::TRUNC, 0o600) do |file|
|
||||
file.write(key.export)
|
||||
end
|
||||
[key, true]
|
||||
rescue OpenSSL::PKey::PKeyError, ArgumentError => e
|
||||
warn_log(
|
||||
"Failed to load instance private key",
|
||||
context: "identity.keys",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
key = OpenSSL::PKey::RSA.new(2048)
|
||||
File.open(keyfile_path, File::WRONLY | File::CREAT | File::TRUNC, 0o600) do |file|
|
||||
file.write(key.export)
|
||||
end
|
||||
[key, true]
|
||||
end
|
||||
|
||||
# Migrate an existing legacy keyfile into the configured destination.
|
||||
#
|
||||
# @param destination_path [String] absolute path where the keyfile should reside.
|
||||
# @return [void]
|
||||
def migrate_legacy_keyfile_for_identity!(destination_path)
|
||||
return if File.exist?(destination_path)
|
||||
|
||||
PotatoMesh::Config.legacy_keyfile_candidates.each do |candidate|
|
||||
next unless File.exist?(candidate)
|
||||
next if candidate == destination_path
|
||||
|
||||
begin
|
||||
FileUtils.mkdir_p(File.dirname(destination_path))
|
||||
FileUtils.cp(candidate, destination_path)
|
||||
File.chmod(0o600, destination_path)
|
||||
|
||||
debug_log(
|
||||
"Migrated legacy keyfile to XDG directory",
|
||||
context: "identity.keys",
|
||||
source: candidate,
|
||||
destination: destination_path,
|
||||
)
|
||||
rescue SystemCallError => e
|
||||
warn_log(
|
||||
"Failed to migrate legacy keyfile",
|
||||
context: "identity.keys",
|
||||
source: candidate,
|
||||
destination: destination_path,
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
next
|
||||
end
|
||||
|
||||
break
|
||||
end
|
||||
end
|
||||
|
||||
private :migrate_legacy_keyfile_for_identity!, :locate_git_repo_root
|
||||
|
||||
# Return the directory used to store well-known documents.
|
||||
#
|
||||
# @return [String] absolute path to the staging directory.
|
||||
def well_known_directory
|
||||
PotatoMesh::Config.well_known_storage_root
|
||||
end
|
||||
|
||||
# Determine the absolute path to the well-known document file.
|
||||
#
|
||||
# @return [String] filesystem path for the JSON document.
|
||||
def well_known_file_path
|
||||
File.join(
|
||||
well_known_directory,
|
||||
File.basename(PotatoMesh::Config.well_known_relative_path),
|
||||
)
|
||||
end
|
||||
|
||||
# Remove legacy well-known artifacts from previous releases.
|
||||
#
|
||||
# @return [void]
|
||||
def cleanup_legacy_well_known_artifacts
|
||||
legacy_path = PotatoMesh::Config.legacy_public_well_known_path
|
||||
FileUtils.rm_f(legacy_path)
|
||||
legacy_dir = File.dirname(legacy_path)
|
||||
FileUtils.rmdir(legacy_dir) if Dir.exist?(legacy_dir) && Dir.empty?(legacy_dir)
|
||||
rescue SystemCallError
|
||||
# Ignore errors removing legacy static files; failure only means the directory
|
||||
# or file did not exist or is in use.
|
||||
end
|
||||
|
||||
# Construct the JSON body and detached signature for the well-known document.
|
||||
#
|
||||
# @return [Array(String, String)] pair of JSON output and base64 signature.
|
||||
def build_well_known_document
|
||||
last_update = latest_node_update_timestamp
|
||||
domain_value = sanitize_instance_domain(app_constant(:INSTANCE_DOMAIN))
|
||||
|
||||
payload = {
|
||||
publicKey: app_constant(:INSTANCE_PUBLIC_KEY_PEM),
|
||||
name: sanitized_site_name,
|
||||
version: app_constant(:APP_VERSION),
|
||||
domain: domain_value,
|
||||
lastUpdate: last_update,
|
||||
}
|
||||
|
||||
signed_payload = JSON.generate(payload, sort_keys: true)
|
||||
signature = Base64.strict_encode64(
|
||||
app_constant(:INSTANCE_PRIVATE_KEY).sign(OpenSSL::Digest::SHA256.new, signed_payload),
|
||||
)
|
||||
|
||||
document = payload.merge(
|
||||
signature: signature,
|
||||
signatureAlgorithm: PotatoMesh::Config.instance_signature_algorithm,
|
||||
signedPayload: Base64.strict_encode64(signed_payload),
|
||||
)
|
||||
|
||||
json_output = JSON.pretty_generate(document)
|
||||
[json_output, signature]
|
||||
end
|
||||
|
||||
# Regenerate the well-known document when it is stale or when the existing
|
||||
# content no longer matches the current instance configuration.
|
||||
#
|
||||
# @return [void]
|
||||
def refresh_well_known_document_if_stale
|
||||
FileUtils.mkdir_p(well_known_directory)
|
||||
path = well_known_file_path
|
||||
now = Time.now
|
||||
json_output, signature = build_well_known_document
|
||||
expected_contents = json_output.end_with?("\n") ? json_output : "#{json_output}\n"
|
||||
|
||||
needs_update = true
|
||||
if File.exist?(path)
|
||||
current_contents = File.binread(path)
|
||||
mtime = File.mtime(path)
|
||||
if current_contents == expected_contents &&
|
||||
(now - mtime) < PotatoMesh::Config.well_known_refresh_interval
|
||||
needs_update = false
|
||||
end
|
||||
end
|
||||
|
||||
return unless needs_update
|
||||
|
||||
File.open(path, File::WRONLY | File::CREAT | File::TRUNC, 0o644) do |file|
|
||||
file.write(expected_contents)
|
||||
end
|
||||
|
||||
debug_log(
|
||||
"Refreshed well-known document content",
|
||||
context: "identity.well_known",
|
||||
path: PotatoMesh::Config.well_known_relative_path,
|
||||
bytes: json_output.bytesize,
|
||||
document: json_output,
|
||||
)
|
||||
debug_log(
|
||||
"Refreshed well-known document signature",
|
||||
context: "identity.well_known",
|
||||
path: PotatoMesh::Config.well_known_relative_path,
|
||||
algorithm: PotatoMesh::Config.instance_signature_algorithm,
|
||||
signature: signature,
|
||||
)
|
||||
end
|
||||
|
||||
# Retrieve the latest node update timestamp from the database.
|
||||
#
|
||||
# @return [Integer, nil] Unix timestamp or nil when unavailable.
|
||||
def latest_node_update_timestamp
|
||||
return nil unless File.exist?(PotatoMesh::Config.db_path)
|
||||
|
||||
db = open_database(readonly: true)
|
||||
value = db.get_first_value("SELECT MAX(last_heard) FROM nodes")
|
||||
value&.to_i
|
||||
rescue SQLite3::Exception
|
||||
nil
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
# Emit a debug entry describing the active instance key material.
|
||||
#
|
||||
# @return [void]
|
||||
def log_instance_public_key
|
||||
debug_log(
|
||||
"Loaded instance public key",
|
||||
context: "identity.keys",
|
||||
public_key_pem: app_constant(:INSTANCE_PUBLIC_KEY_PEM),
|
||||
)
|
||||
if app_constant(:INSTANCE_KEY_GENERATED)
|
||||
debug_log(
|
||||
"Generated new instance private key",
|
||||
context: "identity.keys",
|
||||
path: PotatoMesh::Config.keyfile_path,
|
||||
)
|
||||
end
|
||||
end
|
||||
|
||||
# Emit a debug entry describing how the instance domain was derived.
|
||||
#
|
||||
# @return [void]
|
||||
def log_instance_domain_resolution
|
||||
source = app_constant(:INSTANCE_DOMAIN_SOURCE) || :unknown
|
||||
debug_log(
|
||||
"Resolved instance domain",
|
||||
context: "identity.domain",
|
||||
source: source,
|
||||
domain: app_constant(:INSTANCE_DOMAIN),
|
||||
)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -0,0 +1,199 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
# Helper methods for maintaining and presenting instance records.
|
||||
module Instances
|
||||
      # Remove duplicate instance records grouped by their canonical domain name
      # while favouring the most recent entry.
      #
      # @return [void]
      def clean_duplicate_instances!
        db = open_database
        rows = with_busy_retry do
          db.execute(
            <<~SQL
              SELECT rowid, domain, last_update_time
              FROM instances
              WHERE domain IS NOT NULL AND TRIM(domain) != ''
            SQL
          )
        end

        grouped = rows.group_by do |row|
          sanitize_instance_domain(row[1])&.downcase
        rescue StandardError
          nil
        end

        deletions = []
        updates = {}

        grouped.each do |canonical_domain, entries|
          next if canonical_domain.nil?
          next if entries.size <= 1

          sorted_entries = entries.sort_by do |entry|
            timestamp = coerce_integer(entry[2]) || -1
            [timestamp, entry[0].to_i]
          end
          keeper = sorted_entries.last
          next unless keeper

          deletions.concat(sorted_entries[0...-1].map { |entry| entry[0].to_i })

          current_domain = entries.find { |entry| entry[0] == keeper[0] }&.[](1)
          if canonical_domain && current_domain != canonical_domain
            updates[keeper[0].to_i] = canonical_domain
          end

          removed_count = sorted_entries.length - 1
          warn_log(
            "Removed duplicate instance records",
            context: "instances.cleanup",
            domain: canonical_domain,
            removed: removed_count,
          ) if removed_count.positive?
        end

        unless deletions.empty?
          placeholders = Array.new(deletions.size, "?").join(",")
          with_busy_retry do
            db.execute("DELETE FROM instances WHERE rowid IN (#{placeholders})", deletions)
          end
        end

        updates.each do |rowid, canonical_domain|
          with_busy_retry do
            db.execute("UPDATE instances SET domain = ? WHERE rowid = ?", [canonical_domain, rowid])
          end
        end
      rescue SQLite3::Exception => e
        warn_log(
          "Failed to clean duplicate instances",
          context: "instances.cleanup",
          error_class: e.class.name,
          error_message: e.message,
        )
      ensure
        db&.close
      end

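      # Illustrative sketch of the keeper selection used above (standalone, with
      # placeholder rows): each group is ordered by [last_update_time, rowid], the
      # newest row wins, and every other rowid in the group is scheduled for deletion.
      #
      #   entries = [
      #     [1, "Mesh.Example", 100],   # rowid, stored domain, last_update_time
      #     [3, "mesh.example", nil],   # missing timestamp sorts first (treated as -1)
      #     [2, "MESH.example", 300],
      #   ]
      #   sorted    = entries.sort_by { |rowid, _domain, ts| [ts || -1, rowid] }
      #   keeper    = sorted.last                     # => [2, "MESH.example", 300]
      #   to_delete = sorted[0...-1].map(&:first)     # => [3, 1]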
# Normalise and validate an instance database row for API presentation.
|
||||
#
|
||||
# @param row [Hash] raw database row with string keys.
|
||||
# @return [Hash, nil] cleaned hash or +nil+ when the row is discarded.
|
||||
def normalize_instance_row(row)
|
||||
unless row.is_a?(Hash)
|
||||
warn_log(
|
||||
"Discarded malformed instance row",
|
||||
context: "instances.normalize",
|
||||
reason: "row not hash",
|
||||
)
|
||||
return nil
|
||||
end
|
||||
|
||||
id = string_or_nil(row["id"])
|
||||
domain = sanitize_instance_domain(row["domain"])&.downcase
|
||||
pubkey = sanitize_public_key_pem(row["pubkey"])
|
||||
signature = string_or_nil(row["signature"])
|
||||
last_update_time = coerce_integer(row["last_update_time"])
|
||||
is_private_raw = row["is_private"]
|
||||
private_flag = coerce_boolean(is_private_raw)
|
||||
if private_flag.nil?
|
||||
numeric_private = coerce_integer(is_private_raw)
|
||||
private_flag = !numeric_private.to_i.zero? if numeric_private
|
||||
end
|
||||
private_flag = false if private_flag.nil?
|
||||
|
||||
if id.nil? || domain.nil? || pubkey.nil?
|
||||
warn_log(
|
||||
"Discarded malformed instance row",
|
||||
context: "instances.normalize",
|
||||
instance_id: row["id"],
|
||||
domain: row["domain"],
|
||||
reason: "missing required fields",
|
||||
)
|
||||
return nil
|
||||
end
|
||||
|
||||
payload = {
|
||||
"id" => id,
|
||||
"domain" => domain,
|
||||
"pubkey" => pubkey,
|
||||
"name" => string_or_nil(row["name"]),
|
||||
"version" => string_or_nil(row["version"]),
|
||||
"channel" => string_or_nil(row["channel"]),
|
||||
"frequency" => string_or_nil(row["frequency"]),
|
||||
"latitude" => coerce_float(row["latitude"]),
|
||||
"longitude" => coerce_float(row["longitude"]),
|
||||
"lastUpdateTime" => last_update_time,
|
||||
"isPrivate" => private_flag,
|
||||
"signature" => signature,
|
||||
}
|
||||
|
||||
payload.reject { |_, value| value.nil? }
|
||||
rescue StandardError => e
|
||||
warn_log(
|
||||
"Failed to normalise instance row",
|
||||
context: "instances.normalize",
|
||||
instance_id: row.respond_to?(:[]) ? row["id"] : nil,
|
||||
domain: row.respond_to?(:[]) ? row["domain"] : nil,
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
nil
|
||||
end
|
||||
|
||||
# Fetch all instance rows ready to be served by the API while handling
|
||||
# malformed rows gracefully.
|
||||
#
|
||||
# @return [Array<Hash>] list of cleaned instance payloads.
|
||||
def load_instances_for_api
|
||||
clean_duplicate_instances!
|
||||
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
rows = with_busy_retry do
|
||||
db.execute(
|
||||
<<~SQL
|
||||
SELECT id, domain, pubkey, name, version, channel, frequency,
|
||||
latitude, longitude, last_update_time, is_private, signature
|
||||
FROM instances
|
||||
WHERE domain IS NOT NULL AND TRIM(domain) != ''
|
||||
AND pubkey IS NOT NULL AND TRIM(pubkey) != ''
|
||||
ORDER BY LOWER(domain)
|
||||
SQL
|
||||
)
|
||||
end
|
||||
|
||||
rows.each_with_object([]) do |row, memo|
|
||||
normalized = normalize_instance_row(row)
|
||||
memo << normalized if normalized
|
||||
end
|
||||
rescue SQLite3::Exception => e
|
||||
warn_log(
|
||||
"Failed to load instance records",
|
||||
context: "instances.load",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
[]
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -0,0 +1,355 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Networking
|
||||
      # Normalise the configured instance domain by stripping schemes and verifying structure.
      #
      # @param raw [String, nil] environment supplied domain or URL.
      # @return [String, nil] canonicalised hostname with optional port.
      def canonicalize_configured_instance_domain(raw)
        return nil if raw.nil?

        trimmed = raw.to_s.strip
        return nil if trimmed.empty?

        candidate = trimmed

        if candidate.include?("://")
          begin
            uri = URI.parse(candidate)
          rescue URI::InvalidURIError => e
            raise "INSTANCE_DOMAIN must be a valid hostname or URL, but parsing #{candidate.inspect} failed: #{e.message}"
          end

          unless uri.host
            raise "INSTANCE_DOMAIN URL must include a hostname: #{candidate.inspect}"
          end

          if uri.userinfo
            raise "INSTANCE_DOMAIN URL must not include credentials: #{candidate.inspect}"
          end

          if uri.path && !uri.path.empty? && uri.path != "/"
            raise "INSTANCE_DOMAIN URL must not include a path component: #{candidate.inspect}"
          end

          if uri.query || uri.fragment
            raise "INSTANCE_DOMAIN URL must not include query or fragment data: #{candidate.inspect}"
          end

          hostname = uri.hostname
          unless hostname
            raise "INSTANCE_DOMAIN URL must include a hostname: #{candidate.inspect}"
          end

          ip_host = ipv6_literal?(hostname)
          candidate_host = ip_host ? "[#{ip_host}]" : hostname
          candidate = candidate_host
          port = uri.port
          candidate = "#{candidate_host}:#{port}" if port_required?(uri, trimmed)
        end

        ipv6_with_port = candidate.match(/\A(?<address>.+):(?<port>\d+)\z/)
        if ipv6_with_port
          address = ipv6_with_port[:address]
          port = ipv6_with_port[:port]
          literal = ipv6_literal?(address)
          if literal && PotatoMesh::Sanitizer.valid_port?(port)
            candidate = "[#{literal}]:#{port}"
          else
            ipv6_literal = ipv6_literal?(candidate)
            candidate = "[#{ipv6_literal}]" if ipv6_literal
          end
        else
          ipv6_literal = ipv6_literal?(candidate)
          candidate = "[#{ipv6_literal}]" if ipv6_literal
        end

        sanitized = sanitize_instance_domain(candidate)
        unless sanitized
          raise "INSTANCE_DOMAIN must be a bare hostname (optionally with a port) without schemes or paths: #{raw.inspect}"
        end

        ensure_ipv6_instance_domain(sanitized).downcase
      end
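      # Illustrative sketch (not part of the original module) of the two building
      # blocks the canonicalisation above relies on: URI parsing for scheme, port and
      # path handling, and IPAddr for IPv6 detection. All values are placeholders.
      def canonicalization_building_blocks_example
        uri = URI.parse("https://mesh.example:8443/")
        uri.hostname       # => "mesh.example"
        uri.port           # => 8443
        uri.default_port   # => 443, so an explicit :8443 is kept in the canonical domain

        ip = IPAddr.new("2001:db8::1")
        ip.ipv6?           # => true, so the literal is emitted bracketed as "[2001:db8::1]"
      end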
|
||||
# Resolve the best domain for the running instance using configuration and network discovery.
|
||||
#
|
||||
# @return [Array(String, Symbol)] tuple containing the domain and the discovery source.
|
||||
def determine_instance_domain
|
||||
raw = ENV["INSTANCE_DOMAIN"]
|
||||
if raw
|
||||
canonical = canonicalize_configured_instance_domain(raw)
|
||||
return [canonical, :environment] if canonical
|
||||
end
|
||||
|
||||
reverse = sanitize_instance_domain(reverse_dns_domain)
|
||||
return [reverse, :reverse_dns] if reverse
|
||||
|
||||
public_ip = discover_public_ip_address
|
||||
return [public_ip, :public_ip] if public_ip
|
||||
|
||||
protected_ip = discover_protected_ip_address
|
||||
return [protected_ip, :protected_ip] if protected_ip
|
||||
|
||||
[discover_local_ip_address, :local_ip]
|
||||
end
|
||||
|
||||
      # Attempt to determine the reverse DNS hostname for the local machine.
      #
      # @return [String, nil] resolved hostname or nil when unavailable.
      def reverse_dns_domain
        Socket.ip_address_list.each do |address|
          next unless address.respond_to?(:ip?) && address.ip?

          loopback =
            (address.respond_to?(:ipv4_loopback?) && address.ipv4_loopback?) ||
            (address.respond_to?(:ipv6_loopback?) && address.ipv6_loopback?)
          next if loopback

          link_local =
            address.respond_to?(:ipv6_linklocal?) && address.ipv6_linklocal?
          next if link_local

          ip = address.ip_address
          next if ip.nil? || ip.empty?

          begin
            hostname = Resolv.getname(ip)
            trimmed = hostname&.strip
            return trimmed unless trimmed.nil? || trimmed.empty?
          rescue Resolv::ResolvError, Resolv::ResolvTimeout, SocketError
            next
          end
        end

        nil
      end
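      # Illustrative lookups (results depend entirely on the local resolver and the
      # PTR records published for the address; the values shown are examples only):
      #
      #   Resolv.getname("8.8.8.8")   # => "dns.google"
      #   Resolv.getname("10.0.0.7")  # raises Resolv::ResolvError when no PTR record exists,
      #                               # which the loop above treats as "try the next address"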
|
||||
# Identify the first public IP address of the current host.
|
||||
#
|
||||
# @return [String, nil] public IP address string or nil.
|
||||
def discover_public_ip_address
|
||||
address = ip_address_candidates.find { |candidate| public_ip_address?(candidate) }
|
||||
address&.ip_address
|
||||
end
|
||||
|
||||
# Identify a private yet non-loopback IP address suitable for protected networks.
|
||||
#
|
||||
# @return [String, nil] protected network address or nil.
|
||||
def discover_protected_ip_address
|
||||
address = ip_address_candidates.find { |candidate| protected_ip_address?(candidate) }
|
||||
address&.ip_address
|
||||
end
|
||||
|
||||
# Collect viable socket addresses for evaluation.
|
||||
#
|
||||
# @return [Array<#ip?>] list of socket addresses supporting IP queries.
|
||||
def ip_address_candidates
|
||||
Socket.ip_address_list.select { |addr| addr.respond_to?(:ip?) && addr.ip? }
|
||||
end
|
||||
|
||||
# Determine whether a socket address represents a public IP.
|
||||
#
|
||||
# @param addr [Addrinfo] candidate socket address.
|
||||
# @return [Boolean] true when the address is publicly routable.
|
||||
def public_ip_address?(addr)
|
||||
ip = ipaddr_from(addr)
|
||||
return false unless ip
|
||||
return false if loopback_address?(addr, ip)
|
||||
return false if link_local_address?(addr, ip)
|
||||
return false if private_address?(addr, ip)
|
||||
return false if unspecified_address?(ip)
|
||||
|
||||
true
|
||||
end
|
||||
|
||||
# Determine whether a socket address resides on a protected private network.
|
||||
#
|
||||
# @param addr [Addrinfo] candidate socket address.
|
||||
# @return [Boolean] true when the address is private but not loopback/link-local.
|
||||
def protected_ip_address?(addr)
|
||||
ip = ipaddr_from(addr)
|
||||
return false unless ip
|
||||
return false if loopback_address?(addr, ip)
|
||||
return false if link_local_address?(addr, ip)
|
||||
|
||||
private_address?(addr, ip)
|
||||
end
|
||||
|
||||
# Parse an IP address from the provided socket address.
|
||||
#
|
||||
# @param addr [Addrinfo] socket address to examine.
|
||||
# @return [IPAddr, nil] parsed IP or nil when invalid.
|
||||
def ipaddr_from(addr)
|
||||
ip = addr.ip_address
|
||||
return nil if ip.nil? || ip.empty?
|
||||
|
||||
IPAddr.new(ip)
|
||||
rescue IPAddr::InvalidAddressError
|
||||
nil
|
||||
end
|
||||
|
||||
# Determine whether a socket address is loopback.
|
||||
#
|
||||
# @param addr [Addrinfo] socket address to inspect.
|
||||
# @param ip [IPAddr] parsed IP representation of the address.
|
||||
# @return [Boolean] true when the address is loopback.
|
||||
def loopback_address?(addr, ip)
|
||||
(addr.respond_to?(:ipv4_loopback?) && addr.ipv4_loopback?) ||
|
||||
(addr.respond_to?(:ipv6_loopback?) && addr.ipv6_loopback?) ||
|
||||
ip.loopback?
|
||||
end
|
||||
|
||||
# Determine whether a socket address is link-local.
|
||||
#
|
||||
# @param addr [Addrinfo] socket address to inspect.
|
||||
# @param ip [IPAddr] parsed IP representation of the address.
|
||||
# @return [Boolean] true when the address is link-local.
|
||||
def link_local_address?(addr, ip)
|
||||
(addr.respond_to?(:ipv6_linklocal?) && addr.ipv6_linklocal?) ||
|
||||
(ip.respond_to?(:link_local?) && ip.link_local?)
|
||||
end
|
||||
|
||||
# Determine whether a socket address is private.
|
||||
#
|
||||
# @param addr [Addrinfo] socket address to inspect.
|
||||
# @param ip [IPAddr] parsed IP representation of the address.
|
||||
# @return [Boolean] true when the address is private.
|
||||
def private_address?(addr, ip)
|
||||
if addr.respond_to?(:ipv4?) && addr.ipv4? && addr.respond_to?(:ipv4_private?)
|
||||
addr.ipv4_private?
|
||||
else
|
||||
ip.private?
|
||||
end
|
||||
end
|
||||
|
||||
# Identify unspecified IP addresses.
|
||||
#
|
||||
# @param ip [IPAddr] parsed IP.
|
||||
# @return [Boolean] true for unspecified addresses (0.0.0.0 / ::).
|
||||
def unspecified_address?(ip)
|
||||
(ip.ipv4? || ip.ipv6?) && ip.to_i.zero?
|
||||
end
|
||||
|
||||
# Choose the most appropriate local IP address for the instance domain.
|
||||
#
|
||||
# @return [String] selected IP address string.
|
||||
def discover_local_ip_address
|
||||
candidates = ip_address_candidates
|
||||
|
||||
ipv4 = candidates.find do |addr|
|
||||
addr.respond_to?(:ipv4?) && addr.ipv4? && !(addr.respond_to?(:ipv4_loopback?) && addr.ipv4_loopback?)
|
||||
end
|
||||
return ipv4.ip_address if ipv4
|
||||
|
||||
non_loopback = candidates.find do |addr|
|
||||
!(addr.respond_to?(:ipv4_loopback?) && addr.ipv4_loopback?) &&
|
||||
!(addr.respond_to?(:ipv6_loopback?) && addr.ipv6_loopback?)
|
||||
end
|
||||
return non_loopback.ip_address if non_loopback
|
||||
|
||||
loopback = candidates.find do |addr|
|
||||
(addr.respond_to?(:ipv4_loopback?) && addr.ipv4_loopback?) ||
|
||||
(addr.respond_to?(:ipv6_loopback?) && addr.ipv6_loopback?)
|
||||
end
|
||||
return loopback.ip_address if loopback
|
||||
|
||||
"127.0.0.1"
|
||||
end
|
||||
|
||||
      # Determine whether an IP should be restricted from exposure.
      #
      # @param ip [IPAddr] candidate IP address.
      # @return [Boolean] true when the IP should not be exposed.
      def restricted_ip_address?(ip)
        return true if ip.loopback?
        return true if ip.private?
        return true if ip.link_local?
        return true if ip.to_i.zero?

        false
      end
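      # Examples of the classification above (illustrative; relies on the standard
      # IPAddr predicates):
      #
      #   restricted_ip_address?(IPAddr.new("203.0.113.7"))  # => false (publicly routable)
      #   restricted_ip_address?(IPAddr.new("10.1.2.3"))     # => true  (private range)
      #   restricted_ip_address?(IPAddr.new("127.0.0.1"))    # => true  (loopback)
      #   restricted_ip_address?(IPAddr.new("fe80::1"))      # => true  (link-local)
      #   restricted_ip_address?(IPAddr.new("0.0.0.0"))      # => true  (unspecified)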
|
||||
# Normalize IPv6 instance domains so that they remain bracketed and URI-compatible.
|
||||
#
|
||||
# @param domain [String] sanitized hostname optionally including a port suffix.
|
||||
# @return [String] domain with IPv6 literals wrapped in brackets when necessary.
|
||||
def ensure_ipv6_instance_domain(domain)
|
||||
bracketed_match = domain.match(/\A\[(?<host>[^\]]+)\](?::(?<port>\d+))?\z/)
|
||||
if bracketed_match
|
||||
host = bracketed_match[:host]
|
||||
port = bracketed_match[:port]
|
||||
ipv6 = ipv6_literal?(host)
|
||||
if ipv6
|
||||
return "[#{ipv6}]#{port ? ":#{port}" : ""}"
|
||||
end
|
||||
|
||||
return domain
|
||||
end
|
||||
|
||||
host_candidate = domain
|
||||
port_candidate = nil
|
||||
split_host, separator, split_port = domain.rpartition(":")
|
||||
if !separator.empty? && split_port.match?(/\A\d+\z/) && !split_host.empty? && !split_host.end_with?(":")
|
||||
host_candidate = split_host
|
||||
port_candidate = split_port
|
||||
end
|
||||
|
||||
if port_candidate
|
||||
ipv6_host = ipv6_literal?(host_candidate)
|
||||
return "[#{ipv6_host}]:#{port_candidate}" if ipv6_host
|
||||
|
||||
host_candidate = domain
|
||||
port_candidate = nil
|
||||
end
|
||||
|
||||
ipv6 = ipv6_literal?(host_candidate)
|
||||
return "[#{ipv6}]" if ipv6
|
||||
|
||||
domain
|
||||
end
|
||||
|
||||
# Parse an IPv6 literal and return its canonical representation when valid.
|
||||
#
|
||||
# @param candidate [String] potential IPv6 literal.
|
||||
# @return [String, nil] normalized IPv6 literal or nil when the candidate is not IPv6.
|
||||
def ipv6_literal?(candidate)
|
||||
IPAddr.new(candidate).yield_self do |ip|
|
||||
return ip.ipv6? ? ip.to_s : nil
|
||||
end
|
||||
rescue IPAddr::InvalidAddressError
|
||||
nil
|
||||
end
|
||||
|
||||
# Determine whether a URI's port should be included in the canonicalized domain.
|
||||
#
|
||||
# @param uri [URI::Generic] parsed URI for the instance domain.
|
||||
# @param raw [String] original sanitized input string.
|
||||
# @return [Boolean] true when the port must be preserved.
|
||||
def port_required?(uri, raw)
|
||||
port = uri.port
|
||||
return false unless port
|
||||
|
||||
return true unless uri.respond_to?(:default_port) && uri.default_port && port == uri.default_port
|
||||
|
||||
raw_port_fragment = ":#{port}"
|
||||
sanitized_raw = raw.strip
|
||||
sanitized_raw.end_with?(raw_port_fragment)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -0,0 +1,196 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Prometheus
|
||||
MESSAGES_TOTAL = ::Prometheus::Client::Counter.new(
|
||||
:meshtastic_messages_total,
|
||||
docstring: "Total number of messages received",
|
||||
)
|
||||
|
||||
NODES_GAUGE = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_nodes,
|
||||
docstring: "Number of nodes tracked",
|
||||
)
|
||||
|
||||
NODE_GAUGE = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node,
|
||||
docstring: "Presence of a Meshtastic node",
|
||||
labels: %i[node short_name long_name hw_model role],
|
||||
)
|
||||
|
||||
NODE_BATTERY_LEVEL = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_battery_level,
|
||||
docstring: "Battery level of a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_VOLTAGE = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_voltage,
|
||||
docstring: "Battery voltage of a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_UPTIME = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_uptime_seconds,
|
||||
docstring: "Uptime reported by a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_CHANNEL_UTIL = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_channel_utilization,
|
||||
docstring: "Channel utilization reported by a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_AIR_UTIL_TX = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_transmit_air_utilization,
|
||||
docstring: "Transmit air utilization reported by a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_LATITUDE = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_latitude,
|
||||
docstring: "Latitude of a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_LONGITUDE = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_longitude,
|
||||
docstring: "Longitude of a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
NODE_ALTITUDE = ::Prometheus::Client::Gauge.new(
|
||||
:meshtastic_node_altitude,
|
||||
docstring: "Altitude of a Meshtastic node",
|
||||
labels: [:node],
|
||||
)
|
||||
|
||||
      METRICS = [
        MESSAGES_TOTAL,
        NODES_GAUGE,
        NODE_GAUGE,
        NODE_BATTERY_LEVEL,
        NODE_VOLTAGE,
        NODE_UPTIME,
        NODE_CHANNEL_UTIL,
        NODE_AIR_UTIL_TX,
        NODE_LATITUDE,
        NODE_LONGITUDE,
        NODE_ALTITUDE,
      ].freeze

      METRICS.each do |metric|
        ::Prometheus::Client.registry.register(metric)
      rescue ::Prometheus::Client::Registry::AlreadyRegisteredError
        # Ignore duplicate registrations when the code is reloaded.
      end
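      # Illustrative scrape output once a node is reporting (all label values are
      # placeholders; the exact set of series depends on which telemetry and position
      # fields the node actually sends):
      #
      #   meshtastic_nodes 42.0
      #   meshtastic_node{node="!1a2b3c4d",short_name="POT1",long_name="Potato One",hw_model="TBEAM",role="CLIENT"} 1.0
      #   meshtastic_node_battery_level{node="!1a2b3c4d"} 87.0
      #   meshtastic_node_voltage{node="!1a2b3c4d"} 4.02
      #   meshtastic_node_channel_utilization{node="!1a2b3c4d"} 5.6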
|
||||
def update_prometheus_metrics(node_id, user = nil, role = "", met = nil, pos = nil)
|
||||
ids = prom_report_ids
|
||||
return if ids.empty? || !node_id
|
||||
|
||||
return unless ids[0] == "*" || ids.include?(node_id)
|
||||
|
||||
if user && user.is_a?(Hash) && role && role != ""
|
||||
NODE_GAUGE.set(
|
||||
1,
|
||||
labels: {
|
||||
node: node_id,
|
||||
short_name: user["shortName"],
|
||||
long_name: user["longName"],
|
||||
hw_model: user["hwModel"],
|
||||
role: role,
|
||||
},
|
||||
)
|
||||
end
|
||||
|
||||
if met && met.is_a?(Hash)
|
||||
if met["batteryLevel"]
|
||||
NODE_BATTERY_LEVEL.set(met["batteryLevel"], labels: { node: node_id })
|
||||
end
|
||||
|
||||
if met["voltage"]
|
||||
NODE_VOLTAGE.set(met["voltage"], labels: { node: node_id })
|
||||
end
|
||||
|
||||
if met["uptimeSeconds"]
|
||||
NODE_UPTIME.set(met["uptimeSeconds"], labels: { node: node_id })
|
||||
end
|
||||
|
||||
if met["channelUtilization"]
|
||||
NODE_CHANNEL_UTIL.set(met["channelUtilization"], labels: { node: node_id })
|
||||
end
|
||||
|
||||
if met["airUtilTx"]
|
||||
NODE_AIR_UTIL_TX.set(met["airUtilTx"], labels: { node: node_id })
|
||||
end
|
||||
end
|
||||
|
||||
if pos && pos.is_a?(Hash)
|
||||
if pos["latitude"]
|
||||
NODE_LATITUDE.set(pos["latitude"], labels: { node: node_id })
|
||||
end
|
||||
|
||||
if pos["longitude"]
|
||||
NODE_LONGITUDE.set(pos["longitude"], labels: { node: node_id })
|
||||
end
|
||||
|
||||
if pos["altitude"]
|
||||
NODE_ALTITUDE.set(pos["altitude"], labels: { node: node_id })
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
def update_all_prometheus_metrics_from_nodes
|
||||
nodes = query_nodes(1000)
|
||||
|
||||
NODES_GAUGE.set(nodes.size)
|
||||
|
||||
ids = prom_report_ids
|
||||
unless ids.empty?
|
||||
nodes.each do |n|
|
||||
node_id = n["node_id"]
|
||||
|
||||
next if ids[0] != "*" && !ids.include?(node_id)
|
||||
|
||||
update_prometheus_metrics(
|
||||
node_id,
|
||||
{
|
||||
"shortName" => n["short_name"] || "",
|
||||
"longName" => n["long_name"] || "",
|
||||
"hwModel" => n["hw_model"] || "",
|
||||
},
|
||||
n["role"] || "",
|
||||
{
|
||||
"batteryLevel" => n["battery_level"],
|
||||
"voltage" => n["voltage"],
|
||||
"uptimeSeconds" => n["uptime_seconds"],
|
||||
"channelUtilization" => n["channel_utilization"],
|
||||
"airUtilTx" => n["air_util_tx"],
|
||||
},
|
||||
{
|
||||
"latitude" => n["latitude"],
|
||||
"longitude" => n["longitude"],
|
||||
"altitude" => n["altitude"],
|
||||
},
|
||||
)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -0,0 +1,410 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Queries
|
||||
      MAX_QUERY_LIMIT = 1000

      # Normalise a caller-provided limit to a sane, positive integer.
      #
      # @param limit [Object] value coerced to an integer.
      # @param default [Integer] fallback used when coercion fails.
      # @return [Integer] limit clamped between 1 and MAX_QUERY_LIMIT.
      def coerce_query_limit(limit, default: 200)
        coerced = begin
          if limit.is_a?(Integer)
            limit
          else
            Integer(limit, 10)
          end
        rescue ArgumentError, TypeError
          nil
        end

        coerced = default if coerced.nil? || coerced <= 0
        coerced = MAX_QUERY_LIMIT if coerced > MAX_QUERY_LIMIT
        coerced
      end
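      # Examples (illustrative):
      #
      #   coerce_query_limit("50")                # => 50
      #   coerce_query_limit(nil)                 # => 200 (default)
      #   coerce_query_limit(-3)                  # => 200 (non-positive falls back to the default)
      #   coerce_query_limit(10_000)              # => 1000 (clamped to MAX_QUERY_LIMIT)
      #   coerce_query_limit("25", default: 50)   # => 25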
|
||||
def node_reference_tokens(node_ref)
|
||||
parts = canonical_node_parts(node_ref)
|
||||
canonical_id, numeric_id = parts ? parts[0, 2] : [nil, nil]
|
||||
|
||||
string_values = []
|
||||
numeric_values = []
|
||||
|
||||
case node_ref
|
||||
when Integer
|
||||
numeric_values << node_ref
|
||||
string_values << node_ref.to_s
|
||||
when Numeric
|
||||
coerced = node_ref.to_i
|
||||
numeric_values << coerced
|
||||
string_values << coerced.to_s
|
||||
when String
|
||||
trimmed = node_ref.strip
|
||||
unless trimmed.empty?
|
||||
string_values << trimmed
|
||||
numeric_values << trimmed.to_i if trimmed.match?(/\A-?\d+\z/)
|
||||
end
|
||||
when nil
|
||||
# no-op
|
||||
else
|
||||
coerced = node_ref.to_s.strip
|
||||
string_values << coerced unless coerced.empty?
|
||||
end
|
||||
|
||||
if canonical_id
|
||||
string_values << canonical_id
|
||||
string_values << canonical_id.upcase
|
||||
end
|
||||
|
||||
if numeric_id
|
||||
numeric_values << numeric_id
|
||||
string_values << numeric_id.to_s
|
||||
end
|
||||
|
||||
cleaned_strings = string_values.compact.map(&:to_s).map(&:strip).reject(&:empty?).uniq
|
||||
cleaned_numbers = numeric_values.compact.map do |value|
|
||||
begin
|
||||
Integer(value, 10)
|
||||
rescue ArgumentError, TypeError
|
||||
nil
|
||||
end
|
||||
end.compact.uniq
|
||||
|
||||
{
|
||||
string_values: cleaned_strings,
|
||||
numeric_values: cleaned_numbers,
|
||||
}
|
||||
end
|
||||
|
||||
def node_lookup_clause(node_ref, string_columns:, numeric_columns: [])
|
||||
tokens = node_reference_tokens(node_ref)
|
||||
string_values = tokens[:string_values]
|
||||
numeric_values = tokens[:numeric_values]
|
||||
|
||||
clauses = []
|
||||
params = []
|
||||
|
||||
unless string_columns.empty? || string_values.empty?
|
||||
string_columns.each do |column|
|
||||
placeholders = Array.new(string_values.length, "?").join(", ")
|
||||
clauses << "#{column} IN (#{placeholders})"
|
||||
params.concat(string_values)
|
||||
end
|
||||
end
|
||||
|
||||
unless numeric_columns.empty? || numeric_values.empty?
|
||||
numeric_columns.each do |column|
|
||||
placeholders = Array.new(numeric_values.length, "?").join(", ")
|
||||
clauses << "#{column} IN (#{placeholders})"
|
||||
params.concat(numeric_values)
|
||||
end
|
||||
end
|
||||
|
||||
return nil if clauses.empty?
|
||||
|
||||
["(#{clauses.join(" OR ")})", params]
|
||||
end
|
||||
|
||||
def query_nodes(limit, node_ref: nil)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
now = Time.now.to_i
|
||||
min_last_heard = now - PotatoMesh::Config.week_seconds
|
||||
params = []
|
||||
where_clauses = []
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["num"])
|
||||
return [] unless clause
|
||||
where_clauses << clause.first
|
||||
params.concat(clause.last)
|
||||
else
|
||||
where_clauses << "last_heard >= ?"
|
||||
params << min_last_heard
|
||||
end
|
||||
|
||||
if private_mode?
|
||||
where_clauses << "(role IS NULL OR role <> 'CLIENT_HIDDEN')"
|
||||
end
|
||||
|
||||
sql = <<~SQL
|
||||
SELECT node_id, short_name, long_name, hw_model, role, snr,
|
||||
battery_level, voltage, last_heard, first_heard,
|
||||
uptime_seconds, channel_utilization, air_util_tx,
|
||||
position_time, location_source, precision_bits,
|
||||
latitude, longitude, altitude, lora_freq, modem_preset
|
||||
FROM nodes
|
||||
SQL
|
||||
sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
|
||||
sql += <<~SQL
|
||||
ORDER BY last_heard DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
params << limit
|
||||
|
||||
rows = db.execute(sql, params)
|
||||
rows = rows.select do |r|
|
||||
last_candidate = [r["last_heard"], r["position_time"], r["first_heard"]]
|
||||
.map { |value| coerce_integer(value) }
|
||||
.compact
|
||||
.max
|
||||
last_candidate && last_candidate >= min_last_heard
|
||||
end
|
||||
rows.each do |r|
|
||||
r["role"] ||= "CLIENT"
|
||||
lh = r["last_heard"]&.to_i
|
||||
pt = r["position_time"]&.to_i
|
||||
lh = now if lh && lh > now
|
||||
pt = nil if pt && pt > now
|
||||
r["last_heard"] = lh
|
||||
r["position_time"] = pt
|
||||
r["last_seen_iso"] = Time.at(lh).utc.iso8601 if lh
|
||||
r["pos_time_iso"] = Time.at(pt).utc.iso8601 if pt
|
||||
pb = r["precision_bits"]
|
||||
r["precision_bits"] = pb.to_i if pb
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_messages(limit, node_ref: nil)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
params = []
|
||||
where_clauses = ["COALESCE(TRIM(m.encrypted), '') = ''"]
|
||||
now = Time.now.to_i
|
||||
min_rx_time = now - PotatoMesh::Config.week_seconds
|
||||
where_clauses << "m.rx_time >= ?"
|
||||
params << min_rx_time
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["m.from_id", "m.to_id"])
|
||||
return [] unless clause
|
||||
where_clauses << clause.first
|
||||
params.concat(clause.last)
|
||||
end
|
||||
|
||||
sql = <<~SQL
|
||||
SELECT m.id, m.rx_time, m.rx_iso, m.from_id, m.to_id, m.channel,
|
||||
m.portnum, m.text, m.encrypted, m.rssi, m.hop_limit,
|
||||
m.lora_freq, m.modem_preset, m.channel_name, m.snr
|
||||
FROM messages m
|
||||
SQL
|
||||
sql += " WHERE #{where_clauses.join(" AND ")}\n"
|
||||
sql += <<~SQL
|
||||
ORDER BY m.rx_time DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
params << limit
|
||||
rows = db.execute(sql, params)
|
||||
rows.each do |r|
|
||||
r.delete_if { |key, _| key.is_a?(Integer) }
|
||||
if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.strip.empty?)
|
||||
raw = db.execute("SELECT * FROM messages WHERE id = ?", [r["id"]]).first
|
||||
debug_log(
|
||||
"Message query produced empty sender",
|
||||
context: "queries.messages",
|
||||
stage: "raw_row",
|
||||
row: raw,
|
||||
)
|
||||
end
|
||||
|
||||
canonical_from_id = string_or_nil(normalize_node_id(db, r["from_id"]))
|
||||
node_id = canonical_from_id || string_or_nil(r["from_id"])
|
||||
|
||||
if canonical_from_id
|
||||
raw_from_id = string_or_nil(r["from_id"])
|
||||
if raw_from_id.nil? || raw_from_id.match?(/\A[0-9]+\z/)
|
||||
r["from_id"] = canonical_from_id
|
||||
elsif raw_from_id.start_with?("!") && raw_from_id.casecmp(canonical_from_id) != 0
|
||||
r["from_id"] = canonical_from_id
|
||||
end
|
||||
end
|
||||
|
||||
r["node_id"] = node_id if node_id
|
||||
|
||||
if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.strip.empty?)
|
||||
debug_log(
|
||||
"Message query produced empty sender",
|
||||
context: "queries.messages",
|
||||
stage: "after_normalization",
|
||||
row: r,
|
||||
)
|
||||
end
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_positions(limit, node_ref: nil)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
params = []
|
||||
where_clauses = []
|
||||
now = Time.now.to_i
|
||||
min_rx_time = now - PotatoMesh::Config.week_seconds
|
||||
where_clauses << "COALESCE(rx_time, position_time, 0) >= ?"
|
||||
params << min_rx_time
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
|
||||
return [] unless clause
|
||||
where_clauses << clause.first
|
||||
params.concat(clause.last)
|
||||
end
|
||||
|
||||
sql = <<~SQL
|
||||
SELECT * FROM positions
|
||||
SQL
|
||||
sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
|
||||
sql += <<~SQL
|
||||
ORDER BY rx_time DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
params << limit
|
||||
rows = db.execute(sql, params)
|
||||
rows.each do |r|
|
||||
rx_time = coerce_integer(r["rx_time"])
|
||||
r["rx_time"] = rx_time if rx_time
|
||||
r["rx_iso"] = Time.at(rx_time).utc.iso8601 if rx_time && string_or_nil(r["rx_iso"]).nil?
|
||||
|
||||
node_num = coerce_integer(r["node_num"])
|
||||
r["node_num"] = node_num if node_num
|
||||
|
||||
position_time = coerce_integer(r["position_time"])
|
||||
position_time = nil if position_time && position_time > now
|
||||
r["position_time"] = position_time
|
||||
r["position_time_iso"] = Time.at(position_time).utc.iso8601 if position_time
|
||||
|
||||
r["precision_bits"] = coerce_integer(r["precision_bits"])
|
||||
r["sats_in_view"] = coerce_integer(r["sats_in_view"])
|
||||
r["pdop"] = coerce_float(r["pdop"])
|
||||
r["snr"] = coerce_float(r["snr"])
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_neighbors(limit, node_ref: nil)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
params = []
|
||||
where_clauses = []
|
||||
now = Time.now.to_i
|
||||
min_rx_time = now - PotatoMesh::Config.week_seconds
|
||||
where_clauses << "COALESCE(rx_time, 0) >= ?"
|
||||
params << min_rx_time
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["node_id", "neighbor_id"])
|
||||
return [] unless clause
|
||||
where_clauses << clause.first
|
||||
params.concat(clause.last)
|
||||
end
|
||||
|
||||
sql = <<~SQL
|
||||
SELECT * FROM neighbors
|
||||
SQL
|
||||
sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
|
||||
sql += <<~SQL
|
||||
ORDER BY rx_time DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
params << limit
|
||||
rows = db.execute(sql, params)
|
||||
rows.each do |r|
|
||||
rx_time = coerce_integer(r["rx_time"])
|
||||
rx_time = now if rx_time && rx_time > now
|
||||
r["rx_time"] = rx_time if rx_time
|
||||
r["rx_iso"] = Time.at(rx_time).utc.iso8601 if rx_time
|
||||
r["snr"] = coerce_float(r["snr"])
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
def query_telemetry(limit, node_ref: nil)
|
||||
limit = coerce_query_limit(limit)
|
||||
db = open_database(readonly: true)
|
||||
db.results_as_hash = true
|
||||
params = []
|
||||
where_clauses = []
|
||||
now = Time.now.to_i
|
||||
min_rx_time = now - PotatoMesh::Config.week_seconds
|
||||
where_clauses << "COALESCE(rx_time, telemetry_time, 0) >= ?"
|
||||
params << min_rx_time
|
||||
|
||||
if node_ref
|
||||
clause = node_lookup_clause(node_ref, string_columns: ["node_id"], numeric_columns: ["node_num"])
|
||||
return [] unless clause
|
||||
where_clauses << clause.first
|
||||
params.concat(clause.last)
|
||||
end
|
||||
|
||||
sql = <<~SQL
|
||||
SELECT * FROM telemetry
|
||||
SQL
|
||||
sql += " WHERE #{where_clauses.join(" AND ")}\n" if where_clauses.any?
|
||||
sql += <<~SQL
|
||||
ORDER BY rx_time DESC
|
||||
LIMIT ?
|
||||
SQL
|
||||
params << limit
|
||||
rows = db.execute(sql, params)
|
||||
rows.each do |r|
|
||||
rx_time = coerce_integer(r["rx_time"])
|
||||
r["rx_time"] = rx_time if rx_time
|
||||
r["rx_iso"] = Time.at(rx_time).utc.iso8601 if rx_time && string_or_nil(r["rx_iso"]).nil?
|
||||
|
||||
node_num = coerce_integer(r["node_num"])
|
||||
r["node_num"] = node_num if node_num
|
||||
|
||||
telemetry_time = coerce_integer(r["telemetry_time"])
|
||||
telemetry_time = nil if telemetry_time && telemetry_time > now
|
||||
r["telemetry_time"] = telemetry_time
|
||||
r["telemetry_time_iso"] = Time.at(telemetry_time).utc.iso8601 if telemetry_time
|
||||
|
||||
r["channel"] = coerce_integer(r["channel"])
|
||||
r["hop_limit"] = coerce_integer(r["hop_limit"])
|
||||
r["rssi"] = coerce_integer(r["rssi"])
|
||||
r["bitfield"] = coerce_integer(r["bitfield"])
|
||||
r["snr"] = coerce_float(r["snr"])
|
||||
r["battery_level"] = coerce_float(r["battery_level"])
|
||||
r["voltage"] = coerce_float(r["voltage"])
|
||||
r["channel_utilization"] = coerce_float(r["channel_utilization"])
|
||||
r["air_util_tx"] = coerce_float(r["air_util_tx"])
|
||||
r["uptime_seconds"] = coerce_integer(r["uptime_seconds"])
|
||||
r["temperature"] = coerce_float(r["temperature"])
|
||||
r["relative_humidity"] = coerce_float(r["relative_humidity"])
|
||||
r["barometric_pressure"] = coerce_float(r["barometric_pressure"])
|
||||
end
|
||||
rows
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -0,0 +1,137 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Routes
|
||||
module Api
|
||||
def self.registered(app)
|
||||
app.get "/version" do
|
||||
content_type :json
|
||||
last_update = latest_node_update_timestamp
|
||||
payload = {
|
||||
name: sanitized_site_name,
|
||||
version: app_constant(:APP_VERSION),
|
||||
lastNodeUpdate: last_update,
|
||||
config: {
|
||||
siteName: sanitized_site_name,
|
||||
channel: sanitized_channel,
|
||||
frequency: sanitized_frequency,
|
||||
contactLink: sanitized_contact_link,
|
||||
contactLinkUrl: sanitized_contact_link_url,
|
||||
refreshIntervalSeconds: PotatoMesh::Config.refresh_interval_seconds,
|
||||
mapCenter: {
|
||||
lat: PotatoMesh::Config.map_center_lat,
|
||||
lon: PotatoMesh::Config.map_center_lon,
|
||||
},
|
||||
maxDistanceKm: PotatoMesh::Config.max_distance_km,
|
||||
instanceDomain: app_constant(:INSTANCE_DOMAIN),
|
||||
privateMode: private_mode?,
|
||||
},
|
||||
}
|
||||
payload.to_json
|
||||
end
|
||||
|
||||
app.get "/.well-known/potato-mesh" do
|
||||
refresh_well_known_document_if_stale
|
||||
cache_control :public, max_age: PotatoMesh::Config.well_known_refresh_interval
|
||||
content_type :json
|
||||
send_file well_known_file_path
|
||||
end
|
||||
|
||||
app.get "/api/nodes" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_nodes(limit).to_json
|
||||
end
|
||||
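          # Illustrative client call against a running instance (the domain is a
          # placeholder); the route returns a JSON array ordered by last_heard DESC:
          #
          #   require "net/http"
          #   require "json"
          #
          #   nodes = JSON.parse(Net::HTTP.get(URI("https://mesh.example/api/nodes?limit=50")))
          #   nodes.first&.fetch("node_id")   # most recently heard node, if any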
|
||||
app.get "/api/nodes/:id" do
|
||||
content_type :json
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
rows = query_nodes(limit, node_ref: node_ref)
|
||||
halt 404, { error: "not found" }.to_json if rows.empty?
|
||||
rows.first.to_json
|
||||
end
|
||||
|
||||
app.get "/api/messages" do
|
||||
halt 404 if private_mode?
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_messages(limit).to_json
|
||||
end
|
||||
|
||||
app.get "/api/messages/:id" do
|
||||
halt 404 if private_mode?
|
||||
content_type :json
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_messages(limit, node_ref: node_ref).to_json
|
||||
end
|
||||
|
||||
app.get "/api/positions" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_positions(limit).to_json
|
||||
end
|
||||
|
||||
app.get "/api/positions/:id" do
|
||||
content_type :json
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_positions(limit, node_ref: node_ref).to_json
|
||||
end
|
||||
|
||||
app.get "/api/neighbors" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_neighbors(limit).to_json
|
||||
end
|
||||
|
||||
app.get "/api/neighbors/:id" do
|
||||
content_type :json
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_neighbors(limit, node_ref: node_ref).to_json
|
||||
end
|
||||
|
||||
app.get "/api/telemetry" do
|
||||
content_type :json
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_telemetry(limit).to_json
|
||||
end
|
||||
|
||||
app.get "/api/telemetry/:id" do
|
||||
content_type :json
|
||||
node_ref = string_or_nil(params["id"])
|
||||
halt 400, { error: "missing node id" }.to_json unless node_ref
|
||||
limit = [params["limit"]&.to_i || 200, 1000].min
|
||||
query_telemetry(limit, node_ref: node_ref).to_json
|
||||
end
|
||||
|
||||
app.get "/api/instances" do
|
||||
content_type :json
|
||||
ensure_self_instance_record!
|
||||
payload = load_instances_for_api
|
||||
JSON.generate(payload)
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -0,0 +1,322 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# frozen_string_literal: true
|
||||
|
||||
module PotatoMesh
|
||||
module App
|
||||
module Routes
|
||||
module Ingest
|
||||
def self.registered(app)
|
||||
app.post "/api/nodes" do
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
unless data.is_a?(Hash)
|
||||
halt 400, { error: "invalid payload" }.to_json
|
||||
end
|
||||
halt 400, { error: "too many nodes" }.to_json if data.size > 1000
|
||||
db = open_database
|
||||
data.each do |node_id, node|
|
||||
upsert_node(db, node_id, node)
|
||||
end
|
||||
PotatoMesh::App::Prometheus::NODES_GAUGE.set(query_nodes(1000).length)
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
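          # Illustrative ingest call (placeholder domain and payload). The body is a
          # JSON object keyed by node id, matching the data.each iteration above; the
          # attribute names and the Authorization header are assumptions — whatever
          # upsert_node and require_token! expect must be supplied instead:
          #
          #   require "net/http"
          #   require "json"
          #
          #   uri = URI("https://mesh.example/api/nodes")
          #   req = Net::HTTP::Post.new(uri, "Content-Type" => "application/json",
          #                                  "Authorization" => "Bearer #{ENV["API_TOKEN"]}")
          #   req.body = JSON.generate("!1a2b3c4d" => { "shortName" => "POT1", "lastHeard" => Time.now.to_i })
          #   Net::HTTP.start(uri.host, uri.port, use_ssl: true) { |http| http.request(req) }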
|
||||
app.post "/api/messages" do
|
||||
halt 404 if private_mode?
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
messages = data.is_a?(Array) ? data : [data]
|
||||
halt 400, { error: "too many messages" }.to_json if messages.size > 1000
|
||||
db = open_database
|
||||
messages.each do |msg|
|
||||
insert_message(db, msg)
|
||||
end
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
app.post "/api/instances" do
|
||||
content_type :json
|
||||
begin
|
||||
payload = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError => e
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
reason: "invalid JSON",
|
||||
error_class: e.class.name,
|
||||
error_message: e.message,
|
||||
)
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
|
||||
unless payload.is_a?(Hash)
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
reason: "payload is not an object",
|
||||
)
|
||||
halt 400, { error: "invalid payload" }.to_json
|
||||
end
|
||||
|
||||
id = string_or_nil(payload["id"]) || string_or_nil(payload["instanceId"])
|
||||
raw_domain_input = payload["domain"]
|
||||
raw_domain = sanitize_instance_domain(raw_domain_input, downcase: false)
|
||||
normalized_domain = raw_domain && sanitize_instance_domain(raw_domain)
|
||||
unless raw_domain && normalized_domain
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: string_or_nil(raw_domain_input),
|
||||
reason: "invalid domain",
|
||||
)
|
||||
halt 400, { error: "invalid domain" }.to_json
|
||||
end
|
||||
pubkey = sanitize_public_key_pem(payload["pubkey"])
|
||||
name = string_or_nil(payload["name"])
|
||||
version = string_or_nil(payload["version"])
|
||||
channel = string_or_nil(payload["channel"])
|
||||
frequency = string_or_nil(payload["frequency"])
|
||||
latitude = coerce_float(payload["latitude"])
|
||||
longitude = coerce_float(payload["longitude"])
|
||||
last_update_time = coerce_integer(payload["last_update_time"] || payload["lastUpdateTime"])
|
||||
raw_private = payload.key?("isPrivate") ? payload["isPrivate"] : payload["is_private"]
|
||||
is_private = coerce_boolean(raw_private)
|
||||
signature = string_or_nil(payload["signature"])
|
||||
|
||||
attributes = {
|
||||
id: id,
|
||||
domain: normalized_domain,
|
||||
pubkey: pubkey,
|
||||
name: name,
|
||||
version: version,
|
||||
channel: channel,
|
||||
frequency: frequency,
|
||||
latitude: latitude,
|
||||
longitude: longitude,
|
||||
last_update_time: last_update_time,
|
||||
is_private: is_private,
|
||||
}
|
||||
|
||||
if [attributes[:id], attributes[:domain], attributes[:pubkey], signature, attributes[:last_update_time]].any?(&:nil?)
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
reason: "missing required fields",
|
||||
)
|
||||
halt 400, { error: "missing required fields" }.to_json
|
||||
end
|
||||
|
||||
signature_valid = verify_instance_signature(attributes, signature, attributes[:pubkey])
|
||||
# Some remote peers sign payloads using a canonicalised lowercase
|
||||
# domain while still sending a mixed-case domain. Retry signature
|
||||
# verification with the original casing when the first attempt
|
||||
# fails to maximise interoperability.
|
||||
if !signature_valid && raw_domain && normalized_domain && raw_domain.casecmp?(normalized_domain) && raw_domain != normalized_domain
|
||||
alternate_attributes = attributes.merge(domain: raw_domain)
|
||||
signature_valid = verify_instance_signature(alternate_attributes, signature, attributes[:pubkey])
|
||||
end
|
||||
|
||||
unless signature_valid
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: raw_domain || attributes[:domain],
|
||||
reason: "invalid signature",
|
||||
)
|
||||
halt 400, { error: "invalid signature" }.to_json
|
||||
end
|
||||
|
||||
if attributes[:is_private]
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: "instance marked private",
|
||||
)
|
||||
halt 403, { error: "instance marked private" }.to_json
|
||||
end
|
||||
|
||||
ip = ip_from_domain(attributes[:domain])
|
||||
if ip && restricted_ip_address?(ip)
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: "restricted IP address",
|
||||
resolved_ip: ip,
|
||||
)
|
||||
halt 400, { error: "restricted domain" }.to_json
|
||||
end
|
||||
|
||||
begin
|
||||
resolve_remote_ip_addresses(URI.parse("https://#{attributes[:domain]}"))
|
||||
rescue ArgumentError => e
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: "restricted domain",
|
||||
error_message: e.message,
|
||||
)
|
||||
halt 400, { error: "restricted domain" }.to_json
|
||||
rescue SocketError
|
||||
# DNS lookups that fail to resolve are handled later when the
|
||||
# registration flow attempts to contact the remote instance.
|
||||
end
|
||||
|
||||
well_known, well_known_meta = fetch_instance_json(attributes[:domain], "/.well-known/potato-mesh")
|
||||
unless well_known
|
||||
details_list = Array(well_known_meta).map(&:to_s)
|
||||
details = details_list.empty? ? "no response" : details_list.join("; ")
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: "failed to fetch well-known document",
|
||||
details: details,
|
||||
)
|
||||
halt 400, { error: "failed to verify well-known document" }.to_json
|
||||
end
|
||||
|
||||
valid, reason = validate_well_known_document(well_known, attributes[:domain], attributes[:pubkey])
|
||||
unless valid
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: reason || "invalid well-known document",
|
||||
)
|
||||
halt 400, { error: reason || "invalid well-known document" }.to_json
|
||||
end
|
||||
|
||||
remote_nodes, node_source = fetch_instance_json(attributes[:domain], "/api/nodes")
|
||||
unless remote_nodes
|
||||
details_list = Array(node_source).map(&:to_s)
|
||||
details = details_list.empty? ? "no response" : details_list.join("; ")
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: "failed to fetch nodes",
|
||||
details: details,
|
||||
)
|
||||
halt 400, { error: "failed to fetch nodes" }.to_json
|
||||
end
|
||||
|
||||
fresh, freshness_reason = validate_remote_nodes(remote_nodes)
|
||||
unless fresh
|
||||
warn_log(
|
||||
"Instance registration rejected",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
reason: freshness_reason || "stale node data",
|
||||
)
|
||||
halt 400, { error: freshness_reason || "stale node data" }.to_json
|
||||
end
|
||||
|
||||
db = open_database
|
||||
upsert_instance_record(db, attributes, signature)
|
||||
ingest_known_instances_from!(
|
||||
db,
|
||||
attributes[:domain],
|
||||
per_response_limit: PotatoMesh::Config.federation_max_instances_per_response,
|
||||
overall_limit: PotatoMesh::Config.federation_max_domains_per_crawl,
|
||||
)
|
||||
debug_log(
|
||||
"Registered remote instance",
|
||||
context: "ingest.register",
|
||||
domain: attributes[:domain],
|
||||
instance_id: attributes[:id],
|
||||
)
|
||||
status 201
|
||||
{ status: "registered" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
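          # Illustrative registration payload a peer might POST to /api/instances.
          # All values are placeholders; id, domain, pubkey, signature and
          # lastUpdateTime are required by the checks above, and the signature must
          # be produced with the peer's private key over the registered attributes:
          #
          #   {
          #     "id" => "9f6e2c...", "domain" => "peer.example",
          #     "pubkey" => "-----BEGIN PUBLIC KEY-----\n...",
          #     "name" => "Peer Mesh", "version" => "1.2.3",
          #     "lastUpdateTime" => 1700000000, "isPrivate" => false,
          #     "signature" => "base64-encoded signature"
          #   }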
|
||||
app.post "/api/positions" do
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
positions = data.is_a?(Array) ? data : [data]
|
||||
halt 400, { error: "too many positions" }.to_json if positions.size > 1000
|
||||
db = open_database
|
||||
positions.each do |pos|
|
||||
insert_position(db, pos)
|
||||
end
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
app.post "/api/neighbors" do
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
neighbor_payloads = data.is_a?(Array) ? data : [data]
|
||||
halt 400, { error: "too many neighbor packets" }.to_json if neighbor_payloads.size > 1000
|
||||
db = open_database
|
||||
neighbor_payloads.each do |packet|
|
||||
insert_neighbors(db, packet)
|
||||
end
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
|
||||
app.post "/api/telemetry" do
|
||||
require_token!
|
||||
content_type :json
|
||||
begin
|
||||
data = JSON.parse(read_json_body)
|
||||
rescue JSON::ParserError
|
||||
halt 400, { error: "invalid JSON" }.to_json
|
||||
end
|
||||
telemetry_packets = data.is_a?(Array) ? data : [data]
|
||||
halt 400, { error: "too many telemetry packets" }.to_json if telemetry_packets.size > 1000
|
||||
db = open_database
|
||||
telemetry_packets.each do |packet|
|
||||
insert_telemetry(db, packet)
|
||||
end
|
||||
{ status: "ok" }.to_json
|
||||
ensure
|
||||
db&.close
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -0,0 +1,79 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# frozen_string_literal: true

module PotatoMesh
  module App
    module Routes
      module Root
        def self.registered(app)
          app.get "/favicon.ico" do
            cache_control :public, max_age: PotatoMesh::Config.week_seconds
            ico_path = File.join(settings.public_folder, "favicon.ico")
            if File.file?(ico_path)
              send_file ico_path, type: "image/x-icon"
            else
              send_file File.join(settings.public_folder, "potatomesh-logo.svg"), type: "image/svg+xml"
            end
          end

          app.get "/potatomesh-logo.svg" do
            path = File.expand_path("potatomesh-logo.svg", settings.public_folder)
            settings.logger&.info("logo_path=#{path} exist=#{File.exist?(path)} file=#{File.file?(path)}")
            halt 404, "Not Found" unless File.exist?(path) && File.readable?(path)

            content_type "image/svg+xml"
            last_modified File.mtime(path)
            cache_control :public, max_age: 3600
            send_file path
          end

          app.get "/" do
            meta = meta_configuration
            config = frontend_app_config

            raw_theme = request.cookies["theme"]
            theme = %w[dark light].include?(raw_theme) ? raw_theme : "dark"
            if raw_theme != theme
              response.set_cookie("theme", value: theme, path: "/", max_age: 60 * 60 * 24 * 7, same_site: :lax)
            end

            erb :index, locals: {
              site_name: meta[:name],
              meta_title: meta[:title],
              meta_name: meta[:name],
              meta_description: meta[:description],
              channel: sanitized_channel,
              frequency: sanitized_frequency,
              map_center_lat: PotatoMesh::Config.map_center_lat,
              map_center_lon: PotatoMesh::Config.map_center_lon,
              max_distance_km: PotatoMesh::Config.max_distance_km,
              contact_link: sanitized_contact_link,
              contact_link_url: sanitized_contact_link_url,
              version: app_constant(:APP_VERSION),
              private_mode: private_mode?,
              refresh_interval_seconds: PotatoMesh::Config.refresh_interval_seconds,
              app_config_json: JSON.generate(config),
              initial_theme: theme,
            }
          end

          app.get "/metrics" do
            content_type ::Prometheus::Client::Formats::Text::CONTENT_TYPE
            ::Prometheus::Client::Formats::Text.marshal(::Prometheus::Client.registry)
          end
        end
      end
    end
  end
end
@@ -0,0 +1,507 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# frozen_string_literal: true

module PotatoMesh
  # Configuration wrapper responsible for exposing ENV backed settings used by
  # the web and data ingestion services.
  module Config
    module_function

    DEFAULT_DB_BUSY_TIMEOUT_MS = 5_000
    DEFAULT_DB_BUSY_MAX_RETRIES = 5
    DEFAULT_DB_BUSY_RETRY_DELAY = 0.05
    DEFAULT_MAX_JSON_BODY_BYTES = 1_048_576
    DEFAULT_REFRESH_INTERVAL_SECONDS = 60
    DEFAULT_TILE_FILTER_LIGHT = "grayscale(1) saturate(0) brightness(0.92) contrast(1.05)"
    DEFAULT_TILE_FILTER_DARK = "grayscale(1) invert(1) brightness(0.9) contrast(1.08)"
    DEFAULT_MAP_CENTER_LAT = 38.761944
    DEFAULT_MAP_CENTER_LON = -27.090833
    DEFAULT_MAP_CENTER = "#{DEFAULT_MAP_CENTER_LAT},#{DEFAULT_MAP_CENTER_LON}"
    DEFAULT_CHANNEL = "#LongFast"
    DEFAULT_FREQUENCY = "915MHz"
    DEFAULT_CONTACT_LINK = "#potatomesh:dod.ngo"
    DEFAULT_MAX_DISTANCE_KM = 42.0
    DEFAULT_REMOTE_INSTANCE_CONNECT_TIMEOUT = 5
    DEFAULT_REMOTE_INSTANCE_READ_TIMEOUT = 12
    DEFAULT_FEDERATION_MAX_INSTANCES_PER_RESPONSE = 64
    DEFAULT_FEDERATION_MAX_DOMAINS_PER_CRAWL = 256
    DEFAULT_INITIAL_FEDERATION_DELAY_SECONDS = 2

    # Resolve the absolute path to the web application root directory.
    #
    # @return [String] absolute filesystem path of the web folder.
    def web_root
      @web_root ||= File.expand_path("../..", __dir__)
    end

    # Resolve the repository root directory relative to the web folder.
    #
    # @return [String] path to the Git repository root.
    def repo_root
      @repo_root ||= File.expand_path("..", web_root)
    end

    # Resolve the current XDG data directory for PotatoMesh content.
    #
    # @return [String] absolute path to the PotatoMesh data directory.
    def data_directory
      File.join(resolve_xdg_home("XDG_DATA_HOME", %w[.local share]), "potato-mesh")
    end

    # Resolve the current XDG configuration directory for PotatoMesh files.
    #
    # @return [String] absolute path to the PotatoMesh configuration directory.
    def config_directory
      File.join(resolve_xdg_home("XDG_CONFIG_HOME", %w[.config]), "potato-mesh")
    end

    # Build the default SQLite database path inside the data directory.
    #
    # @return [String] absolute path to the managed +mesh.db+ file.
    def default_db_path
      File.join(data_directory, "mesh.db")
    end

    # Legacy database path bundled alongside the repository.
    #
    # @return [String] absolute path to the repository managed database file.
    def legacy_db_path
      File.expand_path("../data/mesh.db", web_root)
    end

    # Determine the configured database location, defaulting to the bundled
    # SQLite file.
    #
    # @return [String] absolute path to the database file.
    def db_path
      default_db_path
    end

    # Retrieve the SQLite busy timeout duration in milliseconds.
    #
    # @return [Integer] timeout value in milliseconds.
    def db_busy_timeout_ms
      DEFAULT_DB_BUSY_TIMEOUT_MS
    end

    # Retrieve the maximum number of retries when encountering SQLITE_BUSY.
    #
    # @return [Integer] maximum retry attempts.
    def db_busy_max_retries
      DEFAULT_DB_BUSY_MAX_RETRIES
    end

    # Retrieve the backoff delay between busy retries in seconds.
    #
    # @return [Float] seconds to wait between retries.
    def db_busy_retry_delay
      DEFAULT_DB_BUSY_RETRY_DELAY
    end

    # Convenience constant describing the number of seconds in a week.
    #
    # @return [Integer] seconds in seven days.
    def week_seconds
      7 * 24 * 60 * 60
    end

    # Default upper bound for accepted JSON payload sizes.
    #
    # @return [Integer] byte ceiling for HTTP request bodies.
    def default_max_json_body_bytes
      DEFAULT_MAX_JSON_BODY_BYTES
    end

    # Determine the maximum allowed JSON body size with validation.
    #
    # @return [Integer] configured byte limit.
    def max_json_body_bytes
      default_max_json_body_bytes
    end

    # Provide the fallback version string when git metadata is unavailable.
    #
    # @return [String] semantic version identifier.
    def version_fallback
      "v0.5.2"
    end

    # Default refresh interval for frontend polling routines.
    #
    # @return [Integer] refresh period in seconds.
    def default_refresh_interval_seconds
      DEFAULT_REFRESH_INTERVAL_SECONDS
    end

    # Fetch the refresh interval, ensuring a positive integer value.
    #
    # @return [Integer] polling cadence in seconds.
    def refresh_interval_seconds
      default_refresh_interval_seconds
    end

    # Retrieve the CSS filter used for light themed maps.
    #
    # @return [String] CSS filter string.
    def map_tile_filter_light
      DEFAULT_TILE_FILTER_LIGHT
    end

    # Retrieve the CSS filter used for dark themed maps.
    #
    # @return [String] CSS filter string for dark tiles.
    def map_tile_filter_dark
      DEFAULT_TILE_FILTER_DARK
    end

    # Provide a simple hash of tile filters for template use.
    #
    # @return [Hash] frozen mapping of themes to CSS filters.
    def tile_filters
      {
        light: map_tile_filter_light,
        dark: map_tile_filter_dark,
      }.freeze
    end

    # Retrieve the raw comma separated Prometheus report identifiers.
    #
    # @return [String] comma separated list of report IDs.
    def prom_report_ids
      ""
    end

    # Transform Prometheus report identifiers into a cleaned array.
    #
    # @return [Array<String>] list of unique report identifiers.
    def prom_report_id_list
      prom_report_ids.split(",").map(&:strip).reject(&:empty?)
    end

    # Path storing the instance private key used for signing.
    #
    # @return [String] absolute location of the PEM file.
    def keyfile_path
      File.join(config_directory, "keyfile")
    end

    # Sub-path used when exposing well known configuration files.
    #
    # @return [String] relative path within the public directory.
    def well_known_relative_path
      File.join(".well-known", "potato-mesh")
    end

    # Filesystem directory used to stage /.well-known artifacts.
    #
    # @return [String] absolute storage path.
    def well_known_storage_root
      File.join(config_directory, "well-known")
    end

    # Legacy configuration directory bundled with the repository.
    #
    # @return [String] absolute path to the repository managed configuration directory.
    def legacy_config_directory
      File.join(web_root, ".config")
    end

    # Legacy keyfile location used before introducing XDG directories.
    #
    # @return [String] absolute filesystem path to the legacy keyfile.
    def legacy_keyfile_path
      legacy_keyfile_candidates.find { |path| File.exist?(path) } || legacy_keyfile_candidates.first
    end

    # Enumerate known legacy keyfile locations for migration.
    #
    # @return [Array<String>] ordered list of absolute legacy keyfile paths.
    def legacy_keyfile_candidates
      [
        File.join(web_root, ".config", "keyfile"),
        File.join(web_root, ".config", "potato-mesh", "keyfile"),
        File.join(web_root, "config", "keyfile"),
        File.join(web_root, "config", "potato-mesh", "keyfile"),
      ].map { |path| File.expand_path(path) }.uniq
    end

    # Legacy location for well known assets within the public folder.
    #
    # @return [String] absolute path to the legacy output directory.
    def legacy_public_well_known_path
      File.join(web_root, "public", well_known_relative_path)
    end

    # Enumerate known legacy well-known document locations for migration.
    #
    # @return [Array<String>] ordered list of absolute legacy well-known document paths.
    def legacy_well_known_candidates
      filename = File.basename(well_known_relative_path)
      [
        File.join(web_root, ".config", "well-known", filename),
        File.join(web_root, ".config", ".well-known", filename),
        File.join(web_root, ".config", "potato-mesh", "well-known", filename),
        File.join(web_root, ".config", "potato-mesh", ".well-known", filename),
        File.join(web_root, "config", "well-known", filename),
        File.join(web_root, "config", ".well-known", filename),
        File.join(web_root, "config", "potato-mesh", "well-known", filename),
        File.join(web_root, "config", "potato-mesh", ".well-known", filename),
      ].map { |path| File.expand_path(path) }.uniq
    end

    # Interval used to refresh well known documents from disk.
    #
    # @return [Integer] refresh duration in seconds.
    def well_known_refresh_interval
      24 * 60 * 60
    end

    # Cryptographic algorithm identifier for HTTP signatures.
    #
    # @return [String] RFC-compliant algorithm label.
    def instance_signature_algorithm
      "rsa-sha256"
    end

    # Connection timeout used when establishing federation HTTP sockets.
    #
    # @return [Integer] connect timeout in seconds.
    def remote_instance_http_timeout
      DEFAULT_REMOTE_INSTANCE_CONNECT_TIMEOUT
    end

    # Read timeout used when streaming federation HTTP responses.
    #
    # @return [Integer] read timeout in seconds.
    def remote_instance_read_timeout
      DEFAULT_REMOTE_INSTANCE_READ_TIMEOUT
    end

    # Limit the number of remote instances processed from a single response.
    #
    # @return [Integer] maximum entries processed per /api/instances payload.
    def federation_max_instances_per_response
      fetch_positive_integer(
        "FEDERATION_MAX_INSTANCES_PER_RESPONSE",
        DEFAULT_FEDERATION_MAX_INSTANCES_PER_RESPONSE,
      )
    end

    # Limit the total number of distinct domains crawled during one ingestion.
    #
    # @return [Integer] maximum unique domains visited per crawl.
    def federation_max_domains_per_crawl
      fetch_positive_integer(
        "FEDERATION_MAX_DOMAINS_PER_CRAWL",
        DEFAULT_FEDERATION_MAX_DOMAINS_PER_CRAWL,
      )
    end

    # Maximum acceptable age for remote node data.
    #
    # @return [Integer] seconds before remote nodes are considered stale.
    def remote_instance_max_node_age
      86_400
    end

    # Minimum node count expected from a remote instance before storing.
    #
    # @return [Integer] node threshold for remote ingestion.
    def remote_instance_min_node_count
      10
    end

    # Domains used to seed the federation discovery process.
    #
    # @return [Array<String>] list of default seed domains.
    def federation_seed_domains
      ["potatomesh.net"].freeze
    end

    # Determine how often we broadcast federation announcements.
    #
    # @return [Integer] number of seconds between announcement cycles.
    def federation_announcement_interval
      8 * 60 * 60
    end

    # Determine the grace period before sending the initial federation announcement.
    #
    # @return [Integer] seconds to wait before the first broadcast cycle.
    def initial_federation_delay_seconds
      fetch_positive_integer(
        "INITIAL_FEDERATION_DELAY_SECONDS",
        DEFAULT_INITIAL_FEDERATION_DELAY_SECONDS,
      )
    end

    # Retrieve the configured site name for presentation.
    #
    # @return [String] human friendly site label.
    def site_name
      fetch_string("SITE_NAME", "PotatoMesh Demo")
    end

    # Retrieve the default radio channel label.
    #
    # @return [String] channel name from configuration.
    def channel
      fetch_string("CHANNEL", DEFAULT_CHANNEL)
    end

    # Retrieve the default radio frequency description.
    #
    # @return [String] frequency identifier.
    def frequency
      fetch_string("FREQUENCY", DEFAULT_FREQUENCY)
    end

    # Parse the configured map centre coordinates.
    #
    # @return [Hash{Symbol=>Float}] latitude and longitude in decimal degrees.
    def map_center
      raw = fetch_string("MAP_CENTER", DEFAULT_MAP_CENTER)
      lat_str, lon_str = raw.split(",", 2).map { |part| part&.strip }.compact
      lat = Float(lat_str, exception: false)
      lon = Float(lon_str, exception: false)
      lat = DEFAULT_MAP_CENTER_LAT unless lat
      lon = DEFAULT_MAP_CENTER_LON unless lon
      { lat: lat, lon: lon }
    end

    # Map display latitude centre for the frontend map widget.
    #
    # @return [Float] latitude in decimal degrees.
    def map_center_lat
      map_center[:lat]
    end

    # Map display longitude centre for the frontend map widget.
    #
    # @return [Float] longitude in decimal degrees.
    def map_center_lon
      map_center[:lon]
    end

    # Maximum straight-line distance between nodes before relationships are
    # hidden.
    #
    # @return [Float] distance in kilometres.
    def max_distance_km
      raw = fetch_string("MAX_DISTANCE", nil)
      parsed = raw && Float(raw, exception: false)
      return parsed if parsed && parsed.positive?

      DEFAULT_MAX_DISTANCE_KM
    end

    # Contact link for community discussion.
    #
    # @return [String] contact URI or identifier.
    def contact_link
      fetch_string("CONTACT_LINK", DEFAULT_CONTACT_LINK)
    end

    # Determine the best URL to represent the configured contact link.
    #
    # @return [String, nil] absolute URL when derivable, otherwise nil.
    def contact_link_url
      link = contact_link.to_s.strip
      return nil if link.empty?

      if matrix_alias?(link)
        "https://matrix.to/#/#{link}"
      elsif link.match?(%r{\Ahttps?://}i)
        link
      else
        nil
      end
    end

    # Check whether a contact link is a Matrix room alias.
    #
    # @param link [String] candidate link string.
    # @return [Boolean] true when the link resembles a Matrix alias.
    def matrix_alias?(link)
      link.match?(/\A[#!][^\s:]+:[^\s]+\z/)
    end

    # Check whether verbose debugging is enabled for the runtime.
    #
    # @return [Boolean] true when DEBUG=1.
    def debug?
      ENV["DEBUG"] == "1"
    end

    # Fetch and sanitise string based configuration values.
    #
    # @param key [String] environment variable to read.
    # @param default [String] fallback value when unset or blank.
    # @return [String] cleaned configuration string.
    def fetch_string(key, default)
      value = ENV[key]
      return default if value.nil?

      trimmed = value.strip
      trimmed.empty? ? default : trimmed
    end

    # Fetch and validate integer based configuration flags.
    #
    # @param key [String] environment variable to read.
    # @param default [Integer] fallback value when unset or invalid.
    # @return [Integer] positive integer sourced from configuration.
    def fetch_positive_integer(key, default)
      value = ENV[key]
      return default if value.nil?

      trimmed = value.strip
      return default if trimmed.empty?

      begin
        parsed = Integer(trimmed, 10)
      rescue ArgumentError
        return default
      end

      parsed.positive? ? parsed : default
    end

    # Resolve the effective XDG directory honoring environment overrides.
    #
    # @param env_key [String] name of the environment variable to inspect.
    # @param fallback_segments [Array<String>] path segments appended to the user home directory.
    # @return [String] absolute base directory referenced by the XDG variable.
    def resolve_xdg_home(env_key, fallback_segments)
      raw = fetch_string(env_key, nil)
      candidate = raw && !raw.empty? ? raw : nil
      return File.expand_path(candidate) if candidate

      base_home = safe_home_directory
      File.expand_path(File.join(base_home, *fallback_segments))
    end

    # Retrieve the current user's home directory handling runtime failures.
    #
    # @return [String] absolute path to the user home or web root fallback.
    def safe_home_directory
      home = Dir.home
      return web_root if home.nil? || home.empty?

      home
    rescue ArgumentError, RuntimeError
      web_root
    end
  end
end
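Most accessors above resolve straight to module constants, while the federation limits, site labels, and map centre read from ENV through `fetch_string` / `fetch_positive_integer`. A small sketch of how a deployment might exercise those ENV-backed helpers (the variable values are illustrative only):

```ruby
# Illustrative only; the ENV values are made up for the example.
ENV["MAP_CENTER"] = "52.520008, 13.404954"
ENV["FEDERATION_MAX_INSTANCES_PER_RESPONSE"] = "32"
ENV["MAX_DISTANCE"] = "not-a-number"

PotatoMesh::Config.map_center
# => { lat: 52.520008, lon: 13.404954 }
PotatoMesh::Config.federation_max_instances_per_response
# => 32
PotatoMesh::Config.max_distance_km
# => 42.0 (the invalid MAX_DISTANCE falls back to DEFAULT_MAX_DISTANCE_KM)
```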
@@ -0,0 +1,87 @@
# frozen_string_literal: true

require "logger"
require "time"

module PotatoMesh
  # Logging utilities shared across the web application.
  module Logging
    LOGGER_NAME = "potato-mesh" # :nodoc:

    module_function

    # Build a logger configured with the potato-mesh formatter.
    #
    # @param io [#write] destination for log output.
    # @return [Logger] configured logger instance.
    def build_logger(io = $stdout)
      logger = Logger.new(io)
      logger.progname = LOGGER_NAME
      logger.formatter = method(:formatter)
      logger
    end

    # Format log entries with a consistent structure understood by the UI.
    #
    # @param severity [String] Ruby logger severity constant (e.g., "DEBUG").
    # @param time [Time] timestamp when the log entry was created.
    # @param progname [String, nil] optional application name emitting the log.
    # @param message [String] body of the log message.
    # @return [String] formatted log entry.
    def formatter(severity, time, progname, message)
      timestamp = time.utc.iso8601(3)
      body = message.is_a?(String) ? message : message.inspect
      "[#{timestamp}] [#{progname || LOGGER_NAME}] [#{severity.downcase}] #{body}\n"
    end

    # Emit a structured log entry to the provided logger instance.
    #
    # @param logger [Logger, nil] logger to emit against.
    # @param severity [Symbol] target severity (e.g., :debug, :info).
    # @param message [String] primary message text.
    # @param context [String, nil] logical component generating the entry.
    # @param metadata [Hash] supplemental structured data for the log.
    # @return [void]
    def log(logger, severity, message, context: nil, **metadata)
      return unless logger

      parts = []
      parts << "context=#{context}" if context
      metadata.each do |key, value|
        parts << format_metadata_pair(key, value)
      end
      parts << message

      logger.public_send(severity, parts.join(" "))
    end

    # Retrieve the canonical logger for the web application.
    #
    # @param target [Object, nil] object with optional +settings.logger+ accessor.
    # @return [Logger, nil] logger instance when available.
    def logger_for(target = nil)
      if target.respond_to?(:settings) && target.settings.respond_to?(:logger)
        return target.settings.logger
      end

      if defined?(PotatoMesh::Application) &&
          PotatoMesh::Application.respond_to?(:settings) &&
          PotatoMesh::Application.settings.respond_to?(:logger)
        return PotatoMesh::Application.settings.logger
      end

      nil
    end

    # Format metadata key/value pairs for structured logging output.
    #
    # @param key [Symbol, String]
    # @param value [Object]
    # @return [String]
    def format_metadata_pair(key, value)
      "#{key}=#{value.inspect}"
    end

    private_class_method :format_metadata_pair
  end
end
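A short usage sketch of the helpers above; the emitted line shape follows `formatter`, and the timestamp shown is illustrative:

```ruby
# Sketch of the logging helpers defined above; the timestamp is illustrative.
logger = PotatoMesh::Logging.build_logger($stdout)
PotatoMesh::Logging.log(logger, :info, "Registered remote instance",
                        context: "ingest.register", domain: "mesh.example.org")
# => [2025-01-01T12:00:00.000Z] [potato-mesh] [info] context=ingest.register domain="mesh.example.org" Registered remote instance
```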
@@ -0,0 +1,80 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# frozen_string_literal: true

require_relative "config"
require_relative "sanitizer"

module PotatoMesh
  # Helper functions used to generate SEO metadata and formatted values.
  module Meta
    module_function

    # Format a distance in kilometres without trailing decimal precision when unnecessary.
    #
    # @param distance [Numeric] distance in kilometres.
    # @return [String] formatted kilometre value.
    def formatted_distance_km(distance)
      format("%.1f", distance).sub(/\.0\z/, "")
    end

    # Construct the meta description string displayed to search engines and social previews.
    #
    # @param private_mode [Boolean] whether private mode is enabled.
    # @return [String] generated description text.
    def description(private_mode:)
      site = Sanitizer.sanitized_site_name
      channel = Sanitizer.sanitized_channel
      frequency = Sanitizer.sanitized_frequency
      contact = Sanitizer.sanitized_contact_link

      summary = "Live Meshtastic mesh map for #{site}"
      if channel.empty? && frequency.empty?
        summary += "."
      elsif channel.empty?
        summary += " tuned to #{frequency}."
      elsif frequency.empty?
        summary += " on #{channel}."
      else
        summary += " on #{channel} (#{frequency})."
      end

      activity_sentence = if private_mode
        "Track nodes and coverage in real time."
      else
        "Track nodes, messages, and coverage in real time."
      end

      sentences = [summary, activity_sentence]
      if (distance = Sanitizer.sanitized_max_distance_km)
        sentences << "Shows nodes within roughly #{formatted_distance_km(distance)} km of the map center."
      end
      sentences << "Join the community in #{contact} via chat." if contact

      sentences.join(" ")
    end

    # Build a hash of meta configuration values used by templating layers.
    #
    # @param private_mode [Boolean] whether private mode is enabled.
    # @return [Hash] structured metadata for templates.
    def configuration(private_mode:)
      site = Sanitizer.sanitized_site_name
      {
        title: site,
        name: site,
        description: description(private_mode: private_mode),
      }.freeze
    end
  end
end
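For a sense of the output, with the default site name, channel, frequency, contact link, and max distance shown in the Config module above, `Meta.description` would produce roughly the following (a sketch; the exact string depends on the configured ENV values):

```ruby
# Approximate output under the defaults from config.rb; illustrative only.
PotatoMesh::Meta.description(private_mode: false)
# => "Live Meshtastic mesh map for PotatoMesh Demo on #LongFast (915MHz). " \
#    "Track nodes, messages, and coverage in real time. " \
#    "Shows nodes within roughly 42 km of the map center. " \
#    "Join the community in #potatomesh:dod.ngo via chat."
```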
@@ -0,0 +1,240 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# frozen_string_literal: true

require "ipaddr"

require_relative "config"

module PotatoMesh
  # Utility module responsible for coercing and sanitising user provided
  # configuration strings. Each helper is exposed as a module function so it
  # can be consumed both by the web layer and background jobs without
  # instantiation overhead.
  module Sanitizer
    module_function

    # Coerce an arbitrary value into a trimmed string unless the content is
    # empty.
    #
    # @param value [Object, nil] arbitrary input that should be converted.
    # @return [String, nil] trimmed string representation or +nil+ when blank.
    def string_or_nil(value)
      return nil if value.nil?

      str = value.is_a?(String) ? value : value.to_s
      trimmed = str.strip
      trimmed.empty? ? nil : trimmed
    end

    # Ensure a value is a valid instance domain according to RFC 1035/3986
    # rules. Hostnames must include at least one dot-separated label and a
    # top-level domain containing an alphabetic character. Literal IP
    # addresses must be provided in standard dotted decimal form or enclosed in
    # brackets when IPv6 notation is used. Optional ports must fall within the
    # valid TCP/UDP range. Any opaque identifiers, URIs, or malformed hosts are
    # rejected.
    #
    # @param value [String, Object, nil] candidate domain name.
    # @param downcase [Boolean] whether to force the result to lowercase.
    # @return [String, nil] canonical domain value or +nil+ when invalid.
    def sanitize_instance_domain(value, downcase: true)
      host = string_or_nil(value)
      return nil unless host

      trimmed = host.strip
      trimmed = trimmed.delete_suffix(".") while trimmed.end_with?(".")
      return nil if trimmed.empty?
      return nil if trimmed.match?(%r{[\s/\\@]})

      if trimmed.start_with?("[")
        match = trimmed.match(/\A\[(?<address>[^\]]+)\](?::(?<port>\d+))?\z/)
        return nil unless match

        address = match[:address]
        port = match[:port]

        return nil if port && !valid_port?(port)

        begin
          IPAddr.new(address)
        rescue IPAddr::InvalidAddressError
          return nil
        end

        sanitized_address = downcase ? address.downcase : address
        return "[#{sanitized_address}]#{port ? ":#{port}" : ""}"
      end

      domain = trimmed
      port = nil

      if domain.include?(":")
        host_part, port_part = domain.split(":", 2)
        return nil if host_part.nil? || host_part.empty?
        return nil unless port_part && port_part.match?(/\A\d+\z/)
        return nil unless valid_port?(port_part)
        return nil if port_part.include?(":")

        domain = host_part
        port = port_part
      end

      unless valid_hostname?(domain) || valid_ipv4_literal?(domain)
        return nil
      end

      sanitized_domain = downcase ? domain.downcase : domain
      port ? "#{sanitized_domain}:#{port}" : sanitized_domain
    end

    # Determine whether the supplied hostname conforms to RFC 1035 label
    # requirements and includes a valid top-level domain.
    #
    # @param hostname [String] host component without any port information.
    # @return [Boolean] true when the hostname is valid.
    def valid_hostname?(hostname)
      return false if hostname.length > 253

      labels = hostname.split(".")
      return false if labels.length < 2
      return false unless labels.all? { |label| valid_hostname_label?(label) }

      top_level = labels.last
      top_level.match?(/[a-z]/i)
    end

    # Validate a single hostname label ensuring the first and last characters
    # are alphanumeric and that no unsupported symbols are present.
    #
    # @param label [String] hostname component between dots.
    # @return [Boolean] true when the label is valid.
    def valid_hostname_label?(label)
      return false if label.empty?
      return false if label.length > 63

      label.match?(/\A[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\z/i)
    end

    # Validate whether a candidate represents a dotted decimal IPv4 literal.
    #
    # @param address [String] IP address string without port information.
    # @return [Boolean] true when the address is a valid IPv4 literal.
    def valid_ipv4_literal?(address)
      return false unless address.match?(/\A\d{1,3}(?:\.\d{1,3}){3}\z/)

      address.split(".").all? { |octet| octet.to_i.between?(0, 255) }
    end

    # Determine whether a port string represents a valid TCP/UDP port.
    #
    # @param port [String] numeric port representation.
    # @return [Boolean] true when the port falls within the acceptable range.
    def valid_port?(port)
      value = port.to_i
      value.positive? && value <= 65_535
    end

    # Extract the host component from a potentially bracketed domain literal.
    #
    # @param domain [String, nil] raw domain string received from the user.
    # @return [String, nil] host portion of the domain, or +nil+ when invalid.
    def instance_domain_host(domain)
      return nil if domain.nil?

      candidate = domain.strip
      return nil if candidate.empty?

      if candidate.start_with?("[")
        match = candidate.match(/\A\[(?<host>[^\]]+)\](?::(?<port>\d+))?\z/)
        return match[:host] if match
        return nil
      end

      host, port = candidate.split(":", 2)
      if port && !host.include?(":") && port.match?(/\A\d+\z/)
        return host
      end

      candidate
    end

    # Resolve a validated domain string into an IP address object.
    #
    # @param domain [String, nil] domain literal potentially including port.
    # @return [IPAddr, nil] parsed IP address when valid.
    def ip_from_domain(domain)
      host = instance_domain_host(domain)
      return nil unless host

      IPAddr.new(host)
    rescue IPAddr::InvalidAddressError
      nil
    end

    # Normalise a value into a trimmed string representation.
    #
    # @param value [Object] arbitrary object to coerce into text.
    # @return [String] trimmed string version of the supplied value.
    def sanitized_string(value)
      value.to_s.strip
    end

    # Retrieve the configured site name as a cleaned string.
    #
    # @return [String] trimmed configuration value.
    def sanitized_site_name
      sanitized_string(Config.site_name)
    end

    # Retrieve the configured channel as a cleaned string.
    #
    # @return [String] trimmed configuration value.
    def sanitized_channel
      sanitized_string(Config.channel)
    end

    # Retrieve the configured frequency as a cleaned string.
    #
    # @return [String] trimmed configuration value.
    def sanitized_frequency
      sanitized_string(Config.frequency)
    end

    # Retrieve the configured contact link and normalise blank values to nil.
    #
    # @return [String, nil] contact link identifier or +nil+ when blank.
    def sanitized_contact_link
      value = sanitized_string(Config.contact_link)
      value.empty? ? nil : value
    end

    # Retrieve the best effort URL for the configured contact link.
    #
    # @return [String, nil] contact hyperlink when derivable.
    def sanitized_contact_link_url
      Config.contact_link_url
    end

    # Return a positive numeric maximum distance when configured.
    #
    # @return [Numeric, nil] distance value in kilometres.
    def sanitized_max_distance_km
      distance = Config.max_distance_km
      return nil unless distance.is_a?(Numeric)
      return nil unless distance.positive?

      distance
    end
  end
end
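A few illustrative calls against `sanitize_instance_domain`, matching the rules documented above (results shown as comments):

```ruby
# Illustrative examples of the domain validation rules above.
PotatoMesh::Sanitizer.sanitize_instance_domain("Mesh.Example.ORG:8443")
# => "mesh.example.org:8443"
PotatoMesh::Sanitizer.sanitize_instance_domain("[2001:db8::1]:443")
# => "[2001:db8::1]:443"
PotatoMesh::Sanitizer.sanitize_instance_domain("localhost")
# => nil (no dot-separated top-level domain)
PotatoMesh::Sanitizer.sanitize_instance_domain("https://mesh.example.org")
# => nil (URIs are rejected because of the "/" characters)
```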
Generated +12
@@ -0,0 +1,12 @@
{
  "name": "potato-mesh",
  "version": "0.5.0",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "potato-mesh",
      "version": "0.5.0"
    }
  }
}
@@ -0,0 +1,9 @@
{
  "name": "potato-mesh",
  "version": "0.5.0",
  "type": "module",
  "private": true,
  "scripts": {
    "test": "mkdir -p reports coverage && NODE_V8_COVERAGE=coverage node --test --experimental-test-coverage --test-reporter=spec --test-reporter-destination=stdout --test-reporter=junit --test-reporter-destination=reports/javascript-junit.xml && node ./scripts/export-coverage.js"
  }
}
@@ -0,0 +1,126 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';

import {
  extractChatMessageMetadata,
  formatChatMessagePrefix,
  formatChatChannelTag,
  formatNodeAnnouncementPrefix,
  __test__
} from '../chat-format.js';

const {
  firstNonNull,
  normalizeString,
  normalizeFrequency,
  normalizeFrequencySlot,
  FREQUENCY_PLACEHOLDER
} = __test__;

test('extractChatMessageMetadata prefers explicit region_frequency and channel_name', () => {
  const payload = {
    region_frequency: 868,
    channel_name: ' Test Channel ',
    lora_freq: 915,
    channelName: 'Ignored'
  };
  const result = extractChatMessageMetadata(payload);
  assert.deepEqual(result, { frequency: '868', channelName: 'Test Channel' });
});

test('extractChatMessageMetadata falls back to LoRa metadata', () => {
  const payload = {
    lora_freq: 915,
    channelName: 'SpecChannel'
  };
  const result = extractChatMessageMetadata(payload);
  assert.deepEqual(result, { frequency: '915', channelName: 'SpecChannel' });
});

test('extractChatMessageMetadata returns null metadata for invalid input', () => {
  assert.deepEqual(extractChatMessageMetadata(null), { frequency: null, channelName: null });
  assert.deepEqual(extractChatMessageMetadata(undefined), { frequency: null, channelName: null });
});

test('firstNonNull returns the first non-null candidate', () => {
  assert.equal(firstNonNull(null, undefined, '', 'value'), '');
  assert.equal(firstNonNull(undefined, null), null);
});

test('normalizeString trims strings and rejects empties', () => {
  assert.equal(normalizeString(' Spec '), 'Spec');
  assert.equal(normalizeString(' '), null);
  assert.equal(normalizeString(123), '123');
  assert.equal(normalizeString(Number.POSITIVE_INFINITY), null);
});

test('normalizeFrequency handles numeric and string inputs', () => {
  assert.equal(normalizeFrequency(915), '915');
  assert.equal(normalizeFrequency(868.125), '868.125');
  assert.equal(normalizeFrequency(' 868MHz '), '868');
  assert.equal(normalizeFrequency('n/a'), 'n/a');
  assert.equal(normalizeFrequency(-5), null);
  assert.equal(normalizeFrequency(null), null);
});

test('formatChatMessagePrefix preserves bracket placeholders', () => {
  assert.equal(
    formatChatMessagePrefix({ timestamp: '11:46:48', frequency: '868' }),
    '[11:46:48][868]'
  );
  assert.equal(
    formatChatMessagePrefix({ timestamp: '16:19:19', frequency: null }),
    `[16:19:19][${FREQUENCY_PLACEHOLDER}]`
  );
  assert.equal(
    formatChatMessagePrefix({ timestamp: '09:00:00', frequency: '' }),
    `[09:00:00][${FREQUENCY_PLACEHOLDER}]`
  );
});

test('formatChatChannelTag wraps channel names after the short name slot', () => {
  assert.equal(
    formatChatChannelTag({ channelName: 'TEST' }),
    '[TEST]'
  );
  assert.equal(
    formatChatChannelTag({ channelName: '' }),
    '[]'
  );
  assert.equal(
    formatChatChannelTag({ channelName: null }),
    '[]'
  );
});

test('formatNodeAnnouncementPrefix includes optional frequency bracket', () => {
  assert.equal(
    formatNodeAnnouncementPrefix({ timestamp: '12:34:56', frequency: '868' }),
    '[12:34:56][868]'
  );
  assert.equal(
    formatNodeAnnouncementPrefix({ timestamp: '01:02:03', frequency: null }),
    `[01:02:03][${FREQUENCY_PLACEHOLDER}]`
  );
});

test('normalizeFrequencySlot returns placeholder when frequency is missing', () => {
  assert.equal(normalizeFrequencySlot(null), FREQUENCY_PLACEHOLDER);
  assert.equal(normalizeFrequencySlot(''), FREQUENCY_PLACEHOLDER);
  assert.equal(normalizeFrequencySlot(undefined), FREQUENCY_PLACEHOLDER);
  assert.equal(normalizeFrequencySlot('915'), '915');
});
@@ -0,0 +1,117 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';
import { documentStub, resetDocumentStub } from './document-stub.js';

import { readAppConfig } from '../config.js';
import { DEFAULT_CONFIG, mergeConfig } from '../settings.js';

test('readAppConfig returns an empty object when the configuration element is missing', () => {
  resetDocumentStub();
  assert.deepEqual(readAppConfig(), {});
});

test('readAppConfig returns an empty object when the attribute is empty', () => {
  resetDocumentStub();
  documentStub.setConfigElement({ getAttribute: () => '' });
  assert.deepEqual(readAppConfig(), {});
});

test('readAppConfig parses configuration JSON from the DOM attribute', () => {
  resetDocumentStub();
  const data = { refreshMs: 5000, chatEnabled: false };
  documentStub.setConfigElement({
    getAttribute: name => (name === 'data-app-config' ? JSON.stringify(data) : null)
  });
  assert.deepEqual(readAppConfig(), data);
});

test('readAppConfig returns an empty object and logs on parse failure', () => {
  resetDocumentStub();
  let called = false;
  const originalError = console.error;
  console.error = () => {
    called = true;
  };
  documentStub.setConfigElement({
    getAttribute: name => (name === 'data-app-config' ? 'not-json' : null)
  });

  assert.deepEqual(readAppConfig(), {});
  assert.equal(called, true);
  console.error = originalError;
});

test('readAppConfig ignores non-object JSON payloads', () => {
  resetDocumentStub();
  documentStub.setConfigElement({
    getAttribute: name => (name === 'data-app-config' ? '42' : null)
  });

  assert.deepEqual(readAppConfig(), {});
});

test('mergeConfig applies default values when fields are missing', () => {
  const result = mergeConfig({});
  assert.deepEqual(result, {
    ...DEFAULT_CONFIG,
    mapCenter: { ...DEFAULT_CONFIG.mapCenter },
    tileFilters: { ...DEFAULT_CONFIG.tileFilters }
  });
});

test('mergeConfig coerces numeric values and nested objects', () => {
  const result = mergeConfig({
    refreshIntervalSeconds: '30',
    refreshMs: '45000',
    mapCenter: { lat: '10.5', lon: '20.1' },
    tileFilters: { dark: 'contrast(2)' },
    chatEnabled: 0,
    channel: '#Custom',
    frequency: '915MHz',
    contactLink: 'https://example.org/chat',
    contactLinkUrl: 'https://example.org/chat',
    maxDistanceKm: '55.5'
  });

  assert.equal(result.refreshIntervalSeconds, 30);
  assert.equal(result.refreshMs, 45000);
  assert.deepEqual(result.mapCenter, { lat: 10.5, lon: 20.1 });
  assert.deepEqual(result.tileFilters, { light: DEFAULT_CONFIG.tileFilters.light, dark: 'contrast(2)' });
  assert.equal(result.chatEnabled, false);
  assert.equal(result.channel, '#Custom');
  assert.equal(result.frequency, '915MHz');
  assert.equal(result.contactLink, 'https://example.org/chat');
  assert.equal(result.contactLinkUrl, 'https://example.org/chat');
  assert.equal(result.maxDistanceKm, 55.5);
});

test('mergeConfig falls back to defaults for invalid numeric values', () => {
  const result = mergeConfig({
    refreshIntervalSeconds: 'NaN',
    refreshMs: 'NaN',
    maxDistanceKm: 'oops'
  });

  assert.equal(result.refreshIntervalSeconds, DEFAULT_CONFIG.refreshIntervalSeconds);
  assert.equal(result.refreshMs, DEFAULT_CONFIG.refreshMs);
  assert.equal(result.maxDistanceKm, DEFAULT_CONFIG.maxDistanceKm);
});

test('document stub returns null for unrelated selectors', () => {
  resetDocumentStub();
  assert.equal(documentStub.querySelector('#missing'), null);
});
@@ -0,0 +1,99 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Minimal document implementation that exposes the subset of behaviour needed
 * by the front-end modules during unit tests.
 */
class DocumentStub {
  /**
   * Instantiate a new stub with a clean internal state.
   */
  constructor() {
    this.reset();
  }

  /**
   * Clear tracked configuration elements and registered event listeners.
   *
   * @returns {void}
   */
  reset() {
    this.configElement = null;
    this.listeners = new Map();
  }

  /**
   * Provide an element that will be returned by ``querySelector`` when the
   * configuration selector is requested.
   *
   * @param {?Element} element DOM node exposing ``getAttribute``.
   * @returns {void}
   */
  setConfigElement(element) {
    this.configElement = element;
  }

  /**
   * Return the registered configuration element when the matching selector is
   * provided.
   *
   * @param {string} selector CSS selector requested by the module under test.
   * @returns {?Element} Config element or ``null`` when unavailable.
   */
  querySelector(selector) {
    if (selector === '[data-app-config]') {
      return this.configElement;
    }
    return null;
  }

  /**
   * Register an event handler, mirroring the DOM ``addEventListener`` API.
   *
   * @param {string} event Event identifier.
   * @param {Function} handler Callback invoked when ``dispatchEvent`` is
   *   called.
   * @returns {void}
   */
  addEventListener(event, handler) {
    this.listeners.set(event, handler);
  }

  /**
   * Trigger a previously registered listener.
   *
   * @param {string} event Event identifier used when registering the handler.
   * @returns {void}
   */
  dispatchEvent(event) {
    const handler = this.listeners.get(event);
    if (handler) {
      handler();
    }
  }
}

export const documentStub = new DocumentStub();

/**
 * Reset the shared stub between test cases to avoid state bleed.
 *
 * @returns {void}
 */
export function resetDocumentStub() {
  documentStub.reset();
}

globalThis.document = documentStub;
@@ -0,0 +1,292 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Simple class list implementation supporting the subset of DOMTokenList
|
||||
* behaviour required by the tests.
|
||||
*/
|
||||
class MockClassList {
|
||||
constructor() {
|
||||
this._values = new Set();
|
||||
}
|
||||
|
||||
/**
|
||||
* Add one or more CSS classes to the element.
|
||||
*
|
||||
* @param {...string} names Class names to insert into the list.
|
||||
* @returns {void}
|
||||
*/
|
||||
add(...names) {
|
||||
names.forEach(name => {
|
||||
if (name) {
|
||||
this._values.add(name);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove one or more CSS classes from the element.
|
||||
*
|
||||
* @param {...string} names Class names to delete from the list.
|
||||
* @returns {void}
|
||||
*/
|
||||
remove(...names) {
|
||||
names.forEach(name => {
|
||||
if (name) {
|
||||
this._values.delete(name);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine whether the class list currently contains ``name``.
|
||||
*
|
||||
* @param {string} name Target class name.
|
||||
* @returns {boolean} ``true`` when the class is present.
|
||||
*/
|
||||
contains(name) {
|
||||
return this._values.has(name);
|
||||
}
|
||||
|
||||
/**
|
||||
* Toggle the provided class name.
|
||||
*
|
||||
* @param {string} name Class name to toggle.
|
||||
* @param {boolean} [force] Optional forced state mirroring ``DOMTokenList``.
|
||||
* @returns {boolean} ``true`` when the class is present after toggling.
|
||||
*/
|
||||
toggle(name, force) {
|
||||
if (force === true) {
|
||||
this._values.add(name);
|
||||
return true;
|
||||
}
|
||||
if (force === false) {
|
||||
this._values.delete(name);
|
||||
return false;
|
||||
}
|
||||
if (this._values.has(name)) {
|
||||
this._values.delete(name);
|
||||
return false;
|
||||
}
|
||||
this._values.add(name);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Minimal DOM element implementation exposing the subset of behaviour exercised
|
||||
* by the frontend entrypoints.
|
||||
*/
|
||||
class MockElement {
|
||||
/**
|
||||
* @param {string} tagName Element name used for diagnostics.
|
||||
* @param {Map<string, MockElement>} registry Storage shared with the
|
||||
* containing document to support ``getElementById``.
|
||||
*/
|
||||
constructor(tagName, registry) {
|
||||
this.tagName = tagName.toUpperCase();
|
||||
this._registry = registry;
|
||||
this.attributes = new Map();
|
||||
this.dataset = {};
|
||||
this.style = {};
|
||||
this.textContent = '';
|
||||
this.classList = new MockClassList();
|
||||
}
|
||||
|
||||
/**
|
||||
* Associate an attribute with the element.
|
||||
*
|
||||
* @param {string} name Attribute identifier.
|
||||
* @param {string} value Attribute value.
|
||||
* @returns {void}
|
||||
*/
|
||||
setAttribute(name, value) {
|
||||
this.attributes.set(name, String(value));
|
||||
if (name === 'id' && this._registry) {
|
||||
this._registry.set(String(value), this);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve an attribute value.
|
||||
*
|
||||
* @param {string} name Attribute identifier.
|
||||
* @returns {?string} Matching attribute or ``null`` when absent.
|
||||
*/
|
||||
getAttribute(name) {
|
||||
return this.attributes.has(name) ? this.attributes.get(name) : null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a deterministic DOM environment that provides just enough behaviour
|
||||
* for the UI scripts to execute inside Node.js unit tests.
|
||||
*
|
||||
* @param {{
|
||||
* readyState?: 'loading' | 'interactive' | 'complete',
|
||||
* cookie?: string,
|
||||
* includeBody?: boolean,
|
||||
* bodyHasDarkClass?: boolean
|
||||
* }} [options]
|
||||
* @returns {{
|
||||
* window: Window & { dispatchEvent: Function },
|
||||
* document: Document,
|
||||
* createElement: (tagName?: string, id?: string) => MockElement,
|
||||
* registerElement: (id: string, element: MockElement) => void,
|
||||
* setComputedStyleImplementation: (impl: Function) => void,
|
||||
* triggerDOMContentLoaded: () => void,
|
||||
* dispatchWindowEvent: (event: string) => void,
|
||||
* getCookieString: () => string,
|
||||
* setCookieString: (value: string) => void,
|
||||
* cleanup: () => void
|
||||
* }}
|
||||
*/
|
||||
export function createDomEnvironment(options = {}) {
  const {
    readyState = 'complete',
    cookie = '',
    includeBody = true,
    bodyHasDarkClass = true
  } = options;

  const originalWindow = globalThis.window;
  const originalDocument = globalThis.document;

  const registry = new Map();
  const documentListeners = new Map();
  const windowListeners = new Map();
  let computedStyleImpl = null;
  let cookieStore = cookie;

  const document = {
    readyState,
    documentElement: new MockElement('html', registry),
    body: includeBody ? new MockElement('body', registry) : null,
    addEventListener(event, handler) {
      documentListeners.set(event, handler);
    },
    removeEventListener(event) {
      documentListeners.delete(event);
    },
    dispatchEvent(event) {
      const handler = documentListeners.get(event);
      if (handler) handler();
    },
    getElementById(id) {
      return registry.get(id) || null;
    },
    querySelector() {
      return null;
    },
    createElement(tagName) {
      return new MockElement(tagName, registry);
    }
  };

  if (document.body && bodyHasDarkClass) {
    document.body.classList.add('dark');
  }

  Object.defineProperty(document, 'cookie', {
    get() {
      return cookieStore;
    },
    set(value) {
      cookieStore = cookieStore ? `${cookieStore}; ${value}` : value;
    }
  });

  const window = {
    document,
    addEventListener(event, handler) {
      windowListeners.set(event, handler);
    },
    removeEventListener(event) {
      windowListeners.delete(event);
    },
    dispatchEvent(event) {
      const handler = windowListeners.get(event);
      if (handler) handler();
    },
    getComputedStyle(target) {
      if (typeof computedStyleImpl === 'function') {
        return computedStyleImpl(target);
      }
      return {
        getPropertyValue() {
          return '';
        }
      };
    }
  };

  globalThis.window = window;
  globalThis.document = document;

  /**
   * Create and optionally register a mock element.
   *
   * @param {string} [tagName='div'] Tag name of the element.
   * @param {string} [id] Optional identifier registered with the document.
   * @returns {MockElement} New mock element instance.
   */
  function createElement(tagName = 'div', id) {
    const element = new MockElement(tagName, registry);
    if (id) {
      element.setAttribute('id', id);
    }
    return element;
  }

  /**
   * Register an element instance so that ``getElementById`` can resolve it.
   *
   * @param {string} id Element identifier.
   * @param {MockElement} element Element instance to register.
   * @returns {void}
   */
  function registerElement(id, element) {
    registry.set(id, element);
  }

  return {
    window,
    document,
    createElement,
    registerElement,
    setComputedStyleImplementation(impl) {
      computedStyleImpl = impl;
    },
    triggerDOMContentLoaded() {
      const handler = documentListeners.get('DOMContentLoaded');
      if (handler) handler();
    },
    dispatchWindowEvent(event) {
      const handler = windowListeners.get(event);
      if (handler) handler();
    },
    getCookieString() {
      return cookieStore;
    },
    setCookieString(value) {
      cookieStore = value;
    },
    cleanup() {
      globalThis.window = originalWindow;
      globalThis.document = originalDocument;
    }
  };
}
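// A minimal usage sketch (not part of the diff above): how a node:test case
// could drive the createDomEnvironment() harness. The relative import path and
// test name are assumptions; only APIs defined by the helper above are used.
import test from 'node:test';
import assert from 'node:assert/strict';
import { createDomEnvironment } from './dom-environment.js';

test('example: registered elements resolve through the mock document', () => {
  const env = createDomEnvironment({ readyState: 'loading', cookie: 'theme=dark' });
  try {
    const button = env.createElement('button', 'saveBtn');
    env.registerElement('saveBtn', button);
    assert.equal(env.document.getElementById('saveBtn'), button);
    env.triggerDOMContentLoaded(); // fires a handler registered for DOMContentLoaded, if any
  } finally {
    env.cleanup(); // always restore the original globalThis.window / document
  }
});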
@@ -0,0 +1,162 @@
|
||||
/**
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { createMapAutoFitController } from '../map-auto-fit-controller.js';
|
||||
|
||||
class ToggleStub extends EventTarget {
|
||||
constructor(checked = true) {
|
||||
super();
|
||||
this.checked = checked;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {Event} event - Event to dispatch to listeners.
|
||||
* @returns {boolean} Dispatch status.
|
||||
*/
|
||||
dispatchEvent(event) {
|
||||
return super.dispatchEvent(event);
|
||||
}
|
||||
}
|
||||
|
||||
class WindowStub {
|
||||
constructor() {
|
||||
this.listeners = new Map();
|
||||
}
|
||||
|
||||
addEventListener(type, listener) {
|
||||
this.listeners.set(type, listener);
|
||||
}
|
||||
|
||||
removeEventListener(type, listener) {
|
||||
const existing = this.listeners.get(type);
|
||||
if (existing === listener) {
|
||||
this.listeners.delete(type);
|
||||
}
|
||||
}
|
||||
|
||||
emit(type) {
|
||||
const listener = this.listeners.get(type);
|
||||
if (listener) listener();
|
||||
}
|
||||
}
|
||||
|
||||
test('recordFit stores and clones the last fit snapshot', () => {
|
||||
const toggle = new ToggleStub(true);
|
||||
const controller = createMapAutoFitController({ toggleEl: toggle, defaultPaddingPx: 20 });
|
||||
|
||||
assert.equal(controller.getLastFit(), null);
|
||||
|
||||
controller.recordFit([[10, 20], [30, 40]], { paddingPx: 12, maxZoom: 9 });
|
||||
const snapshot = controller.getLastFit();
|
||||
assert.ok(snapshot);
|
||||
assert.deepEqual(snapshot.bounds, [[10, 20], [30, 40]]);
|
||||
assert.deepEqual(snapshot.options, { paddingPx: 12, maxZoom: 9 });
|
||||
|
||||
snapshot.bounds[0][0] = -999;
|
||||
snapshot.options.paddingPx = -1;
|
||||
const secondSnapshot = controller.getLastFit();
|
||||
assert.deepEqual(secondSnapshot?.bounds, [[10, 20], [30, 40]]);
|
||||
assert.deepEqual(secondSnapshot?.options, { paddingPx: 12, maxZoom: 9 });
|
||||
});
|
||||
|
||||
|
||||
test('recordFit ignores invalid bounds and normalises fit options', () => {
|
||||
const controller = createMapAutoFitController({ defaultPaddingPx: 16 });
|
||||
|
||||
controller.recordFit(null);
|
||||
assert.equal(controller.getLastFit(), null);
|
||||
|
||||
controller.recordFit([[10, Number.NaN], [20, 30]]);
|
||||
assert.equal(controller.getLastFit(), null);
|
||||
|
||||
controller.recordFit([[10, 11], [12, 13]], { paddingPx: -5, maxZoom: 0 });
|
||||
const snapshot = controller.getLastFit();
|
||||
assert.ok(snapshot);
|
||||
assert.deepEqual(snapshot.options, { paddingPx: 16 });
|
||||
});
|
||||
|
||||
|
||||
test('handleUserInteraction disables auto-fit unless suppressed', () => {
|
||||
const toggle = new ToggleStub(true);
|
||||
let changeEvents = 0;
|
||||
toggle.addEventListener('change', () => {
|
||||
changeEvents += 1;
|
||||
});
|
||||
const controller = createMapAutoFitController({ toggleEl: toggle });
|
||||
|
||||
controller.runAutoFitOperation(() => {
|
||||
assert.equal(controller.handleUserInteraction(), false);
|
||||
assert.equal(toggle.checked, true);
|
||||
});
|
||||
assert.equal(changeEvents, 0);
|
||||
|
||||
assert.equal(controller.handleUserInteraction(), true);
|
||||
assert.equal(toggle.checked, false);
|
||||
assert.equal(changeEvents, 1);
|
||||
|
||||
assert.equal(controller.handleUserInteraction(), false);
|
||||
assert.equal(changeEvents, 1);
|
||||
});
|
||||
|
||||
|
||||
test('isAutoFitEnabled reflects the toggle state', () => {
|
||||
const toggle = new ToggleStub(false);
|
||||
const controller = createMapAutoFitController({ toggleEl: toggle });
|
||||
assert.equal(controller.isAutoFitEnabled(), false);
|
||||
toggle.checked = true;
|
||||
assert.equal(controller.isAutoFitEnabled(), true);
|
||||
});
|
||||
|
||||
|
||||
test('runAutoFitOperation returns callback results and tolerates missing functions', () => {
|
||||
const controller = createMapAutoFitController();
|
||||
assert.equal(controller.runAutoFitOperation(), undefined);
|
||||
let active = false;
|
||||
const result = controller.runAutoFitOperation(() => {
|
||||
active = true;
|
||||
return 42;
|
||||
});
|
||||
assert.equal(active, true);
|
||||
assert.equal(result, 42);
|
||||
});
|
||||
|
||||
|
||||
test('attachResizeListener forwards snapshots and supports teardown', () => {
|
||||
const windowStub = new WindowStub();
|
||||
const controller = createMapAutoFitController({ windowObject: windowStub, defaultPaddingPx: 24 });
|
||||
controller.recordFit([[1, 2], [3, 4]], { paddingPx: 30 });
|
||||
|
||||
let snapshots = [];
|
||||
const detach = controller.attachResizeListener(snapshot => {
|
||||
snapshots.push(snapshot);
|
||||
});
|
||||
|
||||
windowStub.emit('resize');
|
||||
windowStub.emit('orientationchange');
|
||||
assert.equal(snapshots.length, 2);
|
||||
assert.deepEqual(snapshots[0], { bounds: [[1, 2], [3, 4]], options: { paddingPx: 30 } });
|
||||
|
||||
detach();
|
||||
windowStub.emit('resize');
|
||||
assert.equal(snapshots.length, 2);
|
||||
|
||||
const noop = controller.attachResizeListener();
|
||||
assert.equal(typeof noop, 'function');
|
||||
noop();
|
||||
});
|
||||
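// Hedged usage sketch (not part of the diff): wiring the controller to a
// hypothetical map object with a Leaflet-style fitBounds(). The map object and
// the #autoFitToggle element are assumptions; the controller calls mirror the
// tests above.
import { createMapAutoFitController } from '../map-auto-fit-controller.js';

const toggle = document.getElementById('autoFitToggle'); // assumed checkbox element
const controller = createMapAutoFitController({ toggleEl: toggle, defaultPaddingPx: 24 });

function fitTo(bounds, options, map) {
  controller.recordFit(bounds, options);
  // Suppress the user-interaction handler while the fit is programmatic.
  controller.runAutoFitOperation(() => map.fitBounds(bounds, options)); // hypothetical map API
}

// Re-apply the last recorded fit whenever the window resizes or rotates.
const detach = controller.attachResizeListener(snapshot => {
  if (controller.isAutoFitEnabled() && snapshot) {
    fitTo(snapshot.bounds, snapshot.options, window.potatoMap); // window.potatoMap is hypothetical
  }
});
// Call detach() when the map view is torn down.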
@@ -0,0 +1,138 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';

import {
  computeBoundingBox,
  computeBoundsForPoints,
  haversineDistanceKm,
  __testUtils
} from '../map-bounds.js';

const { clampLatitude, clampLongitude, normaliseRange, normaliseLongitudeAround } = __testUtils;

function approximatelyEqual(actual, expected, epsilon = 1e-3) {
  assert.ok(Math.abs(actual - expected) <= epsilon, `${actual} is not within ${epsilon} of ${expected}`);
}

test('clamp helpers bound invalid coordinates', () => {
  assert.equal(clampLatitude(120), 90);
  assert.equal(clampLatitude(-95), -90);
  assert.equal(clampLatitude(Number.POSITIVE_INFINITY), 90);
  assert.equal(clampLatitude(Number.NEGATIVE_INFINITY), -90);

  assert.equal(clampLongitude(200), 180);
  assert.equal(clampLongitude(-220), -180);
  assert.equal(clampLongitude(Number.POSITIVE_INFINITY), 180);
  assert.equal(clampLongitude(Number.NEGATIVE_INFINITY), -180);
});

test('normaliseRange enforces minimum distance for invalid inputs', () => {
  assert.equal(normaliseRange(-1, 2), 2);
  assert.equal(normaliseRange(Number.NaN, 3), 3);
  assert.equal(normaliseRange(0, 1), 1);
  assert.equal(normaliseRange(4, 2), 4);
});

test('computeBoundingBox returns null for invalid centres', () => {
  assert.equal(computeBoundingBox(null, 10), null);
  assert.equal(computeBoundingBox({ lat: 'x', lon: 0 }, 5), null);
  assert.equal(computeBoundingBox({ lat: 0, lon: NaN }, 5), null);
});

test('computeBoundingBox returns symmetric bounds for mid-latitude centre', () => {
  const bounds = computeBoundingBox({ lat: 0, lon: 0 }, 10);
  assert.ok(bounds);
  const [[south, west], [north, east]] = bounds;
  approximatelyEqual(north, -south, 1e-4);
  approximatelyEqual(east, -west, 1e-4);
  assert.ok(north > 0 && east > 0);
});

test('computeBoundingBox clamps longitude span near the poles', () => {
  const bounds = computeBoundingBox({ lat: 89.9, lon: 45 }, 2000);
  assert.ok(bounds);
  const [[south, west], [north, east]] = bounds;
  approximatelyEqual(south, 72.0, 1e-1);
  assert.equal(west, -180);
  assert.equal(east, 180);
  assert.equal(north, 90);
});

test('haversineDistanceKm matches known city distance', () => {
  // Approximate distance between Paris (48.8566, 2.3522) and Berlin (52.52, 13.4050)
  const distance = haversineDistanceKm(48.8566, 2.3522, 52.52, 13.405);
  approximatelyEqual(distance, 878.8, 2);
});

test('computeBoundsForPoints returns null when no valid points exist', () => {
  assert.equal(computeBoundsForPoints([]), null);
  assert.equal(computeBoundsForPoints([[Number.NaN, 0]]), null);
});

test('computeBoundsForPoints expands bounds with padding and minimum radius', () => {
  const bounds = computeBoundsForPoints(
    [
      [38.0, -27.1],
      [38.05, -27.08]
    ],
    { paddingFraction: 0.2, minimumRangeKm: 2 }
  );
  assert.ok(bounds);
  const [[south, west], [north, east]] = bounds;
  assert.ok(north > 38.05);
  assert.ok(south < 38.0);
  assert.ok(east > -27.08);
  assert.ok(west < -27.1);
});

test('computeBoundsForPoints respects the configured minimum range for single points', () => {
  const bounds = computeBoundsForPoints([[12.34, 56.78]], { minimumRangeKm: 5 });
  assert.ok(bounds);
  const [[south], [north]] = bounds;
  assert.ok(north - south > 0.05);
});

test('computeBoundsForPoints preserves tight bounds across the antimeridian', () => {
  const points = [
    [10.0, 179.5],
    [11.2, -179.7],
    [9.5, 179.2]
  ];
  const bounds = computeBoundsForPoints(points, { paddingFraction: 0.1 });
  assert.ok(bounds);
  const [[south, west], [north, east]] = bounds;
  assert.ok(north - south < 10, 'latitude span should remain tight');
  const lonSpan = Math.abs(east - west);
  const normalizedSpan = lonSpan > 180 ? 360 - lonSpan : lonSpan;
  assert.ok(normalizedSpan < 40, 'longitude span should wrap tightly around the dateline');
  for (const [, lon] of points) {
    const adjustedLon = normaliseLongitudeAround(lon, (west + east) / 2);
    assert.ok(adjustedLon >= west - 1e-6 && adjustedLon <= east + 1e-6, 'point longitude should lie within bounds');
  }
  assert.ok(east > 180 || west < -180, 'bounds should extend beyond the canonical range when necessary');
});
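// Hedged usage sketch (not part of the diff): turning a list of node positions
// into fit bounds with the helpers exercised above. The node list and option
// values are illustrative only.
import { computeBoundsForPoints, haversineDistanceKm } from '../map-bounds.js';

const nodePositions = [[52.52, 13.405], [52.5, 13.3], [52.48, 13.45]]; // [lat, lon] pairs (sample data)
const fitBounds = computeBoundsForPoints(nodePositions, { paddingFraction: 0.15, minimumRangeKm: 2 });
if (fitBounds) {
  const [[south, west], [north, east]] = fitBounds;
  const diagonalKm = haversineDistanceKm(south, west, north, east);
  console.log(`fit bounds span roughly ${diagonalKm.toFixed(1)} km corner to corner`);
}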
@@ -0,0 +1,244 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { attachNodeInfoRefreshToMarker, overlayToPopupNode } from '../map-marker-node-info.js';
|
||||
|
||||
function createFakeMarker(anchor) {
|
||||
const handlers = {};
|
||||
return {
|
||||
handlers,
|
||||
on(name, handler) {
|
||||
if (!handlers[name]) handlers[name] = [];
|
||||
handlers[name].push(handler);
|
||||
return this;
|
||||
},
|
||||
getElement() {
|
||||
return anchor;
|
||||
},
|
||||
trigger(name, payload) {
|
||||
for (const handler of handlers[name] || []) {
|
||||
handler(payload);
|
||||
}
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
test('attachNodeInfoRefreshToMarker refreshes markers with merged overlay details', async () => {
|
||||
const anchor = { id: 'anchor-el' };
|
||||
const marker = createFakeMarker(anchor);
|
||||
const popupUpdates = [];
|
||||
const detailCalls = [];
|
||||
let prevented = false;
|
||||
let stopped = false;
|
||||
let token = 0;
|
||||
const refreshCalls = [];
|
||||
|
||||
attachNodeInfoRefreshToMarker({
|
||||
marker,
|
||||
getOverlayFallback: () => ({ nodeId: '!foo', shortName: 'Foo', role: 'CLIENT', neighbors: [] }),
|
||||
refreshNodeInformation: async reference => {
|
||||
refreshCalls.push(reference);
|
||||
return { battery: 55.5, telemetryTime: 123, neighbors: [{ neighbor_id: '!bar', snr: 9.5 }] };
|
||||
},
|
||||
mergeOverlayDetails: (primary, fallback) => ({ ...fallback, ...primary }),
|
||||
createRequestToken: el => {
|
||||
assert.equal(el, anchor);
|
||||
return ++token;
|
||||
},
|
||||
isTokenCurrent: (el, candidate) => {
|
||||
assert.equal(el, anchor);
|
||||
return candidate === token;
|
||||
},
|
||||
showLoading: (el, info) => {
|
||||
assert.equal(el, anchor);
|
||||
assert.equal(info.nodeId, '!foo');
|
||||
},
|
||||
showDetails: (el, info) => {
|
||||
detailCalls.push({ el, info });
|
||||
},
|
||||
showError: () => {
|
||||
assert.fail('showError should not be invoked on success');
|
||||
},
|
||||
updatePopup: info => {
|
||||
popupUpdates.push(info);
|
||||
},
|
||||
});
|
||||
|
||||
const clickEvent = {
|
||||
originalEvent: {
|
||||
preventDefault() {
|
||||
prevented = true;
|
||||
},
|
||||
stopPropagation() {
|
||||
stopped = true;
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
marker.trigger('click', clickEvent);
|
||||
await new Promise(resolve => setImmediate(resolve));
|
||||
|
||||
assert.equal(prevented, true);
|
||||
assert.equal(stopped, true);
|
||||
assert.equal(refreshCalls.length, 1);
|
||||
assert.deepEqual(refreshCalls[0], {
|
||||
nodeId: '!foo',
|
||||
fallback: { nodeId: '!foo', shortName: 'Foo', role: 'CLIENT', neighbors: [] },
|
||||
});
|
||||
assert.ok(popupUpdates.length >= 1);
|
||||
const merged = popupUpdates[popupUpdates.length - 1];
|
||||
assert.equal(merged.battery, 55.5);
|
||||
assert.equal(merged.telemetryTime, 123);
|
||||
assert.equal(detailCalls.length, 1);
|
||||
assert.equal(detailCalls[0].el, anchor);
|
||||
assert.equal(detailCalls[0].info.battery, 55.5);
|
||||
});
|
||||
|
||||
test('attachNodeInfoRefreshToMarker surfaces errors with fallback overlays', async () => {
|
||||
const anchor = { id: 'anchor' };
|
||||
const marker = createFakeMarker(anchor);
|
||||
let token = 0;
|
||||
let errorCaptured = null;
|
||||
let detailCalls = 0;
|
||||
let updateCalls = 0;
|
||||
|
||||
attachNodeInfoRefreshToMarker({
|
||||
marker,
|
||||
getOverlayFallback: () => ({ nodeId: '!oops', shortName: 'Oops' }),
|
||||
refreshNodeInformation: async () => {
|
||||
throw new Error('boom');
|
||||
},
|
||||
mergeOverlayDetails: (primary, fallback) => ({ ...fallback, ...primary }),
|
||||
createRequestToken: el => {
|
||||
assert.equal(el, anchor);
|
||||
return ++token;
|
||||
},
|
||||
isTokenCurrent: (el, candidate) => {
|
||||
assert.equal(el, anchor);
|
||||
return candidate === token;
|
||||
},
|
||||
showLoading: () => {},
|
||||
showDetails: () => {
|
||||
detailCalls += 1;
|
||||
},
|
||||
showError: (el, info, error) => {
|
||||
assert.equal(el, anchor);
|
||||
assert.equal(info.nodeId, '!oops');
|
||||
errorCaptured = error;
|
||||
},
|
||||
updatePopup: () => {
|
||||
updateCalls += 1;
|
||||
},
|
||||
});
|
||||
|
||||
marker.trigger('click', { originalEvent: {} });
|
||||
await new Promise(resolve => setImmediate(resolve));
|
||||
|
||||
assert.ok(errorCaptured instanceof Error);
|
||||
assert.equal(errorCaptured.message, 'boom');
|
||||
assert.equal(detailCalls, 0);
|
||||
assert.equal(updateCalls, 2);
|
||||
});
|
||||
|
||||
test('attachNodeInfoRefreshToMarker skips refresh when identifiers are missing', async () => {
|
||||
const anchor = { id: 'anchor' };
|
||||
const marker = createFakeMarker(anchor);
|
||||
let token = 0;
|
||||
let refreshed = false;
|
||||
let detailsShown = 0;
|
||||
|
||||
attachNodeInfoRefreshToMarker({
|
||||
marker,
|
||||
getOverlayFallback: () => ({ shortName: 'Unknown' }),
|
||||
refreshNodeInformation: async () => {
|
||||
refreshed = true;
|
||||
},
|
||||
mergeOverlayDetails: (primary, fallback) => ({ ...fallback, ...primary }),
|
||||
createRequestToken: el => {
|
||||
assert.equal(el, anchor);
|
||||
return ++token;
|
||||
},
|
||||
isTokenCurrent: (el, candidate) => {
|
||||
assert.equal(el, anchor);
|
||||
return candidate === token;
|
||||
},
|
||||
showLoading: () => {
|
||||
assert.fail('showLoading should not run without identifiers');
|
||||
},
|
||||
showDetails: (el, info) => {
|
||||
assert.equal(el, anchor);
|
||||
assert.equal(info.shortName, 'Unknown');
|
||||
detailsShown += 1;
|
||||
},
|
||||
});
|
||||
|
||||
marker.trigger('click', { originalEvent: {} });
|
||||
await new Promise(resolve => setImmediate(resolve));
|
||||
|
||||
assert.equal(refreshed, false);
|
||||
assert.equal(detailsShown, 1);
|
||||
});
|
||||
|
||||
test('attachNodeInfoRefreshToMarker honours shouldHandleClick predicate', async () => {
|
||||
const marker = createFakeMarker({ id: 'anchor' });
|
||||
let token = 0;
|
||||
let refreshed = false;
|
||||
|
||||
attachNodeInfoRefreshToMarker({
|
||||
marker,
|
||||
getOverlayFallback: () => ({ nodeId: '!skip' }),
|
||||
refreshNodeInformation: async () => {
|
||||
refreshed = true;
|
||||
},
|
||||
mergeOverlayDetails: (primary, fallback) => ({ ...fallback, ...primary }),
|
||||
createRequestToken: () => ++token,
|
||||
isTokenCurrent: (el, candidate) => candidate === token,
|
||||
shouldHandleClick: () => false,
|
||||
});
|
||||
|
||||
marker.trigger('click', { originalEvent: {} });
|
||||
await new Promise(resolve => setImmediate(resolve));
|
||||
|
||||
assert.equal(refreshed, false);
|
||||
});
|
||||
|
||||
test('overlayToPopupNode normalises raw overlay payloads', () => {
|
||||
const overlay = {
|
||||
nodeId: '!foo',
|
||||
nodeNum: 42,
|
||||
shortName: 'Foo',
|
||||
role: 'ROUTER',
|
||||
battery: '77.5',
|
||||
neighbors: [
|
||||
{ neighbor_id: '!bar', snr: '12.5', neighbor_short_name: 'Bar' },
|
||||
null,
|
||||
],
|
||||
};
|
||||
|
||||
const popupNode = overlayToPopupNode(overlay);
|
||||
assert.equal(popupNode.node_id, '!foo');
|
||||
assert.equal(popupNode.node_num, 42);
|
||||
assert.equal(popupNode.short_name, 'Foo');
|
||||
assert.equal(popupNode.role, 'ROUTER');
|
||||
assert.equal(popupNode.battery_level, 77.5);
|
||||
assert.equal(Array.isArray(popupNode.neighbors), true);
|
||||
assert.equal(popupNode.neighbors.length, 1);
|
||||
assert.equal(popupNode.neighbors[0].node.node_id, '!bar');
|
||||
assert.equal(popupNode.neighbors[0].snr, 12.5);
|
||||
});
|
||||
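// Hedged usage sketch (not part of the diff): normalising an overlay payload
// into the popup node shape with overlayToPopupNode(), mirroring the
// assertions above; the overlay values are sample data.
import { overlayToPopupNode } from '../map-marker-node-info.js';

const popupNode = overlayToPopupNode({
  nodeId: '!abcd1234',
  shortName: 'PTA1',
  role: 'ROUTER',
  battery: '88',
  neighbors: [{ neighbor_id: '!beef', snr: '7.25' }],
});
console.log(popupNode.node_id, popupNode.battery_level, popupNode.neighbors.length); // "!abcd1234" 88 1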
@@ -0,0 +1,123 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import test from 'node:test';
import assert from 'node:assert/strict';

import { createMessageNodeHydrator } from '../message-node-hydrator.js';

/**
 * Capture warning invocations produced during a test run.
 */
class LoggerStub {
  constructor() {
    this.messages = [];
  }

  /**
   * Record a warning message for later inspection.
   *
   * @param {...*} args Warning arguments.
   * @returns {void}
   */
  warn(...args) {
    this.messages.push(args);
  }
}

test('hydrate attaches cached nodes without performing lookups', async () => {
  const node = { node_id: '!abc', short_name: 'Node' };
  const nodesById = new Map([[node.node_id, node]]);
  const hydrator = createMessageNodeHydrator({
    fetchNodeById: async () => {
      throw new Error('fetch should not be called');
    },
    applyNodeFallback: () => {}
  });

  const messages = [{ node_id: '!abc', text: 'Hello' }];
  const result = await hydrator.hydrate(messages, nodesById);

  assert.equal(result.length, 1);
  assert.strictEqual(result[0].node, node);
  assert.equal(nodesById.size, 1);
});

test('hydrate fetches missing nodes once and caches the result', async () => {
  let fetchCalls = 0;
  const fetchedNode = { node_id: '!fetch', short_name: 'Fetched' };
  const hydrator = createMessageNodeHydrator({
    fetchNodeById: async id => {
      fetchCalls += 1;
      assert.equal(id, '!fetch');
      return { ...fetchedNode };
    },
    applyNodeFallback: () => {}
  });
  const nodesById = new Map();
  const messages = [{ from_id: '!fetch', text: 'one' }, { node_id: '!fetch', text: 'two' }];

  const result = await hydrator.hydrate(messages, nodesById);

  assert.equal(fetchCalls, 1);
  assert.strictEqual(nodesById.get('!fetch').short_name, 'Fetched');
  assert.strictEqual(result[0].node, nodesById.get('!fetch'));
  assert.strictEqual(result[1].node, nodesById.get('!fetch'));
});

test('hydrate falls back to placeholders when lookups fail', async () => {
  const logger = new LoggerStub();
  let fallbackCalls = 0;
  const hydrator = createMessageNodeHydrator({
    fetchNodeById: async () => null,
    applyNodeFallback: node => {
      fallbackCalls += 1;
      if (!node.short_name) {
        node.short_name = 'Fallback';
      }
    },
    logger
  });
  const nodesById = new Map();
  const messages = [{ from_id: '!missing', text: 'hi' }];

  const result = await hydrator.hydrate(messages, nodesById);

  assert.equal(nodesById.has('!missing'), false);
  assert.equal(fallbackCalls, 1);
  assert.ok(result[0].node);
  assert.equal(result[0].node.short_name, 'Fallback');
  assert.equal(logger.messages.length, 0);
});

test('hydrate records warning when fetch rejects', async () => {
  const logger = new LoggerStub();
  const hydrator = createMessageNodeHydrator({
    fetchNodeById: async () => {
      throw new Error('network error');
    },
    applyNodeFallback: () => {},
    logger
  });
  const nodesById = new Map();
  const messages = [{ from_id: '!warn', text: 'warn' }];

  const result = await hydrator.hydrate(messages, nodesById);

  assert.equal(result[0].node.node_id, '!warn');
  assert.ok(logger.messages.length >= 1);
  assert.equal(nodesById.has('!warn'), false);
});
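// Hedged usage sketch (not part of the diff): hydrating fetched messages with
// node records before rendering. The /api/nodes endpoint shape and the
// placeholder short name are assumptions consistent with the tests above.
import { createMessageNodeHydrator } from '../message-node-hydrator.js';

const hydrator = createMessageNodeHydrator({
  fetchNodeById: async id => {
    const response = await fetch(`/api/nodes/${encodeURIComponent(id)}`); // assumed endpoint
    return response.ok ? response.json() : null;
  },
  applyNodeFallback: node => {
    if (!node.short_name) node.short_name = '????'; // placeholder for unknown senders
  },
  logger: console,
});

const nodesById = new Map();
const hydrated = await hydrator.hydrate([{ from_id: '!abc', text: 'hi' }], nodesById);
console.log(hydrated[0].node.short_name);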
@@ -0,0 +1,348 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { refreshNodeInformation, __testUtils } from '../node-details.js';
|
||||
|
||||
const {
|
||||
toTrimmedString,
|
||||
toFiniteNumber,
|
||||
extractString,
|
||||
extractNumber,
|
||||
assignString,
|
||||
assignNumber,
|
||||
mergeModemMetadata,
|
||||
mergeNodeFields,
|
||||
mergeTelemetry,
|
||||
mergePosition,
|
||||
parseFallback,
|
||||
normalizeReference,
|
||||
} = __testUtils;
|
||||
|
||||
function createResponse(status, body) {
|
||||
return {
|
||||
status,
|
||||
ok: status >= 200 && status < 300,
|
||||
json: async () => body,
|
||||
};
|
||||
}
|
||||
|
||||
test('refreshNodeInformation merges telemetry metrics when the base node lacks them', async () => {
|
||||
const calls = [];
|
||||
const responses = new Map([
|
||||
['/api/nodes/!test', createResponse(200, {
|
||||
node_id: '!test',
|
||||
short_name: 'TST',
|
||||
battery_level: null,
|
||||
last_heard: 1_000,
|
||||
modem_preset: 'MediumFast',
|
||||
lora_freq: '868.1',
|
||||
})],
|
||||
['/api/telemetry/!test?limit=1', createResponse(200, [{
|
||||
node_id: '!test',
|
||||
battery_level: 73.5,
|
||||
rx_time: 1_200,
|
||||
telemetry_time: 1_180,
|
||||
voltage: 4.1,
|
||||
}])],
|
||||
['/api/positions/!test?limit=1', createResponse(200, [{
|
||||
node_id: '!test',
|
||||
latitude: 52.5,
|
||||
longitude: 13.4,
|
||||
rx_time: 1_100,
|
||||
}])],
|
||||
['/api/neighbors/!test?limit=1000', createResponse(200, [{
|
||||
node_id: '!test',
|
||||
neighbor_id: '!peer',
|
||||
snr: 9.5,
|
||||
rx_time: 1_150,
|
||||
}])],
|
||||
]);
|
||||
const fetchImpl = async (url, options) => {
|
||||
calls.push({ url, options });
|
||||
const response = responses.get(url);
|
||||
if (!response) {
|
||||
return createResponse(404, { error: 'not found' });
|
||||
}
|
||||
return response;
|
||||
};
|
||||
|
||||
const fallback = { shortName: 'fallback', role: 'CLIENT' };
|
||||
const node = await refreshNodeInformation({ nodeId: '!test', fallback }, { fetchImpl });
|
||||
|
||||
assert.equal(node.nodeId, '!test');
|
||||
assert.equal(node.shortName, 'TST');
|
||||
assert.equal(node.battery, 73.5);
|
||||
assert.equal(node.voltage, 4.1);
|
||||
assert.equal(node.role, 'CLIENT');
|
||||
assert.equal(node.modemPreset, 'MediumFast');
|
||||
assert.equal(node.loraFreq, 868.1);
|
||||
assert.equal(node.lastHeard, 1_200);
|
||||
assert.equal(node.telemetryTime, 1_180);
|
||||
assert.equal(node.latitude, 52.5);
|
||||
assert.equal(node.longitude, 13.4);
|
||||
assert.deepEqual(node.neighbors, [{
|
||||
node_id: '!test',
|
||||
neighbor_id: '!peer',
|
||||
snr: 9.5,
|
||||
rx_time: 1_150,
|
||||
}]);
|
||||
assert.ok(node.rawSources);
|
||||
assert.ok(node.rawSources.node);
|
||||
assert.ok(node.rawSources.telemetry);
|
||||
assert.ok(node.rawSources.position);
|
||||
|
||||
assert.equal(calls.length, 4);
|
||||
calls.forEach(call => {
|
||||
assert.deepEqual(call.options, { cache: 'no-store' });
|
||||
});
|
||||
});
|
||||
|
||||
test('refreshNodeInformation preserves fallback metrics when telemetry is unavailable', async () => {
|
||||
const responses = new Map([
|
||||
['/api/nodes/42', createResponse(200, {
|
||||
node_id: '!num',
|
||||
short_name: 'NUM',
|
||||
})],
|
||||
['/api/telemetry/42?limit=1', createResponse(404, { error: 'not found' })],
|
||||
['/api/positions/42?limit=1', createResponse(404, { error: 'not found' })],
|
||||
['/api/neighbors/42?limit=1000', createResponse(404, { error: 'not found' })],
|
||||
]);
|
||||
const fetchImpl = async (url, options) => {
|
||||
const response = responses.get(url);
|
||||
return response ?? createResponse(404, { error: 'not found' });
|
||||
};
|
||||
|
||||
const fallback = { nodeNum: 42, battery: 12.5, role: 'CLIENT', modemPreset: 'FallbackPreset', loraFreq: 915 };
|
||||
const node = await refreshNodeInformation({ nodeNum: 42, fallback }, { fetchImpl });
|
||||
|
||||
assert.equal(node.nodeId, '!num');
|
||||
assert.equal(node.nodeNum, 42);
|
||||
assert.equal(node.shortName, 'NUM');
|
||||
assert.equal(node.battery, 12.5);
|
||||
assert.equal(node.role, 'CLIENT');
|
||||
assert.equal(node.modemPreset, 'FallbackPreset');
|
||||
assert.equal(node.loraFreq, 915);
|
||||
assert.equal(Array.isArray(node.neighbors) && node.neighbors.length, 0);
|
||||
});
|
||||
|
||||
test('refreshNodeInformation requires a node identifier', async () => {
|
||||
await assert.rejects(() => refreshNodeInformation(null), /node identifier/i);
|
||||
});
|
||||
|
||||
test('refreshNodeInformation handles missing node records by falling back to telemetry data', async () => {
|
||||
const responses = new Map([
|
||||
['/api/nodes/!missing', createResponse(404, { error: 'not found' })],
|
||||
['/api/telemetry/!missing?limit=1', createResponse(200, [{
|
||||
node_id: '!missing',
|
||||
node_num: 77,
|
||||
battery_level: 66,
|
||||
rx_time: 2_000,
|
||||
telemetry_time: 1_950,
|
||||
}])],
|
||||
['/api/positions/!missing?limit=1', createResponse(200, [{
|
||||
node_id: '!missing',
|
||||
latitude: 1.23,
|
||||
longitude: 3.21,
|
||||
altitude: 42,
|
||||
position_time: 1_960,
|
||||
rx_time: 1_970,
|
||||
}])],
|
||||
['/api/neighbors/!missing?limit=1000', createResponse(200, [null, 'skip', {
|
||||
node_id: '!missing',
|
||||
neighbor_id: '!ally',
|
||||
snr: 8.5,
|
||||
}])],
|
||||
]);
|
||||
|
||||
const fetchImpl = async url => responses.get(url) ?? createResponse(404, { error: 'not found' });
|
||||
|
||||
const node = await refreshNodeInformation({ nodeId: '!missing' }, { fetchImpl });
|
||||
|
||||
assert.equal(node.nodeId, '!missing');
|
||||
assert.equal(node.nodeNum, 77);
|
||||
assert.equal(node.battery, 66);
|
||||
assert.equal(node.lastHeard, 2_000);
|
||||
assert.equal(node.telemetryTime, 1_950);
|
||||
assert.equal(node.positionTime, 1_960);
|
||||
assert.equal(node.latitude, 1.23);
|
||||
assert.equal(node.longitude, 3.21);
|
||||
assert.equal(node.altitude, 42);
|
||||
assert.equal(node.role, 'CLIENT');
|
||||
assert.deepEqual(node.neighbors, [{
|
||||
node_id: '!missing',
|
||||
neighbor_id: '!ally',
|
||||
snr: 8.5,
|
||||
}]);
|
||||
});
|
||||
|
||||
test('refreshNodeInformation enforces a fetch implementation', async () => {
|
||||
const originalFetch = globalThis.fetch;
|
||||
// eslint-disable-next-line no-global-assign
|
||||
globalThis.fetch = undefined;
|
||||
try {
|
||||
await assert.rejects(() => refreshNodeInformation('!test', { fetchImpl: null }), /fetch implementation/i);
|
||||
} finally {
|
||||
// eslint-disable-next-line no-global-assign
|
||||
globalThis.fetch = originalFetch;
|
||||
}
|
||||
});
|
||||
|
||||
test('mergeModemMetadata respects preference flags', () => {
|
||||
const target = {};
|
||||
mergeModemMetadata(target, { modem_preset: 'Base', lora_freq: '915.5' });
|
||||
assert.equal(target.modemPreset, 'Base');
|
||||
assert.equal(target.loraFreq, 915.5);
|
||||
|
||||
mergeModemMetadata(target, { modem_preset: 'New', lora_freq: '433' }, { preferExisting: true });
|
||||
assert.equal(target.modemPreset, 'Base');
|
||||
assert.equal(target.loraFreq, 915.5);
|
||||
|
||||
mergeModemMetadata(target, { modem_preset: 'Updated', lora_freq: '433' }, { preferExisting: false });
|
||||
assert.equal(target.modemPreset, 'Updated');
|
||||
assert.equal(target.loraFreq, 433);
|
||||
});
|
||||
|
||||
test('helper utilities normalise primitive values', () => {
|
||||
assert.equal(toTrimmedString(' hello '), 'hello');
|
||||
assert.equal(toTrimmedString(''), null);
|
||||
assert.equal(toTrimmedString(null), null);
|
||||
|
||||
assert.equal(toFiniteNumber('42.5'), 42.5);
|
||||
assert.equal(toFiniteNumber('bad'), null);
|
||||
assert.equal(toFiniteNumber(Infinity), null);
|
||||
|
||||
assert.equal(extractString({ name: ' Alice ' }, ['missing', 'name']), 'Alice');
|
||||
assert.equal(extractString(null, ['name']), null);
|
||||
|
||||
assert.equal(extractNumber({ value: ' 13 ' }, ['missing', 'value']), 13);
|
||||
assert.equal(extractNumber({}, ['value']), null);
|
||||
});
|
||||
|
||||
test('assign helpers respect preferExisting semantics', () => {
|
||||
const target = {};
|
||||
assignString(target, 'name', ' primary ');
|
||||
assignString(target, 'name', 'secondary', { preferExisting: true });
|
||||
assignString(target, 'description', '');
|
||||
assignNumber(target, 'count', '25');
|
||||
assignNumber(target, 'count', 13, { preferExisting: true });
|
||||
assignNumber(target, 'ignored', 'oops');
|
||||
|
||||
assert.deepEqual(target, { name: 'primary', count: 25 });
|
||||
});
|
||||
|
||||
test('merge helpers combine node, telemetry, and position data', () => {
|
||||
const node = {};
|
||||
mergeNodeFields(node, {
|
||||
node_id: '!node',
|
||||
node_num: 55,
|
||||
short_name: 'NODE',
|
||||
battery_level: null,
|
||||
last_heard: 1_000,
|
||||
position_time: 900,
|
||||
});
|
||||
|
||||
node.battery = 50;
|
||||
|
||||
mergeTelemetry(node, {
|
||||
node_id: '!node',
|
||||
battery_level: 75,
|
||||
voltage: 3.8,
|
||||
rx_time: 1_200,
|
||||
rx_iso: '2025-01-01T00:00:00Z',
|
||||
telemetry_time: 1_150,
|
||||
});
|
||||
|
||||
mergePosition(node, {
|
||||
node_id: '!node',
|
||||
latitude: 52.5,
|
||||
longitude: 13.4,
|
||||
altitude: 80,
|
||||
position_time: 1_180,
|
||||
position_time_iso: '2025-01-01T00:19:40Z',
|
||||
rx_time: 1_100,
|
||||
rx_iso: '2025-01-01T00:18:20Z',
|
||||
});
|
||||
|
||||
assert.equal(node.nodeId, '!node');
|
||||
assert.equal(node.nodeNum, 55);
|
||||
assert.equal(node.shortName, 'NODE');
|
||||
assert.equal(node.battery, 50);
|
||||
assert.equal(node.voltage, 3.8);
|
||||
assert.equal(node.lastHeard, 1_200);
|
||||
assert.equal(node.lastSeenIso, '2025-01-01T00:00:00Z');
|
||||
assert.equal(node.telemetryTime, 1_150);
|
||||
assert.equal(node.positionTime, 1_180);
|
||||
assert.equal(node.positionTimeIso, '2025-01-01T00:19:40Z');
|
||||
assert.equal(node.latitude, 52.5);
|
||||
assert.equal(node.longitude, 13.4);
|
||||
assert.equal(node.altitude, 80);
|
||||
assert.ok(node.telemetry);
|
||||
assert.ok(node.position);
|
||||
});
|
||||
|
||||
test('normalizeReference extracts identifiers and tolerates malformed fallback payloads', () => {
|
||||
const originalWarn = console.warn;
|
||||
const warnings = [];
|
||||
console.warn = (...args) => warnings.push(args);
|
||||
|
||||
try {
|
||||
const parsed = normalizeReference({
|
||||
nodeId: ' ',
|
||||
fallback: '{"node_id":"!parsed","nodeNum":99}',
|
||||
});
|
||||
assert.equal(parsed.nodeId, '!parsed');
|
||||
assert.equal(parsed.nodeNum, 99);
|
||||
assert.ok(parsed.fallback);
|
||||
|
||||
const invalid = normalizeReference({ fallback: '{not json}' });
|
||||
assert.equal(invalid.nodeId, null);
|
||||
assert.equal(invalid.nodeNum, null);
|
||||
assert.equal(invalid.fallback, null);
|
||||
|
||||
const strRef = normalizeReference('!direct');
|
||||
assert.equal(strRef.nodeId, '!direct');
|
||||
assert.equal(strRef.nodeNum, null);
|
||||
|
||||
const numRef = normalizeReference(57);
|
||||
assert.equal(numRef.nodeId, null);
|
||||
assert.equal(numRef.nodeNum, 57);
|
||||
|
||||
const emptyRef = normalizeReference(undefined);
|
||||
assert.equal(emptyRef.nodeId, null);
|
||||
assert.equal(emptyRef.nodeNum, null);
|
||||
assert.equal(emptyRef.fallback, null);
|
||||
} finally {
|
||||
console.warn = originalWarn;
|
||||
}
|
||||
|
||||
assert.ok(warnings.length >= 1);
|
||||
});
|
||||
|
||||
test('parseFallback duplicates object references and rejects primitives', () => {
|
||||
const fallbackObject = { nodeId: '!object' };
|
||||
const parsedObject = parseFallback(fallbackObject);
|
||||
assert.notEqual(parsedObject, fallbackObject);
|
||||
assert.deepEqual(parsedObject, fallbackObject);
|
||||
|
||||
const parsedString = parseFallback('{"nodeId":"!string"}');
|
||||
assert.ok(parsedString);
|
||||
assert.equal(parsedString.nodeId, '!string');
|
||||
assert.equal(parseFallback('not json'), null);
|
||||
assert.equal(parseFallback(42), null);
|
||||
});
|
||||
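// Hedged usage sketch (not part of the diff): refreshing a node's details in
// the browser with the default fetch implementation. The fallback payload is
// illustrative and only fields asserted in the tests above are read.
import { refreshNodeInformation } from '../node-details.js';

const node = await refreshNodeInformation({
  nodeId: '!abcd1234',
  fallback: { shortName: 'PTA1', role: 'CLIENT' },
});
console.log(node.shortName, node.battery, node.lastHeard, node.neighbors.length);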
@@ -0,0 +1,65 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { describe, it } from 'node:test';
import assert from 'node:assert/strict';
import { extractModemMetadata, formatLoraFrequencyMHz, formatModemDisplay, __testUtils } from '../node-modem-metadata.js';

describe('node-modem-metadata', () => {
  it('extracts modem preset and frequency from mixed payloads', () => {
    const payload = {
      modem_preset: ' MediumFast ',
      lora_freq: '915',
    };
    assert.deepEqual(extractModemMetadata(payload), { modemPreset: 'MediumFast', loraFreq: 915 });
  });

  it('falls back across naming conventions when extracting metadata', () => {
    const payload = {
      modemPreset: 'LongSlow',
      frequency: 868,
    };
    assert.deepEqual(extractModemMetadata(payload), { modemPreset: 'LongSlow', loraFreq: 868 });
  });

  it('ignores invalid modem metadata entries', () => {
    assert.deepEqual(extractModemMetadata({ modem_preset: ' ', lora_freq: 'NaN' }), {
      modemPreset: null,
      loraFreq: null,
    });
  });

  it('formats positive frequencies with MHz suffix', () => {
    assert.equal(formatLoraFrequencyMHz(915), '915MHz');
    assert.equal(formatLoraFrequencyMHz(867.5), '867.5MHz');
    assert.equal(formatLoraFrequencyMHz('433.1234'), '433.123MHz');
    assert.equal(formatLoraFrequencyMHz(null), null);
  });

  it('combines preset and frequency for overlay display', () => {
    assert.equal(formatModemDisplay('MediumFast', 868), 'MediumFast (868MHz)');
    assert.equal(formatModemDisplay('ShortSlow', null), 'ShortSlow');
    assert.equal(formatModemDisplay(null, 433), '433MHz');
    assert.equal(formatModemDisplay(undefined, undefined), null);
  });

  it('exposes trimmed string helper for targeted assertions', () => {
    const { toTrimmedString } = __testUtils;
    assert.equal(toTrimmedString(' hello '), 'hello');
    assert.equal(toTrimmedString(''), null);
    assert.equal(toTrimmedString(null), null);
  });
});
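// Hedged usage sketch (not part of the diff): building the overlay label from
// a raw node payload; the payload fields mirror those accepted by the tests
// above and the sample values are illustrative.
import { extractModemMetadata, formatModemDisplay } from '../node-modem-metadata.js';

const { modemPreset, loraFreq } = extractModemMetadata({ modem_preset: 'MediumFast', lora_freq: '868' });
const label = formatModemDisplay(modemPreset, loraFreq); // "MediumFast (868MHz)"
console.log(label);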
@@ -0,0 +1,397 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
|
||||
import { createShortInfoOverlayStack } from '../short-info-overlay-manager.js';
|
||||
|
||||
/**
|
||||
* Minimal DOM element implementation tailored for overlay manager tests.
|
||||
*/
|
||||
class StubElement {
|
||||
/**
|
||||
* @param {string} [tagName='div'] Element tag identifier.
|
||||
*/
|
||||
constructor(tagName = 'div') {
|
||||
this.tagName = tagName.toUpperCase();
|
||||
this.children = [];
|
||||
this.parentNode = null;
|
||||
this.style = {};
|
||||
this.dataset = {};
|
||||
this.className = '';
|
||||
this.innerHTML = '';
|
||||
this.attributes = new Map();
|
||||
this.eventHandlers = new Map();
|
||||
this._rect = { left: 0, top: 0, width: 120, height: 80 };
|
||||
}
|
||||
|
||||
/**
|
||||
* Append ``child`` to the element.
|
||||
*
|
||||
* @param {StubElement} child Child node to append.
|
||||
* @returns {StubElement} Appended node.
|
||||
*/
|
||||
appendChild(child) {
|
||||
this.children.push(child);
|
||||
child.parentNode = this;
|
||||
return child;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove ``child`` from the element.
|
||||
*
|
||||
* @param {StubElement} child Child node to remove.
|
||||
* @returns {void}
|
||||
*/
|
||||
removeChild(child) {
|
||||
const idx = this.children.indexOf(child);
|
||||
if (idx >= 0) {
|
||||
this.children.splice(idx, 1);
|
||||
child.parentNode = null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove the element from its parent tree.
|
||||
*
|
||||
* @returns {void}
|
||||
*/
|
||||
remove() {
|
||||
if (this.parentNode) {
|
||||
this.parentNode.removeChild(this);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Assign an attribute to the element.
|
||||
*
|
||||
* @param {string} name Attribute identifier.
|
||||
* @param {string} value Stored attribute value.
|
||||
* @returns {void}
|
||||
*/
|
||||
setAttribute(name, value) {
|
||||
this.attributes.set(name, String(value));
|
||||
if (name === 'class' || name === 'className') {
|
||||
this.className = String(value);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove an attribute from the element.
|
||||
*
|
||||
* @param {string} name Attribute identifier.
|
||||
* @returns {void}
|
||||
*/
|
||||
removeAttribute(name) {
|
||||
this.attributes.delete(name);
|
||||
if (name === 'class' || name === 'className') {
|
||||
this.className = '';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Register an event handler for the element.
|
||||
*
|
||||
* @param {string} event Event identifier.
|
||||
* @param {Function} handler Handler invoked during tests.
|
||||
* @returns {void}
|
||||
*/
|
||||
addEventListener(event, handler) {
|
||||
this.eventHandlers.set(event, handler);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the first descendant matching a simple class selector.
|
||||
*
|
||||
* @param {string} selector CSS selector (class only).
|
||||
* @returns {?StubElement} Matching element or ``null``.
|
||||
*/
|
||||
querySelector(selector) {
|
||||
if (!selector || selector[0] !== '.') {
|
||||
return null;
|
||||
}
|
||||
const className = selector.slice(1);
|
||||
return this._findByClass(className);
|
||||
}
|
||||
|
||||
/**
|
||||
* Recursively search for an element with ``className``.
|
||||
*
|
||||
* @param {string} className Class identifier to match.
|
||||
* @returns {?StubElement} Matching element or ``null``.
|
||||
*/
|
||||
_findByClass(className) {
|
||||
const classes = (this.className || '').split(/\s+/).filter(Boolean);
|
||||
if (classes.includes(className)) {
|
||||
return this;
|
||||
}
|
||||
for (const child of this.children) {
|
||||
const found = child._findByClass(className);
|
||||
if (found) {
|
||||
return found;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine whether ``candidate`` is a descendant of the element.
|
||||
*
|
||||
* @param {StubElement} candidate Potential child node.
|
||||
* @returns {boolean} ``true`` when the node is contained within the element.
|
||||
*/
|
||||
contains(candidate) {
|
||||
if (this === candidate) {
|
||||
return true;
|
||||
}
|
||||
for (const child of this.children) {
|
||||
if (child.contains(candidate)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the mock bounding rectangle for the element.
|
||||
*
|
||||
* @returns {{ left: number, top: number, width: number, height: number }}
|
||||
*/
|
||||
getBoundingClientRect() {
|
||||
return { ...this._rect };
|
||||
}
|
||||
|
||||
/**
|
||||
* Override the bounding rectangle used during positioning tests.
|
||||
*
|
||||
* @param {{ left?: number, top?: number, width?: number, height?: number }} rect
|
||||
* @returns {void}
|
||||
*/
|
||||
setBoundingRect(rect) {
|
||||
this._rect = { ...this._rect, ...rect };
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a deep clone of the element.
|
||||
*
|
||||
* @param {boolean} [deep=false] When ``true`` clone the children as well.
|
||||
* @returns {StubElement} Cloned element instance.
|
||||
*/
|
||||
cloneNode(deep = false) {
|
||||
const clone = new StubElement(this.tagName);
|
||||
clone.className = this.className;
|
||||
clone.style = { ...this.style };
|
||||
clone.dataset = { ...this.dataset };
|
||||
clone.innerHTML = this.innerHTML;
|
||||
clone._rect = { ...this._rect };
|
||||
clone.attributes = new Map(this.attributes);
|
||||
if (deep) {
|
||||
for (const child of this.children) {
|
||||
clone.appendChild(child.cloneNode(true));
|
||||
}
|
||||
}
|
||||
return clone;
|
||||
}
|
||||
|
||||
/**
|
||||
* Locate the nearest ancestor carrying ``selector``.
|
||||
*
|
||||
* @param {string} selector CSS selector (class only).
|
||||
* @returns {?StubElement} Matching ancestor or ``null``.
|
||||
*/
|
||||
closest(selector) {
|
||||
if (!selector || selector[0] !== '.') {
|
||||
return null;
|
||||
}
|
||||
const className = selector.slice(1);
|
||||
let current = this;
|
||||
while (current) {
|
||||
const classes = (current.className || '').split(/\s+/).filter(Boolean);
|
||||
if (classes.includes(className)) {
|
||||
return current;
|
||||
}
|
||||
current = current.parentNode;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a minimal DOM document stub for overlay manager tests.
|
||||
*
|
||||
* @returns {{ document: Document, window: Window, factory: Function, anchor: StubElement, body: StubElement }}
|
||||
*/
|
||||
function createStubDom() {
|
||||
const body = new StubElement('body');
|
||||
body.contains = body.contains.bind(body);
|
||||
const listenerMap = new Map();
|
||||
const document = {
|
||||
body,
|
||||
documentElement: { clientWidth: 640, clientHeight: 480 },
|
||||
createElement(tagName) {
|
||||
return new StubElement(tagName);
|
||||
},
|
||||
getElementById() {
|
||||
return null;
|
||||
},
|
||||
addEventListener(event, handler) {
|
||||
if (!listenerMap.has(event)) {
|
||||
listenerMap.set(event, new Set());
|
||||
}
|
||||
listenerMap.get(event).add(handler);
|
||||
},
|
||||
removeEventListener(event, handler) {
|
||||
if (!listenerMap.has(event)) {
|
||||
return;
|
||||
}
|
||||
listenerMap.get(event).delete(handler);
|
||||
},
|
||||
_dispatch(event) {
|
||||
if (!listenerMap.has(event)) {
|
||||
return;
|
||||
}
|
||||
for (const handler of Array.from(listenerMap.get(event))) {
|
||||
handler();
|
||||
}
|
||||
},
|
||||
};
|
||||
const window = {
|
||||
scrollX: 10,
|
||||
scrollY: 20,
|
||||
innerWidth: 640,
|
||||
innerHeight: 480,
|
||||
requestAnimationFrame(callback) {
|
||||
callback();
|
||||
},
|
||||
};
|
||||
function factory() {
|
||||
const overlay = document.createElement('div');
|
||||
overlay.className = 'short-info-overlay';
|
||||
const closeButton = document.createElement('button');
|
||||
closeButton.className = 'short-info-close';
|
||||
const content = document.createElement('div');
|
||||
content.className = 'short-info-content';
|
||||
overlay.appendChild(closeButton);
|
||||
overlay.appendChild(content);
|
||||
return { overlay, closeButton, content };
|
||||
}
|
||||
const anchor = document.createElement('span');
|
||||
anchor.setBoundingRect({ left: 40, top: 50, width: 16, height: 16 });
|
||||
body.appendChild(anchor);
|
||||
return { document, window, factory, anchor, body };
|
||||
}
|
||||
|
||||
test('render opens overlays and positions them relative to anchors', () => {
|
||||
const { document, window, factory, anchor, body } = createStubDom();
|
||||
const stack = createShortInfoOverlayStack({ document, window, factory });
|
||||
stack.render(anchor, '<strong>Node</strong>');
|
||||
const open = stack.getOpenOverlays();
|
||||
assert.equal(open.length, 1);
|
||||
const overlay = open[0].element;
|
||||
assert.equal(overlay.parentNode, body);
|
||||
assert.equal(overlay.style.position, 'absolute');
|
||||
const content = overlay.querySelector('.short-info-content');
|
||||
assert.ok(content);
|
||||
assert.equal(content.innerHTML, '<strong>Node</strong>');
|
||||
assert.equal(overlay.style.left, '50px');
|
||||
assert.equal(overlay.style.top, '70px');
|
||||
});
|
||||
|
||||
test('request tokens track anchors independently', () => {
|
||||
const { document, window, factory, anchor } = createStubDom();
|
||||
const stack = createShortInfoOverlayStack({ document, window, factory });
|
||||
const token1 = stack.incrementRequestToken(anchor);
|
||||
const token2 = stack.incrementRequestToken(anchor);
|
||||
assert.equal(token2, token1 + 1);
|
||||
stack.render(anchor, 'Loading…');
|
||||
assert.equal(stack.isTokenCurrent(anchor, token2), true);
|
||||
stack.close(anchor);
|
||||
assert.equal(stack.isTokenCurrent(anchor, token2), false);
|
||||
});
|
||||
|
||||
test('overlays stack and close independently', () => {
|
||||
const { document, window, factory, anchor, body } = createStubDom();
|
||||
const stack = createShortInfoOverlayStack({ document, window, factory });
|
||||
const secondAnchor = document.createElement('span');
|
||||
secondAnchor.setBoundingRect({ left: 200, top: 120 });
|
||||
body.appendChild(secondAnchor);
|
||||
stack.render(anchor, 'First');
|
||||
stack.render(secondAnchor, 'Second');
|
||||
const open = stack.getOpenOverlays();
|
||||
assert.equal(open.length, 2);
|
||||
assert.equal(stack.isOpen(anchor), true);
|
||||
assert.equal(stack.isOpen(secondAnchor), true);
|
||||
stack.close(anchor);
|
||||
assert.equal(stack.isOpen(anchor), false);
|
||||
assert.equal(stack.isOpen(secondAnchor), true);
|
||||
stack.closeAll();
|
||||
assert.equal(stack.getOpenOverlays().length, 0);
|
||||
});
|
||||
|
||||
test('cleanupOrphans removes overlays whose anchors disappear', () => {
|
||||
const { document, window, factory, anchor } = createStubDom();
|
||||
const stack = createShortInfoOverlayStack({ document, window, factory });
|
||||
stack.render(anchor, 'Orphaned');
|
||||
anchor.remove();
|
||||
stack.cleanupOrphans();
|
||||
assert.equal(stack.getOpenOverlays().length, 0);
|
||||
});
|
||||
|
||||
test('containsNode recognises overlay descendants', () => {
|
||||
const { document, window, factory, anchor } = createStubDom();
|
||||
const stack = createShortInfoOverlayStack({ document, window, factory });
|
||||
stack.render(anchor, 'Descendant');
|
||||
const [entry] = stack.getOpenOverlays();
|
||||
const content = entry.element.querySelector('.short-info-content');
|
||||
assert.ok(stack.containsNode(content));
|
||||
const stray = new StubElement('div');
|
||||
assert.equal(stack.containsNode(stray), false);
|
||||
});
|
||||
|
||||
test('overlays migrate into and out of fullscreen hosts', () => {
|
||||
const { document, window, factory, anchor, body } = createStubDom();
|
||||
const fullscreenRoot = document.createElement('div');
|
||||
body.appendChild(fullscreenRoot);
|
||||
const stack = createShortInfoOverlayStack({ document, window, factory });
|
||||
stack.render(anchor, 'Fullscreen');
|
||||
const [entry] = stack.getOpenOverlays();
|
||||
assert.equal(entry.element.parentNode, body);
|
||||
assert.equal(entry.element.style.position, 'absolute');
|
||||
|
||||
document.fullscreenElement = fullscreenRoot;
|
||||
document._dispatch('fullscreenchange');
|
||||
assert.equal(entry.element.parentNode, fullscreenRoot);
|
||||
assert.equal(entry.element.style.position, 'fixed');
|
||||
assert.equal(entry.element.style.left, '40px');
|
||||
assert.equal(entry.element.style.top, '50px');
|
||||
|
||||
document.fullscreenElement = null;
|
||||
document._dispatch('fullscreenchange');
|
||||
assert.equal(entry.element.parentNode, body);
|
||||
assert.equal(entry.element.style.position, 'absolute');
|
||||
assert.equal(entry.element.style.left, '50px');
|
||||
assert.equal(entry.element.style.top, '70px');
|
||||
});
|
||||
|
||||
test('rendered overlays do not swallow click events by default', () => {
|
||||
const { document, window, factory, anchor } = createStubDom();
|
||||
const stack = createShortInfoOverlayStack({ document, window, factory });
|
||||
stack.render(anchor, 'Event test');
|
||||
const [entry] = stack.getOpenOverlays();
|
||||
assert.ok(entry);
|
||||
assert.equal(entry.element.eventHandlers.has('click'), false);
|
||||
});
|
||||
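// Hedged usage sketch (not part of the diff): using the overlay stack against
// the real DOM. The factory markup classes match the stub factory above; the
// anchor element id is an assumption.
import { createShortInfoOverlayStack } from '../short-info-overlay-manager.js';

const stack = createShortInfoOverlayStack({
  document,
  window,
  factory: () => {
    const overlay = document.createElement('div');
    overlay.className = 'short-info-overlay';
    const closeButton = document.createElement('button');
    closeButton.className = 'short-info-close';
    const content = document.createElement('div');
    content.className = 'short-info-content';
    overlay.appendChild(closeButton);
    overlay.appendChild(content);
    return { overlay, closeButton, content };
  },
});

const anchor = document.getElementById('nodeBadge'); // assumed anchor element
stack.render(anchor, '<strong>Node</strong>');
// Later: stack.close(anchor) or stack.closeAll(); stack.cleanupOrphans() prunes detached anchors.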
@@ -0,0 +1,216 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import test from 'node:test';
|
||||
import assert from 'node:assert/strict';
|
||||
import { readFile } from 'node:fs/promises';
|
||||
import vm from 'node:vm';
|
||||
|
||||
import { createDomEnvironment } from './dom-environment.js';
|
||||
|
||||
const themeModuleUrl = new URL('../../theme.js', import.meta.url);
|
||||
const backgroundModuleUrl = new URL('../../background.js', import.meta.url);
|
||||
const themeSource = await readFile(themeModuleUrl, 'utf8');
|
||||
const backgroundSource = await readFile(backgroundModuleUrl, 'utf8');
|
||||
|
||||
/**
|
||||
* Evaluate a browser-oriented script within the provided DOM environment.
|
||||
*
|
||||
* @param {string} source Module source code to execute.
|
||||
* @param {URL} url Identifier for the executed script.
|
||||
* @param {ReturnType<typeof createDomEnvironment>} env Active DOM harness.
|
||||
* @returns {void}
|
||||
*/
|
||||
function executeInDom(source, url, env) {
|
||||
const context = vm.createContext({
|
||||
console,
|
||||
setTimeout,
|
||||
clearTimeout,
|
||||
setInterval,
|
||||
clearInterval
|
||||
});
|
||||
context.window = env.window;
|
||||
context.document = env.document;
|
||||
context.global = context;
|
||||
context.globalThis = context;
|
||||
context.window.window = context.window;
|
||||
context.window.document = context.document;
|
||||
context.window.globalThis = context;
|
||||
context.window.console = console;
|
||||
|
||||
vm.runInContext(source, context, { filename: url.pathname, displayErrors: true });
|
||||
}
|
||||
|
||||
test('theme and background modules behave correctly across scenarios', async t => {
|
||||
const env = createDomEnvironment({ readyState: 'complete', cookie: '' });
|
||||
try {
|
||||
const toggle = env.createElement('button', 'themeToggle');
|
||||
env.registerElement('themeToggle', toggle);
|
||||
let filterInvocations = 0;
|
||||
env.window.applyFiltersToAllTiles = () => {
|
||||
filterInvocations += 1;
|
||||
};
|
||||
|
||||
executeInDom(themeSource, themeModuleUrl, env);
|
||||
executeInDom(backgroundSource, backgroundModuleUrl, env);
|
||||
|
||||
const themeHelpers = env.window.__themeCookie;
|
||||
const themeHooks = themeHelpers.__testHooks;
|
||||
const backgroundHelpers = env.window.__potatoBackground;
|
||||
const backgroundHooks = backgroundHelpers.__testHooks;
|
||||
|
||||
await t.test('initialises with a dark theme and persists cookies', () => {
|
||||
assert.equal(env.document.documentElement.getAttribute('data-theme'), 'dark');
|
||||
assert.equal(env.document.body.classList.contains('dark'), true);
|
||||
assert.equal(toggle.textContent, '☀️');
|
||||
themeHelpers.persistTheme('light');
|
||||
themeHelpers.setCookie('bare', '1');
|
||||
themeHooks.exerciseSetCookieGuard();
|
||||
themeHelpers.setCookie('flag', 'true', { Secure: true });
|
||||
const cookieString = env.getCookieString();
|
||||
assert.equal(themeHelpers.getCookie('flag'), 'true');
|
||||
assert.equal(themeHelpers.getCookie('missing'), null);
|
||||
assert.match(cookieString, /theme=light/);
|
||||
assert.match(cookieString, /; path=\//);
|
||||
assert.match(cookieString, /; SameSite=Lax/);
|
||||
assert.match(cookieString, /; Secure/);
|
||||
});
|
||||
|
||||
await t.test('serializeCookieOptions covers boolean and string attributes', () => {
|
||||
const withAttributes = themeHooks.serializeCookieOptions({ Secure: true, HttpOnly: '1' });
|
||||
assert.equal(withAttributes.includes('; Secure'), true);
|
||||
assert.equal(withAttributes.includes('; HttpOnly=1'), true);
|
||||
const secureOnly = themeHooks.serializeCookieOptions({ Secure: true });
|
||||
assert.equal(secureOnly.trim(), '; Secure');
|
||||
assert.equal(themeHooks.formatCookieOption(['HttpOnly', '1']), '; HttpOnly=1');
|
||||
assert.equal(themeHooks.formatCookieOption(['Secure', true]), '; Secure');
|
||||
assert.equal(themeHooks.serializeCookieOptions({}), '');
|
||||
assert.equal(themeHooks.serializeCookieOptions(), '');
|
||||
});
|
||||
|
||||
await t.test('re-bootstrap handles DOMContentLoaded flow and filter hooks', () => {
|
||||
env.document.readyState = 'loading';
|
||||
filterInvocations = 0;
|
||||
env.setCookieString('theme=light');
|
||||
themeHooks.bootstrap();
|
||||
env.triggerDOMContentLoaded();
|
||||
assert.equal(env.document.documentElement.getAttribute('data-theme'), 'light');
|
||||
assert.equal(env.document.body.classList.contains('dark'), false);
|
||||
assert.equal(toggle.textContent, '🌙');
|
||||
assert.equal(filterInvocations, 1);
|
||||
env.document.removeEventListener('DOMContentLoaded', themeHooks.handleReady);
|
||||
});
|
||||
|
||||
await t.test('handleReady tolerates missing toggle button', () => {
|
||||
env.registerElement('themeToggle', null);
|
||||
themeHooks.handleReady();
|
||||
env.registerElement('themeToggle', toggle);
|
||||
});
|
||||
|
||||
await t.test('applyTheme copes with absent DOM nodes', () => {
|
||||
const originalBody = env.document.body;
|
||||
const originalRoot = env.document.documentElement;
|
||||
env.document.body = null;
|
||||
env.document.documentElement = null;
|
||||
assert.equal(themeHooks.applyTheme('dark'), true);
|
||||
env.document.body = originalBody;
|
||||
env.document.documentElement = originalRoot;
|
||||
assert.equal(themeHooks.applyTheme('light'), false);
|
||||
});
|
||||
|
||||
await t.test('background bootstrap waits for DOM readiness', () => {
|
||||
env.setComputedStyleImplementation(() => ({ getPropertyValue: () => ' rgb(15, 15, 15) ' }));
|
||||
env.document.readyState = 'loading';
|
||||
const previousColor = env.document.documentElement.style.backgroundColor;
|
||||
backgroundHooks.bootstrap();
|
||||
assert.equal(env.document.documentElement.style.backgroundColor, previousColor);
|
||||
env.triggerDOMContentLoaded();
|
||||
assert.equal(env.document.documentElement.style.backgroundColor.trim(), 'rgb(15, 15, 15)');
|
||||
});
|
||||
|
||||
await t.test('background falls back to theme defaults when styles unavailable', () => {
|
||||
env.setComputedStyleImplementation(() => {
|
||||
throw new Error('no styles');
|
||||
});
|
||||
env.document.body.classList.add('dark');
|
||||
backgroundHelpers.applyBackground();
|
||||
assert.equal(env.document.documentElement.style.backgroundColor, '#0e1418');
|
||||
env.document.body.classList.remove('dark');
|
||||
backgroundHelpers.applyBackground();
|
||||
assert.equal(env.document.documentElement.style.backgroundColor, '#f6f3ee');
|
||||
});
|
||||
|
||||
await t.test('background helper tolerates missing body elements', () => {
|
||||
const originalBody = env.document.body;
|
||||
env.document.body = null;
|
||||
backgroundHelpers.applyBackground();
|
||||
assert.equal(backgroundHelpers.resolveBackgroundColor(), null);
|
||||
env.document.body = originalBody;
|
||||
});
|
||||
|
||||
await t.test('theme changes trigger background updates', () => {
|
||||
env.document.body.classList.remove('dark');
|
||||
themeHooks.setTheme('light');
|
||||
backgroundHooks.init();
|
||||
env.dispatchWindowEvent('themechange');
|
||||
assert.equal(env.document.documentElement.style.backgroundColor, '#f6f3ee');
|
||||
});
|
||||
|
||||
env.window.removeEventListener('themechange', backgroundHelpers.applyBackground);
|
||||
} finally {
|
||||
env.cleanup();
|
||||
}
|
||||
});
|
||||
|
||||
test('dom environment helpers mimic expected DOM behaviour', () => {
|
||||
const env = createDomEnvironment({ readyState: 'interactive', includeBody: false });
|
||||
try {
|
||||
const element = env.createElement('span');
|
||||
element.classList.add('foo');
|
||||
assert.equal(element.classList.contains('foo'), true);
|
||||
assert.equal(element.classList.toggle('foo'), false);
|
||||
assert.equal(element.classList.toggle('bar'), true);
|
||||
assert.equal(element.getAttribute('id'), null);
|
||||
element.setAttribute('data-test', 'ok');
|
||||
assert.equal(element.getAttribute('data-test'), 'ok');
|
||||
|
||||
env.registerElement('sample', element);
|
||||
assert.equal(env.document.getElementById('sample'), element);
|
||||
assert.equal(env.document.querySelector('.missing'), null);
|
||||
|
||||
let docEventFired = false;
|
||||
env.document.addEventListener('custom', () => {
|
||||
docEventFired = true;
|
||||
});
|
||||
env.document.dispatchEvent('custom');
|
||||
assert.equal(docEventFired, true);
|
||||
env.document.removeEventListener('custom');
|
||||
|
||||
let winEventFired = false;
|
||||
env.window.addEventListener('global', () => {
|
||||
winEventFired = true;
|
||||
});
|
||||
env.window.dispatchEvent('global');
|
||||
assert.equal(winEventFired, true);
|
||||
env.window.removeEventListener('global');
|
||||
|
||||
env.setCookieString('');
|
||||
env.document.cookie = 'foo=bar';
|
||||
assert.equal(env.getCookieString(), 'foo=bar');
|
||||
} finally {
|
||||
env.cleanup();
|
||||
}
|
||||
});
|
||||
@@ -0,0 +1,194 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Extract channel metadata from a message payload for chat display.
|
||||
*
|
||||
* @param {Object} message Raw message payload from the API.
|
||||
* @returns {{ frequency: string|null, channelName: string|null }}
|
||||
* Normalized metadata values.
|
||||
*/
|
||||
export function extractChatMessageMetadata(message) {
|
||||
if (!message || typeof message !== 'object') {
|
||||
return { frequency: null, channelName: null };
|
||||
}
|
||||
|
||||
const frequency = normalizeFrequency(
|
||||
firstNonNull(
|
||||
message.region_frequency,
|
||||
message.regionFrequency,
|
||||
message.lora_freq,
|
||||
message.loraFreq,
|
||||
message.frequency
|
||||
)
|
||||
);
|
||||
|
||||
const channelName = normalizeString(
|
||||
firstNonNull(message.channel_name, message.channelName)
|
||||
);
|
||||
|
||||
return { frequency, channelName };
|
||||
}
|
||||
|
||||
/**
|
||||
* Produce the formatted prefix for a chat message entry.
|
||||
*
|
||||
 * Timestamp and frequency are each wrapped in square brackets. A missing
 * timestamp yields empty brackets, while a missing frequency is replaced by the
 * configured placeholder, preserving the positional layout expected by
 * operators.
|
||||
*
|
||||
* @param {{
|
||||
* timestamp: string,
|
||||
* frequency: string|null
|
||||
* }} params Normalised and escaped display strings.
|
||||
* @returns {string} Prefix string suitable for HTML insertion.
|
||||
*/
|
||||
export function formatChatMessagePrefix({ timestamp, frequency }) {
|
||||
const ts = typeof timestamp === 'string' ? timestamp : '';
|
||||
const freq = normalizeFrequencySlot(frequency);
|
||||
return `[${ts}][${freq}]`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Render the channel tag that follows the short name in a chat message entry.
|
||||
*
|
||||
* Empty channel names remain blank within the brackets, mirroring the original
|
||||
* UI behaviour that reserves the slot without introducing placeholder text.
|
||||
*
|
||||
* @param {{ channelName: string|null }} params Normalised and escaped display strings.
|
||||
* @returns {string} Channel tag suitable for HTML insertion.
|
||||
*/
|
||||
export function formatChatChannelTag({ channelName }) {
|
||||
const channel = typeof channelName === 'string' ? channelName : channelName == null ? '' : String(channelName);
|
||||
return `[${channel}]`;
|
||||
}
|
||||
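// Illustrative usage sketch (not part of the diff): composing a chat log line
// from the helpers above. The message fields and the "12:34:56" timestamp are
// assumptions for the example; values are expected to be HTML-escaped upstream.
const exampleMeta = extractChatMessageMetadata({
  channel_name: 'MediumFast',
  lora_freq: '868.875 MHz',
});
const examplePrefix = formatChatMessagePrefix({ timestamp: '12:34:56', frequency: exampleMeta.frequency });
const exampleTag = formatChatChannelTag({ channelName: exampleMeta.channelName });
// examplePrefix === '[12:34:56][868.875]' and exampleTag === '[MediumFast]'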
|
||||
/**
|
||||
* Create the formatted prefix for node announcements in the chat log.
|
||||
*
|
||||
* Both the timestamp and the optional frequency will be wrapped in brackets,
|
||||
* mirroring the chat message display while omitting the channel indicator.
|
||||
*
|
||||
* @param {{ timestamp: string, frequency: string|null }} params Display strings.
|
||||
* @returns {string} Prefix string suitable for HTML insertion.
|
||||
*/
|
||||
export function formatNodeAnnouncementPrefix({ timestamp, frequency }) {
|
||||
const ts = typeof timestamp === 'string' ? timestamp : '';
|
||||
const freq = normalizeFrequencySlot(frequency);
|
||||
return `[${ts}][${freq}]`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Produce a consistently formatted frequency slot for chat prefixes.
|
||||
*
|
||||
* A missing or empty frequency is rendered as three HTML non-breaking spaces to
|
||||
* ensure the UI maintains its expected alignment while clearly indicating the
|
||||
* absence of data.
|
||||
*
|
||||
* @param {*} value Frequency value that has already been escaped for HTML.
|
||||
* @returns {string} Frequency slot suitable for prefix rendering.
|
||||
*/
|
||||
function normalizeFrequencySlot(value) {
|
||||
if (value == null) {
|
||||
return FREQUENCY_PLACEHOLDER;
|
||||
}
|
||||
if (typeof value === 'string') {
|
||||
return value.length > 0 ? value : FREQUENCY_PLACEHOLDER;
|
||||
}
|
||||
const strValue = String(value);
|
||||
return strValue.length > 0 ? strValue : FREQUENCY_PLACEHOLDER;
|
||||
}
|
||||
|
||||
/**
|
||||
* HTML entity sequence inserted when a frequency is unavailable.
|
||||
* @type {string}
|
||||
*/
|
||||
const FREQUENCY_PLACEHOLDER = '&nbsp;&nbsp;&nbsp;';
|
||||
|
||||
/**
|
||||
* Return the first value in ``candidates`` that is not ``null`` or ``undefined``.
|
||||
*
|
||||
* @param {...*} candidates Candidate values.
|
||||
* @returns {*} First present value or ``null`` when missing.
|
||||
*/
|
||||
function firstNonNull(...candidates) {
|
||||
for (const value of candidates) {
|
||||
if (value !== null && value !== undefined) {
|
||||
return value;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalise potential channel name values to trimmed strings.
|
||||
*
|
||||
* @param {*} value Raw value.
|
||||
* @returns {string|null} Sanitised channel name.
|
||||
*/
|
||||
function normalizeString(value) {
|
||||
if (value == null) return null;
|
||||
if (typeof value === 'string') {
|
||||
const trimmed = value.trim();
|
||||
return trimmed.length > 0 ? trimmed : null;
|
||||
}
|
||||
if (typeof value === 'number') {
|
||||
if (!Number.isFinite(value)) return null;
|
||||
return String(value);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert various frequency representations into clean strings.
|
||||
*
|
||||
* @param {*} value Raw frequency value.
|
||||
* @returns {string|null} Frequency in MHz as a string, when available.
|
||||
*/
|
||||
function normalizeFrequency(value) {
|
||||
if (value == null) return null;
|
||||
if (typeof value === 'number') {
|
||||
if (!Number.isFinite(value) || value <= 0) {
|
||||
return null;
|
||||
}
|
||||
return Number.isInteger(value) ? String(value) : String(Number(value.toFixed(3)));
|
||||
}
|
||||
if (typeof value === 'string') {
|
||||
const trimmed = value.trim();
|
||||
if (!trimmed) return null;
|
||||
const numericMatch = trimmed.match(/\d+(?:\.\d+)?/);
|
||||
if (numericMatch) {
|
||||
const parsed = Number(numericMatch[0]);
|
||||
if (Number.isFinite(parsed) && parsed > 0) {
|
||||
return Number.isInteger(parsed) ? String(Math.trunc(parsed)) : String(parsed);
|
||||
}
|
||||
}
|
||||
return trimmed;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
export const __test__ = {
|
||||
firstNonNull,
|
||||
normalizeString,
|
||||
normalizeFrequency,
|
||||
formatChatMessagePrefix,
|
||||
formatNodeAnnouncementPrefix,
|
||||
normalizeFrequencySlot,
|
||||
FREQUENCY_PLACEHOLDER,
|
||||
formatChatChannelTag
|
||||
};
|
||||
@@ -0,0 +1,45 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * CSS selector used to locate the embedded configuration element.
 *
 * @type {string}
 */
const CONFIG_SELECTOR = '[data-app-config]';

/**
 * Read and parse the serialized application configuration from the DOM.
 *
 * @returns {Object<string, *>} Parsed configuration object or an empty object when unavailable.
 */
export function readAppConfig() {
  const el = document.querySelector(CONFIG_SELECTOR);
  if (!el) {
    return {};
  }
  const raw = el.getAttribute('data-app-config') || '';
  if (!raw) {
    return {};
  }
  try {
    const parsed = JSON.parse(raw);
    return typeof parsed === 'object' && parsed !== null ? parsed : {};
  } catch (err) {
    console.error('Failed to parse application configuration', err);
    return {};
  }
}
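// Illustrative usage sketch (not part of the diff): reading the embedded
// configuration from the host page. The markup and the `refreshMs` key are
// assumptions made for this example only.
//
// Assumed markup: <div data-app-config='{"refreshMs": 30000}'></div>
const exampleConfig = readAppConfig();
// exampleConfig is {} when the element is absent or its JSON fails to parse;
// otherwise it is the parsed object, e.g. { refreshMs: 30000 }.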
@@ -0,0 +1,33 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { readAppConfig } from './config.js';
import { initializeApp } from './main.js';
import { DEFAULT_CONFIG, mergeConfig } from './settings.js';

export { DEFAULT_CONFIG, mergeConfig } from './settings.js';

/**
 * Bootstraps the application once the DOM is ready by reading configuration
 * data and delegating to ``initializeApp``.
 *
 * @returns {void}
 */
document.addEventListener('DOMContentLoaded', () => {
  const rawConfig = readAppConfig();
  const config = mergeConfig(rawConfig);
  initializeApp(config);
});
File diff suppressed because it is too large
@@ -0,0 +1,178 @@
|
||||
/**
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @typedef {[number, number]} LatLngTuple
|
||||
* @typedef {[LatLngTuple, LatLngTuple]} LatLngBoundsTuple
|
||||
* @typedef {{ paddingPx: number, maxZoom?: number }} FitOptionsSnapshot
|
||||
*/
|
||||
|
||||
/**
|
||||
* Safely clone a Leaflet-compatible bounds tuple to avoid accidental mutation.
|
||||
*
|
||||
* @param {LatLngBoundsTuple} bounds - Bounds tuple to duplicate.
|
||||
* @returns {LatLngBoundsTuple} Deep copy of the provided bounds.
|
||||
*/
|
||||
function cloneBounds(bounds) {
|
||||
return [
|
||||
[bounds[0][0], bounds[0][1]],
|
||||
[bounds[1][0], bounds[1][1]]
|
||||
];
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine whether the provided structure resembles a Leaflet bounds tuple.
|
||||
*
|
||||
* @param {unknown} value - Potential bounds input.
|
||||
* @returns {value is LatLngBoundsTuple} True when the input is structurally valid.
|
||||
*/
|
||||
function isValidBounds(value) {
|
||||
if (!Array.isArray(value) || value.length !== 2) return false;
|
||||
const [southWest, northEast] = value;
|
||||
if (!Array.isArray(southWest) || !Array.isArray(northEast)) return false;
|
||||
if (southWest.length !== 2 || northEast.length !== 2) return false;
|
||||
const numbers = [southWest[0], southWest[1], northEast[0], northEast[1]];
|
||||
return numbers.every(number => Number.isFinite(number));
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a controller for coordinating map auto-fit behaviour.
|
||||
*
|
||||
* @param {object} options - Controller configuration options.
|
||||
* @param {HTMLInputElement|null} [options.toggleEl] - Checkbox controlling auto-fit.
|
||||
* @param {Window|undefined} [options.windowObject] - Browser window instance.
|
||||
* @param {number} [options.defaultPaddingPx=32] - Padding fallback when none supplied.
|
||||
* @returns {{
|
||||
* attachResizeListener(callback: (snapshot: { bounds: LatLngBoundsTuple, options: FitOptionsSnapshot } | null) => void): () => void,
|
||||
* getLastFit(): { bounds: LatLngBoundsTuple, options: FitOptionsSnapshot } | null,
|
||||
* handleUserInteraction(): boolean,
|
||||
* isAutoFitEnabled(): boolean,
|
||||
* recordFit(bounds: LatLngBoundsTuple, options?: { paddingPx?: number, maxZoom?: number }): void,
|
||||
* runAutoFitOperation(fn: () => unknown): unknown
|
||||
* }} Map auto-fit controller instance.
|
||||
*/
|
||||
export function createMapAutoFitController({
|
||||
toggleEl = null,
|
||||
windowObject = typeof window !== 'undefined' ? window : undefined,
|
||||
defaultPaddingPx = 32
|
||||
} = {}) {
|
||||
/** @type {LatLngBoundsTuple|null} */
|
||||
let lastBounds = null;
|
||||
/** @type {FitOptionsSnapshot} */
|
||||
let lastOptions = { paddingPx: defaultPaddingPx };
|
||||
let autoFitInProgress = false;
|
||||
|
||||
/**
|
||||
* Record the most recent set of bounds used for auto-fitting.
|
||||
*
|
||||
* @param {LatLngBoundsTuple} bounds - Leaflet bounds tuple.
|
||||
* @param {{ paddingPx?: number, maxZoom?: number }} [options] - Fit options to persist.
|
||||
* @returns {void}
|
||||
*/
|
||||
function recordFit(bounds, options = {}) {
|
||||
if (!isValidBounds(bounds)) return;
|
||||
const paddingPx = Number.isFinite(options.paddingPx) && options.paddingPx >= 0 ? options.paddingPx : defaultPaddingPx;
|
||||
const maxZoom = Number.isFinite(options.maxZoom) && options.maxZoom > 0 ? options.maxZoom : undefined;
|
||||
lastBounds = cloneBounds(bounds);
|
||||
lastOptions = { paddingPx };
|
||||
if (maxZoom !== undefined) {
|
||||
lastOptions.maxZoom = maxZoom;
|
||||
} else {
|
||||
delete lastOptions.maxZoom;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a snapshot of the most recently recorded fit bounds.
|
||||
*
|
||||
* @returns {{ bounds: LatLngBoundsTuple, options: FitOptionsSnapshot } | null} Snapshot or ``null`` when unavailable.
|
||||
*/
|
||||
function getLastFit() {
|
||||
if (!lastBounds) return null;
|
||||
return { bounds: cloneBounds(lastBounds), options: { ...lastOptions } };
|
||||
}
|
||||
|
||||
/**
|
||||
* Test whether auto-fit is currently enabled by the user.
|
||||
*
|
||||
* @returns {boolean} True when the toggle exists and is checked.
|
||||
*/
|
||||
function isAutoFitEnabled() {
|
||||
return Boolean(toggleEl && toggleEl.checked);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a callback while marking auto-fit as in-progress.
|
||||
*
|
||||
* @template T
|
||||
* @param {() => T} fn - Operation to run while suppressing interaction side-effects.
|
||||
* @returns {T | undefined} Result of ``fn`` when provided.
|
||||
*/
|
||||
function runAutoFitOperation(fn) {
|
||||
if (typeof fn !== 'function') return undefined;
|
||||
autoFitInProgress = true;
|
||||
try {
|
||||
return fn();
|
||||
} finally {
|
||||
autoFitInProgress = false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Disable auto-fit in response to manual user interactions with the map.
|
||||
*
|
||||
* @returns {boolean} True when the toggle was modified.
|
||||
*/
|
||||
function handleUserInteraction() {
|
||||
if (!toggleEl || !toggleEl.checked || autoFitInProgress) {
|
||||
return false;
|
||||
}
|
||||
toggleEl.checked = false;
|
||||
const event = new Event('change', { bubbles: true });
|
||||
toggleEl.dispatchEvent(event);
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Attach resize listeners that notify the consumer when a refit may be required.
|
||||
*
|
||||
* @param {(snapshot: { bounds: LatLngBoundsTuple, options: FitOptionsSnapshot } | null) => void} callback - Resize handler.
|
||||
* @returns {() => void} Function that removes the registered listeners.
|
||||
*/
|
||||
function attachResizeListener(callback) {
|
||||
if (!windowObject || typeof windowObject.addEventListener !== 'function' || typeof callback !== 'function') {
|
||||
return () => {};
|
||||
}
|
||||
const handler = () => {
|
||||
callback(getLastFit());
|
||||
};
|
||||
windowObject.addEventListener('resize', handler, { passive: true });
|
||||
windowObject.addEventListener('orientationchange', handler, { passive: true });
|
||||
return () => {
|
||||
windowObject.removeEventListener('resize', handler);
|
||||
windowObject.removeEventListener('orientationchange', handler);
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
attachResizeListener,
|
||||
getLastFit,
|
||||
handleUserInteraction,
|
||||
isAutoFitEnabled,
|
||||
recordFit,
|
||||
runAutoFitOperation
|
||||
};
|
||||
}
|
||||
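// Illustrative usage sketch (not part of the diff). Assumes Leaflet is loaded
// as the global `L`, a `#map` container and an `#autoFitToggle` checkbox exist,
// and the bounds tuple below is example data.
const exampleMap = L.map('map');
const exampleAutoFitBounds = [[52.3, 13.0], [52.7, 13.8]];
const exampleController = createMapAutoFitController({
  toggleEl: document.querySelector('#autoFitToggle'),
  defaultPaddingPx: 32,
});

function exampleFit(bounds) {
  // Run the fit inside runAutoFitOperation so the map's own move events are not
  // mistaken for manual interaction, then remember the bounds for later replays.
  exampleController.runAutoFitOperation(() => exampleMap.fitBounds(bounds, { padding: [32, 32] }));
  exampleController.recordFit(bounds, { paddingPx: 32 });
}

exampleFit(exampleAutoFitBounds);

// Replay the last fit on resize/orientation changes while auto-fit stays enabled.
exampleController.attachResizeListener(snapshot => {
  if (snapshot && exampleController.isAutoFitEnabled()) {
    exampleFit(snapshot.bounds);
  }
});

// Manual pans or zooms should call handleUserInteraction(), which unchecks the toggle.
exampleMap.on('dragstart', () => exampleController.handleUserInteraction());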
@@ -0,0 +1,255 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
const EARTH_RADIUS_KM = 6371;
|
||||
const RAD_TO_DEG = 180 / Math.PI;
|
||||
const DEFAULT_MIN_RANGE_KM = 0.5;
|
||||
const POLE_LONGITUDE_SPAN_DEGREES = 180;
|
||||
const COS_EPSILON = 1e-6;
|
||||
|
||||
/**
|
||||
* Clamp a latitude value to the valid WGS84 range.
|
||||
*
|
||||
* @param {number} latitude Latitude in degrees.
|
||||
* @returns {number} Latitude clamped to [-90, 90].
|
||||
*/
|
||||
function clampLatitude(latitude) {
|
||||
if (!Number.isFinite(latitude)) {
|
||||
return latitude < 0 ? -90 : 90;
|
||||
}
|
||||
return Math.max(-90, Math.min(90, latitude));
|
||||
}
|
||||
|
||||
/**
|
||||
* Clamp a longitude value to the valid WGS84 range.
|
||||
*
|
||||
* @param {number} longitude Longitude in degrees.
|
||||
* @returns {number} Longitude clamped to [-180, 180].
|
||||
*/
|
||||
function clampLongitude(longitude) {
|
||||
if (!Number.isFinite(longitude)) {
|
||||
return longitude < 0 ? -180 : 180;
|
||||
}
|
||||
if (longitude < -180) return -180;
|
||||
if (longitude > 180) return 180;
|
||||
return longitude;
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalise a longitude so it remains close to a reference meridian.
|
||||
*
|
||||
* @param {number} longitude Longitude in degrees to normalise.
|
||||
* @param {number} referenceMeridian Reference longitude in degrees.
|
||||
* @returns {number} Longitude adjusted by multiples of 360° so the
|
||||
* difference from ``referenceMeridian`` lies within ``[-180, 180)``.
|
||||
*/
|
||||
function normaliseLongitudeAround(longitude, referenceMeridian) {
|
||||
if (!Number.isFinite(longitude) || !Number.isFinite(referenceMeridian)) {
|
||||
return longitude;
|
||||
}
|
||||
const delta = ((longitude - referenceMeridian + 540) % 360) - 180;
|
||||
return referenceMeridian + delta;
|
||||
}
|
||||
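// Illustrative sketch (not part of the diff): normalisation keeps longitudes on
// the same side of the antimeridian as the reference meridian.
console.assert(normaliseLongitudeAround(-170, 170) === 190);
console.assert(normaliseLongitudeAround(190, 0) === -170);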
|
||||
/**
|
||||
* Convert degrees to radians.
|
||||
*
|
||||
* @param {number} degrees Angle in degrees.
|
||||
* @returns {number} Angle in radians.
|
||||
*/
|
||||
export function toRadians(degrees) {
|
||||
return (degrees * Math.PI) / 180;
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute the great-circle distance between two coordinates using the
|
||||
* haversine formula.
|
||||
*
|
||||
* @param {number} lat1 Latitude of the first point in degrees.
|
||||
* @param {number} lon1 Longitude of the first point in degrees.
|
||||
* @param {number} lat2 Latitude of the second point in degrees.
|
||||
* @param {number} lon2 Longitude of the second point in degrees.
|
||||
* @returns {number} Distance in kilometres.
|
||||
*/
|
||||
export function haversineDistanceKm(lat1, lon1, lat2, lon2) {
|
||||
const dLat = toRadians(lat2 - lat1);
|
||||
const dLon = toRadians(lon2 - lon1);
|
||||
const sinLat = Math.sin(dLat / 2);
|
||||
const sinLon = Math.sin(dLon / 2);
|
||||
const a = sinLat * sinLat + Math.cos(toRadians(lat1)) * Math.cos(toRadians(lat2)) * sinLon * sinLon;
|
||||
const c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1 - a));
|
||||
return EARTH_RADIUS_KM * c;
|
||||
}
|
||||
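// Illustrative sketch (not part of the diff): Berlin (52.52, 13.405) to
// Hamburg (53.551, 9.993) comes out at roughly 255 km.
const exampleDistanceKm = haversineDistanceKm(52.52, 13.405, 53.551, 9.993);
// exampleDistanceKm is approximately 255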
|
||||
/**
|
||||
* Normalise range inputs to a safe, positive value.
|
||||
*
|
||||
* @param {number} rangeKm Requested range in kilometres.
|
||||
* @param {number} minimumRangeKm Minimum permitted range in kilometres.
|
||||
* @returns {number} Normalised range in kilometres.
|
||||
*/
|
||||
function normaliseRange(rangeKm, minimumRangeKm) {
|
||||
const minRange = Number.isFinite(minimumRangeKm) && minimumRangeKm > 0 ? minimumRangeKm : DEFAULT_MIN_RANGE_KM;
|
||||
if (!Number.isFinite(rangeKm) || rangeKm <= 0) {
|
||||
return minRange;
|
||||
}
|
||||
return Math.max(rangeKm, minRange);
|
||||
}
|
||||
|
||||
/**
|
||||
* Compute a geographic bounding box for a circular range centred on a point.
|
||||
*
|
||||
* The resulting bounds are suitable for use with Leaflet ``fitBounds`` and
|
||||
* similar APIs that accept a ``[[south, west], [north, east]]`` tuple.
|
||||
*
|
||||
* @param {{lat: number, lon: number}} center Map centre coordinate.
|
||||
* @param {number} rangeKm Desired radius from the centre in kilometres.
|
||||
* @param {{ minimumRangeKm?: number }} [options] Optional configuration.
|
||||
* @returns {[[number, number], [number, number]] | null} Bounding box tuple or
|
||||
* ``null`` when the inputs are invalid.
|
||||
*/
|
||||
export function computeBoundingBox(center, rangeKm, options = {}) {
|
||||
if (!center || !Number.isFinite(center.lat) || !Number.isFinite(center.lon)) {
|
||||
return null;
|
||||
}
|
||||
const minRange = Number.isFinite(options.minimumRangeKm) && options.minimumRangeKm > 0
|
||||
? options.minimumRangeKm
|
||||
: DEFAULT_MIN_RANGE_KM;
|
||||
const radiusKm = normaliseRange(rangeKm, minRange);
|
||||
const angularDistance = radiusKm / EARTH_RADIUS_KM;
|
||||
const latDelta = angularDistance * RAD_TO_DEG;
|
||||
const minLat = clampLatitude(center.lat - latDelta);
|
||||
const maxLat = clampLatitude(center.lat + latDelta);
|
||||
|
||||
const cosLat = Math.cos(toRadians(center.lat));
|
||||
let lonDelta;
|
||||
if (Math.abs(cosLat) < COS_EPSILON) {
|
||||
lonDelta = POLE_LONGITUDE_SPAN_DEGREES;
|
||||
} else {
|
||||
lonDelta = Math.min(POLE_LONGITUDE_SPAN_DEGREES, (angularDistance * RAD_TO_DEG) / Math.max(Math.abs(cosLat), COS_EPSILON));
|
||||
}
|
||||
if (!Number.isFinite(lonDelta) || lonDelta >= POLE_LONGITUDE_SPAN_DEGREES) {
|
||||
return [[minLat, -POLE_LONGITUDE_SPAN_DEGREES], [maxLat, POLE_LONGITUDE_SPAN_DEGREES]];
|
||||
}
|
||||
|
||||
const minLon = clampLongitude(center.lon - lonDelta);
|
||||
const maxLon = clampLongitude(center.lon + lonDelta);
|
||||
return [[minLat, minLon], [maxLat, maxLon]];
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine a bounding box that encloses the provided coordinates with a
|
||||
* configurable safety margin.
|
||||
*
|
||||
* @param {Array<[number, number]>} points Collection of ``[lat, lon]`` pairs.
|
||||
* @param {{
|
||||
* paddingFraction?: number,
|
||||
* minimumRangeKm?: number
|
||||
* }} [options] Optional configuration controlling the computed bounds.
|
||||
* @returns {[[number, number], [number, number]] | null} Bounding box tuple or
|
||||
* ``null`` when the input list is empty or invalid. Longitudes may extend
|
||||
* beyond the canonical ``[-180, 180]`` range when a dateline-spanning span is
|
||||
* required.
|
||||
*/
|
||||
export function computeBoundsForPoints(points, options = {}) {
|
||||
if (!Array.isArray(points) || !points.length) {
|
||||
return null;
|
||||
}
|
||||
const validPoints = points.filter(point => Array.isArray(point) && Number.isFinite(point[0]) && Number.isFinite(point[1]));
|
||||
if (!validPoints.length) {
|
||||
return null;
|
||||
}
|
||||
|
||||
let xSum = 0;
|
||||
let ySum = 0;
|
||||
let zSum = 0;
|
||||
let latSum = 0;
|
||||
let lonSum = 0;
|
||||
for (const [lat, lon] of validPoints) {
|
||||
const latRad = toRadians(lat);
|
||||
const lonRad = toRadians(lon);
|
||||
const cosLat = Math.cos(latRad);
|
||||
xSum += cosLat * Math.cos(lonRad);
|
||||
ySum += cosLat * Math.sin(lonRad);
|
||||
zSum += Math.sin(latRad);
|
||||
latSum += lat;
|
||||
lonSum += lon;
|
||||
}
|
||||
|
||||
const vectorMagnitude = Math.sqrt(xSum * xSum + ySum * ySum + zSum * zSum);
|
||||
let centre;
|
||||
if (vectorMagnitude > COS_EPSILON) {
|
||||
const lat = Math.atan2(zSum, Math.sqrt(xSum * xSum + ySum * ySum)) * RAD_TO_DEG;
|
||||
const lon = Math.atan2(ySum, xSum) * RAD_TO_DEG;
|
||||
centre = { lat, lon };
|
||||
} else {
|
||||
centre = {
|
||||
lat: latSum / validPoints.length,
|
||||
lon: lonSum / validPoints.length
|
||||
};
|
||||
}
|
||||
|
||||
let maxDistanceKm = 0;
|
||||
for (const [lat, lon] of validPoints) {
|
||||
const distance = haversineDistanceKm(centre.lat, centre.lon, lat, lon);
|
||||
if (distance > maxDistanceKm) {
|
||||
maxDistanceKm = distance;
|
||||
}
|
||||
}
|
||||
|
||||
const paddingFraction = Number.isFinite(options.paddingFraction) && options.paddingFraction >= 0
|
||||
? options.paddingFraction
|
||||
: 0.15;
|
||||
const minimumRangeKm = Number.isFinite(options.minimumRangeKm) && options.minimumRangeKm > 0
|
||||
? options.minimumRangeKm
|
||||
: DEFAULT_MIN_RANGE_KM;
|
||||
const paddedRangeKm = Math.max(minimumRangeKm, maxDistanceKm * (1 + paddingFraction));
|
||||
const angularDistance = paddedRangeKm / EARTH_RADIUS_KM;
|
||||
const latDelta = angularDistance * RAD_TO_DEG;
|
||||
const minLat = clampLatitude(centre.lat - latDelta);
|
||||
const maxLat = clampLatitude(centre.lat + latDelta);
|
||||
|
||||
const cosLat = Math.cos(toRadians(centre.lat));
|
||||
const maxProjectedLonDelta = Math.min(
|
||||
POLE_LONGITUDE_SPAN_DEGREES,
|
||||
Math.abs(cosLat) < COS_EPSILON
|
||||
? POLE_LONGITUDE_SPAN_DEGREES
|
||||
: (angularDistance * RAD_TO_DEG) / Math.max(Math.abs(cosLat), COS_EPSILON)
|
||||
);
|
||||
|
||||
const normalisedLongitudes = validPoints.map(point => normaliseLongitudeAround(point[1], centre.lon));
|
||||
let west = Math.min(...normalisedLongitudes, centre.lon - maxProjectedLonDelta);
|
||||
let east = Math.max(...normalisedLongitudes, centre.lon + maxProjectedLonDelta);
|
||||
|
||||
if (!Number.isFinite(west) || !Number.isFinite(east)) {
|
||||
west = centre.lon - maxProjectedLonDelta;
|
||||
east = centre.lon + maxProjectedLonDelta;
|
||||
}
|
||||
|
||||
if (east - west >= 360) {
|
||||
west = -POLE_LONGITUDE_SPAN_DEGREES;
|
||||
east = POLE_LONGITUDE_SPAN_DEGREES;
|
||||
}
|
||||
|
||||
return [[minLat, west], [maxLat, east]];
|
||||
}
|
||||
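// Illustrative sketch (not part of the diff): bounds that enclose a handful of
// node positions around Berlin with the default 15% padding. The coordinates
// are invented for the example.
const exampleNodeBounds = computeBoundsForPoints([
  [52.52, 13.4],
  [52.46, 13.52],
  [52.58, 13.29],
]);
// exampleNodeBounds is a [[south, west], [north, east]] tuple centred near the
// points' centroid and can be passed straight to Leaflet's map.fitBounds().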
|
||||
export const __testUtils = {
|
||||
clampLatitude,
|
||||
clampLongitude,
|
||||
normaliseRange,
|
||||
normaliseLongitudeAround
|
||||
};
|
||||
@@ -0,0 +1,281 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Determine whether the provided value behaves like a plain object.
|
||||
*
|
||||
* @param {*} value Candidate value.
|
||||
* @returns {boolean} True when ``value`` is a non-null object.
|
||||
*/
|
||||
function isObject(value) {
|
||||
return value != null && typeof value === 'object';
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert a value to a trimmed string when possible.
|
||||
*
|
||||
* @param {*} value Input value.
|
||||
* @returns {string|null} Trimmed string or ``null`` when blank.
|
||||
*/
|
||||
function toTrimmedString(value) {
|
||||
if (value == null) return null;
|
||||
const str = String(value).trim();
|
||||
return str.length === 0 ? null : str;
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to coerce the provided value into a finite number.
|
||||
*
|
||||
* @param {*} value Raw value.
|
||||
* @returns {number|null} Finite number or ``null`` when coercion fails.
|
||||
*/
|
||||
function toFiniteNumber(value) {
|
||||
if (typeof value === 'number') {
|
||||
return Number.isFinite(value) ? value : null;
|
||||
}
|
||||
if (value == null || value === '') return null;
|
||||
const num = Number(value);
|
||||
return Number.isFinite(num) ? num : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalise a neighbour entry so that downstream consumers can display it.
|
||||
*
|
||||
* @param {*} entry Raw neighbour entry.
|
||||
* @returns {Object|null} Normalised neighbour reference or ``null`` when invalid.
|
||||
*/
|
||||
function normaliseNeighbor(entry) {
|
||||
if (!isObject(entry)) return null;
|
||||
const neighborId = toTrimmedString(entry.neighbor_id ?? entry.neighborId ?? entry.nodeId ?? entry.node_id);
|
||||
if (!neighborId) return null;
|
||||
const neighborShort = toTrimmedString(entry.neighbor_short_name ?? entry.neighborShortName ?? entry.short_name ?? entry.shortName);
|
||||
const neighborLong = toTrimmedString(entry.neighbor_long_name ?? entry.neighborLongName ?? entry.long_name ?? entry.longName);
|
||||
const neighborRole = toTrimmedString(entry.neighbor_role ?? entry.neighborRole ?? entry.role) || 'CLIENT';
|
||||
const node = {
|
||||
node_id: neighborId,
|
||||
short_name: neighborShort ?? '',
|
||||
long_name: neighborLong ?? '',
|
||||
role: neighborRole,
|
||||
};
|
||||
const snr = toFiniteNumber(entry.snr);
|
||||
const rxTime = toFiniteNumber(entry.rx_time ?? entry.rxTime);
|
||||
const result = { node };
|
||||
if (snr != null) {
|
||||
result.snr = snr;
|
||||
}
|
||||
if (rxTime != null) {
|
||||
result.rxTime = rxTime;
|
||||
result.rx_time = rxTime;
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert overlay node details into a map friendly payload.
|
||||
*
|
||||
* @param {*} source Raw overlay details.
|
||||
* @returns {Object} Map node payload containing snake_case keys.
|
||||
*/
|
||||
export function overlayToPopupNode(source) {
|
||||
if (!isObject(source)) {
|
||||
return {
|
||||
node_id: '',
|
||||
node_num: null,
|
||||
short_name: '',
|
||||
long_name: '',
|
||||
role: 'CLIENT',
|
||||
neighbors: [],
|
||||
};
|
||||
}
|
||||
|
||||
const nodeId = toTrimmedString(source.nodeId ?? source.node_id ?? source.id) ?? '';
|
||||
const nodeNum = toFiniteNumber(source.nodeNum ?? source.node_num ?? source.num);
|
||||
const role = toTrimmedString(source.role) || 'CLIENT';
|
||||
const neighbours = Array.isArray(source.neighbors)
|
||||
? source.neighbors.map(normaliseNeighbor).filter(Boolean)
|
||||
: [];
|
||||
|
||||
const payload = {
|
||||
node_id: nodeId,
|
||||
node_num: nodeNum,
|
||||
short_name: toTrimmedString(source.shortName ?? source.short_name ?? source.name) ?? '',
|
||||
long_name: toTrimmedString(source.longName ?? source.long_name ?? source.fullName ?? '') ?? '',
|
||||
role,
|
||||
hw_model: toTrimmedString(source.hwModel ?? source.hw_model ?? source.hardware) ?? '',
|
||||
battery_level: toFiniteNumber(source.battery ?? source.battery_level),
|
||||
voltage: toFiniteNumber(source.voltage),
|
||||
uptime_seconds: toFiniteNumber(source.uptime ?? source.uptime_seconds),
|
||||
channel_utilization: toFiniteNumber(source.channel ?? source.channel_utilization),
|
||||
air_util_tx: toFiniteNumber(source.airUtil ?? source.air_util_tx),
|
||||
temperature: toFiniteNumber(source.temperature),
|
||||
relative_humidity: toFiniteNumber(source.humidity ?? source.relative_humidity),
|
||||
barometric_pressure: toFiniteNumber(source.pressure ?? source.barometric_pressure),
|
||||
telemetry_time: toFiniteNumber(source.telemetryTime ?? source.telemetry_time),
|
||||
last_heard: toFiniteNumber(source.lastHeard ?? source.last_heard),
|
||||
position_time: toFiniteNumber(source.positionTime ?? source.position_time),
|
||||
latitude: toFiniteNumber(source.latitude),
|
||||
longitude: toFiniteNumber(source.longitude),
|
||||
altitude: toFiniteNumber(source.altitude),
|
||||
neighbors: neighbours,
|
||||
};
|
||||
|
||||
if (!payload.long_name && payload.short_name) {
|
||||
payload.long_name = payload.short_name;
|
||||
}
|
||||
|
||||
return payload;
|
||||
}
|
||||
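// Illustrative sketch (not part of the diff): converting a camelCase overlay
// payload into the snake_case shape the map popup expects. Field values are
// invented for the example.
const examplePopupNode = overlayToPopupNode({
  nodeId: '!a1b2c3d4',
  shortName: 'PTT1',
  role: 'ROUTER',
  battery: '87',
  neighbors: [{ neighbor_id: '!deadbeef', snr: 6.5 }],
});
// examplePopupNode.node_id === '!a1b2c3d4', examplePopupNode.battery_level === 87,
// examplePopupNode.long_name falls back to 'PTT1', and the neighbour entry is
// normalised to { node: { node_id: '!deadbeef', ... }, snr: 6.5 }.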
|
||||
/**
|
||||
* Attach an asynchronous refresh handler to a Leaflet marker so that
|
||||
* up-to-date node information is fetched whenever the marker is clicked.
|
||||
*
|
||||
* @param {Object} options Behaviour configuration.
|
||||
* @param {Object} options.marker Leaflet marker instance supporting ``on``.
|
||||
* @param {Function} options.getOverlayFallback Returns the fallback overlay payload.
|
||||
* @param {Function} options.refreshNodeInformation Async function fetching node details.
|
||||
* @param {Function} options.mergeOverlayDetails Merge function combining fetched and fallback details.
|
||||
* @param {Function} options.createRequestToken Generates a token for cancellation tracking.
|
||||
* Receives the marker anchor element and the fallback overlay payload.
|
||||
* @param {Function} options.isTokenCurrent Tests whether a request token is still current.
|
||||
* Receives the marker anchor element and the candidate token value.
|
||||
* @param {Function} [options.showLoading] Callback invoked before refreshing.
|
||||
* @param {Function} [options.showDetails] Callback invoked with merged overlay details.
|
||||
* @param {Function} [options.showError] Callback invoked when refreshing fails.
|
||||
* @param {Function} [options.updatePopup] Callback updating the marker popup contents.
|
||||
* @param {Function} [options.shouldHandleClick] Predicate that decides whether the click should trigger a refresh.
|
||||
* @returns {void}
|
||||
*/
|
||||
export function attachNodeInfoRefreshToMarker({
|
||||
marker,
|
||||
getOverlayFallback,
|
||||
refreshNodeInformation,
|
||||
mergeOverlayDetails,
|
||||
createRequestToken,
|
||||
isTokenCurrent,
|
||||
showLoading,
|
||||
showDetails,
|
||||
showError,
|
||||
updatePopup,
|
||||
shouldHandleClick,
|
||||
}) {
|
||||
if (!isObject(marker) || typeof marker.on !== 'function') {
|
||||
throw new TypeError('A Leaflet marker with an on() method is required');
|
||||
}
|
||||
if (typeof refreshNodeInformation !== 'function') {
|
||||
throw new TypeError('A refreshNodeInformation function must be provided');
|
||||
}
|
||||
if (typeof mergeOverlayDetails !== 'function') {
|
||||
throw new TypeError('A mergeOverlayDetails function must be provided');
|
||||
}
|
||||
if (typeof createRequestToken !== 'function' || typeof isTokenCurrent !== 'function') {
|
||||
throw new TypeError('Token management callbacks must be provided');
|
||||
}
|
||||
|
||||
marker.on('click', event => {
|
||||
if (event && event.originalEvent) {
|
||||
const original = event.originalEvent;
|
||||
if (typeof original.preventDefault === 'function') {
|
||||
original.preventDefault();
|
||||
}
|
||||
if (typeof original.stopPropagation === 'function') {
|
||||
original.stopPropagation();
|
||||
}
|
||||
}
|
||||
|
||||
const fallbackOverlay = typeof getOverlayFallback === 'function' ? getOverlayFallback() : null;
|
||||
const anchor = typeof marker.getElement === 'function' ? marker.getElement() : null;
|
||||
|
||||
if (!isObject(fallbackOverlay)) {
|
||||
if (anchor && typeof showDetails === 'function') {
|
||||
showDetails(anchor, {});
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (typeof shouldHandleClick === 'function' && !shouldHandleClick(anchor, fallbackOverlay)) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (typeof updatePopup === 'function') {
|
||||
updatePopup(fallbackOverlay);
|
||||
}
|
||||
|
||||
const nodeId = toTrimmedString(fallbackOverlay.nodeId ?? fallbackOverlay.node_id ?? fallbackOverlay.id);
|
||||
const nodeNum = toFiniteNumber(fallbackOverlay.nodeNum ?? fallbackOverlay.node_num ?? fallbackOverlay.num);
|
||||
|
||||
if (!nodeId && nodeNum == null) {
|
||||
if (anchor && typeof showDetails === 'function') {
|
||||
showDetails(anchor, fallbackOverlay);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
const requestToken = createRequestToken(anchor, fallbackOverlay);
|
||||
|
||||
if (anchor && typeof showLoading === 'function') {
|
||||
showLoading(anchor, fallbackOverlay);
|
||||
}
|
||||
|
||||
const reference = { fallback: fallbackOverlay };
|
||||
if (nodeId) reference.nodeId = nodeId;
|
||||
if (nodeNum != null) reference.nodeNum = nodeNum;
|
||||
|
||||
let refreshPromise;
|
||||
try {
|
||||
refreshPromise = Promise.resolve(refreshNodeInformation(reference));
|
||||
} catch (error) {
|
||||
if (isTokenCurrent(anchor, requestToken)) {
|
||||
if (anchor && typeof showError === 'function') {
|
||||
showError(anchor, fallbackOverlay, error);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
refreshPromise
|
||||
.then(details => {
|
||||
if (!isTokenCurrent(anchor, requestToken)) {
|
||||
return;
|
||||
}
|
||||
const merged = mergeOverlayDetails(details, fallbackOverlay);
|
||||
if (typeof updatePopup === 'function') {
|
||||
updatePopup(merged);
|
||||
}
|
||||
if (anchor && typeof showDetails === 'function') {
|
||||
showDetails(anchor, merged);
|
||||
}
|
||||
})
|
||||
.catch(error => {
|
||||
if (!isTokenCurrent(anchor, requestToken)) {
|
||||
return;
|
||||
}
|
||||
if (typeof updatePopup === 'function') {
|
||||
updatePopup(fallbackOverlay);
|
||||
}
|
||||
if (anchor && typeof showError === 'function') {
|
||||
showError(anchor, fallbackOverlay, error);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
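// Illustrative wiring sketch (not part of the diff). The Leaflet marker, the
// /api/nodes/:id endpoint, and the CSS class name are assumptions made for the
// example; only the option names come from attachNodeInfoRefreshToMarker itself.
function exampleAttachNodeInfoRefresh(marker) {
  let currentToken = null;
  attachNodeInfoRefreshToMarker({
    marker,
    getOverlayFallback: () => marker.options.overlayDetails ?? null,
    refreshNodeInformation: ref =>
      fetch(`/api/nodes/${encodeURIComponent(ref.nodeId ?? ref.nodeNum)}`).then(res => res.json()),
    mergeOverlayDetails: (details, fallback) => ({ ...fallback, ...details }),
    // Latest click wins: a new token invalidates responses from older requests.
    createRequestToken: () => (currentToken = Symbol('node-info')),
    isTokenCurrent: (anchor, token) => token === currentToken,
    showLoading: anchor => anchor && anchor.classList.add('node-info-loading'),
    showDetails: (anchor, details) => console.log('node details', details),
    showError: (anchor, fallback, error) => console.warn('node info refresh failed', error),
  });
}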
|
||||
export const __testUtils = {
|
||||
isObject,
|
||||
toTrimmedString,
|
||||
toFiniteNumber,
|
||||
normaliseNeighbor,
|
||||
};
|
||||
@@ -0,0 +1,150 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Build a hydrator capable of attaching node metadata to chat messages.
|
||||
*
|
||||
* @param {{
|
||||
* fetchNodeById: (nodeId: string) => Promise<object|null>,
|
||||
* applyNodeFallback: (node: object) => void,
|
||||
* logger?: { warn?: (message?: any, ...optionalParams: any[]) => void }
|
||||
* }} options Factory configuration.
|
||||
* @returns {{
|
||||
* hydrate: (messages: Array<object>|null|undefined, nodesById: Map<string, object>) => Promise<Array<object>>
|
||||
* }} Hydrator API.
|
||||
*/
|
||||
export function createMessageNodeHydrator({ fetchNodeById, applyNodeFallback, logger = console }) {
|
||||
if (typeof fetchNodeById !== 'function') {
|
||||
throw new TypeError('fetchNodeById must be a function');
|
||||
}
|
||||
if (typeof applyNodeFallback !== 'function') {
|
||||
throw new TypeError('applyNodeFallback must be a function');
|
||||
}
|
||||
|
||||
/** @type {Map<string, Promise<object|null>>} */
|
||||
const inflightLookups = new Map();
|
||||
|
||||
/**
|
||||
* Normalise potential node identifiers into canonical strings.
|
||||
*
|
||||
* @param {*} value Raw node identifier value.
|
||||
* @returns {string} Trimmed identifier or empty string when invalid.
|
||||
*/
|
||||
function normalizeNodeId(value) {
|
||||
if (value == null) return '';
|
||||
const source = typeof value === 'string' ? value : String(value);
|
||||
const trimmed = source.trim();
|
||||
return trimmed.length > 0 ? trimmed : '';
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve the node metadata for the provided identifier.
|
||||
*
|
||||
* @param {string} nodeId Canonical node identifier.
|
||||
* @param {Map<string, object>} nodesById Existing node cache.
|
||||
* @returns {Promise<object|null>} Resolved node or null when unavailable.
|
||||
*/
|
||||
async function resolveNode(nodeId, nodesById) {
|
||||
const id = normalizeNodeId(nodeId);
|
||||
if (!id) return null;
|
||||
if (nodesById instanceof Map && nodesById.has(id)) {
|
||||
return nodesById.get(id);
|
||||
}
|
||||
if (inflightLookups.has(id)) {
|
||||
return inflightLookups.get(id);
|
||||
}
|
||||
|
||||
const promise = Promise.resolve()
|
||||
.then(() => fetchNodeById(id))
|
||||
.then(node => {
|
||||
if (node && typeof node === 'object') {
|
||||
applyNodeFallback(node);
|
||||
if (nodesById instanceof Map) {
|
||||
nodesById.set(id, node);
|
||||
}
|
||||
return node;
|
||||
}
|
||||
return null;
|
||||
})
|
||||
.catch(error => {
|
||||
if (logger && typeof logger.warn === 'function') {
|
||||
logger.warn('message node lookup failed', { nodeId: id, error });
|
||||
}
|
||||
return null;
|
||||
})
|
||||
.finally(() => {
|
||||
inflightLookups.delete(id);
|
||||
});
|
||||
|
||||
inflightLookups.set(id, promise);
|
||||
return promise;
|
||||
}
|
||||
|
||||
/**
|
||||
* Attach node information to the provided message collection.
|
||||
*
|
||||
* @param {Array<object>|null|undefined} messages Message payloads from the API.
|
||||
* @param {Map<string, object>} nodesById Lookup table of known nodes.
|
||||
* @returns {Promise<Array<object>>} Hydrated message entries.
|
||||
*/
|
||||
async function hydrate(messages, nodesById) {
|
||||
if (!Array.isArray(messages) || messages.length === 0) {
|
||||
return Array.isArray(messages) ? messages : [];
|
||||
}
|
||||
|
||||
const tasks = [];
|
||||
for (const message of messages) {
|
||||
if (!message || typeof message !== 'object') {
|
||||
continue;
|
||||
}
|
||||
|
||||
const explicitId = normalizeNodeId(message.node_id ?? message.nodeId ?? '');
|
||||
const fallbackId = normalizeNodeId(message.from_id ?? message.fromId ?? '');
|
||||
const targetId = explicitId || fallbackId;
|
||||
|
||||
if (!targetId) {
|
||||
message.node = null;
|
||||
continue;
|
||||
}
|
||||
|
||||
message.node_id = targetId;
|
||||
const existing = nodesById instanceof Map ? nodesById.get(targetId) : null;
|
||||
if (existing) {
|
||||
message.node = existing;
|
||||
continue;
|
||||
}
|
||||
|
||||
const task = resolveNode(targetId, nodesById).then(node => {
|
||||
if (node) {
|
||||
message.node = node;
|
||||
} else {
|
||||
const placeholder = { node_id: targetId };
|
||||
applyNodeFallback(placeholder);
|
||||
message.node = placeholder;
|
||||
}
|
||||
});
|
||||
tasks.push(task);
|
||||
}
|
||||
|
||||
if (tasks.length > 0) {
|
||||
await Promise.all(tasks);
|
||||
}
|
||||
|
||||
return messages;
|
||||
}
|
||||
|
||||
return { hydrate };
|
||||
}
|
||||
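// Illustrative usage sketch (not part of the diff). The /api/nodes/:id endpoint
// and the fallback short name are assumptions made for the example.
const exampleHydrator = createMessageNodeHydrator({
  fetchNodeById: id =>
    fetch(`/api/nodes/${encodeURIComponent(id)}`).then(res => (res.ok ? res.json() : null)),
  applyNodeFallback: node => {
    if (!node.short_name) node.short_name = 'UNKN';
  },
});

// The hydrator deduplicates in-flight lookups and caches resolved nodes in the
// shared Map, so repeated senders only trigger a single fetch.
const exampleNodesById = new Map();
// await exampleHydrator.hydrate(messagesFromApi, exampleNodesById);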
@@ -0,0 +1,445 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
import { extractModemMetadata } from './node-modem-metadata.js';
|
||||
|
||||
const DEFAULT_FETCH_OPTIONS = Object.freeze({ cache: 'no-store' });
|
||||
const TELEMETRY_LIMIT = 1;
|
||||
const POSITION_LIMIT = 1;
|
||||
const NEIGHBOR_LIMIT = 1000;
|
||||
|
||||
/**
|
||||
* Determine whether the supplied value behaves like a plain object.
|
||||
*
|
||||
* @param {*} value Candidate value.
|
||||
* @returns {boolean} True when ``value`` is an object instance.
|
||||
*/
|
||||
function isObject(value) {
|
||||
return value != null && typeof value === 'object';
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert a candidate value into a trimmed string representation.
|
||||
*
|
||||
* @param {*} value Raw value from an API payload.
|
||||
* @returns {string|null} Trimmed string or ``null`` when blank.
|
||||
*/
|
||||
function toTrimmedString(value) {
|
||||
if (value == null) return null;
|
||||
const str = String(value).trim();
|
||||
return str.length === 0 ? null : str;
|
||||
}
|
||||
|
||||
/**
|
||||
* Coerce a candidate value to a finite number when possible.
|
||||
*
|
||||
* @param {*} value Raw value from an API payload.
|
||||
* @returns {number|null} Finite number or ``null`` when coercion fails.
|
||||
*/
|
||||
function toFiniteNumber(value) {
|
||||
if (typeof value === 'number') {
|
||||
return Number.isFinite(value) ? value : null;
|
||||
}
|
||||
if (value == null || value === '') return null;
|
||||
const num = Number(value);
|
||||
return Number.isFinite(num) ? num : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the first non-empty string associated with one of the provided keys.
|
||||
*
|
||||
* @param {Object} record Source record inspected for values.
|
||||
* @param {Array<string>} keys Candidate property names.
|
||||
* @returns {string|null} First non-empty string or ``null``.
|
||||
*/
|
||||
function extractString(record, keys) {
|
||||
if (!isObject(record)) return null;
|
||||
for (const key of keys) {
|
||||
if (!Object.prototype.hasOwnProperty.call(record, key)) continue;
|
||||
const value = toTrimmedString(record[key]);
|
||||
if (value != null) return value;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extract the first finite number associated with the provided keys.
|
||||
*
|
||||
* @param {Object} record Source record inspected for values.
|
||||
* @param {Array<string>} keys Candidate property names.
|
||||
* @returns {number|null} First finite number or ``null``.
|
||||
*/
|
||||
function extractNumber(record, keys) {
|
||||
if (!isObject(record)) return null;
|
||||
for (const key of keys) {
|
||||
if (!Object.prototype.hasOwnProperty.call(record, key)) continue;
|
||||
const value = toFiniteNumber(record[key]);
|
||||
if (value != null) return value;
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Assign a string property when the supplied value is present.
|
||||
*
|
||||
* @param {Object} target Destination object mutated with the value.
|
||||
* @param {string} key Property name to assign.
|
||||
* @param {*} value Raw value to assign.
|
||||
* @param {Object} [options] Behaviour modifiers.
|
||||
* @param {boolean} [options.preferExisting=false] When true, only assign when the target lacks a value.
|
||||
* @returns {void}
|
||||
*/
|
||||
function assignString(target, key, value, { preferExisting = false } = {}) {
|
||||
const stringValue = toTrimmedString(value);
|
||||
if (stringValue == null) return;
|
||||
if (preferExisting) {
|
||||
const existing = toTrimmedString(target[key]);
|
||||
if (existing != null) return;
|
||||
}
|
||||
target[key] = stringValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Assign a numeric property when the supplied value parses successfully.
|
||||
*
|
||||
* @param {Object} target Destination object mutated with the value.
|
||||
* @param {string} key Property name to assign.
|
||||
* @param {*} value Raw value to assign.
|
||||
* @param {Object} [options] Behaviour modifiers.
|
||||
* @param {boolean} [options.preferExisting=false] When true, only assign when the target lacks a value.
|
||||
* @returns {void}
|
||||
*/
|
||||
function assignNumber(target, key, value, { preferExisting = false } = {}) {
|
||||
const numericValue = toFiniteNumber(value);
|
||||
if (numericValue == null) return;
|
||||
if (preferExisting) {
|
||||
const existing = toFiniteNumber(target[key]);
|
||||
if (existing != null) return;
|
||||
}
|
||||
target[key] = numericValue;
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge modem preset and frequency metadata into the aggregate node object.
|
||||
*
|
||||
* @param {Object} target Mutable aggregate node reference.
|
||||
* @param {*} source Source record inspected for modem attributes.
|
||||
* @param {{ preferExisting?: boolean }} [options] Behaviour modifiers.
|
||||
* @returns {void}
|
||||
*/
|
||||
function mergeModemMetadata(target, source, { preferExisting = false } = {}) {
|
||||
if (!isObject(target)) return;
|
||||
if (!source || typeof source !== 'object') return;
|
||||
const metadata = extractModemMetadata(source);
|
||||
if (metadata.modemPreset) {
|
||||
if (!preferExisting || toTrimmedString(target.modemPreset) == null) {
|
||||
target.modemPreset = metadata.modemPreset;
|
||||
}
|
||||
}
|
||||
if (metadata.loraFreq != null) {
|
||||
if (!preferExisting || toFiniteNumber(target.loraFreq) == null) {
|
||||
target.loraFreq = metadata.loraFreq;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge base node fields from an arbitrary record into the aggregate node object.
|
||||
*
|
||||
* @param {Object} target Mutable aggregate node reference.
|
||||
* @param {Object} record Source record providing base attributes.
|
||||
* @returns {void}
|
||||
*/
|
||||
function mergeNodeFields(target, record) {
|
||||
if (!isObject(record)) return;
|
||||
assignString(target, 'nodeId', extractString(record, ['nodeId', 'node_id']));
|
||||
assignNumber(target, 'nodeNum', extractNumber(record, ['nodeNum', 'node_num', 'num']));
|
||||
assignString(target, 'shortName', extractString(record, ['shortName', 'short_name']));
|
||||
assignString(target, 'longName', extractString(record, ['longName', 'long_name']));
|
||||
assignString(target, 'role', extractString(record, ['role']));
|
||||
assignString(target, 'hwModel', extractString(record, ['hwModel', 'hw_model']));
|
||||
mergeModemMetadata(target, record);
|
||||
assignNumber(target, 'snr', extractNumber(record, ['snr']));
|
||||
assignNumber(target, 'battery', extractNumber(record, ['battery', 'battery_level', 'batteryLevel']));
|
||||
assignNumber(target, 'voltage', extractNumber(record, ['voltage']));
|
||||
assignNumber(target, 'uptime', extractNumber(record, ['uptime', 'uptime_seconds', 'uptimeSeconds']));
|
||||
assignNumber(target, 'channel', extractNumber(record, ['channel', 'channel_utilization', 'channelUtilization']));
|
||||
assignNumber(target, 'airUtil', extractNumber(record, ['airUtil', 'air_util_tx', 'airUtilTx']));
|
||||
assignNumber(target, 'temperature', extractNumber(record, ['temperature']));
|
||||
assignNumber(target, 'humidity', extractNumber(record, ['humidity', 'relative_humidity', 'relativeHumidity']));
|
||||
assignNumber(target, 'pressure', extractNumber(record, ['pressure', 'barometric_pressure', 'barometricPressure']));
|
||||
assignNumber(target, 'lastHeard', extractNumber(record, ['lastHeard', 'last_heard']));
|
||||
assignString(target, 'lastSeenIso', extractString(record, ['lastSeenIso', 'last_seen_iso']));
|
||||
assignNumber(target, 'positionTime', extractNumber(record, ['position_time', 'positionTime']));
|
||||
assignString(target, 'positionTimeIso', extractString(record, ['position_time_iso', 'positionTimeIso']));
|
||||
assignNumber(target, 'telemetryTime', extractNumber(record, ['telemetry_time', 'telemetryTime']));
|
||||
assignNumber(target, 'latitude', extractNumber(record, ['latitude']));
|
||||
assignNumber(target, 'longitude', extractNumber(record, ['longitude']));
|
||||
assignNumber(target, 'altitude', extractNumber(record, ['altitude']));
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge telemetry metrics into the aggregate node object when missing.
|
||||
*
|
||||
* @param {Object} target Mutable aggregate node reference.
|
||||
* @param {Object} telemetry Telemetry record returned by the API.
|
||||
* @returns {void}
|
||||
*/
|
||||
function mergeTelemetry(target, telemetry) {
|
||||
if (!isObject(telemetry)) return;
|
||||
target.telemetry = telemetry;
|
||||
assignString(target, 'nodeId', extractString(telemetry, ['node_id', 'nodeId']), { preferExisting: true });
|
||||
assignNumber(target, 'nodeNum', extractNumber(telemetry, ['node_num', 'nodeNum']), { preferExisting: true });
|
||||
mergeModemMetadata(target, telemetry, { preferExisting: true });
|
||||
assignNumber(target, 'battery', extractNumber(telemetry, ['battery_level', 'batteryLevel']), { preferExisting: true });
|
||||
assignNumber(target, 'voltage', extractNumber(telemetry, ['voltage']), { preferExisting: true });
|
||||
assignNumber(target, 'uptime', extractNumber(telemetry, ['uptime_seconds', 'uptimeSeconds']), { preferExisting: true });
|
||||
assignNumber(target, 'channel', extractNumber(telemetry, ['channel', 'channel_utilization', 'channelUtilization']), { preferExisting: true });
|
||||
assignNumber(target, 'airUtil', extractNumber(telemetry, ['air_util_tx', 'airUtilTx', 'airUtil']), { preferExisting: true });
|
||||
assignNumber(target, 'temperature', extractNumber(telemetry, ['temperature']), { preferExisting: true });
|
||||
assignNumber(target, 'humidity', extractNumber(telemetry, ['relative_humidity', 'relativeHumidity', 'humidity']), { preferExisting: true });
|
||||
assignNumber(target, 'pressure', extractNumber(telemetry, ['barometric_pressure', 'barometricPressure', 'pressure']), { preferExisting: true });
|
||||
|
||||
const telemetryTime = extractNumber(telemetry, ['telemetry_time', 'telemetryTime']);
|
||||
if (telemetryTime != null) {
|
||||
const existingTelemetryTime = toFiniteNumber(target.telemetryTime);
|
||||
if (existingTelemetryTime == null || telemetryTime > existingTelemetryTime) {
|
||||
target.telemetryTime = telemetryTime;
|
||||
}
|
||||
}
|
||||
|
||||
const rxTime = extractNumber(telemetry, ['rx_time', 'rxTime']);
|
||||
if (rxTime != null) {
|
||||
const existingLastHeard = toFiniteNumber(target.lastHeard);
|
||||
if (existingLastHeard == null || rxTime > existingLastHeard) {
|
||||
target.lastHeard = rxTime;
|
||||
assignString(target, 'lastSeenIso', extractString(telemetry, ['rx_iso', 'rxIso']));
|
||||
} else {
|
||||
assignString(target, 'lastSeenIso', extractString(telemetry, ['rx_iso', 'rxIso']), { preferExisting: true });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Merge position data into the aggregate node object when missing.
|
||||
*
|
||||
* @param {Object} target Mutable aggregate node reference.
|
||||
* @param {Object} position Position record returned by the API.
|
||||
* @returns {void}
|
||||
*/
|
||||
function mergePosition(target, position) {
|
||||
if (!isObject(position)) return;
|
||||
target.position = position;
|
||||
assignString(target, 'nodeId', extractString(position, ['node_id', 'nodeId']), { preferExisting: true });
|
||||
assignNumber(target, 'nodeNum', extractNumber(position, ['node_num', 'nodeNum']), { preferExisting: true });
|
||||
assignNumber(target, 'latitude', extractNumber(position, ['latitude']), { preferExisting: true });
|
||||
assignNumber(target, 'longitude', extractNumber(position, ['longitude']), { preferExisting: true });
|
||||
assignNumber(target, 'altitude', extractNumber(position, ['altitude']), { preferExisting: true });
|
||||
|
||||
const positionTime = extractNumber(position, ['position_time', 'positionTime']);
|
||||
if (positionTime != null) {
|
||||
const existingPositionTime = toFiniteNumber(target.positionTime);
|
||||
if (existingPositionTime == null || positionTime > existingPositionTime) {
|
||||
target.positionTime = positionTime;
|
||||
assignString(target, 'positionTimeIso', extractString(position, ['position_time_iso', 'positionTimeIso']));
|
||||
} else {
|
||||
assignString(target, 'positionTimeIso', extractString(position, ['position_time_iso', 'positionTimeIso']), { preferExisting: true });
|
||||
}
|
||||
}
|
||||
|
||||
const rxTime = extractNumber(position, ['rx_time', 'rxTime']);
|
||||
if (rxTime != null) {
|
||||
const existingLastHeard = toFiniteNumber(target.lastHeard);
|
||||
if (existingLastHeard == null || rxTime > existingLastHeard) {
|
||||
target.lastHeard = rxTime;
|
||||
assignString(target, 'lastSeenIso', extractString(position, ['rx_iso', 'rxIso']));
|
||||
} else {
|
||||
assignString(target, 'lastSeenIso', extractString(position, ['rx_iso', 'rxIso']), { preferExisting: true });
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Safely parse a fallback payload used as an initial node reference.
|
||||
*
|
||||
* @param {*} fallback User-provided fallback data.
|
||||
* @returns {Object|null} Parsed fallback object or ``null``.
|
||||
*/
|
||||
function parseFallback(fallback) {
|
||||
if (isObject(fallback)) return { ...fallback };
|
||||
if (typeof fallback === 'string') {
|
||||
try {
|
||||
const parsed = JSON.parse(fallback);
|
||||
return isObject(parsed) ? parsed : null;
|
||||
} catch (error) {
|
||||
console.warn('Failed to parse node fallback payload', error);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalise a node reference into a canonical structure used by the fetcher.
|
||||
*
|
||||
* @param {*} reference Raw reference passed to {@link refreshNodeInformation}.
|
||||
* @returns {{nodeId: (string|null), nodeNum: (number|null), fallback: (Object|null)}} Normalised reference data.
|
||||
*/
|
||||
function normalizeReference(reference) {
|
||||
if (reference == null) {
|
||||
return { nodeId: null, nodeNum: null, fallback: null };
|
||||
}
|
||||
if (typeof reference === 'string') {
|
||||
return { nodeId: toTrimmedString(reference), nodeNum: null, fallback: null };
|
||||
}
|
||||
if (typeof reference === 'number') {
|
||||
const nodeNum = toFiniteNumber(reference);
|
||||
return { nodeId: null, nodeNum, fallback: null };
|
||||
}
|
||||
|
||||
if (!isObject(reference)) {
|
||||
return { nodeId: null, nodeNum: null, fallback: null };
|
||||
}
|
||||
|
||||
const fallback = parseFallback(reference.fallback ?? reference.nodeInfo ?? null);
|
||||
let nodeId = toTrimmedString(reference.nodeId ?? reference.node_id ?? null);
|
||||
if (nodeId == null) {
|
||||
nodeId = toTrimmedString(fallback?.nodeId ?? fallback?.node_id ?? null);
|
||||
}
|
||||
let nodeNum = reference.nodeNum ?? reference.node_num ?? null;
|
||||
if (nodeNum == null) {
|
||||
nodeNum = fallback?.nodeNum ?? fallback?.node_num ?? null;
|
||||
}
|
||||
nodeNum = toFiniteNumber(nodeNum);
|
||||
|
||||
return { nodeId, nodeNum, fallback };
|
||||
}
|
||||
|
||||
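// Illustrative usage sketch (not part of the original diff): normalizeReference accepts a bare
// id string, a bare node number, or an object, and recovers missing fields from the embedded
// fallback payload. The sample ids below are hypothetical.
normalizeReference('!433d3bc4');   // { nodeId: '!433d3bc4', nodeNum: null, fallback: null }
normalizeReference(1128151492);    // { nodeId: null, nodeNum: 1128151492, fallback: null }
normalizeReference({ fallback: '{"node_id":"!433d3bc4","node_num":1128151492}' });
// -> nodeId and nodeNum recovered from the parsed fallback JSON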
/**
|
||||
* Retrieve and merge node, telemetry, position, and neighbor information.
|
||||
*
|
||||
* @param {*} reference Node identifier string/number or an object containing ``nodeId``/``nodeNum``.
|
||||
* @param {{fetchImpl?: Function}} [options] Optional overrides such as a custom ``fetch`` implementation.
|
||||
* @returns {Promise<Object>} Normalised node payload enriched with telemetry, position, and neighbor data.
|
||||
*/
|
||||
export async function refreshNodeInformation(reference, options = {}) {
|
||||
const normalized = normalizeReference(reference);
|
||||
const fetchImpl = typeof options.fetchImpl === 'function' ? options.fetchImpl : globalThis.fetch;
|
||||
if (typeof fetchImpl !== 'function') {
|
||||
throw new TypeError('A fetch implementation is required to refresh node information');
|
||||
}
|
||||
|
||||
const identifier = normalized.nodeId ?? normalized.nodeNum;
|
||||
if (identifier == null) {
|
||||
throw new Error('A node identifier or numeric reference must be provided');
|
||||
}
|
||||
|
||||
const encodedId = encodeURIComponent(String(identifier));
|
||||
|
||||
const [nodeRecord, telemetryRecords, positionRecords, neighborRecords] = await Promise.all([
|
||||
(async () => {
|
||||
const response = await fetchImpl(`/api/nodes/${encodedId}`, DEFAULT_FETCH_OPTIONS);
|
||||
if (response.status === 404) return null;
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to load node information (HTTP ${response.status})`);
|
||||
}
|
||||
return response.json();
|
||||
})(),
|
||||
(async () => {
|
||||
const response = await fetchImpl(`/api/telemetry/${encodedId}?limit=${TELEMETRY_LIMIT}`, DEFAULT_FETCH_OPTIONS);
|
||||
if (response.status === 404) return [];
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to load telemetry information (HTTP ${response.status})`);
|
||||
}
|
||||
return response.json();
|
||||
})(),
|
||||
(async () => {
|
||||
const response = await fetchImpl(`/api/positions/${encodedId}?limit=${POSITION_LIMIT}`, DEFAULT_FETCH_OPTIONS);
|
||||
if (response.status === 404) return [];
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to load position information (HTTP ${response.status})`);
|
||||
}
|
||||
return response.json();
|
||||
})(),
|
||||
(async () => {
|
||||
const response = await fetchImpl(`/api/neighbors/${encodedId}?limit=${NEIGHBOR_LIMIT}`, DEFAULT_FETCH_OPTIONS);
|
||||
if (response.status === 404) return [];
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to load neighbor information (HTTP ${response.status})`);
|
||||
}
|
||||
return response.json();
|
||||
})(),
|
||||
]);
|
||||
|
||||
const telemetryEntry = Array.isArray(telemetryRecords) ? telemetryRecords[0] ?? null : telemetryRecords ?? null;
|
||||
const positionEntry = Array.isArray(positionRecords) ? positionRecords[0] ?? null : positionRecords ?? null;
|
||||
const neighborEntries = Array.isArray(neighborRecords) ? neighborRecords.filter(isObject) : [];
|
||||
|
||||
const node = { neighbors: neighborEntries };
|
||||
|
||||
if (normalized.fallback) {
|
||||
mergeNodeFields(node, normalized.fallback);
|
||||
}
|
||||
if (nodeRecord) {
|
||||
mergeNodeFields(node, nodeRecord);
|
||||
}
|
||||
if (normalized.nodeId && !node.nodeId) {
|
||||
node.nodeId = normalized.nodeId;
|
||||
}
|
||||
if (normalized.nodeNum != null && toFiniteNumber(node.nodeNum) == null) {
|
||||
node.nodeNum = normalized.nodeNum;
|
||||
}
|
||||
|
||||
mergeTelemetry(node, telemetryEntry);
|
||||
mergePosition(node, positionEntry);
|
||||
|
||||
const derivedLastHeardValues = [
|
||||
toFiniteNumber(node.lastHeard),
|
||||
toFiniteNumber(node.telemetryTime),
|
||||
toFiniteNumber(node.positionTime),
|
||||
].filter(value => value != null);
|
||||
if (derivedLastHeardValues.length > 0) {
|
||||
node.lastHeard = Math.max(...derivedLastHeardValues);
|
||||
}
|
||||
|
||||
if (!node.role) {
|
||||
node.role = 'CLIENT';
|
||||
}
|
||||
|
||||
node.rawSources = {
|
||||
node: nodeRecord,
|
||||
telemetry: telemetryEntry,
|
||||
position: positionEntry,
|
||||
neighbors: neighborEntries,
|
||||
};
|
||||
|
||||
return node;
|
||||
}
|
||||
|
||||
export const __testUtils = {
|
||||
toTrimmedString,
|
||||
toFiniteNumber,
|
||||
extractString,
|
||||
extractNumber,
|
||||
assignString,
|
||||
assignNumber,
|
||||
mergeModemMetadata,
|
||||
mergeNodeFields,
|
||||
mergeTelemetry,
|
||||
mergePosition,
|
||||
parseFallback,
|
||||
normalizeReference,
|
||||
};
|
||||
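// Illustrative usage sketch (not part of the original diff): refreshNodeInformation accepts a
// node id string, a node number, or an object with nodeId/nodeNum plus an optional fallback
// payload. The stubbed fetchImpl below is hypothetical; it answers 404 for every endpoint so
// the merged result falls back to the supplied record.
const stubFetch = async () => ({ ok: false, status: 404, json: async () => null });
const sketchNode = await refreshNodeInformation(
  { nodeId: '!433d3bc4', fallback: { short_name: 'POT1', battery_level: 91 } },
  { fetchImpl: stubFetch },
);
// sketchNode.shortName === 'POT1', sketchNode.battery === 91, sketchNode.role === 'CLIENT' (default)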
@@ -0,0 +1,95 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Convert arbitrary input into a trimmed string representation.
 *
 * @param {*} value Candidate value.
 * @returns {string|null} Trimmed string or ``null`` when empty.
 */
function toTrimmedString(value) {
  if (value == null) return null;
  const stringValue = String(value).trim();
  return stringValue.length > 0 ? stringValue : null;
}

/**
 * Normalize modem-related metadata from a node-shaped record.
 *
 * @param {*} source Arbitrary payload that may contain modem attributes.
 * @returns {{ modemPreset: (string|null), loraFreq: (number|null) }} Normalized modem metadata.
 */
export function extractModemMetadata(source) {
  if (!source || typeof source !== 'object') {
    return { modemPreset: null, loraFreq: null };
  }

  const presetCandidate =
    source.modemPreset ?? source.modem_preset ?? source.modempreset ?? source.ModemPreset ?? null;
  const modemPreset = toTrimmedString(presetCandidate);

  const freqCandidate = source.loraFreq ?? source.lora_freq ?? source.frequency ?? null;
  const parsedFreq = Number(freqCandidate);
  const loraFreq = Number.isFinite(parsedFreq) && parsedFreq > 0 ? parsedFreq : null;

  return { modemPreset, loraFreq };
}

/**
 * Format a numeric LoRa frequency in MHz with up to three fractional digits.
 *
 * @param {*} value Numeric frequency in MHz.
 * @returns {string|null} Formatted frequency with units or ``null`` when invalid.
 */
export function formatLoraFrequencyMHz(value) {
  const numeric = typeof value === 'number' ? value : Number(value);
  if (!Number.isFinite(numeric) || numeric <= 0) {
    return null;
  }

  const formatter = new Intl.NumberFormat('en-US', {
    minimumFractionDigits: 0,
    maximumFractionDigits: 3,
  });

  return `${formatter.format(numeric)}MHz`;
}

/**
 * Produce a combined modem preset and frequency description suitable for overlays.
 *
 * @param {*} preset Raw modem preset value.
 * @param {*} frequency Raw frequency value expressed in MHz.
 * @returns {string|null} Human-readable description or ``null`` when no data available.
 */
export function formatModemDisplay(preset, frequency) {
  const presetText = toTrimmedString(preset);
  const freqText = formatLoraFrequencyMHz(frequency);

  if (!presetText && !freqText) {
    return null;
  }

  if (presetText && freqText) {
    return `${presetText} (${freqText})`;
  }

  return presetText ?? freqText;
}

export const __testUtils = {
  toTrimmedString,
};
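// Illustrative usage sketch (not part of the original diff): extractModemMetadata normalises
// the preset/frequency aliases and formatModemDisplay combines them for overlay labels. The
// sample payload is hypothetical.
const sketchMeta = extractModemMetadata({ modem_preset: 'LONG_FAST', lora_freq: 906.875 });
formatModemDisplay(sketchMeta.modemPreset, sketchMeta.loraFreq); // "LONG_FAST (906.875MHz)"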
@@ -0,0 +1,81 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Default configuration values applied when the server omits a field.
 *
 * @type {{
 *   refreshMs: number,
 *   refreshIntervalSeconds: number,
 *   chatEnabled: boolean,
 *   channel: string,
 *   frequency: string,
 *   contactLink: string,
 *   contactLinkUrl: string | null,
 *   mapCenter: { lat: number, lon: number },
 *   maxDistanceKm: number,
 *   tileFilters: { light: string, dark: string }
 * }}
 */
export const DEFAULT_CONFIG = {
  refreshMs: 60_000,
  refreshIntervalSeconds: 60,
  chatEnabled: true,
  channel: '#LongFast',
  frequency: '915MHz',
  contactLink: '#potatomesh:dod.ngo',
  contactLinkUrl: 'https://matrix.to/#/#potatomesh:dod.ngo',
  mapCenter: { lat: 38.761944, lon: -27.090833 },
  maxDistanceKm: 42,
  tileFilters: {
    light: 'grayscale(1) saturate(0) brightness(0.92) contrast(1.05)',
    dark: 'grayscale(1) invert(1) brightness(0.9) contrast(1.08)'
  }
};

/**
 * Merge raw configuration data from the DOM with the defaults.
 *
 * @param {Object<string, *>} raw Partial configuration read from ``readAppConfig``.
 * @returns {typeof DEFAULT_CONFIG} Fully populated configuration object.
 */
export function mergeConfig(raw) {
  const config = { ...DEFAULT_CONFIG, ...(raw || {}) };
  config.mapCenter = {
    lat: Number(raw?.mapCenter?.lat ?? DEFAULT_CONFIG.mapCenter.lat),
    lon: Number(raw?.mapCenter?.lon ?? DEFAULT_CONFIG.mapCenter.lon)
  };
  config.tileFilters = {
    light: raw?.tileFilters?.light || DEFAULT_CONFIG.tileFilters.light,
    dark: raw?.tileFilters?.dark || DEFAULT_CONFIG.tileFilters.dark
  };
  const refreshIntervalSeconds = Number(
    raw?.refreshIntervalSeconds ?? DEFAULT_CONFIG.refreshIntervalSeconds
  );
  config.refreshIntervalSeconds = Number.isFinite(refreshIntervalSeconds)
    ? refreshIntervalSeconds
    : DEFAULT_CONFIG.refreshIntervalSeconds;
  const refreshMs = Number(raw?.refreshMs ?? config.refreshIntervalSeconds * 1000);
  config.refreshMs = Number.isFinite(refreshMs) ? refreshMs : DEFAULT_CONFIG.refreshMs;
  config.chatEnabled = Boolean(raw?.chatEnabled ?? DEFAULT_CONFIG.chatEnabled);
  config.channel = raw?.channel || DEFAULT_CONFIG.channel;
  config.frequency = raw?.frequency || DEFAULT_CONFIG.frequency;
  config.contactLink = raw?.contactLink || DEFAULT_CONFIG.contactLink;
  config.contactLinkUrl = raw?.contactLinkUrl ?? DEFAULT_CONFIG.contactLinkUrl;
  const maxDistance = Number(raw?.maxDistanceKm ?? DEFAULT_CONFIG.maxDistanceKm);
  config.maxDistanceKm = Number.isFinite(maxDistance)
    ? maxDistance
    : DEFAULT_CONFIG.maxDistanceKm;
  return config;
}
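// Illustrative usage sketch (not part of the original diff): mergeConfig fills gaps from
// DEFAULT_CONFIG and derives refreshMs from refreshIntervalSeconds when the server omits it.
// The partial payload is hypothetical.
const sketchConfig = mergeConfig({ refreshIntervalSeconds: 30, mapCenter: { lat: 52.52, lon: 13.405 } });
// sketchConfig.refreshMs === 30000, sketchConfig.channel === '#LongFast' (default),
// sketchConfig.mapCenter.lat === 52.52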
@@ -0,0 +1,574 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
const DEFAULT_TEMPLATE_ID = 'shortInfoOverlayTemplate';
|
||||
const FULLSCREEN_CHANGE_EVENTS = [
|
||||
'fullscreenchange',
|
||||
'webkitfullscreenchange',
|
||||
'mozfullscreenchange',
|
||||
'MSFullscreenChange',
|
||||
];
|
||||
|
||||
/**
|
||||
* Resolve the element currently presented in fullscreen mode.
|
||||
*
|
||||
* @param {Document} doc Host document reference.
|
||||
* @returns {?Element} Fullscreen element or ``null`` when fullscreen is inactive.
|
||||
*/
|
||||
function getFullscreenElement(doc) {
|
||||
if (!doc) return null;
|
||||
return (
|
||||
doc.fullscreenElement ||
|
||||
doc.webkitFullscreenElement ||
|
||||
doc.mozFullScreenElement ||
|
||||
doc.msFullscreenElement ||
|
||||
null
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine the container that should host overlays.
|
||||
*
|
||||
* @param {Document} doc Host document reference.
|
||||
* @returns {?Element} Preferred overlay host element.
|
||||
*/
|
||||
function resolveOverlayHost(doc) {
|
||||
const fullscreenElement = getFullscreenElement(doc);
|
||||
if (fullscreenElement && typeof fullscreenElement.appendChild === 'function') {
|
||||
return fullscreenElement;
|
||||
}
|
||||
return doc && doc.body && typeof doc.body.appendChild === 'function' ? doc.body : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Update overlay positioning mode based on fullscreen state.
|
||||
*
|
||||
* @param {Element} element Overlay DOM node.
|
||||
* @param {Document} doc Host document reference.
|
||||
* @returns {void}
|
||||
*/
|
||||
function applyOverlayPositioning(element, doc) {
|
||||
if (!element || !element.style) {
|
||||
return;
|
||||
}
|
||||
const fullscreenElement = getFullscreenElement(doc);
|
||||
const desired = fullscreenElement ? 'fixed' : 'absolute';
|
||||
if (element.style.position !== desired) {
|
||||
element.style.position = desired;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine whether a value behaves like a DOM element that can host overlays.
|
||||
*
|
||||
* @param {*} candidate Potential anchor element.
|
||||
* @returns {boolean} ``true`` when the candidate exposes the required DOM API.
|
||||
*/
|
||||
function isValidAnchor(candidate) {
|
||||
return (
|
||||
candidate != null &&
|
||||
typeof candidate === 'object' &&
|
||||
typeof candidate.getBoundingClientRect === 'function'
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a factory that instantiates overlay DOM nodes.
|
||||
*
|
||||
* @param {Document} document Host document reference.
|
||||
* @param {?Element} template Template element cloned for each overlay.
|
||||
* @returns {Function} Factory generating overlay nodes with close/content refs.
|
||||
*/
|
||||
function createDefaultOverlayFactory(document, template) {
|
||||
const templateNode =
|
||||
template && template.content && template.content.firstElementChild
|
||||
? template.content.firstElementChild
|
||||
: null;
|
||||
|
||||
return () => {
|
||||
let overlay;
|
||||
if (templateNode && typeof templateNode.cloneNode === 'function') {
|
||||
overlay = templateNode.cloneNode(true);
|
||||
} else {
|
||||
overlay = document.createElement('div');
|
||||
overlay.className = 'short-info-overlay';
|
||||
overlay.setAttribute('role', 'dialog');
|
||||
overlay.setAttribute('aria-modal', 'false');
|
||||
const closeButton = document.createElement('button');
|
||||
closeButton.type = 'button';
|
||||
closeButton.className = 'short-info-close';
|
||||
closeButton.setAttribute('aria-label', 'Close node details');
|
||||
closeButton.textContent = '×';
|
||||
const content = document.createElement('div');
|
||||
content.className = 'short-info-content';
|
||||
overlay.appendChild(closeButton);
|
||||
overlay.appendChild(content);
|
||||
}
|
||||
|
||||
const closeButton =
|
||||
typeof overlay.querySelector === 'function'
|
||||
? overlay.querySelector('.short-info-close')
|
||||
: null;
|
||||
const content =
|
||||
typeof overlay.querySelector === 'function'
|
||||
? overlay.querySelector('.short-info-content')
|
||||
: null;
|
||||
|
||||
return { overlay, closeButton, content };
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a no-op overlay stack used when the DOM primitives are unavailable.
|
||||
*
|
||||
* @returns {Object} Overlay stack interface with inert behaviour.
|
||||
*/
|
||||
function createNoopOverlayStack() {
|
||||
return {
|
||||
render() {},
|
||||
close() {},
|
||||
closeAll() {},
|
||||
isOpen() {
|
||||
return false;
|
||||
},
|
||||
containsNode() {
|
||||
return false;
|
||||
},
|
||||
positionAll() {},
|
||||
cleanupOrphans() {},
|
||||
incrementRequestToken() {
|
||||
return 0;
|
||||
},
|
||||
isTokenCurrent() {
|
||||
return false;
|
||||
},
|
||||
getOpenOverlays() {
|
||||
return [];
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a stack manager that renders and positions short-info overlays.
|
||||
*
|
||||
* @param {{
|
||||
* document?: Document,
|
||||
* window?: Window,
|
||||
* templateId?: string,
|
||||
* template?: Element,
|
||||
* factory?: Function
|
||||
* }} [options] Overlay configuration and host references.
|
||||
* @returns {{
|
||||
* render: (anchor: Element, html: string) => void,
|
||||
* close: (anchor: Element) => void,
|
||||
* closeAll: () => void,
|
||||
* isOpen: (anchor: Element) => boolean,
|
||||
* containsNode: (node: Node) => boolean,
|
||||
* positionAll: () => void,
|
||||
* cleanupOrphans: () => void,
|
||||
* incrementRequestToken: (anchor: Element) => number,
|
||||
* isTokenCurrent: (anchor: Element, token: number) => boolean,
|
||||
* getOpenOverlays: () => Array<{ anchor: Element, element: Element }>
|
||||
* }} Overlay stack interface.
|
||||
*/
|
||||
export function createShortInfoOverlayStack(options = {}) {
|
||||
const doc = options.document || globalThis.document || null;
|
||||
const win = options.window || globalThis.window || null;
|
||||
|
||||
if (!doc || !doc.body) {
|
||||
return createNoopOverlayStack();
|
||||
}
|
||||
|
||||
const template =
|
||||
options.template !== undefined
|
||||
? options.template
|
||||
: doc.getElementById(options.templateId || DEFAULT_TEMPLATE_ID);
|
||||
|
||||
const overlayFactory =
|
||||
typeof options.factory === 'function'
|
||||
? options.factory
|
||||
: createDefaultOverlayFactory(doc, template);
|
||||
|
||||
const overlayStates = new Map();
|
||||
const overlayOrder = [];
|
||||
|
||||
/**
|
||||
* Retrieve the active overlay host element.
|
||||
*
|
||||
* @returns {?Element} Host element capable of containing overlays.
|
||||
*/
|
||||
function getOverlayHost() {
|
||||
return resolveOverlayHost(doc);
|
||||
}
|
||||
|
||||
/**
|
||||
* Append ``element`` to the preferred overlay host when necessary.
|
||||
*
|
||||
* @param {Element} element Overlay root element.
|
||||
* @returns {void}
|
||||
*/
|
||||
function ensureOverlayAttached(element) {
|
||||
if (!element) return;
|
||||
const host = getOverlayHost();
|
||||
if (!host) return;
|
||||
if (element.parentNode !== host) {
|
||||
host.appendChild(element);
|
||||
}
|
||||
applyOverlayPositioning(element, doc);
|
||||
}
|
||||
|
||||
/**
|
||||
* React to fullscreen transitions by reattaching overlays to the active host.
|
||||
*
|
||||
* @returns {void}
|
||||
*/
|
||||
function handleFullscreenChange() {
|
||||
for (const state of overlayStates.values()) {
|
||||
ensureOverlayAttached(state.element);
|
||||
}
|
||||
positionAll();
|
||||
}
|
||||
|
||||
if (doc && typeof doc.addEventListener === 'function') {
|
||||
for (const eventName of FULLSCREEN_CHANGE_EVENTS) {
|
||||
doc.addEventListener(eventName, handleFullscreenChange);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove an overlay element from the DOM tree.
|
||||
*
|
||||
* @param {Element} element Overlay root element.
|
||||
* @returns {void}
|
||||
*/
|
||||
function detachOverlayElement(element) {
|
||||
if (!element) return;
|
||||
if (typeof element.remove === 'function') {
|
||||
element.remove();
|
||||
return;
|
||||
}
|
||||
if (element.parentNode && typeof element.parentNode.removeChild === 'function') {
|
||||
element.parentNode.removeChild(element);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Create or retrieve the overlay state associated with ``anchor``.
|
||||
*
|
||||
* @param {Element} anchor Anchor element.
|
||||
* @returns {{
|
||||
* anchor: Element,
|
||||
* element: Element,
|
||||
* content: Element,
|
||||
* closeButton: Element,
|
||||
* requestToken: number
|
||||
* }|null} Overlay state or ``null`` when creation fails.
|
||||
*/
|
||||
function ensureState(anchor) {
|
||||
if (!isValidAnchor(anchor)) {
|
||||
return null;
|
||||
}
|
||||
let state = overlayStates.get(anchor);
|
||||
if (state) {
|
||||
return state;
|
||||
}
|
||||
|
||||
const created = overlayFactory();
|
||||
if (!created || !created.overlay || !created.content) {
|
||||
return null;
|
||||
}
|
||||
|
||||
const overlayEl = created.overlay;
|
||||
const closeButton = created.closeButton || null;
|
||||
const contentEl = created.content;
|
||||
|
||||
if (typeof overlayEl.setAttribute === 'function') {
|
||||
overlayEl.setAttribute('data-short-info-overlay', '');
|
||||
}
|
||||
|
||||
if (closeButton && typeof closeButton.addEventListener === 'function') {
|
||||
closeButton.addEventListener('click', event => {
|
||||
if (event) {
|
||||
if (typeof event.preventDefault === 'function') {
|
||||
event.preventDefault();
|
||||
}
|
||||
if (typeof event.stopPropagation === 'function') {
|
||||
event.stopPropagation();
|
||||
}
|
||||
}
|
||||
close(anchor);
|
||||
});
|
||||
}
|
||||
|
||||
ensureOverlayAttached(overlayEl);
|
||||
|
||||
state = {
|
||||
anchor,
|
||||
element: overlayEl,
|
||||
content: contentEl,
|
||||
closeButton,
|
||||
requestToken: 0,
|
||||
};
|
||||
overlayStates.set(anchor, state);
|
||||
overlayOrder.push(state);
|
||||
return state;
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove the overlay state associated with ``anchor``.
|
||||
*
|
||||
* @param {Element} anchor Anchor element.
|
||||
* @returns {void}
|
||||
*/
|
||||
function removeState(anchor) {
|
||||
const state = overlayStates.get(anchor);
|
||||
if (!state) return;
|
||||
overlayStates.delete(anchor);
|
||||
const index = overlayOrder.indexOf(state);
|
||||
if (index >= 0) {
|
||||
overlayOrder.splice(index, 1);
|
||||
}
|
||||
detachOverlayElement(state.element);
|
||||
}
|
||||
|
||||
/**
|
||||
* Position an overlay relative to its anchor element.
|
||||
*
|
||||
* @param {{ anchor: Element, element: Element }} state Overlay state entry.
|
||||
* @returns {void}
|
||||
*/
|
||||
function positionState(state) {
|
||||
if (!state || !state.anchor || !state.element) {
|
||||
return;
|
||||
}
|
||||
if (!doc.body.contains(state.anchor)) {
|
||||
close(state.anchor);
|
||||
return;
|
||||
}
|
||||
|
||||
const rect = state.anchor.getBoundingClientRect();
|
||||
const overlayRect =
|
||||
typeof state.element.getBoundingClientRect === 'function'
|
||||
? state.element.getBoundingClientRect()
|
||||
: { width: 0, height: 0 };
|
||||
const viewportWidth =
|
||||
(doc.documentElement && doc.documentElement.clientWidth) ||
|
||||
(win && typeof win.innerWidth === 'number' ? win.innerWidth : 0);
|
||||
const viewportHeight =
|
||||
(doc.documentElement && doc.documentElement.clientHeight) ||
|
||||
(win && typeof win.innerHeight === 'number' ? win.innerHeight : 0);
|
||||
const scrollX = (win && typeof win.scrollX === 'number' ? win.scrollX : 0) || 0;
|
||||
const scrollY = (win && typeof win.scrollY === 'number' ? win.scrollY : 0) || 0;
|
||||
const fullscreenElement = getFullscreenElement(doc);
|
||||
const offsetX = fullscreenElement ? 0 : scrollX;
|
||||
const offsetY = fullscreenElement ? 0 : scrollY;
|
||||
|
||||
let left = rect.left + offsetX;
|
||||
let top = rect.top + offsetY;
|
||||
|
||||
if (viewportWidth > 0) {
|
||||
const maxLeft = offsetX + viewportWidth - overlayRect.width - 8;
|
||||
left = Math.max(offsetX + 8, Math.min(left, maxLeft));
|
||||
}
|
||||
if (viewportHeight > 0) {
|
||||
const maxTop = offsetY + viewportHeight - overlayRect.height - 8;
|
||||
top = Math.max(offsetY + 8, Math.min(top, maxTop));
|
||||
}
|
||||
|
||||
if (state.element.style) {
|
||||
applyOverlayPositioning(state.element, doc);
|
||||
state.element.style.left = `${left}px`;
|
||||
state.element.style.top = `${top}px`;
|
||||
state.element.style.visibility = 'visible';
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Schedule positioning of an overlay for the next animation frame.
|
||||
*
|
||||
* @param {{ anchor: Element, element: Element }} state Overlay state entry.
|
||||
* @returns {void}
|
||||
*/
|
||||
function schedulePosition(state) {
|
||||
if (!state || !state.element) return;
|
||||
if (state.element.style) {
|
||||
state.element.style.visibility = 'hidden';
|
||||
}
|
||||
const raf = (win && win.requestAnimationFrame) || globalThis.requestAnimationFrame;
|
||||
if (typeof raf === 'function') {
|
||||
raf(() => positionState(state));
|
||||
} else {
|
||||
setTimeout(() => positionState(state), 16);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Render overlay content anchored to the provided element.
|
||||
*
|
||||
* @param {Element} anchor Anchor element driving overlay placement.
|
||||
* @param {string} html Inner HTML displayed in the overlay body.
|
||||
* @returns {void}
|
||||
*/
|
||||
function render(anchor, html) {
|
||||
const state = ensureState(anchor);
|
||||
if (!state) {
|
||||
return;
|
||||
}
|
||||
ensureOverlayAttached(state.element);
|
||||
if (state.content && typeof state.content.innerHTML === 'string') {
|
||||
state.content.innerHTML = html;
|
||||
}
|
||||
if (state.element && typeof state.element.removeAttribute === 'function') {
|
||||
state.element.removeAttribute('hidden');
|
||||
}
|
||||
schedulePosition(state);
|
||||
}
|
||||
|
||||
/**
|
||||
* Close the overlay associated with ``anchor``.
|
||||
*
|
||||
* @param {Element} anchor Anchor element whose overlay should be removed.
|
||||
* @returns {void}
|
||||
*/
|
||||
function close(anchor) {
|
||||
const state = overlayStates.get(anchor);
|
||||
if (!state) return;
|
||||
state.requestToken += 1;
|
||||
removeState(anchor);
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine whether an overlay for ``anchor`` is currently open.
|
||||
*
|
||||
* @param {Element} anchor Anchor element to test.
|
||||
* @returns {boolean} ``true`` when an overlay exists for the anchor.
|
||||
*/
|
||||
function isOpen(anchor) {
|
||||
return overlayStates.has(anchor);
|
||||
}
|
||||
|
||||
/**
|
||||
* Close every active overlay.
|
||||
*
|
||||
* @returns {void}
|
||||
*/
|
||||
function closeAll() {
|
||||
const anchors = Array.from(overlayStates.keys());
|
||||
for (const anchor of anchors) {
|
||||
close(anchor);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test whether the provided DOM node belongs to any overlay.
|
||||
*
|
||||
* @param {Node} node Candidate DOM node.
|
||||
* @returns {boolean} ``true`` when the node is inside an overlay.
|
||||
*/
|
||||
function containsNode(node) {
|
||||
if (!node) return false;
|
||||
for (const state of overlayStates.values()) {
|
||||
if (state.element && typeof state.element.contains === 'function') {
|
||||
if (state.element.contains(node)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Reposition all overlays based on the latest viewport metrics.
|
||||
*
|
||||
* @returns {void}
|
||||
*/
|
||||
function positionAll() {
|
||||
for (const state of overlayStates.values()) {
|
||||
positionState(state);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove overlays whose anchors are no longer part of the document body.
|
||||
*
|
||||
* @returns {void}
|
||||
*/
|
||||
function cleanupOrphans() {
|
||||
for (const state of Array.from(overlayStates.values())) {
|
||||
if (!doc.body.contains(state.anchor)) {
|
||||
close(state.anchor);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Increment and return the request token for the provided anchor.
|
||||
*
|
||||
* @param {Element} anchor Anchor whose request token should be updated.
|
||||
* @returns {number} Updated token value.
|
||||
*/
|
||||
function incrementRequestToken(anchor) {
|
||||
const state = ensureState(anchor);
|
||||
if (!state) {
|
||||
return 0;
|
||||
}
|
||||
state.requestToken += 1;
|
||||
return state.requestToken;
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine whether ``token`` is still current for ``anchor``.
|
||||
*
|
||||
* @param {Element} anchor Anchor element associated with the request.
|
||||
* @param {number} token Token obtained from ``incrementRequestToken``.
|
||||
* @returns {boolean} ``true`` when the token is current.
|
||||
*/
|
||||
function isTokenCurrent(anchor, token) {
|
||||
const state = overlayStates.get(anchor);
|
||||
if (!state) {
|
||||
return false;
|
||||
}
|
||||
return state.requestToken === token;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve diagnostic information about open overlays.
|
||||
*
|
||||
* @returns {Array<{ anchor: Element, element: Element }>}
|
||||
*/
|
||||
function getOpenOverlays() {
|
||||
return overlayOrder.map(state => ({ anchor: state.anchor, element: state.element }));
|
||||
}
|
||||
|
||||
return {
|
||||
render,
|
||||
close,
|
||||
closeAll,
|
||||
isOpen,
|
||||
containsNode,
|
||||
positionAll,
|
||||
cleanupOrphans,
|
||||
incrementRequestToken,
|
||||
isTokenCurrent,
|
||||
getOpenOverlays,
|
||||
};
|
||||
}
|
||||
|
||||
export const __testUtils = {
|
||||
isValidAnchor,
|
||||
createDefaultOverlayFactory,
|
||||
createNoopOverlayStack,
|
||||
};
|
||||
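// Illustrative usage sketch (not part of the original diff): the stack keys overlays by their
// anchor element, so render/isOpen/close all take the same anchor. The element id below is
// hypothetical.
const sketchStack = createShortInfoOverlayStack({ document, window });
const sketchAnchor = document.getElementById('nodeRow42'); // any element with getBoundingClientRect
if (sketchAnchor) {
  const sketchToken = sketchStack.incrementRequestToken(sketchAnchor);
  if (sketchStack.isTokenCurrent(sketchAnchor, sketchToken)) {
    sketchStack.render(sketchAnchor, '<strong>POT1</strong> · battery 91%');
  }
}
window.addEventListener('resize', () => sketchStack.positionAll());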
@@ -0,0 +1,109 @@
/*
 * Copyright (C) 2025 l5yth
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

(function () {
  'use strict';

  /**
   * Resolve the background colour that should be applied to the document.
   *
   * @returns {?string} CSS colour string or ``null`` if resolution fails.
   */
  function resolveBackgroundColor() {
    if (!document.body) {
      return null;
    }

    var color = '';
    try {
      var styles = window.getComputedStyle(document.body);
      if (styles) {
        color = styles.getPropertyValue('--bg');
        if (color) {
          color = color.trim();
        }
      }
    } catch (err) {
      color = '';
    }

    if (!color) {
      color = document.body.classList.contains('dark') ? '#0e1418' : '#f6f3ee';
    }

    return color;
  }

  /**
   * Apply the resolved background colour to the page root elements.
   *
   * @returns {void}
   */
  function applyBackground() {
    var color = resolveBackgroundColor();
    if (!color) {
      return;
    }

    document.documentElement.style.backgroundColor = color;
    document.documentElement.style.backgroundImage = 'none';
    document.body.style.backgroundColor = color;
    document.body.style.backgroundImage = 'none';
  }

  /**
   * Initialize the background helper once the DOM is ready.
   *
   * @returns {void}
   */
  function init() {
    applyBackground();
  }

  function bootstrap() {
    document.removeEventListener('DOMContentLoaded', init);
    if (document.readyState === 'loading') {
      document.addEventListener('DOMContentLoaded', init);
    } else {
      init();
    }
  }

  bootstrap();

  window.addEventListener('themechange', applyBackground);

  /**
   * Testing hooks exposing background helpers.
   *
   * @type {{
   *   applyBackground: function(): void,
   *   resolveBackgroundColor: function(): (?string),
   *   __testHooks: {
   *     bootstrap: function(): void,
   *     init: function(): void
   *   }
   * }}
   */
  window.__potatoBackground = {
    applyBackground: applyBackground,
    resolveBackgroundColor: resolveBackgroundColor,
    __testHooks: {
      bootstrap: bootstrap,
      init: init
    }
  };
})();
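// Illustrative usage sketch (not part of the original diff): the helper runs automatically on
// load and re-applies whenever the custom `themechange` event fires; tests can also drive it
// through the exposed hooks.
window.__potatoBackground.applyBackground();
window.__potatoBackground.resolveBackgroundColor(); // e.g. '#0e1418' when body has the dark class
window.dispatchEvent(new Event('themechange'));     // triggers applyBackground again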
@@ -0,0 +1,188 @@
|
||||
/*
|
||||
* Copyright (C) 2025 l5yth
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
(function () {
|
||||
/**
|
||||
* Number of seconds theme preferences should persist in the cookie store.
|
||||
*
|
||||
* @type {number}
|
||||
*/
|
||||
var THEME_COOKIE_MAX_AGE = 60 * 60 * 24 * 7;
|
||||
|
||||
/**
|
||||
* Retrieve a cookie value by name.
|
||||
*
|
||||
* @param {string} name Cookie identifier.
|
||||
* @returns {?string} Decoded cookie value or ``null`` when absent.
|
||||
*/
|
||||
function getCookie(name) {
|
||||
var matcher = new RegExp(
|
||||
'(?:^|; )' + name.replace(/([.$?*|{}()\[\]\\/+^])/g, '\\$1') + '=([^;]*)'
|
||||
);
|
||||
var match = document.cookie.match(matcher);
|
||||
return match ? decodeURIComponent(match[1]) : null;
|
||||
}
|
||||
|
||||
/**
 * Serialize a single cookie attribute entry.
 *
 * @param {[string, *]} pair ``[key, value]`` tuple produced by ``Object.entries``.
 * @returns {string} Attribute segment prefixed with ``; ``; boolean ``true`` emits the bare key.
 */
function formatCookieOption(pair) {
  var key = pair[0];
  var optionValue = pair[1];
  if (optionValue === true) {
    return '; ' + key;
  }
  return '; ' + key + '=' + optionValue;
}

/**
 * Convert cookie options to a serialized string suitable for ``document.cookie``.
 *
 * @param {Object<string, *>} options Map of cookie attribute keys and values.
 * @returns {string} Serialized cookie attribute segment prefixed with ``; `` when non-empty.
 */
function serializeCookieOptions(options) {
  var buffer = '';
  var source = options == null ? {} : options;
  var entries = Object.entries(source);
  for (var index = 0; index < entries.length; index += 1) {
    buffer += formatCookieOption(entries[index]);
  }
  return buffer;
}
|
||||
|
||||
/**
|
||||
* Persist a cookie with optional attributes.
|
||||
*
|
||||
* @param {string} name Cookie identifier.
|
||||
* @param {string} value Value to store.
|
||||
* @param {Object<string, *>} [opts] Additional cookie attributes.
|
||||
* @returns {void}
|
||||
*/
|
||||
function setCookie(name, value, opts) {
|
||||
var options = Object.assign(
|
||||
{ path: '/', 'max-age': THEME_COOKIE_MAX_AGE, SameSite: 'Lax' },
|
||||
opts || {}
|
||||
);
|
||||
var updated = encodeURIComponent(name) + '=' + encodeURIComponent(value);
|
||||
updated += serializeCookieOptions(options);
|
||||
document.cookie = updated;
|
||||
}
|
||||
|
||||
/**
|
||||
* Store the user's preferred theme selection.
|
||||
*
|
||||
* @param {string} value Theme identifier to persist.
|
||||
* @returns {void}
|
||||
*/
|
||||
function persistTheme(value) {
|
||||
setCookie('theme', value, { 'max-age': THEME_COOKIE_MAX_AGE });
|
||||
}
|
||||
|
||||
function applyTheme(value) {
|
||||
var themeValue = value === 'dark' ? 'dark' : 'light';
|
||||
var root = document.documentElement;
|
||||
var isDark = themeValue === 'dark';
|
||||
|
||||
if (root) {
|
||||
root.setAttribute('data-theme', themeValue);
|
||||
}
|
||||
|
||||
if (document.body) {
|
||||
document.body.classList.toggle('dark', isDark);
|
||||
document.body.setAttribute('data-theme', themeValue);
|
||||
}
|
||||
|
||||
return isDark;
|
||||
}
|
||||
|
||||
function exerciseSetCookieGuard() {
|
||||
var originalHasOwnProperty = Object.prototype.hasOwnProperty;
|
||||
Object.prototype.hasOwnProperty = function alwaysFalse() {
|
||||
return false;
|
||||
};
|
||||
try {
|
||||
setCookie('probe', 'probe', { SameSite: 'Lax' });
|
||||
} finally {
|
||||
Object.prototype.hasOwnProperty = originalHasOwnProperty;
|
||||
}
|
||||
}
|
||||
|
||||
var theme = 'dark';
|
||||
|
||||
function bootstrap() {
|
||||
document.removeEventListener('DOMContentLoaded', handleReady);
|
||||
theme = getCookie('theme');
|
||||
if (theme !== 'dark' && theme !== 'light') {
|
||||
theme = 'dark';
|
||||
}
|
||||
persistTheme(theme);
|
||||
applyTheme(theme);
|
||||
|
||||
if (document.readyState === 'loading') {
|
||||
document.addEventListener('DOMContentLoaded', handleReady);
|
||||
} else {
|
||||
handleReady();
|
||||
}
|
||||
}
|
||||
|
||||
function handleReady() {
|
||||
var isDark = applyTheme(theme);
|
||||
|
||||
var btn = document.getElementById('themeToggle');
|
||||
if (btn) {
|
||||
btn.textContent = isDark ? '☀️' : '🌙';
|
||||
}
|
||||
|
||||
if (typeof window.applyFiltersToAllTiles === 'function') {
|
||||
window.applyFiltersToAllTiles();
|
||||
}
|
||||
}
|
||||
|
||||
bootstrap();
|
||||
|
||||
/**
|
||||
* Testing hooks exposing cookie helpers for integration tests.
|
||||
*
|
||||
* @type {{
|
||||
* getCookie: function(string): (?string),
|
||||
* setCookie: function(string, string, Object<string, *>=): void,
|
||||
* persistTheme: function(string): void,
|
||||
* maxAge: number,
|
||||
* __testHooks: {
*   applyTheme: function(string): boolean,
*   handleReady: function(): void,
*   bootstrap: function(): void,
*   setTheme: function(string): void,
*   exerciseSetCookieGuard: function(): void,
*   serializeCookieOptions: function(Object<string, *>=): string,
*   formatCookieOption: function(Array): string
* }
|
||||
* }}
|
||||
*/
|
||||
window.__themeCookie = {
|
||||
getCookie: getCookie,
|
||||
setCookie: setCookie,
|
||||
persistTheme: persistTheme,
|
||||
maxAge: THEME_COOKIE_MAX_AGE,
|
||||
__testHooks: {
|
||||
applyTheme: applyTheme,
|
||||
handleReady: handleReady,
|
||||
bootstrap: bootstrap,
|
||||
setTheme: function setTheme(value) {
|
||||
theme = value;
|
||||
},
|
||||
exerciseSetCookieGuard: exerciseSetCookieGuard,
|
||||
serializeCookieOptions: serializeCookieOptions,
|
||||
formatCookieOption: formatCookieOption
|
||||
}
|
||||
};
|
||||
})();
|
||||
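// Illustrative usage sketch (not part of the original diff): __themeCookie exposes the cookie
// helpers and test hooks; setting a theme and re-running handleReady mirrors what the theme
// toggle button does.
window.__themeCookie.persistTheme('light');          // writes theme=light with the 7-day max-age
window.__themeCookie.__testHooks.setTheme('light');
window.__themeCookie.__testHooks.handleReady();      // re-applies the theme and updates #themeToggle
window.__themeCookie.getCookie('theme');             // 'light'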
@@ -0,0 +1,59 @@
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

import { promises as fs } from 'node:fs';
import path from 'node:path';

const coverageDir = 'coverage';
const reportsDir = 'reports';
const outputPath = path.join(reportsDir, 'javascript-coverage.json');

async function ensureReportsDir() {
  try {
    await fs.mkdir(reportsDir, { recursive: true });
  } catch (error) {
    console.error('Failed to ensure reports directory', error);
    process.exit(1);
  }
}

async function copyLatestCoverage() {
  let entries;
  try {
    entries = await fs.readdir(coverageDir);
  } catch (error) {
    if (error.code === 'ENOENT') {
      console.warn('Coverage directory not found; skipping export.');
      return;
    }
    throw error;
  }

  const coverageFiles = entries.filter(name => name.endsWith('.json'));
  if (!coverageFiles.length) {
    console.warn('No coverage files generated; skipping export.');
    return;
  }

  // Sort to pick the most recent entry deterministically.
  coverageFiles.sort();
  const latest = coverageFiles[coverageFiles.length - 1];
  const source = path.join(coverageDir, latest);

  await fs.copyFile(source, outputPath);
  console.log(`Copied coverage report to ${outputPath}`);
}

await ensureReportsDir();
await copyLatestCoverage();