diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..d83d076 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,10 @@ +# This keeps Docker from including hostOS virtual environment folders +env/ +.venv/ + +# Database files and backups +*.db +*.db-shm +*.db-wal +backups/ +*.db.gz diff --git a/.github/workflows/container.yml b/.github/workflows/container.yml new file mode 100644 index 0000000..b2ace5d --- /dev/null +++ b/.github/workflows/container.yml @@ -0,0 +1,52 @@ +name: Build container + +on: + push: + +jobs: + docker: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Docker meta + id: meta + uses: docker/metadata-action@v5 + with: + # list of Docker images to use as base name for tags + images: | + ghcr.io/${{ github.repository }} + # generate Docker tags based on the following events/attributes + tags: | + type=ref,event=branch + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=semver,pattern={{major}} + type=match,pattern=v\d.\d.\d,value=latest + - name: Login to GitHub Container Registry + if: github.event_name != 'pull_request' + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push + uses: docker/build-push-action@v6 + with: + context: . 
+ file: ./Containerfile + push: ${{ github.event_name != 'pull_request' }} + labels: ${{ steps.meta.outputs.labels }} + tags: ${{ steps.meta.outputs.tags }} + platforms: linux/amd64,linux/arm64 + # optional cache (speeds up rebuilds) + cache-from: type=gha + cache-to: type=gha,mode=max \ No newline at end of file diff --git a/.gitignore b/.gitignore index 0939dc1..43e9b55 100644 --- a/.gitignore +++ b/.gitignore @@ -1,11 +1,47 @@ env/* __pycache__/* meshview/__pycache__/* +alembic/__pycache__/* meshtastic/protobuf/* + +# Database files packets.db +packets*.db +*.db +*.db-shm +*.db-wal + +# Database backups +backups/ +*.db.gz + +# Process files meshview-db.pid meshview-web.pid + +# Config and logs /table_details.py config.ini +*.log + +# Screenshots screenshots/* + +# Python python/nanopb +__pycache__/ +*.pyc +*.pyo +*.pyd +.Python + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 0000000..a3c5373 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,204 @@ +# AI Agent Guidelines for Meshview + +This document provides context and guidelines for AI coding assistants working on the Meshview project. + +## Project Overview + +Meshview is a real-time monitoring and diagnostic tool for Meshtastic mesh networks. It provides web-based visualization and analysis of network activity, including: + +- Real-time packet monitoring from MQTT streams +- Interactive map visualization of node locations +- Network topology graphs showing connectivity +- Message traffic analysis and conversation tracking +- Node statistics and telemetry data +- Packet inspection and traceroute analysis + +## Architecture + +### Core Components + +1. **MQTT Reader** (`meshview/mqtt_reader.py`) - Subscribes to MQTT topics and receives mesh packets +2. **Database Manager** (`meshview/database.py`, `startdb.py`) - Handles database initialization and migrations +3. 
**MQTT Store** (`meshview/mqtt_store.py`) - Processes and stores packets in the database +4. **Web Server** (`meshview/web.py`, `main.py`) - Serves the web interface and API endpoints +5. **API Layer** (`meshview/web_api/api.py`) - REST API endpoints for data access +6. **Models** (`meshview/models.py`) - SQLAlchemy database models +7. **Decode Payload** (`meshview/decode_payload.py`) - Protobuf message decoding + +### Technology Stack + +- **Python 3.13+** - Main language +- **aiohttp** - Async web framework +- **aiomqtt** - Async MQTT client +- **SQLAlchemy (async)** - ORM with async support +- **Alembic** - Database migrations +- **Jinja2** - Template engine +- **Protobuf** - Message serialization (Meshtastic protocol) +- **SQLite/PostgreSQL** - Database backends (SQLite default, PostgreSQL via asyncpg) + +### Key Patterns + +- **Async/Await** - All I/O operations are asynchronous +- **Database Migrations** - Use Alembic for schema changes (see `docs/Database-Changes-With-Alembic.md`) +- **Configuration** - INI file-based config (`config.ini`, see `sample.config.ini`) +- **Modular API** - API routes separated into `meshview/web_api/` module + +## Project Structure + +``` +meshview/ +├── alembic/ # Database migration scripts +├── docs/ # Technical documentation +├── meshview/ # Main application package +│ ├── static/ # Static web assets (HTML, JS, CSS) +│ ├── templates/ # Jinja2 HTML templates +│ ├── web_api/ # API route handlers +│ └── *.py # Core modules +├── main.py # Web server entry point +├── startdb.py # Database manager entry point +├── mvrun.py # Combined runner (starts both services) +├── config.ini # Runtime configuration +└── requirements.txt # Python dependencies +``` + +## Development Workflow + +### Setup + +1. 
Use Python 3.13+ virtual environment + +### Running + +- **Database**: `./env/bin/python startdb.py` +- **Web Server**: `./env/bin/python main.py` +- **Both**: `./env/bin/python mvrun.py` + + +## Code Style + +- **Line length**: 100 characters (see `pyproject.toml`) +- **Linting**: Ruff (configured in `pyproject.toml`) +- **Formatting**: Ruff formatter +- **Type hints**: Preferred but not strictly required +- **Async**: Use `async def` and `await` for I/O operations + +## Important Files + +### Configuration +- `config.ini` - Runtime configuration (server, MQTT, database, cleanup) +- `sample.config.ini` - Template configuration file +- `alembic.ini` - Alembic migration configuration + +### Database +- `meshview/models.py` - SQLAlchemy models (Packet, Node, Traceroute, etc.) +- `meshview/database.py` - Database initialization and session management +- `alembic/versions/` - Migration scripts + +### Core Logic +- `meshview/mqtt_reader.py` - MQTT subscription and message reception +- `meshview/mqtt_store.py` - Packet processing and storage +- `meshview/decode_payload.py` - Protobuf decoding +- `meshview/web.py` - Web server routes and handlers +- `meshview/web_api/api.py` - REST API endpoints + +### Templates +- `meshview/templates/` - Jinja2 HTML templates +- `meshview/static/` - Static files (HTML pages, JS, CSS) + +## Common Tasks + +### Adding a New API Endpoint + +1. Add route handler in `meshview/web_api/api.py` +2. Register route in `meshview/web.py` (if needed) +3. Update `docs/API_Documentation.md` if public API + +### Database Schema Changes + +1. Modify models in `meshview/models.py` +2. Create migration: `alembic revision --autogenerate -m "description"` +3. Review generated migration in `alembic/versions/` +4. Test migration: `alembic upgrade head` +5. **Never** modify existing migration files after they've been applied + +### Adding a New Web Page + +1. Create template in `meshview/templates/` +2. Add route in `meshview/web.py` +3. 
Add navigation link if needed (check existing templates for pattern) +4. Add static assets if needed in `meshview/static/` + +### Processing New Packet Types + +1. Check `meshview/decode_payload.py` for existing decoders +2. Add decoder function if new type +3. Update `meshview/mqtt_store.py` to handle new packet type +4. Update database models if new data needs storage + + +## Key Concepts + +### Meshtastic Protocol +- Uses Protobuf for message serialization +- Packets contain various message types (text, position, telemetry, etc.) +- MQTT topics follow pattern: `msh/{region}/{subregion}/#` + +### Database Schema +- **packet** - Raw packet data +- **node** - Mesh node information +- **traceroute** - Network path information +- **packet_seen** - Packet observation records + +### Real-time Updates +- Web pages use Server-Sent Events (SSE) for live updates +- Map and firehose pages auto-refresh based on config intervals +- API endpoints return JSON for programmatic access + +## Best Practices + +1. **Always use async/await** for database and network operations +2. **Use Alembic** for all database schema changes +3. **Follow existing patterns** - check similar code before adding new features +4. **Update documentation** - keep `docs/` and README current +5. **Test migrations** - verify migrations work both up and down +6. **Handle errors gracefully** - log errors, don't crash on bad packets +7. 
**Respect configuration** - use `config.ini` values, don't hardcode + +## Common Pitfalls + +- **Don't modify applied migrations** - create new ones instead +- **Don't block the event loop** - use async I/O, not sync +- **Don't forget timezone handling** - timestamps are stored in UTC +- **Don't hardcode paths** - use configuration values +- **Don't ignore MQTT reconnection** - handle connection failures gracefully + +## Resources + +- **Main README**: `README.md` - Installation and basic usage +- **Docker Guide**: `README-Docker.md` - Container deployment +- **API Docs**: `docs/API_Documentation.md` - API endpoint reference +- **Migration Guide**: `docs/Database-Changes-With-Alembic.md` - Database workflow +- **Contributing**: `CONTRIBUTING.md` - Contribution guidelines + +## Version Information + +- **Current Version**: 3.0.0 (November 2025) +- **Python Requirement**: 3.13+ +- **Key Features**: Alembic migrations, automated backups, Docker support, traceroute return paths + + +## Rules for robots +- Always run ruff check and ruff format after making changes (only on python changes) + + +--- + +When working on this project, prioritize: +1. Maintaining async patterns +2. Following existing code structure +3. Using proper database migrations +4. Keeping documentation updated +5. 
Testing changes thoroughly + + + diff --git a/contributing.md b/CONTRIBUTING.md similarity index 100% rename from contributing.md rename to CONTRIBUTING.md diff --git a/Containerfile b/Containerfile new file mode 100644 index 0000000..8ea1991 --- /dev/null +++ b/Containerfile @@ -0,0 +1,80 @@ +# Build Image +# Uses python:3.13-slim because no native dependencies are needed for meshview itself +# (everything is available as a wheel) + +FROM docker.io/python:3.13-slim AS meshview-build +RUN apt-get update && \ + apt-get install -y --no-install-recommends curl patch && \ + rm -rf /var/lib/apt/lists/* + +# Add a non-root user/group +ARG APP_USER=app +RUN useradd -m -u 10001 -s /bin/bash ${APP_USER} + +# Install uv and put it on PATH system-wide +RUN curl -LsSf https://astral.sh/uv/install.sh | sh \ + && install -m 0755 /root/.local/bin/uv /usr/local/bin/uv + +WORKDIR /app +RUN chown -R ${APP_USER}:${APP_USER} /app + +# Copy deps first for caching +COPY --chown=${APP_USER}:${APP_USER} pyproject.toml uv.lock* requirements*.txt ./ + +# Optional: wheels-only to avoid slow source builds +ENV UV_NO_BUILD=1 +RUN uv venv /opt/venv +# RUN uv sync --frozen +ENV VIRTUAL_ENV=/opt/venv +ENV PATH="$VIRTUAL_ENV/bin:$PATH" + +RUN uv pip install --no-cache-dir --upgrade pip \ + && if [ -f requirements.txt ]; then uv pip install --only-binary=:all: -r requirements.txt; fi + +# Copy app code +COPY --chown=${APP_USER}:${APP_USER} . . 
+ +# Patch config +RUN patch sample.config.ini < container/config.patch + +# Clean +RUN rm -rf /app/.git* && \ + rm -rf /app/.pre-commit-config.yaml && \ + rm -rf /app/*.md && \ + rm -rf /app/COPYING && \ + rm -rf /app/Containerfile && \ + rm -rf /app/Dockerfile && \ + rm -rf /app/container && \ + rm -rf /app/docker && \ + rm -rf /app/docs && \ + rm -rf /app/pyproject.toml && \ + rm -rf /app/requirements.txt && \ + rm -rf /app/screenshots + +# Prepare /app and /opt to copy +RUN mkdir -p /meshview && \ + mv /app /opt /meshview + +# Use a clean container for install +FROM docker.io/python:3.13-slim +ARG APP_USER=app +COPY --from=meshview-build /meshview / +RUN apt-get update && \ + apt-get install -y --no-install-recommends graphviz && \ + rm -rf /var/lib/apt/lists/* && \ + useradd -m -u 10001 -s /bin/bash ${APP_USER} && \ + mkdir -p /etc/meshview /var/lib/meshview /var/log/meshview && \ + mv /app/sample.config.ini /etc/meshview/config.ini && \ + chown -R ${APP_USER}:${APP_USER} /var/lib/meshview /var/log/meshview + +# Drop privileges +USER ${APP_USER} + +WORKDIR /app + +ENTRYPOINT [ "/opt/venv/bin/python", "mvrun.py"] +CMD ["--pid_dir", "/tmp", "--py_exec", "/opt/venv/bin/python", "--config", "/etc/meshview/config.ini" ] + +EXPOSE 8081 +VOLUME [ "/etc/meshview", "/var/lib/meshview", "/var/log/meshview" ] + diff --git a/Dockerfile b/Dockerfile new file mode 120000 index 0000000..5240dc0 --- /dev/null +++ b/Dockerfile @@ -0,0 +1 @@ +Containerfile \ No newline at end of file diff --git a/PERFORMANCE_OPTIMIZATION.md b/PERFORMANCE_OPTIMIZATION.md deleted file mode 100644 index 2821516..0000000 --- a/PERFORMANCE_OPTIMIZATION.md +++ /dev/null @@ -1,203 +0,0 @@ -# /top Endpoint Performance Optimization - -## Problem -The `/top` endpoint was taking over 1 second to execute due to inefficient database queries. The query joins three tables (node, packet, packet_seen) and performs COUNT aggregations on large result sets without proper indexes. 
- -## Root Cause Analysis - -The `get_top_traffic_nodes()` query in `meshview/store.py` executes: - -```sql -SELECT - n.node_id, - n.long_name, - n.short_name, - n.channel, - COUNT(DISTINCT p.id) AS total_packets_sent, - COUNT(ps.packet_id) AS total_times_seen -FROM node n -LEFT JOIN packet p ON n.node_id = p.from_node_id - AND p.import_time >= DATETIME('now', 'localtime', '-24 hours') -LEFT JOIN packet_seen ps ON p.id = ps.packet_id -GROUP BY n.node_id, n.long_name, n.short_name -HAVING total_packets_sent > 0 -ORDER BY total_times_seen DESC; -``` - -### Performance Bottlenecks Identified: - -1. **Missing composite index on packet(from_node_id, import_time)** - - The query filters packets by BOTH `from_node_id` AND `import_time >= -24 hours` - - Without a composite index, SQLite must: - - Scan using `idx_packet_from_node_id` index - - Then filter each result by `import_time` (expensive!) - -2. **Missing index on packet_seen(packet_id)** - - The LEFT JOIN to packet_seen uses `packet_id` as the join key - - Without an index, SQLite performs a table scan for each packet - - With potentially millions of packet_seen records, this is very slow - -## Solution - -### 1. Added Database Indexes - -Modified `meshview/models.py` to include two new indexes: - -```python -# In Packet class -Index("idx_packet_from_node_time", "from_node_id", desc("import_time")) - -# In PacketSeen class -Index("idx_packet_seen_packet_id", "packet_id") -``` - -### 2. Added Performance Profiling - -Modified `meshview/web.py` `/top` endpoint to include: -- Timing instrumentation for database queries -- Timing for data processing -- Detailed logging with `[PROFILE /top]` prefix -- On-page performance metrics display - -### 3. Created Migration Script - -Created `add_db_indexes.py` to add indexes to existing databases. 
- -## Implementation Steps - -### Step 1: Stop the Database Writer -```bash -# Stop startdb.py if it's running -pkill -f startdb.py -``` - -### Step 2: Run Migration Script -```bash -python add_db_indexes.py -``` - -Expected output: -``` -====================================================================== -Database Index Migration for /top Endpoint Performance -====================================================================== -Connecting to database: sqlite+aiosqlite:///path/to/packets.db - -====================================================================== -Checking for index: idx_packet_from_node_time -====================================================================== -Creating index idx_packet_from_node_time... - Table: packet - Columns: from_node_id, import_time DESC - Purpose: Speeds up filtering packets by sender and time range -✓ Index created successfully in 2.34 seconds - -====================================================================== -Checking for index: idx_packet_seen_packet_id -====================================================================== -Creating index idx_packet_seen_packet_id... - Table: packet_seen - Columns: packet_id - Purpose: Speeds up joining packet_seen with packet table -✓ Index created successfully in 3.12 seconds - -... (index listings) - -====================================================================== -Migration completed successfully! -====================================================================== -``` - -### Step 3: Restart Services -```bash - -# Restart server -python mvrun.py & -``` - -### Step 4: Verify Performance Improvement - -1. Visit `/top` endpoint eg http://127.0.0.1:8081/top?perf=true -2. Scroll to bottom of page -3. Check the Performance Metrics panel -4. 
Compare DB query time before and after - -**Expected Results:** -- **Before:** 1000-2000ms query time -- **After:** 50-200ms query time -- **Improvement:** 80-95% reduction - -## Performance Metrics - -The `/top` page now displays at the bottom: - -``` -⚡ Performance Metrics -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -Database Query: 45.23ms -Data Processing: 2.15ms -Total Time: 47.89ms -Nodes Processed: 156 -Total Packets: 45,678 -Times Seen: 123,456 -━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ -``` - - -## Technical Details - -### Why Composite Index Works - -SQLite can use a composite index `(from_node_id, import_time DESC)` to: -1. Quickly find all packets for a specific `from_node_id` -2. Filter by `import_time` without additional I/O (data is already sorted) -3. Both operations use a single index lookup - -### Why packet_id Index Works - -The `packet_seen` table can have millions of rows. Without an index: -- Each packet requires a full table scan of packet_seen -- O(n * m) complexity where n=packets, m=packet_seen rows - -With the index: -- Each packet uses an index lookup -- O(n * log m) complexity - dramatically faster - -### Index Size Impact - -- `idx_packet_from_node_time`: ~10-20% of packet table size -- `idx_packet_seen_packet_id`: ~5-10% of packet_seen table size -- Total additional disk space: typically 50-200MB depending on data volume -- Performance gain: 80-95% query time reduction - -## Future Optimizations - -If query is still slow after indexes: - -1. **Add ANALYZE**: Run `ANALYZE;` to update SQLite query planner statistics -2. **Consider materialized view**: Pre-compute traffic stats in a background job -3. **Add caching**: Cache results for 5-10 minutes using Redis/memcached -4. 
**Partition data**: Archive old packet_seen records - -## Rollback - -If needed, indexes can be removed: - -```sql -DROP INDEX IF EXISTS idx_packet_from_node_time; -DROP INDEX IF EXISTS idx_packet_seen_packet_id; -``` - -## Files Modified - -- `meshview/models.py` - Added index definitions -- `meshview/web.py` - Added performance profiling -- `meshview/templates/top.html` - Added metrics display -- `add_db_indexes.py` - Migration script (NEW) -- `PERFORMANCE_OPTIMIZATION.md` - This documentation (NEW) - -## Support - -For questions or issues: -1. Verify indexes exist: `python add_db_indexes.py` (safe to re-run) -2. Review SQLite EXPLAIN QUERY PLAN for the query diff --git a/README-Docker.md b/README-Docker.md new file mode 100644 index 0000000..d87d368 --- /dev/null +++ b/README-Docker.md @@ -0,0 +1,243 @@ +# Running MeshView with Docker + +MeshView container images are built automatically and published to GitHub Container Registry. + +## Quick Start + +Pull and run the latest image: + +```bash +docker pull ghcr.io/pablorevilla-meshtastic/meshview:latest + +docker run -d \ + --name meshview \ + -p 8081:8081 \ + -v ./config:/etc/meshview \ + -v ./data:/var/lib/meshview \ + -v ./logs:/var/log/meshview \ + ghcr.io/pablorevilla-meshtastic/meshview:latest +``` + +Access the web interface at: http://localhost:8081 + +## Volume Mounts + +The container uses three volumes for persistent data: + +| Volume | Purpose | Required | +|--------|---------|----------| +| `/etc/meshview` | Configuration files | Yes | +| `/var/lib/meshview` | Database storage | Recommended | +| `/var/log/meshview` | Log files | Optional | + +### Configuration Volume + +Mount a directory containing your `config.ini` file: + +```bash +-v /path/to/your/config:/etc/meshview +``` + +If no config is provided, the container will use the default `sample.config.ini`. 
+ +### Database Volume + +Mount a directory to persist the SQLite database: + +```bash +-v /path/to/your/data:/var/lib/meshview +``` + +**Important:** Without this mount, your database will be lost when the container stops. + +### Logs Volume + +Mount a directory to access logs from the host: + +```bash +-v /path/to/your/logs:/var/log/meshview +``` + +## Complete Example + +Create a directory structure and run: + +```bash +# Create directories +mkdir -p meshview/{config,data,logs,backups} + +# Copy sample config (first time only) +docker run --rm ghcr.io/pablorevilla-meshtastic/meshview:latest \ + cat /etc/meshview/config.ini > meshview/config/config.ini + +# Edit config.ini with your MQTT settings +nano meshview/config/config.ini + +# Run the container +docker run -d \ + --name meshview \ + --restart unless-stopped \ + -p 8081:8081 \ + -v $(pwd)/meshview/config:/etc/meshview \ + -v $(pwd)/meshview/data:/var/lib/meshview \ + -v $(pwd)/meshview/logs:/var/log/meshview \ + ghcr.io/pablorevilla-meshtastic/meshview:latest +``` + +## Docker Compose + +Create a `docker-compose.yml`: + +```yaml +version: '3.8' + +services: + meshview: + image: ghcr.io/pablorevilla-meshtastic/meshview:latest + container_name: meshview + restart: unless-stopped + ports: + - "8081:8081" + volumes: + - ./config:/etc/meshview + - ./data:/var/lib/meshview + - ./logs:/var/log/meshview + - ./backups:/var/lib/meshview/backups # For database backups + environment: + - TZ=America/Los_Angeles # Set your timezone +``` + +Run with: + +```bash +docker-compose up -d +``` + +## Configuration + +### Minimum Configuration + +Edit your `config.ini` to configure MQTT connection: + +```ini +[mqtt] +server = mqtt.meshtastic.org +topics = ["msh/US/#"] +port = 1883 +username = +password = + +[database] +connection_string = sqlite+aiosqlite:///var/lib/meshview/packets.db +``` + +### Database Backups + +To enable automatic daily backups inside the container: + +```ini +[cleanup] +backup_enabled = True +backup_dir = 
/var/lib/meshview/backups +backup_hour = 2 +backup_minute = 00 +``` + +Then mount the backups directory: + +```bash +-v $(pwd)/meshview/backups:/var/lib/meshview/backups +``` + +## Available Tags + +| Tag | Description | +|-----|-------------| +| `latest` | Latest build from the main branch | +| `dev-v3` | Development branch | +| `v1.2.3` | Specific version tags | + +## Updating + +Pull the latest image and restart: + +```bash +docker pull ghcr.io/pablorevilla-meshtastic/meshview:latest +docker restart meshview +``` + +Or with docker-compose: + +```bash +docker-compose pull +docker-compose up -d +``` + +## Logs + +View container logs: + +```bash +docker logs meshview + +# Follow logs +docker logs -f meshview + +# Last 100 lines +docker logs --tail 100 meshview +``` + +## Troubleshooting + +### Container won't start + +Check logs: +```bash +docker logs meshview +``` + +### Database permission issues + +Ensure the data directory is writable: +```bash +chmod -R 755 meshview/data +``` + +### Can't connect to MQTT + +1. Check your MQTT configuration in `config.ini` +2. Verify network connectivity from the container: + ```bash + docker exec meshview ping mqtt.meshtastic.org + ``` + +### Port already in use + +Change the host port (left side): +```bash +-p 8082:8081 +``` + +Then access at: http://localhost:8082 + +## Building Your Own Image + +If you want to build from source: + +```bash +git clone https://github.com/pablorevilla-meshtastic/meshview.git +cd meshview +docker build -f Containerfile -t meshview:local . 
+``` + +## Security Notes + +- The container runs as a non-root user (`app`, UID 10001) +- No privileged access required +- Only port 8081 is exposed +- All data stored in mounted volumes + +## Support + +- GitHub Issues: https://github.com/pablorevilla-meshtastic/meshview/issues +- Documentation: https://github.com/pablorevilla-meshtastic/meshview diff --git a/README.md b/README.md index 569569d..9746c5f 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,31 @@ The project serves as a real-time monitoring and diagnostic tool for the Meshtastic mesh network. It provides detailed insights into network activity, including message traffic, node positions, and telemetry data. +### Version 3.0.0 update - November 2025 + +**Major Infrastructure Improvements:** + +* **Database Migrations**: Alembic integration for safe schema upgrades and database versioning +* **Automated Backups**: Independent database backup system with gzip compression (separate from cleanup) +* **Development Tools**: Quick setup script (`setup-dev.sh`) with pre-commit hooks for code quality +* **Docker Support**: Pre-built containers now available on GitHub Container Registry with automatic builds - ogarcia + +**New Features:** + +* **Traceroute Return Path**: Log and display return path data for traceroute packets - jschrempp +* **Microsecond Timestamps**: Added `import_time_us` columns for higher precision time tracking + +**Technical Improvements:** + +* Migration from manual SQL to Alembic-managed schema +* Container images use `uv` for faster dependency installation +* Python 3.13 support with slim Debian-based images +* Documentation collection in `docs/` directory +* API routes moved to separate modules for better organization +* /version and /health endpoints added for monitoring + +See [README-Docker.md](README-Docker.md) for container deployment and [docs/](docs/) for technical documentation. 
+ ### Version 2.0.7 update - September 2025 * New database maintenance capability to automatically keep a specific number of days of data. * Added configuration for update intervals for both the Live Map and the Firehose pages. @@ -61,20 +86,42 @@ Samples of currently running instances: ## Installing -Requires **`python3.11`** or above. +### Using Docker (Recommended) + +The easiest way to run MeshView is using Docker. Pre-built images are available from GitHub Container Registry. + +See **[README-Docker.md](README-Docker.md)** for complete Docker installation and usage instructions. + +### Manual Installation + +Requires **`python3.13`** or above. Clone the repo from GitHub: ```bash git clone https://github.com/pablorevilla-meshtastic/meshview.git -``` - -```bash cd meshview ``` + +#### Quick Setup (Recommended) + +Run the development setup script: + +```bash +./setup-dev.sh +``` + +This will: +- Create Python virtual environment +- Install all requirements +- Install development tools (pre-commit, pytest) +- Set up pre-commit hooks for code formatting +- Create config.ini from sample + +#### Manual Setup + Create a Python virtual environment: -from the meshview directory... ```bash python3 -m venv env ``` @@ -222,6 +269,8 @@ vacuum = False # Application logs (errors, startup messages, etc.) are unaffected # Set to True to enable, False to disable (default: False) access_log = False +# Database cleanup logfile location +db_cleanup_logfile = dbcleanup.log ``` --- @@ -255,12 +304,29 @@ Open in your browser: http://localhost:8081/ ## Running Meshview with `mvrun.py` - `mvrun.py` starts both `startdb.py` and `main.py` in separate threads and merges the output. -- It accepts the `--config` argument like the others. +- It accepts several command-line arguments for flexible deployment. 
```bash ./env/bin/python mvrun.py ``` +**Command-line options:** +- `--config CONFIG` - Path to the configuration file (default: `config.ini`) +- `--pid_dir PID_DIR` - Directory for PID files (default: `.`) +- `--py_exec PY_EXEC` - Path to the Python executable (default: `./env/bin/python`) + +**Examples:** +```bash +# Use a specific config file +./env/bin/python mvrun.py --config /etc/meshview/config.ini + +# Store PID files in a specific directory +./env/bin/python mvrun.py --pid_dir /var/run/meshview + +# Use a different Python executable +./env/bin/python mvrun.py --py_exec /usr/bin/python3 +``` + --- ## Setting Up Systemd Services (Ubuntu) @@ -366,6 +432,15 @@ hour = 2 minute = 00 # Run VACUUM after cleanup vacuum = False + +# ------------------------- +# Logging Configuration +# ------------------------- +[logging] +# Enable or disable HTTP access logs from the web server +access_log = False +# Database cleanup logfile location +db_cleanup_logfile = dbcleanup.log ``` Once changes are done you need to restart the script for changes to load. @@ -414,3 +489,20 @@ Add schedule to the bottom of the file (modify /path/to/file/ to the correct pat ``` Check the log file to see it the script run at the specific time. + +--- + +## Testing + +MeshView includes a test suite using pytest. For detailed testing documentation, see [README-testing.md](README-testing.md). + +Quick start: +```bash +./env/bin/pytest tests/test_api_simple.py -v +``` + +--- + +## Technical Documentation + +For more detailed technical documentation including database migrations, architecture details, and advanced topics, see the [docs/](docs/) directory. diff --git a/add_db_indexes.py b/add_db_indexes.py deleted file mode 100644 index 9004759..0000000 --- a/add_db_indexes.py +++ /dev/null @@ -1,154 +0,0 @@ -#!/usr/bin/env python3 -""" -Migration script to add performance indexes - -This script adds two critical indexes: -1. 
idx_packet_from_node_time: Composite index on packet(from_node_id, import_time DESC) -2. idx_packet_seen_packet_id: Index on packet_seen(packet_id) - -These indexes significantly improve the performance of the get_top_traffic_nodes() query. - -Usage: - python add_db_indexes.py - -The script will: -- Connect to your database in WRITE mode -- Check if indexes already exist -- Create missing indexes -- Report timing for each operation -""" - -import asyncio -import time - -from sqlalchemy import text -from sqlalchemy.ext.asyncio import create_async_engine - -from meshview.config import CONFIG - - -async def add_indexes(): - # Get database connection string and remove read-only flag - db_string = CONFIG["database"]["connection_string"] - if "?mode=ro" in db_string: - db_string = db_string.replace("?mode=ro", "") - - print(f"Connecting to database: {db_string}") - - # Create engine with write access - engine = create_async_engine(db_string, echo=False, connect_args={"uri": True}) - - try: - async with engine.begin() as conn: - # Check and create idx_packet_from_node_time - print("\n" + "=" * 70) - print("Checking for index: idx_packet_from_node_time") - print("=" * 70) - - result = await conn.execute( - text(""" - SELECT name FROM sqlite_master - WHERE type='index' AND name='idx_packet_from_node_time' - """) - ) - - if result.fetchone(): - print("✓ Index idx_packet_from_node_time already exists") - else: - print("Creating index idx_packet_from_node_time...") - print(" Table: packet") - print(" Columns: from_node_id, import_time DESC") - print(" Purpose: Speeds up filtering packets by sender and time range") - - start_time = time.perf_counter() - await conn.execute( - text(""" - CREATE INDEX idx_packet_from_node_time - ON packet(from_node_id, import_time DESC) - """) - ) - elapsed = time.perf_counter() - start_time - - print(f"✓ Index created successfully in {elapsed:.2f} seconds") - - # Check and create idx_packet_seen_packet_id - print("\n" + "=" * 70) - 
print("Checking for index: idx_packet_seen_packet_id") - print("=" * 70) - - result = await conn.execute( - text(""" - SELECT name FROM sqlite_master - WHERE type='index' AND name='idx_packet_seen_packet_id' - """) - ) - - if result.fetchone(): - print("✓ Index idx_packet_seen_packet_id already exists") - else: - print("Creating index idx_packet_seen_packet_id...") - print(" Table: packet_seen") - print(" Columns: packet_id") - print(" Purpose: Speeds up joining packet_seen with packet table") - - start_time = time.perf_counter() - await conn.execute( - text(""" - CREATE INDEX idx_packet_seen_packet_id - ON packet_seen(packet_id) - """) - ) - elapsed = time.perf_counter() - start_time - - print(f"✓ Index created successfully in {elapsed:.2f} seconds") - - # Show index info - print("\n" + "=" * 70) - print("Current indexes on packet table:") - print("=" * 70) - result = await conn.execute( - text(""" - SELECT name, sql FROM sqlite_master - WHERE type='index' AND tbl_name='packet' - ORDER BY name - """) - ) - for row in result: - if row[1]: # Skip auto-indexes (they have NULL sql) - print(f" • {row[0]}") - - print("\n" + "=" * 70) - print("Current indexes on packet_seen table:") - print("=" * 70) - result = await conn.execute( - text(""" - SELECT name, sql FROM sqlite_master - WHERE type='index' AND tbl_name='packet_seen' - ORDER BY name - """) - ) - for row in result: - if row[1]: # Skip auto-indexes - print(f" • {row[0]}") - - print("\n" + "=" * 70) - print("Migration completed successfully!") - print("=" * 70) - print("\nNext steps:") - print("1. Restart your web server (mvrun.py)") - print("2. Visit /top endpoint and check the performance metrics") - print("3. 
Compare DB query time with previous measurements") - print("\nExpected improvement: 50-90% reduction in query time") - - except Exception as e: - print(f"\n❌ Error during migration: {e}") - raise - finally: - await engine.dispose() - - -if __name__ == "__main__": - print("=" * 70) - print("Database Index Migration for Endpoint Performance") - print("=" * 70) - asyncio.run(add_indexes()) diff --git a/alembic.ini b/alembic.ini new file mode 100644 index 0000000..e60b7dc --- /dev/null +++ b/alembic.ini @@ -0,0 +1,120 @@ +# A generic, single database configuration. + +[alembic] +# path to migration scripts +# Use forward slashes (/) also on windows to provide an os agnostic path +script_location = alembic + +# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s +# Uncomment the line below if you want the files to be prepended with date and time +# see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file +# for all available tokens +# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s + +# sys.path path, will be prepended to sys.path if present. +# defaults to the current working directory. +prepend_sys_path = . + +# timezone to use when rendering the date within the migration file +# as well as the filename. +# If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library. 
+# Any required deps can installed by adding `alembic[tz]` to the pip requirements +# string value is passed to ZoneInfo() +# leave blank for localtime +# timezone = + +# max length of characters to apply to the "slug" field +# truncate_slug_length = 40 + +# set to 'true' to run the environment during +# the 'revision' command, regardless of autogenerate +# revision_environment = false + +# set to 'true' to allow .pyc and .pyo files without +# a source .py file to be detected as revisions in the +# versions/ directory +# sourceless = false + +# version location specification; This defaults +# to alembic/versions. When using multiple version +# directories, initial revisions must be specified with --version-path. +# The path separator used here should be the separator specified by "version_path_separator" below. +# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions + +# version path separator; As mentioned above, this is the character used to split +# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep. +# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas. +# Valid values for version_path_separator are: +# +# version_path_separator = : +# version_path_separator = ; +# version_path_separator = space +# version_path_separator = newline +# +# Use os.pathsep. Default configuration used for new projects. +version_path_separator = os + +# set to 'true' to search source files recursively +# in each "version_locations" directory +# new in Alembic version 1.10 +# recursive_version_locations = false + +# the output encoding used when revision files +# are written from script.py.mako +# output_encoding = utf-8 + +# sqlalchemy.url will be set programmatically from meshview config +# sqlalchemy.url = driver://user:pass@localhost/dbname + + +[post_write_hooks] +# post_write_hooks defines scripts or Python functions that are run +# on newly generated revision scripts. 
See the documentation for further +# detail and examples + +# format using "black" - use the console_scripts runner, against the "black" entrypoint +# hooks = black +# black.type = console_scripts +# black.entrypoint = black +# black.options = -l 79 REVISION_SCRIPT_FILENAME + +# lint with attempts to fix using "ruff" - use the exec runner, execute a binary +# hooks = ruff +# ruff.type = exec +# ruff.executable = %(here)s/.venv/bin/ruff +# ruff.options = --fix REVISION_SCRIPT_FILENAME + +# Logging configuration +[loggers] +keys = root,sqlalchemy,alembic + +[handlers] +keys = console + +[formatters] +keys = generic + +[logger_root] +level = INFO +handlers = console +qualname = + +[logger_sqlalchemy] +level = WARNING +handlers = +qualname = sqlalchemy.engine + +[logger_alembic] +level = INFO +handlers = +qualname = alembic + +[handler_console] +class = StreamHandler +args = (sys.stderr,) +level = NOTSET +formatter = generic + +[formatter_generic] +format = %(asctime)s %(filename)s:%(lineno)d [pid:%(process)d] %(levelname)s - %(message)s +datefmt = %Y-%m-%d %H:%M:%S \ No newline at end of file diff --git a/alembic/README b/alembic/README new file mode 100644 index 0000000..98e4f9c --- /dev/null +++ b/alembic/README @@ -0,0 +1 @@ +Generic single-database configuration. \ No newline at end of file diff --git a/alembic/env.py b/alembic/env.py new file mode 100644 index 0000000..a4d99b7 --- /dev/null +++ b/alembic/env.py @@ -0,0 +1,102 @@ +import asyncio +from logging.config import fileConfig + +from sqlalchemy import pool +from sqlalchemy.engine import Connection +from sqlalchemy.ext.asyncio import async_engine_from_config + +from alembic import context + +# Import models metadata for autogenerate support +from meshview.models import Base + +# this is the Alembic Config object, which provides +# access to the values within the .ini file in use. +config = context.config + +# Interpret the config file for Python logging. +# This line sets up loggers basically. 
+# Use disable_existing_loggers=False to preserve app logging configuration +if config.config_file_name is not None: + fileConfig(config.config_file_name, disable_existing_loggers=False) + +# Add your model's MetaData object here for 'autogenerate' support +target_metadata = Base.metadata + + +def run_migrations_offline() -> None: + """Run migrations in 'offline' mode. + + This configures the context with just a URL + and not an Engine, though an Engine is acceptable + here as well. By skipping the Engine creation + we don't even need a DBAPI to be available. + + Calls to context.execute() here emit the given string to the + script output. + + """ + url = config.get_main_option("sqlalchemy.url") + context.configure( + url=url, + target_metadata=target_metadata, + literal_binds=True, + dialect_opts={"paramstyle": "named"}, + ) + + with context.begin_transaction(): + context.run_migrations() + + +def do_run_migrations(connection: Connection) -> None: + """Run migrations with the given connection.""" + context.configure(connection=connection, target_metadata=target_metadata) + + with context.begin_transaction(): + context.run_migrations() + + +async def run_async_migrations() -> None: + """Run migrations in async mode.""" + # Get configuration section + configuration = config.get_section(config.config_ini_section, {}) + + # If sqlalchemy.url is not set in alembic.ini, try to get it from meshview config + if "sqlalchemy.url" not in configuration: + try: + from meshview.config import CONFIG + + configuration["sqlalchemy.url"] = CONFIG["database"]["connection_string"] + except Exception: + # Fallback to a default for initial migration creation + configuration["sqlalchemy.url"] = "sqlite+aiosqlite:///packets.db" + + connectable = async_engine_from_config( + configuration, + prefix="sqlalchemy.", + poolclass=pool.NullPool, + ) + + async with connectable.connect() as connection: + await connection.run_sync(do_run_migrations) + + await connectable.dispose() + + +def 
run_migrations_online() -> None: + """Run migrations in 'online' mode with async support.""" + try: + # Event loop is already running, schedule and run the coroutine + import concurrent.futures + + with concurrent.futures.ThreadPoolExecutor() as pool: + pool.submit(lambda: asyncio.run(run_async_migrations())).result() + except RuntimeError: + # No event loop running, create one + asyncio.run(run_async_migrations()) + + +if context.is_offline_mode(): + run_migrations_offline() +else: + run_migrations_online() diff --git a/alembic/script.py.mako b/alembic/script.py.mako new file mode 100644 index 0000000..fbc4b07 --- /dev/null +++ b/alembic/script.py.mako @@ -0,0 +1,26 @@ +"""${message} + +Revision ID: ${up_revision} +Revises: ${down_revision | comma,n} +Create Date: ${create_date} + +""" +from typing import Sequence, Union + +from alembic import op +import sqlalchemy as sa +${imports if imports else ""} + +# revision identifiers, used by Alembic. +revision: str = ${repr(up_revision)} +down_revision: Union[str, None] = ${repr(down_revision)} +branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)} +depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)} + + +def upgrade() -> None: + ${upgrades if upgrades else "pass"} + + +def downgrade() -> None: + ${downgrades if downgrades else "pass"} diff --git a/alembic/versions/1717fa5c6545_add_example_table.py b/alembic/versions/1717fa5c6545_add_example_table.py new file mode 100644 index 0000000..bc4d59a --- /dev/null +++ b/alembic/versions/1717fa5c6545_add_example_table.py @@ -0,0 +1,45 @@ +"""Add example table + +Revision ID: 1717fa5c6545 +Revises: c88468b7ab0b +Create Date: 2025-10-26 20:59:04.347066 + +""" + +from collections.abc import Sequence + +import sqlalchemy as sa + +from alembic import op + +# revision identifiers, used by Alembic. 
+revision: str = '1717fa5c6545' +down_revision: str | None = 'add_time_us_cols' +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None + + +def upgrade() -> None: + """Create example table with sample columns.""" + op.create_table( + 'example', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True, autoincrement=True), + sa.Column('name', sa.String(length=100), nullable=False), + sa.Column('description', sa.Text(), nullable=True), + sa.Column('value', sa.Float(), nullable=True), + sa.Column('is_active', sa.Boolean(), nullable=False, server_default='1'), + sa.Column( + 'created_at', sa.DateTime(), nullable=False, server_default=sa.text('CURRENT_TIMESTAMP') + ), + sa.Column('updated_at', sa.DateTime(), nullable=True), + sa.PrimaryKeyConstraint('id'), + ) + + # Create an index on the name column for faster lookups + op.create_index('idx_example_name', 'example', ['name']) + + +def downgrade() -> None: + """Remove example table.""" + op.drop_index('idx_example_name', table_name='example') + op.drop_table('example') diff --git a/alembic/versions/2b5a61bb2b75_auto_generated_migration.py b/alembic/versions/2b5a61bb2b75_auto_generated_migration.py new file mode 100644 index 0000000..5b1b538 --- /dev/null +++ b/alembic/versions/2b5a61bb2b75_auto_generated_migration.py @@ -0,0 +1,35 @@ +"""Add first_seen_us and last_seen_us to node table + +Revision ID: 2b5a61bb2b75 +Revises: ac311b3782a1 +Create Date: 2025-11-05 15:19:13.446724 + +""" + +from collections.abc import Sequence + +import sqlalchemy as sa + +from alembic import op + +# revision identifiers, used by Alembic. 
+revision: str = '2b5a61bb2b75' +down_revision: str | None = 'ac311b3782a1' +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None + + +def upgrade() -> None: + # Add microsecond epoch timestamp columns for first and last seen times + op.add_column('node', sa.Column('first_seen_us', sa.BigInteger(), nullable=True)) + op.add_column('node', sa.Column('last_seen_us', sa.BigInteger(), nullable=True)) + op.create_index('idx_node_first_seen_us', 'node', ['first_seen_us'], unique=False) + op.create_index('idx_node_last_seen_us', 'node', ['last_seen_us'], unique=False) + + +def downgrade() -> None: + # Remove the microsecond epoch timestamp columns and their indexes + op.drop_index('idx_node_last_seen_us', table_name='node') + op.drop_index('idx_node_first_seen_us', table_name='node') + op.drop_column('node', 'last_seen_us') + op.drop_column('node', 'first_seen_us') diff --git a/alembic/versions/ac311b3782a1_add_route_return_to_traceroute.py b/alembic/versions/ac311b3782a1_add_route_return_to_traceroute.py new file mode 100644 index 0000000..046e8a5 --- /dev/null +++ b/alembic/versions/ac311b3782a1_add_route_return_to_traceroute.py @@ -0,0 +1,31 @@ +"""add route_return to traceroute + +Revision ID: ac311b3782a1 +Revises: 1717fa5c6545 +Create Date: 2025-11-04 20:28:33.174137 + +""" + +from collections.abc import Sequence + +import sqlalchemy as sa + +from alembic import op + +# revision identifiers, used by Alembic. 
+revision: str = 'ac311b3782a1' +down_revision: str | None = '1717fa5c6545' +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None + + +def upgrade() -> None: + # Add route_return column to traceroute table + with op.batch_alter_table('traceroute', schema=None) as batch_op: + batch_op.add_column(sa.Column('route_return', sa.LargeBinary(), nullable=True)) + + +def downgrade() -> None: + # Remove route_return column from traceroute table + with op.batch_alter_table('traceroute', schema=None) as batch_op: + batch_op.drop_column('route_return') diff --git a/alembic/versions/add_import_time_us_columns.py b/alembic/versions/add_import_time_us_columns.py new file mode 100644 index 0000000..daf588e --- /dev/null +++ b/alembic/versions/add_import_time_us_columns.py @@ -0,0 +1,74 @@ +"""add import_time_us columns + +Revision ID: add_time_us_cols +Revises: c88468b7ab0b +Create Date: 2025-11-03 14:10:00.000000 + +""" + +from collections.abc import Sequence + +import sqlalchemy as sa + +from alembic import op + +# revision identifiers, used by Alembic. 
+revision: str = 'add_time_us_cols' +down_revision: str | None = 'c88468b7ab0b' +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None + + +def upgrade() -> None: + # Check if columns already exist, add them if they don't + conn = op.get_bind() + inspector = sa.inspect(conn) + + # Add import_time_us to packet table + packet_columns = [col['name'] for col in inspector.get_columns('packet')] + if 'import_time_us' not in packet_columns: + with op.batch_alter_table('packet', schema=None) as batch_op: + batch_op.add_column(sa.Column('import_time_us', sa.BigInteger(), nullable=True)) + op.create_index( + 'idx_packet_import_time_us', 'packet', [sa.text('import_time_us DESC')], unique=False + ) + op.create_index( + 'idx_packet_from_node_time_us', + 'packet', + ['from_node_id', sa.text('import_time_us DESC')], + unique=False, + ) + + # Add import_time_us to packet_seen table + packet_seen_columns = [col['name'] for col in inspector.get_columns('packet_seen')] + if 'import_time_us' not in packet_seen_columns: + with op.batch_alter_table('packet_seen', schema=None) as batch_op: + batch_op.add_column(sa.Column('import_time_us', sa.BigInteger(), nullable=True)) + op.create_index( + 'idx_packet_seen_import_time_us', 'packet_seen', ['import_time_us'], unique=False + ) + + # Add import_time_us to traceroute table + traceroute_columns = [col['name'] for col in inspector.get_columns('traceroute')] + if 'import_time_us' not in traceroute_columns: + with op.batch_alter_table('traceroute', schema=None) as batch_op: + batch_op.add_column(sa.Column('import_time_us', sa.BigInteger(), nullable=True)) + op.create_index( + 'idx_traceroute_import_time_us', 'traceroute', ['import_time_us'], unique=False + ) + + +def downgrade() -> None: + # Drop indexes and columns + op.drop_index('idx_traceroute_import_time_us', table_name='traceroute') + with op.batch_alter_table('traceroute', schema=None) as batch_op: + batch_op.drop_column('import_time_us') + + 
op.drop_index('idx_packet_seen_import_time_us', table_name='packet_seen') + with op.batch_alter_table('packet_seen', schema=None) as batch_op: + batch_op.drop_column('import_time_us') + + op.drop_index('idx_packet_from_node_time_us', table_name='packet') + op.drop_index('idx_packet_import_time_us', table_name='packet') + with op.batch_alter_table('packet', schema=None) as batch_op: + batch_op.drop_column('import_time_us') diff --git a/alembic/versions/c88468b7ab0b_initial_migration.py b/alembic/versions/c88468b7ab0b_initial_migration.py new file mode 100644 index 0000000..9114a18 --- /dev/null +++ b/alembic/versions/c88468b7ab0b_initial_migration.py @@ -0,0 +1,160 @@ +"""Initial migration + +Revision ID: c88468b7ab0b +Revises: +Create Date: 2025-10-26 20:56:50.285200 + +""" + +from collections.abc import Sequence + +import sqlalchemy as sa + +from alembic import op + +# revision identifiers, used by Alembic. +revision: str = 'c88468b7ab0b' +down_revision: str | None = None +branch_labels: str | Sequence[str] | None = None +depends_on: str | Sequence[str] | None = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + # Get connection and inspector to check what exists + conn = op.get_bind() + inspector = sa.inspect(conn) + existing_tables = inspector.get_table_names() + + # Create node table if it doesn't exist + if 'node' not in existing_tables: + op.create_table( + 'node', + sa.Column('id', sa.String(), nullable=False), + sa.Column('node_id', sa.BigInteger(), nullable=True), + sa.Column('long_name', sa.String(), nullable=True), + sa.Column('short_name', sa.String(), nullable=True), + sa.Column('hw_model', sa.String(), nullable=True), + sa.Column('firmware', sa.String(), nullable=True), + sa.Column('role', sa.String(), nullable=True), + sa.Column('last_lat', sa.BigInteger(), nullable=True), + sa.Column('last_long', sa.BigInteger(), nullable=True), + sa.Column('channel', sa.String(), nullable=True), + sa.Column('last_update', sa.DateTime(), nullable=True), + sa.PrimaryKeyConstraint('id'), + sa.UniqueConstraint('node_id'), + ) + op.create_index('idx_node_node_id', 'node', ['node_id'], unique=False) + + # Create packet table if it doesn't exist + if 'packet' not in existing_tables: + op.create_table( + 'packet', + sa.Column('id', sa.BigInteger(), nullable=False), + sa.Column('portnum', sa.Integer(), nullable=True), + sa.Column('from_node_id', sa.BigInteger(), nullable=True), + sa.Column('to_node_id', sa.BigInteger(), nullable=True), + sa.Column('payload', sa.LargeBinary(), nullable=True), + sa.Column('import_time', sa.DateTime(), nullable=True), + sa.Column('import_time_us', sa.BigInteger(), nullable=True), + sa.Column('channel', sa.String(), nullable=True), + sa.PrimaryKeyConstraint('id'), + ) + op.create_index('idx_packet_from_node_id', 'packet', ['from_node_id'], unique=False) + op.create_index('idx_packet_to_node_id', 'packet', ['to_node_id'], unique=False) + op.create_index( + 'idx_packet_import_time', 'packet', [sa.text('import_time DESC')], unique=False + ) + op.create_index( + 'idx_packet_import_time_us', 'packet', [sa.text('import_time_us DESC')], unique=False + ) 
+ op.create_index( + 'idx_packet_from_node_time', + 'packet', + ['from_node_id', sa.text('import_time DESC')], + unique=False, + ) + op.create_index( + 'idx_packet_from_node_time_us', + 'packet', + ['from_node_id', sa.text('import_time_us DESC')], + unique=False, + ) + + # Create packet_seen table if it doesn't exist + if 'packet_seen' not in existing_tables: + op.create_table( + 'packet_seen', + sa.Column('packet_id', sa.BigInteger(), nullable=False), + sa.Column('node_id', sa.BigInteger(), nullable=False), + sa.Column('rx_time', sa.BigInteger(), nullable=False), + sa.Column('hop_limit', sa.Integer(), nullable=True), + sa.Column('hop_start', sa.Integer(), nullable=True), + sa.Column('channel', sa.String(), nullable=True), + sa.Column('rx_snr', sa.Float(), nullable=True), + sa.Column('rx_rssi', sa.Integer(), nullable=True), + sa.Column('topic', sa.String(), nullable=True), + sa.Column('import_time', sa.DateTime(), nullable=True), + sa.Column('import_time_us', sa.BigInteger(), nullable=True), + sa.ForeignKeyConstraint( + ['packet_id'], + ['packet.id'], + ), + sa.PrimaryKeyConstraint('packet_id', 'node_id', 'rx_time'), + ) + op.create_index('idx_packet_seen_node_id', 'packet_seen', ['node_id'], unique=False) + op.create_index('idx_packet_seen_packet_id', 'packet_seen', ['packet_id'], unique=False) + op.create_index( + 'idx_packet_seen_import_time_us', 'packet_seen', ['import_time_us'], unique=False + ) + + # Create traceroute table if it doesn't exist + if 'traceroute' not in existing_tables: + op.create_table( + 'traceroute', + sa.Column('id', sa.Integer(), nullable=False), + sa.Column('packet_id', sa.BigInteger(), nullable=True), + sa.Column('gateway_node_id', sa.BigInteger(), nullable=True), + sa.Column('done', sa.Boolean(), nullable=True), + sa.Column('route', sa.LargeBinary(), nullable=True), + sa.Column('import_time', sa.DateTime(), nullable=True), + sa.Column('import_time_us', sa.BigInteger(), nullable=True), + sa.ForeignKeyConstraint( + ['packet_id'], + 
['packet.id'], + ), + sa.PrimaryKeyConstraint('id'), + ) + op.create_index('idx_traceroute_import_time', 'traceroute', ['import_time'], unique=False) + op.create_index( + 'idx_traceroute_import_time_us', 'traceroute', ['import_time_us'], unique=False + ) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + # Drop traceroute table and indexes + op.drop_index('idx_traceroute_import_time_us', table_name='traceroute') + op.drop_index('idx_traceroute_import_time', table_name='traceroute') + op.drop_table('traceroute') + + # Drop packet_seen table and indexes + op.drop_index('idx_packet_seen_import_time_us', table_name='packet_seen') + op.drop_index('idx_packet_seen_packet_id', table_name='packet_seen') + op.drop_index('idx_packet_seen_node_id', table_name='packet_seen') + op.drop_table('packet_seen') + + # Drop packet table and indexes + op.drop_index('idx_packet_from_node_time_us', table_name='packet') + op.drop_index('idx_packet_from_node_time', table_name='packet') + op.drop_index('idx_packet_import_time_us', table_name='packet') + op.drop_index('idx_packet_import_time', table_name='packet') + op.drop_index('idx_packet_to_node_id', table_name='packet') + op.drop_index('idx_packet_from_node_id', table_name='packet') + op.drop_table('packet') + + # Drop node table and indexes + op.drop_index('idx_node_node_id', table_name='node') + op.drop_table('node') + # ### end Alembic commands ### diff --git a/container/build-container.sh b/container/build-container.sh new file mode 100755 index 0000000..120ff5f --- /dev/null +++ b/container/build-container.sh @@ -0,0 +1,57 @@ +#!/bin/sh +# +# build-container.sh +# +# Script to build MeshView container images + +set -e + +# Default values +IMAGE_NAME="meshview" +TAG="latest" +CONTAINERFILE="Containerfile" + +# Parse arguments +while [ $# -gt 0 ]; do + case "$1" in + --tag|-t) + TAG="$2" + shift 2 + ;; + --name|-n) + IMAGE_NAME="$2" + shift 2 + ;; + 
--file|-f) + CONTAINERFILE="$2" + shift 2 + ;; + --help|-h) + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Options:" + echo " -t, --tag TAG Tag for the image (default: latest)" + echo " -n, --name NAME Image name (default: meshview)" + echo " -f, --file FILE Containerfile path (default: Containerfile)" + echo " -h, --help Show this help" + exit 0 + ;; + *) + echo "Unknown option: $1" + echo "Use --help for usage information" + exit 1 + ;; + esac +done + +echo "Building MeshView container image..." +echo " Image: ${IMAGE_NAME}:${TAG}" +echo " Containerfile: ${CONTAINERFILE}" +echo "" + +# Build the container +docker build -f "${CONTAINERFILE}" -t "${IMAGE_NAME}:${TAG}" . + +echo "" +echo "Build complete!" +echo "Run with: docker run --rm -p 8081:8081 ${IMAGE_NAME}:${TAG}" diff --git a/container/config.patch b/container/config.patch new file mode 100644 index 0000000..93753bb --- /dev/null +++ b/container/config.patch @@ -0,0 +1,37 @@ +diff --git a/sample.config.ini b/sample.config.ini +index 0e64980..494685c 100644 +--- a/sample.config.ini ++++ b/sample.config.ini +@@ -3,7 +3,7 @@ + # ------------------------- + [server] + # The address to bind the server to. Use * to listen on all interfaces. +-bind = * ++bind = 0.0.0.0 + + # Port to run the web server on. + port = 8081 +@@ -64,7 +64,7 @@ net_tag = #BayMeshNet + # ------------------------- + [mqtt] + # MQTT server hostname or IP. +-server = mqtt.bayme.sh ++server = mqtt.meshtastic.org + + # Topics to subscribe to (as JSON-like list, but still a string). + topics = ["msh/US/bayarea/#", "msh/US/CA/mrymesh/#", "msh/US/CA/sacvalley"] +@@ -82,7 +82,7 @@ password = large4cats + # ------------------------- + [database] + # SQLAlchemy connection string. This one uses SQLite with asyncio support. 
+-connection_string = sqlite+aiosqlite:///packets.db ++connection_string = sqlite+aiosqlite:////var/lib/meshview/packets.db + + + # ------------------------- +@@ -110,4 +110,4 @@ vacuum = False + # Set to True to enable, False to disable (default: False) + access_log = False + # Database cleanup logfile +-db_cleanup_logfile = dbcleanup.log ++db_cleanup_logfile = /var/log/meshview/dbcleanup.log diff --git a/create_example_migration.py b/create_example_migration.py new file mode 100755 index 0000000..1eb2aa9 --- /dev/null +++ b/create_example_migration.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python3 +""" +Script to create a blank migration for manual editing. + +Usage: + ./env/bin/python create_example_migration.py + +This creates an empty migration file that you can manually edit to add +custom migration logic (data migrations, complex schema changes, etc.) + +Unlike create_migration.py which auto-generates from model changes, +this creates a blank template for you to fill in. +""" + +import os +import sys + +# Add current directory to path +sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) + +from alembic.config import Config + +from alembic import command + +# Create Alembic config +alembic_cfg = Config("alembic.ini") + +# Set database URL from meshview config +try: + from meshview.config import CONFIG + + database_url = CONFIG["database"]["connection_string"] + alembic_cfg.set_main_option("sqlalchemy.url", database_url) + print(f"Using database URL from config: {database_url}") +except Exception as e: + print(f"Warning: Could not load meshview config: {e}") + print("Using default database URL") + alembic_cfg.set_main_option("sqlalchemy.url", "sqlite+aiosqlite:///packets.db") + +# Generate blank migration +try: + print("Creating blank migration for manual editing...") + command.revision(alembic_cfg, autogenerate=False, message="Manual migration") + print("✓ Successfully created blank migration!") + print("\nNow edit the generated file in 
alembic/versions/") + print("Add your custom upgrade() and downgrade() logic") +except Exception as e: + print(f"✗ Error creating migration: {e}") + import traceback + + traceback.print_exc() + sys.exit(1) diff --git a/create_migration.py b/create_migration.py new file mode 100755 index 0000000..a242655 --- /dev/null +++ b/create_migration.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +""" +Helper script to create Alembic migrations from SQLAlchemy model changes. + +Usage: + ./env/bin/python create_migration.py + +This will: +1. Load your current models from meshview/models.py +2. Compare them to the current database schema +3. Auto-generate a migration with the detected changes +4. Save the migration to alembic/versions/ + +After running this, review the generated migration file before committing! +""" + +import os +import sys + +# Add current directory to path +sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) + +from alembic.config import Config + +from alembic import command + +# Create Alembic config +alembic_cfg = Config("alembic.ini") + +# Set database URL from meshview config +try: + from meshview.config import CONFIG + + database_url = CONFIG["database"]["connection_string"] + alembic_cfg.set_main_option("sqlalchemy.url", database_url) + print(f"Using database URL from config: {database_url}") +except Exception as e: + print(f"Warning: Could not load meshview config: {e}") + print("Using default database URL") + alembic_cfg.set_main_option("sqlalchemy.url", "sqlite+aiosqlite:///packets.db") + +# Generate migration +try: + print("\nComparing models to current database schema...") + print("Generating migration...\n") + command.revision(alembic_cfg, autogenerate=True, message="Auto-generated migration") + print("\n✓ Successfully created migration!") + print("\nNext steps:") + print("1. Review the generated file in alembic/versions/") + print("2. Edit the migration message/logic if needed") + print("3. 
Test the migration: ./env/bin/alembic upgrade head") + print("4. Commit the migration file to version control") +except Exception as e: + print(f"\n✗ Error creating migration: {e}") + import traceback + + traceback.print_exc() + sys.exit(1) diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index 3fff869..0000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -FROM python:3.12-slim - -# Set work directory -WORKDIR /app - -# Install system dependencies (graphviz required, git for cloning) -RUN apt-get update && \ - apt-get install -y --no-install-recommends git graphviz && \ - rm -rf /var/lib/apt/lists/* - -# Clone the repo with submodules -RUN git clone --recurse-submodules https://github.com/pablorevilla-meshtastic/meshview.git /app - -# Create virtual environment -RUN python -m venv /app/env - -# Upgrade pip and install requirements in venv -RUN /app/env/bin/pip install --no-cache-dir --upgrade pip && \ - /app/env/bin/pip install --no-cache-dir -r /app/requirements.txt - -# Copy sample config -RUN cp /app/sample.config.ini /app/config.ini - -# Expose port -EXPOSE 8081 - -# Run the app via venv -CMD ["/app/env/bin/python", "/app/mvrun.py"] diff --git a/docker/README.md b/docker/README.md index d5531a5..e249712 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,44 +1,36 @@ # MeshView Docker Container -This Dockerfile builds a containerized version of the [MeshView](https://github.com/pablorevilla-meshtastic/meshview) application. It uses a lightweight Python environment and sets up the required virtual environment as expected by the application. +> **Note:** This directory contains legacy Docker build files. 
+> +> **For current Docker usage instructions, please see [README-Docker.md](../README-Docker.md) in the project root.** -## Image Details +## Current Approach -- **Base Image**: `python:3.12-slim` -- **Working Directory**: `/app` -- **Python Virtual Environment**: `/app/env` +Pre-built container images are automatically built and published to GitHub Container Registry: + +```bash +docker pull ghcr.io/pablorevilla-meshtastic/meshview:latest +``` + +See **[README-Docker.md](../README-Docker.md)** for: +- Quick start instructions +- Volume mount configuration +- Docker Compose examples +- Backup configuration +- Troubleshooting + +## Legacy Build (Not Recommended) + +If you need to build your own image for development: + +```bash +# From project root +docker build -f Containerfile -t meshview:local . +``` + +The current Containerfile uses: +- **Base Image**: `python:3.13-slim` (Debian-based) +- **Build tool**: `uv` for fast dependency installation +- **User**: Non-root user `app` (UID 10001) - **Exposed Port**: `8081` - -## Build Instructions - -Build the Docker image: - -```bash -docker build -t meshview-docker . -``` - -## Run Instructions - -Run the container: - -```bash -docker run -d --name meshview-docker -p 8081:8081 meshview-docker -``` - -This maps container port `8081` to your host. The application runs via: - -```bash -/app/env/bin/python /app/mvrun.py -``` - -## Web Interface - -Once the container is running, you can access the MeshView web interface by visiting: - -http://localhost:8081 - -If running on a remote server, replace `localhost` with the host's IP or domain name: - -http://:8081 - -Ensure that port `8081` is open and not blocked by a firewall or security group. 
+- **Volumes**: `/etc/meshview`, `/var/lib/meshview`, `/var/log/meshview` diff --git a/docs/ALEMBIC_SETUP.md b/docs/ALEMBIC_SETUP.md new file mode 100644 index 0000000..0905673 --- /dev/null +++ b/docs/ALEMBIC_SETUP.md @@ -0,0 +1,361 @@ +# Alembic Database Migration Setup + +This document describes the automatic database migration system implemented for MeshView using Alembic. + +## Overview + +The system provides automatic database schema migrations with coordination between the writer app (startdb.py) and reader app (web.py): + +- **Writer App**: Automatically runs pending migrations on startup +- **Reader App**: Waits for migrations to complete before starting + +## Architecture + +### Key Components + +1. **`meshview/migrations.py`** - Migration management utilities + - `run_migrations()` - Runs pending migrations (writer app) + - `wait_for_migrations()` - Waits for schema to be current (reader app) + - `is_database_up_to_date()` - Checks schema version + - Migration status tracking table + +2. **`alembic/`** - Alembic migration directory + - `env.py` - Configured for async SQLAlchemy support + - `versions/` - Migration scripts directory + - `alembic.ini` - Alembic configuration + +3. **Modified Apps**: + - `startdb.py` - Writer app that runs migrations before MQTT ingestion + - `meshview/web.py` - Reader app that waits for schema updates + +## How It Works - Automatic In-Place Updates + +### ✨ Fully Automatic Operation + +**No manual migration commands needed!** The database schema updates automatically when you: +1. Deploy new code with migration files +2. Restart the applications + +### Writer App (startdb.py) Startup Sequence + +1. Initialize database connection +2. Create migration status tracking table +3. Set "migration in progress" flag +4. 
**🔄 Automatically run any pending Alembic migrations** (synchronously) + - Detects current schema version + - Compares to latest available migration + - Runs all pending migrations in sequence + - Updates database schema in place +5. Clear "migration in progress" flag +6. Start MQTT ingestion and other tasks + +### Reader App (web.py) Startup Sequence + +1. Initialize database connection +2. **Check database schema version** +3. If not up to date: + - Wait up to 60 seconds (30 retries × 2 seconds) + - Check every 2 seconds for schema updates + - Automatically proceeds once writer completes migrations +4. Once schema is current, start web server + +### 🎯 Key Point: Zero Manual Steps + +When you deploy new code with migrations: +```bash +# Just start the apps - migrations happen automatically! +./env/bin/python startdb.py # Migrations run here automatically +./env/bin/python main.py # Waits for migrations, then starts +``` + +**The database updates itself!** No need to run `alembic upgrade` manually. + +### Coordination + +The apps coordinate using: +- **Alembic version table** (`alembic_version`) - Tracks current schema version +- **Migration status table** (`migration_status`) - Optional flag for "in progress" state + +## Creating New Migrations + +### Using the helper script: + +```bash +./env/bin/python create_migration.py +``` + +### Manual creation: + +```bash +./env/bin/alembic revision --autogenerate -m "Description of changes" +``` + +This will: +1. Compare current database schema with SQLAlchemy models +2. Generate a migration script in `alembic/versions/` +3. Automatically detect most schema changes + +### Manual migration (advanced): + +```bash +./env/bin/alembic revision -m "Manual migration" +``` + +Then edit the generated file to add custom migration logic. 
+ +## Running Migrations + +### Automatic (Recommended) + +Migrations run automatically when the writer app starts: + +```bash +./env/bin/python startdb.py +``` + +### Manual + +To run migrations manually: + +```bash +./env/bin/alembic upgrade head +``` + +To downgrade: + +```bash +./env/bin/alembic downgrade -1 # Go back one version +./env/bin/alembic downgrade base # Go back to beginning +``` + +## Checking Migration Status + +Check current database version: + +```bash +./env/bin/alembic current +``` + +View migration history: + +```bash +./env/bin/alembic history +``` + +## Benefits + +1. **Zero Manual Intervention**: Migrations run automatically on startup +2. **Safe Coordination**: Reader won't connect to incompatible schema +3. **Version Control**: All schema changes tracked in git +4. **Rollback Capability**: Can downgrade if needed +5. **Auto-generation**: Most migrations created automatically from model changes + +## Migration Workflow + +### Development Process + +1. **Modify SQLAlchemy models** in `meshview/models.py` +2. **Create migration**: + ```bash + ./env/bin/python create_migration.py + ``` +3. **Review generated migration** in `alembic/versions/` +4. **Test migration**: + - Stop all apps + - Start writer app (migrations run automatically) + - Start reader app (waits for schema to be current) +5. **Commit migration** to version control + +### Production Deployment + +1. **Deploy new code** with migration scripts +2. **Start writer app** - Migrations run automatically +3. **Start reader app** - Waits for migrations, then starts +4. **Monitor logs** for migration success + +## Troubleshooting + +### Migration fails + +Check logs in writer app for error details. 
To manually fix: + +```bash +./env/bin/alembic current # Check current version +./env/bin/alembic history # View available versions +./env/bin/alembic upgrade head # Try manual upgrade +``` + +### Reader app won't start (timeout) + +Check if writer app is running and has completed migrations: + +```bash +./env/bin/alembic current +``` + +### Reset to clean state + +⚠️ **Warning: This will lose all data** + +```bash +rm packets.db # Or your database file +./env/bin/alembic upgrade head # Create fresh schema +``` + +## File Structure + +``` +meshview/ +├── alembic.ini # Alembic configuration +├── alembic/ +│ ├── env.py # Async-enabled migration runner +│ ├── script.py.mako # Migration template +│ └── versions/ # Migration scripts +│ └── c88468b7ab0b_initial_migration.py +├── meshview/ +│ ├── models.py # SQLAlchemy models (source of truth) +│ ├── migrations.py # Migration utilities +│ ├── mqtt_database.py # Writer database connection +│ └── database.py # Reader database connection +├── startdb.py # Writer app (runs migrations) +├── main.py # Entry point for reader app +└── create_migration.py # Helper script for creating migrations +``` + +## Configuration + +Database URL is read from `config.ini`: + +```ini +[database] +connection_string = sqlite+aiosqlite:///packets.db +``` + +Alembic automatically uses this configuration through `meshview/migrations.py`. + +## Important Notes + +1. **Always test migrations** in development before deploying to production +2. **Backup database** before running migrations in production +3. **Check for data loss** - Some migrations may require data migration logic +4. **Coordinate deployments** - Start writer before readers in multi-instance setups +5. 
**Monitor logs** during first startup after deployment + +## Example Migrations + +### Example 1: Generated Initial Migration + +Here's what an auto-generated migration looks like (from comparing models to database): + +```python +"""Initial migration + +Revision ID: c88468b7ab0b +Revises: +Create Date: 2025-01-26 20:56:50.123456 + +""" +from alembic import op +import sqlalchemy as sa + +# revision identifiers +revision = 'c88468b7ab0b' +down_revision = None +branch_labels = None +depends_on = None + +def upgrade() -> None: + # Upgrade operations + op.create_table('node', + sa.Column('id', sa.String(), nullable=False), + sa.Column('node_id', sa.BigInteger(), nullable=True), + # ... more columns + sa.PrimaryKeyConstraint('id') + ) + +def downgrade() -> None: + # Downgrade operations + op.drop_table('node') +``` + +### Example 2: Manual Migration Adding a New Table + +We've included an example migration (`1717fa5c6545_add_example_table.py`) that demonstrates how to manually create a new table: + +```python +"""Add example table + +Revision ID: 1717fa5c6545 +Revises: c88468b7ab0b +Create Date: 2025-10-26 20:59:04.347066 +""" +from typing import Sequence, Union +from alembic import op +import sqlalchemy as sa + +def upgrade() -> None: + """Create example table with sample columns.""" + op.create_table( + 'example', + sa.Column('id', sa.Integer(), nullable=False, primary_key=True, autoincrement=True), + sa.Column('name', sa.String(length=100), nullable=False), + sa.Column('description', sa.Text(), nullable=True), + sa.Column('value', sa.Float(), nullable=True), + sa.Column('is_active', sa.Boolean(), nullable=False, server_default='1'), + sa.Column('created_at', sa.DateTime(), nullable=False, + server_default=sa.text('CURRENT_TIMESTAMP')), + sa.Column('updated_at', sa.DateTime(), nullable=True), + sa.PrimaryKeyConstraint('id') + ) + + # Create an index on the name column for faster lookups + op.create_index('idx_example_name', 'example', ['name']) + +def downgrade() -> 
None: + """Remove example table.""" + op.drop_index('idx_example_name', table_name='example') + op.drop_table('example') +``` + +**Key features demonstrated:** +- Various column types (Integer, String, Text, Float, Boolean, DateTime) +- Primary key with autoincrement +- Nullable and non-nullable columns +- Server defaults (for timestamps and booleans) +- Creating indexes +- Proper downgrade that reverses all changes + +**To test this migration:** + +```bash +# Apply the migration +./env/bin/alembic upgrade head + +# Check it was applied +./env/bin/alembic current + +# Verify table was created +sqlite3 packets.db "SELECT sql FROM sqlite_master WHERE type='table' AND name='example';" + +# Roll back the migration +./env/bin/alembic downgrade -1 + +# Verify table was removed +sqlite3 packets.db "SELECT name FROM sqlite_master WHERE type='table' AND name='example';" +``` + +**To remove this example migration** (after testing): + +```bash +# First make sure you're not on this revision +./env/bin/alembic downgrade c88468b7ab0b + +# Then delete the migration file +rm alembic/versions/1717fa5c6545_add_example_table.py +``` + +## References + +- [Alembic Documentation](https://alembic.sqlalchemy.org/) +- [SQLAlchemy Documentation](https://docs.sqlalchemy.org/) +- [Async SQLAlchemy](https://docs.sqlalchemy.org/en/20/orm/extensions/asyncio.html) \ No newline at end of file diff --git a/API Documentation.md b/docs/API_Documentation.md similarity index 52% rename from API Documentation.md rename to docs/API_Documentation.md index 6b7e909..53a3ff2 100644 --- a/API Documentation.md +++ b/docs/API_Documentation.md @@ -111,12 +111,29 @@ Returns a list of packets with optional filters. --- -### Notes -- All timestamps (`import_time`, `last_seen`) are returned in ISO 8601 format. -- `portnum` is an integer representing the packet type. -- `payload` is always a UTF-8 decoded string. +--- -## 4 Statistics API: GET `/api/stats` +## 4.
Channels API + +### GET `/api/channels` +Returns a list of channels seen in a given time period. + +**Query Parameters** +- `period_type` (optional, string): Time granularity (`hour` or `day`). Default: `hour`. +- `length` (optional, int): Number of periods to look back. Default: `24`. + +**Response Example** +```json +{ + "channels": ["LongFast", "MediumFast", "ShortFast"] +} +``` + +--- + +## 5. Statistics API + +### GET `/api/stats` Retrieve packet statistics aggregated by time periods, with optional filtering. @@ -157,3 +174,171 @@ Retrieve packet statistics aggregated by time periods, with optional filtering. // more entries... ] } +``` + +--- + +## 6. Edges API + +### GET `/api/edges` +Returns network edges (connections between nodes) based on traceroutes and neighbor info. + +**Query Parameters** +- `type` (optional, string): Filter by edge type (`traceroute` or `neighbor`). If omitted, returns both types. + +**Response Example** +```json +{ + "edges": [ + { + "from": 12345678, + "to": 87654321, + "type": "traceroute" + }, + { + "from": 11111111, + "to": 22222222, + "type": "neighbor" + } + ] +} +``` + +--- + +## 7. Configuration API + +### GET `/api/config` +Returns the current site configuration (safe subset exposed to clients). 
+ +**Response Example** +```json +{ + "site": { + "domain": "meshview.example.com", + "language": "en", + "title": "Bay Area Mesh", + "message": "Real time data from around the bay area", + "starting": "/chat", + "nodes": "true", + "conversations": "true", + "everything": "true", + "graphs": "true", + "stats": "true", + "net": "true", + "map": "true", + "top": "true", + "map_top_left_lat": 39.0, + "map_top_left_lon": -123.0, + "map_bottom_right_lat": 36.0, + "map_bottom_right_lon": -121.0, + "map_interval": 3, + "firehose_interval": 3, + "weekly_net_message": "Weekly Mesh check-in message.", + "net_tag": "#BayMeshNet", + "version": "2.0.8 ~ 10-22-25" + }, + "mqtt": { + "server": "mqtt.bayme.sh", + "topics": ["msh/US/bayarea/#"] + }, + "cleanup": { + "enabled": "false", + "days_to_keep": "14", + "hour": "2", + "minute": "0", + "vacuum": "false" + } +} +``` + +--- + +## 8. Language/Translations API + +### GET `/api/lang` +Returns translation strings for the UI. + +**Query Parameters** +- `lang` (optional, string): Language code (e.g., `en`, `es`). Defaults to site language setting. +- `section` (optional, string): Specific section to retrieve translations for. + +**Response Example (full)** +```json +{ + "chat": { + "title": "Chat", + "send": "Send" + }, + "map": { + "title": "Map", + "zoom_in": "Zoom In" + } +} +``` + +**Response Example (section-specific)** +Request: `/api/lang?section=chat` +```json +{ + "title": "Chat", + "send": "Send" +} +``` + +--- + +## 9. Health Check API + +### GET `/health` +Health check endpoint for monitoring, load balancers, and orchestration systems. 
+ +**Response Example (Healthy)** +```json +{ + "status": "healthy", + "timestamp": "2025-11-03T14:30:00.123456Z", + "version": "3.0.0", + "git_revision": "6416978", + "database": "connected", + "database_size": "853.03 MB", + "database_size_bytes": 894468096 +} +``` + +**Response Example (Unhealthy)** +Status Code: `503 Service Unavailable` +```json +{ + "status": "unhealthy", + "timestamp": "2025-11-03T14:30:00.123456Z", + "version": "3.0.0", + "git_revision": "6416978", + "database": "disconnected" +} +``` + +--- + +## 10. Version API + +### GET `/version` +Returns detailed version information including semver, release date, and git revision. + +**Response Example** +```json +{ + "version": "3.0.0", + "release_date": "2025-11-05", + "git_revision": "6416978a1b2c3d4e5f6071829304a5b6c7d8e9f0", + "git_revision_short": "6416978" +} +``` + +--- + +## Notes +- All timestamps (`import_time`, `last_seen`) are returned in ISO 8601 format. +- `portnum` is an integer representing the packet type. +- `payload` is always a UTF-8 decoded string. +- Node IDs are integers (e.g., `12345678`). diff --git a/docs/Database-Changes-With-Alembic.md b/docs/Database-Changes-With-Alembic.md new file mode 100644 index 0000000..453dfa5 --- /dev/null +++ b/docs/Database-Changes-With-Alembic.md @@ -0,0 +1,146 @@ +# Database Changes With Alembic + +This guide explains how to make database schema changes in MeshView using Alembic migrations. + +## Overview + +When you need to add, modify, or remove columns from database tables, you must: +1. Update the SQLAlchemy model +2. Create an Alembic migration +3. Let the system automatically apply the migration + +## Step-by-Step Process + +### 1. Update the Model + +Edit `meshview/models.py` to add/modify the column in the appropriate model class: + +```python +class Traceroute(Base): + __tablename__ = "traceroute" + + id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True) + # ... existing columns ...
+ route_return: Mapped[bytes] = mapped_column(nullable=True) # New column +``` + +### 2. Create an Alembic Migration + +Generate a new migration file with a descriptive message: + +```bash +./env/bin/alembic revision -m "add route_return to traceroute" +``` + +This creates a new file in `alembic/versions/` with a unique revision ID. + +### 3. Fill in the Migration + +Edit the generated migration file to implement the actual database changes: + +```python +def upgrade() -> None: + # Add route_return column to traceroute table + with op.batch_alter_table('traceroute', schema=None) as batch_op: + batch_op.add_column(sa.Column('route_return', sa.LargeBinary(), nullable=True)) + + +def downgrade() -> None: + # Remove route_return column from traceroute table + with op.batch_alter_table('traceroute', schema=None) as batch_op: + batch_op.drop_column('route_return') +``` + +### 4. Migration Runs Automatically + +When you restart the application with `mvrun.py`: + +1. The writer process (`startdb.py`) starts up +2. It checks if the database schema is up to date +3. If new migrations are pending, it runs them automatically +4. The reader process (web server) waits for migrations to complete before starting + +**No manual migration command is needed** - the application handles this automatically on startup. + +### 5. Commit Both Files + +Add both files to git: + +```bash +git add meshview/models.py +git add alembic/versions/ac311b3782a1_add_route_return_to_traceroute.py +git commit -m "Add route_return column to traceroute table" +``` + +## Important Notes + +### SQLite Compatibility + +Always use `batch_alter_table` for SQLite compatibility: + +```python +with op.batch_alter_table('table_name', schema=None) as batch_op: + batch_op.add_column(...) +``` + +SQLite has limited ALTER TABLE support, and `batch_alter_table` works around these limitations. 
+ +### Migration Process + +- **Writer process** (`startdb.py`): Runs migrations on startup +- **Reader process** (web server in `main.py`): Waits for migrations to complete +- Migrations are checked and applied every time the application starts +- The system uses a migration status table to coordinate between processes + +### Common Column Types + +```python +# Integer +column: Mapped[int] = mapped_column(BigInteger, nullable=True) + +# String +column: Mapped[str] = mapped_column(nullable=True) + +# Bytes/Binary +column: Mapped[bytes] = mapped_column(nullable=True) + +# DateTime +column: Mapped[datetime] = mapped_column(nullable=True) + +# Boolean +column: Mapped[bool] = mapped_column(nullable=True) + +# Float +column: Mapped[float] = mapped_column(nullable=True) +``` + +### Migration File Location + +Migrations are stored in: `alembic/versions/` + +Each migration file includes: +- Revision ID (unique identifier) +- Down revision (previous migration in chain) +- Create date +- `upgrade()` function (applies changes) +- `downgrade()` function (reverts changes) + +## Troubleshooting + +### Migration Not Running + +If migrations don't run automatically: + +1. Check that the database is writable +2. Look for errors in the startup logs +3. Verify the migration chain is correct (each migration references the previous one) + +### Manual Migration (Not Recommended) + +If you need to manually run migrations for debugging: + +```bash +./env/bin/alembic upgrade head +``` + +However, the application normally handles this automatically. diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..552da62 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,14 @@ +# Technical Documentation + +This directory contains technical documentation for MeshView that goes beyond initial setup and basic usage. 
+ +These documents are intended for developers, contributors, and advanced users who need deeper insight into the system's architecture, database migrations, API endpoints, and internal workings. + +## Contents + +- [ALEMBIC_SETUP.md](ALEMBIC_SETUP.md) - Database migration setup and management +- [TIMESTAMP_MIGRATION.md](TIMESTAMP_MIGRATION.md) - Details on timestamp schema changes +- [API_Documentation.md](API_Documentation.md) - REST API endpoints and usage +- [CODE_IMPROVEMENTS.md](CODE_IMPROVEMENTS.md) - Suggested code improvements and refactoring ideas + +For initial setup and basic usage instructions, please see the main [README.md](../README.md) in the root directory. diff --git a/docs/TIMESTAMP_MIGRATION.md b/docs/TIMESTAMP_MIGRATION.md new file mode 100644 index 0000000..2197ef4 --- /dev/null +++ b/docs/TIMESTAMP_MIGRATION.md @@ -0,0 +1,193 @@ +# High-Resolution Timestamp Migration + +This document describes the implementation of GitHub issue #55: storing high-resolution timestamps as integers in the database for improved performance and query efficiency. + +## Overview + +The meshview database now stores timestamps in two formats: +1. **TEXT format** (`import_time`): Human-readable ISO8601 format with microseconds (e.g., `2025-03-12 04:15:56.058038`) +2. **INTEGER format** (`import_time_us`): Microseconds since Unix epoch (1970-01-01 00:00:00 UTC) + +The dual format approach provides: +- **Backward compatibility**: Existing `import_time` TEXT columns remain unchanged +- **Performance**: Fast integer comparisons and math operations +- **Precision**: Microsecond resolution for accurate timing +- **Efficiency**: Compact storage and fast indexed lookups + +## Database Changes + +### New Columns Added + +Three tables have new `import_time_us` columns: + +1. **packet.import_time_us** (INTEGER) + - Stores when the packet was imported into the database + - Indexed for fast queries + +2. 
**packet_seen.import_time_us** (INTEGER) + - Stores when the packet_seen record was imported + - Indexed for performance + +3. **traceroute.import_time_us** (INTEGER) + - Stores when the traceroute was imported + - Indexed for fast lookups + +### New Indexes + +The following indexes were created for optimal query performance: + +```sql +CREATE INDEX idx_packet_import_time_us ON packet(import_time_us DESC); +CREATE INDEX idx_packet_from_node_time_us ON packet(from_node_id, import_time_us DESC); +CREATE INDEX idx_packet_seen_import_time_us ON packet_seen(import_time_us); +CREATE INDEX idx_traceroute_import_time_us ON traceroute(import_time_us); +``` + +## Migration Process + +### For Existing Databases + +Run the migration script to add the new columns and populate them from existing data: + +```bash +python migrate_add_timestamp_us.py [database_path] +``` + +If no path is provided, it defaults to `packets.db` in the current directory. + +The migration script: +1. Checks if migration is needed (idempotent) +2. Adds `import_time_us` columns to the three tables +3. Populates the new columns from existing `import_time` values +4. Creates indexes for optimal performance +5. Verifies the migration completed successfully + +### For New Databases + +New databases created with the updated schema will automatically include the `import_time_us` columns. The MQTT store module populates both columns when inserting new records. 
+ +## Code Changes + +### Models (meshview/models.py) + +The ORM models now include the new `import_time_us` fields: + +```python +class Packet(Base): + import_time: Mapped[datetime] = mapped_column(nullable=True) + import_time_us: Mapped[int] = mapped_column(BigInteger, nullable=True) +``` + +### MQTT Store (meshview/mqtt_store.py) + +The data ingestion logic now populates both timestamp columns using UTC time: + +```python +now = datetime.datetime.now(datetime.timezone.utc) +now_us = int(now.timestamp() * 1_000_000) + +# Both columns are populated +import_time=now, +import_time_us=now_us, +``` + +**Important**: All new timestamps use UTC (Coordinated Universal Time) for consistency across time zones. + +## Using the New Timestamps + +### Example Queries + +**Query packets from the last 7 days:** + +```sql +-- Old way (slower) +SELECT * FROM packet +WHERE import_time >= datetime('now', '-7 days'); + +-- New way (faster) +SELECT * FROM packet +WHERE import_time_us >= (strftime('%s', 'now', '-7 days') * 1000000); +``` + +**Query packets in a specific time range:** + +```sql +SELECT * FROM packet +WHERE import_time_us BETWEEN 1759254380000000 AND 1759254390000000; +``` + +**Calculate time differences (in microseconds):** + +```sql +SELECT + id, + (import_time_us - LAG(import_time_us) OVER (ORDER BY import_time_us)) / 1000000.0 as seconds_since_last +FROM packet +LIMIT 10; +``` + +### Converting Timestamps + +**From datetime to microseconds (UTC):** +```python +import datetime +now = datetime.datetime.now(datetime.timezone.utc) +now_us = int(now.timestamp() * 1_000_000) +``` + +**From microseconds to datetime (UTC):** +```python +import datetime +timestamp_us = 1759254380813451 +dt = datetime.datetime.fromtimestamp(timestamp_us / 1_000_000, tz=datetime.timezone.utc) +``` + +**In SQL queries:** +```sql +-- Datetime to microseconds +SELECT CAST((strftime('%s', import_time) || substr(import_time, 21, 6)) AS INTEGER); + +-- Microseconds to datetime (approximate) +SELECT datetime(import_time_us /
1000000, 'unixepoch'); +``` + +## Performance Benefits + +The integer timestamp columns provide significant performance improvements: + +1. **Faster comparisons**: Integer comparisons are much faster than string/datetime comparisons +2. **Smaller index size**: Integer indexes are more compact than datetime indexes +3. **Range queries**: BETWEEN operations on integers are highly optimized +4. **Math operations**: Easy to calculate time differences, averages, etc. +5. **Sorting**: Integer sorting is faster than datetime sorting + +## Backward Compatibility + +The original `import_time` TEXT columns remain unchanged: +- Existing code continues to work +- Human-readable timestamps still available +- Gradual migration to new columns possible +- No breaking changes for existing queries + +## Future Work + +Future improvements could include: +- Migrating queries to use `import_time_us` columns +- Deprecating the TEXT `import_time` columns (after transition period) +- Adding helper functions for timestamp conversion +- Creating views that expose both formats + +## Testing + +The migration was tested on a production database with: +- 132,466 packet records +- 1,385,659 packet_seen records +- 28,414 traceroute records + +All records were successfully migrated with microsecond precision preserved. 
+ +## References + +- GitHub Issue: #55 - Storing High-Resolution Timestamps in SQLite +- SQLite datetime functions: https://www.sqlite.org/lang_datefunc.html +- Python datetime module: https://docs.python.org/3/library/datetime.html diff --git a/meshview/__version__.py b/meshview/__version__.py new file mode 100644 index 0000000..696a8d7 --- /dev/null +++ b/meshview/__version__.py @@ -0,0 +1,57 @@ +"""Version information for MeshView.""" + +import subprocess +from pathlib import Path + +__version__ = "3.0.0" +__release_date__ = "2025-11-05" + + +def get_git_revision(): + """Get the current git revision hash.""" + try: + repo_dir = Path(__file__).parent.parent + result = subprocess.run( + ["git", "rev-parse", "HEAD"], + capture_output=True, + text=True, + check=True, + cwd=repo_dir, + ) + return result.stdout.strip() + except (subprocess.CalledProcessError, FileNotFoundError): + return "unknown" + + +def get_git_revision_short(): + """Get the short git revision hash.""" + try: + repo_dir = Path(__file__).parent.parent + result = subprocess.run( + ["git", "rev-parse", "--short", "HEAD"], + capture_output=True, + text=True, + check=True, + cwd=repo_dir, + ) + return result.stdout.strip() + except (subprocess.CalledProcessError, FileNotFoundError): + return "unknown" + + +def get_version_info(): + """Get complete version information.""" + return { + "version": __version__, + "release_date": __release_date__, + "git_revision": get_git_revision(), + "git_revision_short": get_git_revision_short(), + } + + +# Cache git info at import time for performance +_git_revision = get_git_revision() +_git_revision_short = get_git_revision_short() + +# Full version string for display +__version_string__ = f"{__version__} ~ {__release_date__}" diff --git a/meshview/lang/en.json b/meshview/lang/en.json index d28029b..18df79d 100644 --- a/meshview/lang/en.json +++ b/meshview/lang/en.json @@ -1,110 +1,141 @@ -{ - "base": { - "conversations": "Conversations", - "nodes": "Nodes", - 
"everything": "See Everything", - "graph": "Mesh Graphs", - "net": "Weekly Net", - "map": "Live Map", - "stats": "Stats", - "top": "Top Traffic Nodes", - "footer": "Visit Meshview on GitHub", - "node id": "Node id", - "go to node": "Go to Node", - "all": "All", - "portnum_options": { - "1": "Text Message", - "3": "Position", - "4": "Node Info", - "67": "Telemetry", - "70": "Traceroute", - "71": "Neighbor Info" - } + { + "base": { + "chat": "Chat", + "nodes": "Nodes", + "everything": "See Everything", + "graphs": "Mesh Graphs", + "net": "Weekly Net", + "map": "Live Map", + "stats": "Stats", + "top": "Top Traffic Nodes", + "footer": "Visit Meshview on GitHub", + "node id": "Node id", + "go to node": "Go to Node", + "all": "All", + "portnum_options": { + "1": "Text Message", + "3": "Position", + "4": "Node Info", + "67": "Telemetry", + "70": "Traceroute", + "71": "Neighbor Info" + } + }, + "chat": { + "replying_to": "Replying to:", + "view_packet_details": "View packet details" + }, + "nodelist": { + "search_placeholder": "Search by name or ID...", + "all_roles": "All Roles", + "all_channels": "All Channels", + "all_hw_models": "All HW Models", + "all_firmware": "All Firmware", + "export_csv": "Export CSV", + "clear_filters": "Clear Filters", + "showing": "Showing", + "nodes": "nodes", + "short": "Short", + "long_name": "Long Name", + "hw_model": "HW Model", + "firmware": "Firmware", + "role": "Role", + "last_lat": "Last Latitude", + "last_long": "Last Longitude", + "channel": "Channel", + "last_update": "Last Update", + "loading_nodes": "Loading nodes...", + "no_nodes": "No nodes found", + "error_nodes": "Error loading nodes" }, - "chat": { - "replying_to": "Replying to:", - "view_packet_details": "View packet details" - }, - "nodelist": { - "search_placeholder": "Search by name or ID...", - "all_roles": "All Roles", - "all_channels": "All Channels", - "all_hw_models": "All HW Models", - "all_firmware": "All Firmware", - "export_csv": "Export CSV", - "clear_filters": 
"Clear Filters", - "showing": "Showing", - "nodes": "nodes", - "short": "Short", - "long_name": "Long Name", - "hw_model": "HW Model", - "firmware": "Firmware", - "role": "Role", - "last_lat": "Last Latitude", - "last_long": "Last Longitude", - "channel": "Channel", - "last_update": "Last Update", - "loading_nodes": "Loading nodes...", - "no_nodes": "No nodes found", - "error_nodes": "Error loading nodes" - }, - "net": { - "number_of_checkins": "Number of Check-ins:", - "view_packet_details": "View packet details", - "view_all_packets_from_node": "View all packets from this node", - "no_packets_found": "No packets found." - }, - "map": { - "channel": "Channel:", - "model": "Model:", - "role": "Role:", - "last_seen": "Last seen:", - "firmware": "Firmware:", - "show_routers_only": "Show Routers Only", - "share_view": "Share This View" - }, - "stats": - { - "mesh_stats_summary": "Mesh Statistics - Summary (all available in Database)", - "total_nodes": "Total Nodes", - "total_packets": "Total Packets", - "total_packets_seen": "Total Packets Seen", - "packets_per_day_all": "Packets per Day - All Ports (Last 14 Days)", - "packets_per_day_text": "Packets per Day - Text Messages (Port 1, Last 14 Days)", - "packets_per_hour_all": "Packets per Hour - All Ports", - "packets_per_hour_text": "Packets per Hour - Text Messages (Port 1)", - "packet_types_last_24h": "Packet Types - Last 24 Hours", - "hardware_breakdown": "Hardware Breakdown", - "role_breakdown": "Role Breakdown", - "channel_breakdown": "Channel Breakdown", - "expand_chart": "Expand Chart", - "export_csv": "Export CSV", - "all_channels": "All Channels" - }, - "top": - { - "top_traffic_nodes": "Top Traffic Nodes (last 24 hours)", - "chart_description_1": "This chart shows a bell curve (normal distribution) based on the total \"Times Seen\" values for all nodes. 
It helps visualize how frequently nodes are heard, relative to the average.", - "chart_description_2": "This \"Times Seen\" value is the closest that we can get to Mesh utilization by node.", - "mean_label": "Mean:", - "stddev_label": "Standard Deviation:", - "long_name": "Long Name", - "short_name": "Short Name", - "channel": "Channel", - "packets_sent": "Packets Sent", - "times_seen": "Times Seen", - "seen_percent": "Seen % of Mean", - "no_nodes": "No top traffic nodes available." - }, - "nodegraph": - { - "channel_label": "Channel:", - "search_node_placeholder": "Search node...", - "search_button": "Search", - "long_name_label": "Long Name:", - "short_name_label": "Short Name:", - "role_label": "Role:", - "hw_model_label": "Hardware Model:", - "node_not_found": "Node not found in current channel!" - } -} \ No newline at end of file + "net": { + "number_of_checkins": "Number of Check-ins:", + "view_packet_details": "View packet details", + "view_all_packets_from_node": "View all packets from this node", + "no_packets_found": "No packets found." 
+ }, + "map": { + "channel": "Channel:", + "model": "Model:", + "role": "Role:", + "last_seen": "Last seen:", + "firmware": "Firmware:", + "show_routers_only": "Show Routers Only", + "share_view": "Share This View" + }, + "stats": + { + "mesh_stats_summary": "Mesh Statistics - Summary (all available in Database)", + "total_nodes": "Total Nodes", + "total_packets": "Total Packets", + "total_packets_seen": "Total Packets Seen", + "packets_per_day_all": "Packets per Day - All Ports (Last 14 Days)", + "packets_per_day_text": "Packets per Day - Text Messages (Port 1, Last 14 Days)", + "packets_per_hour_all": "Packets per Hour - All Ports", + "packets_per_hour_text": "Packets per Hour - Text Messages (Port 1)", + "packet_types_last_24h": "Packet Types - Last 24 Hours", + "hardware_breakdown": "Hardware Breakdown", + "role_breakdown": "Role Breakdown", + "channel_breakdown": "Channel Breakdown", + "expand_chart": "Expand Chart", + "export_csv": "Export CSV", + "all_channels": "All Channels", + "node_id": "Node ID" + }, + "top": + { + "top_traffic_nodes": "Top Traffic Nodes (last 24 hours)", + "chart_description_1": "This chart shows a bell curve (normal distribution) based on the total \"Times Seen\" values for all nodes. It helps visualize how frequently nodes are heard, relative to the average.", + "chart_description_2": "This \"Times Seen\" value is the closest that we can get to Mesh utilization by node.", + "mean_label": "Mean:", + "stddev_label": "Standard Deviation:", + "long_name": "Long Name", + "short_name": "Short Name", + "channel": "Channel", + "packets_sent": "Packets Sent", + "times_seen": "Times Seen", + "seen_percent": "Seen % of Mean", + "no_nodes": "No top traffic nodes available." 
+ }, + "nodegraph": + { + "channel_label": "Channel:", + "search_node_placeholder": "Search node...", + "search_button": "Search", + "long_name_label": "Long Name:", + "short_name_label": "Short Name:", + "role_label": "Role:", + "hw_model_label": "Hardware Model:", + "node_not_found": "Node not found in current channel!" + }, + "firehose": + { + "live_feed": "📡 Live Feed", + "pause": "Pause", + "resume": "Resume", + "time": "Time", + "packet_id": "Packet ID", + "from": "From", + "to": "To", + "port": "Port", + "links": "Links", + + "unknown_app": "UNKNOWN APP", + "text_message": "Text Message", + "position": "Position", + "node_info": "Node Info", + "routing": "Routing", + "administration": "Administration", + "waypoint": "Waypoint", + "store_forward": "Store Forward", + "telemetry": "Telemetry", + "trace_route": "Trace Route", + "neighbor_info": "Neighbor Info", + + "direct_to_mqtt": "direct to MQTT", + "all": "All", + "map": "Map", + "graph": "Graph" + } + + } \ No newline at end of file diff --git a/meshview/lang/es.json b/meshview/lang/es.json index c78810d..e430b12 100644 --- a/meshview/lang/es.json +++ b/meshview/lang/es.json @@ -108,5 +108,35 @@ "other": "Otro", "unknown": "Desconocido", "node_not_found": "¡Nodo no encontrado en el canal actual!" 
- } + }, + "firehose": + { + "live_feed": "📡 Flujo en Vivo", + "pause": "Pausar", + "resume": "Continuar", + "time": "Hora", + "packet_id": "ID del Paquete", + "from": "De", + "to": "Para", + "port": "Puerto", + "links": "Enlaces", + + "unknown_app": "APLICACIÓN DESCONOCIDA", + "text_message": "Mensaje de Texto", + "position": "Posición", + "node_info": "Información del Nodo", + "routing": "Enrutamiento", + "administration": "Administración", + "waypoint": "Punto de Ruta", + "store_forward": "Almacenar y Reenviar", + "telemetry": "Telemetría", + "trace_route": "Rastreo de Ruta", + "neighbor_info": "Información de Vecinos", + + "direct_to_mqtt": "Directo a MQTT", + "all": "Todos", + "map": "Mapa", + "graph": "Gráfico" + } + } diff --git a/meshview/migrations.py b/meshview/migrations.py new file mode 100644 index 0000000..e76eeda --- /dev/null +++ b/meshview/migrations.py @@ -0,0 +1,243 @@ +""" +Database migration management for MeshView. + +This module provides utilities for: +- Running Alembic migrations programmatically +- Checking database schema versions +- Coordinating migrations between writer and reader apps +""" + +import asyncio +import logging +from pathlib import Path + +from alembic.config import Config +from alembic.runtime.migration import MigrationContext +from alembic.script import ScriptDirectory +from sqlalchemy import text +from sqlalchemy.ext.asyncio import AsyncEngine + +from alembic import command + +logger = logging.getLogger(__name__) + + +def get_alembic_config(database_url: str) -> Config: + """ + Get Alembic configuration with the database URL set. 
+ + Args: + database_url: SQLAlchemy database connection string + + Returns: + Configured Alembic Config object + """ + # Get the alembic.ini path (in project root) + alembic_ini = Path(__file__).parent.parent / "alembic.ini" + + config = Config(str(alembic_ini)) + config.set_main_option("sqlalchemy.url", database_url) + + return config + + +async def get_current_revision(engine: AsyncEngine) -> str | None: + """ + Get the current database schema revision. + + Args: + engine: Async SQLAlchemy engine + + Returns: + Current revision string, or None if no migrations applied + """ + async with engine.connect() as connection: + + def _get_revision(conn): + context = MigrationContext.configure(conn) + return context.get_current_revision() + + revision = await connection.run_sync(_get_revision) + return revision + + +async def get_head_revision(database_url: str) -> str | None: + """ + Get the head (latest) revision from migration scripts. + + Args: + database_url: Database connection string + + Returns: + Head revision string, or None if no migrations exist + """ + config = get_alembic_config(database_url) + script_dir = ScriptDirectory.from_config(config) + + head = script_dir.get_current_head() + return head + + +async def is_database_up_to_date(engine: AsyncEngine, database_url: str) -> bool: + """ + Check if database is at the latest schema version. + + Args: + engine: Async SQLAlchemy engine + database_url: Database connection string + + Returns: + True if database is up to date, False otherwise + """ + current = await get_current_revision(engine) + head = await get_head_revision(database_url) + + # If there are no migrations yet, consider it up to date + if head is None: + return True + + return current == head + + +def run_migrations(database_url: str) -> None: + """ + Run all pending migrations to bring database up to date. + + This is a synchronous operation that runs Alembic migrations. + Should be called by the writer app on startup. 
+ + Args: + database_url: Database connection string + """ + logger.info("Running database migrations...") + import sys + + sys.stdout.flush() + + config = get_alembic_config(database_url) + + try: + # Run migrations to head + logger.info("Calling alembic upgrade command...") + sys.stdout.flush() + command.upgrade(config, "head") + logger.info("Database migrations completed successfully") + sys.stdout.flush() + except Exception as e: + logger.error(f"Error running migrations: {e}") + raise + + +async def wait_for_migrations( + engine: AsyncEngine, database_url: str, max_retries: int = 30, retry_delay: int = 2 +) -> bool: + """ + Wait for database migrations to complete. + + This should be called by the reader app to wait until + the database schema is up to date before proceeding. + + Args: + engine: Async SQLAlchemy engine + database_url: Database connection string + max_retries: Maximum number of retry attempts + retry_delay: Seconds to wait between retries + + Returns: + True if database is up to date, False if max retries exceeded + """ + for attempt in range(max_retries): + try: + if await is_database_up_to_date(engine, database_url): + logger.info("Database schema is up to date") + return True + + current = await get_current_revision(engine) + head = await get_head_revision(database_url) + + logger.info( + f"Database schema not up to date (current: {current}, head: {head}). " + f"Waiting... (attempt {attempt + 1}/{max_retries})" + ) + + await asyncio.sleep(retry_delay) + + except Exception as e: + logger.warning( + f"Error checking database version (attempt {attempt + 1}/{max_retries}): {e}" + ) + await asyncio.sleep(retry_delay) + + logger.error(f"Database schema not up to date after {max_retries} attempts") + return False + + +async def create_migration_status_table(engine: AsyncEngine) -> None: + """ + Create a simple status table for migration coordination. + + This table can be used to signal when migrations are in progress. 
+ + Args: + engine: Async SQLAlchemy engine + """ + async with engine.begin() as conn: + await conn.execute( + text(""" + CREATE TABLE IF NOT EXISTS migration_status ( + id INTEGER PRIMARY KEY CHECK (id = 1), + in_progress BOOLEAN NOT NULL DEFAULT 0, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """) + ) + + # Insert initial row if not exists + await conn.execute( + text(""" + INSERT OR IGNORE INTO migration_status (id, in_progress) + VALUES (1, 0) + """) + ) + + +async def set_migration_in_progress(engine: AsyncEngine, in_progress: bool) -> None: + """ + Set the migration in-progress flag. + + Args: + engine: Async SQLAlchemy engine + in_progress: True if migration is in progress, False otherwise + """ + async with engine.begin() as conn: + await conn.execute( + text(""" + UPDATE migration_status + SET in_progress = :in_progress, + updated_at = CURRENT_TIMESTAMP + WHERE id = 1 + """), + {"in_progress": in_progress}, + ) + + +async def is_migration_in_progress(engine: AsyncEngine) -> bool: + """ + Check if a migration is currently in progress. 
+ + Args: + engine: Async SQLAlchemy engine + + Returns: + True if migration is in progress, False otherwise + """ + try: + async with engine.connect() as conn: + result = await conn.execute( + text("SELECT in_progress FROM migration_status WHERE id = 1") + ) + row = result.fetchone() + return bool(row[0]) if row else False + except Exception: + # If table doesn't exist or query fails, assume no migration in progress + return False diff --git a/meshview/models.py b/meshview/models.py index 5c26b48..981ec09 100644 --- a/meshview/models.py +++ b/meshview/models.py @@ -23,8 +23,14 @@ class Node(Base): last_long: Mapped[int] = mapped_column(BigInteger, nullable=True) channel: Mapped[str] = mapped_column(nullable=True) last_update: Mapped[datetime] = mapped_column(nullable=True) + first_seen_us: Mapped[int] = mapped_column(BigInteger, nullable=True) + last_seen_us: Mapped[int] = mapped_column(BigInteger, nullable=True) - __table_args__ = (Index("idx_node_node_id", "node_id"),) + __table_args__ = ( + Index("idx_node_node_id", "node_id"), + Index("idx_node_first_seen_us", "first_seen_us"), + Index("idx_node_last_seen_us", "last_seen_us"), + ) def to_dict(self): return { @@ -50,14 +56,17 @@ class Packet(Base): ) payload: Mapped[bytes] = mapped_column(nullable=True) import_time: Mapped[datetime] = mapped_column(nullable=True) + import_time_us: Mapped[int] = mapped_column(BigInteger, nullable=True) channel: Mapped[str] = mapped_column(nullable=True) __table_args__ = ( Index("idx_packet_from_node_id", "from_node_id"), Index("idx_packet_to_node_id", "to_node_id"), Index("idx_packet_import_time", desc("import_time")), + Index("idx_packet_import_time_us", desc("import_time_us")), # Composite index for /top endpoint performance - filters by from_node_id AND import_time Index("idx_packet_from_node_time", "from_node_id", desc("import_time")), + Index("idx_packet_from_node_time_us", "from_node_id", desc("import_time_us")), ) @@ -78,11 +87,13 @@ class PacketSeen(Base): rx_rssi: 
Mapped[int] = mapped_column(nullable=True) topic: Mapped[str] = mapped_column(nullable=True) import_time: Mapped[datetime] = mapped_column(nullable=True) + import_time_us: Mapped[int] = mapped_column(BigInteger, nullable=True) __table_args__ = ( Index("idx_packet_seen_node_id", "node_id"), # Index for /top endpoint performance - JOIN on packet_id Index("idx_packet_seen_packet_id", "packet_id"), + Index("idx_packet_seen_import_time_us", "import_time_us"), ) @@ -98,5 +109,10 @@ class Traceroute(Base): done: Mapped[bool] = mapped_column(nullable=True) route: Mapped[bytes] = mapped_column(nullable=True) import_time: Mapped[datetime] = mapped_column(nullable=True) + route_return: Mapped[bytes] = mapped_column(nullable=True) + import_time_us: Mapped[int] = mapped_column(BigInteger, nullable=True) - __table_args__ = (Index("idx_traceroute_import_time", "import_time"),) + __table_args__ = ( + Index("idx_traceroute_import_time", "import_time"), + Index("idx_traceroute_import_time_us", "import_time_us"), + ) diff --git a/meshview/mqtt_store.py b/meshview/mqtt_store.py index b4ee710..949ec8c 100644 --- a/meshview/mqtt_store.py +++ b/meshview/mqtt_store.py @@ -37,6 +37,9 @@ async def process_envelope(topic, env): await session.execute(select(Node).where(Node.node_id == node_id)) ).scalar_one_or_none() + now = datetime.datetime.now(datetime.UTC) + now_us = int(now.timestamp() * 1_000_000) + if node: node.node_id = node_id node.long_name = map_report.long_name @@ -47,7 +50,10 @@ async def process_envelope(topic, env): node.last_lat = map_report.latitude_i node.last_long = map_report.longitude_i node.firmware = map_report.firmware_version - node.last_update = datetime.datetime.now() + node.last_update = now + node.last_seen_us = now_us + if node.first_seen_us is None: + node.first_seen_us = now_us else: node = Node( id=user_id, @@ -60,7 +66,9 @@ async def process_envelope(topic, env): firmware=map_report.firmware_version, last_lat=map_report.latitude_i, 
last_long=map_report.longitude_i, - last_update=datetime.datetime.now(), + last_update=now, + first_seen_us=now_us, + last_seen_us=now_us, ) session.add(node) except Exception as e: @@ -80,6 +88,8 @@ async def process_envelope(topic, env): if not packet: # FIXME: Not Used # new_packet = True + now = datetime.datetime.now(datetime.UTC) + now_us = int(now.timestamp() * 1_000_000) stmt = ( sqlite_insert(Packet) .values( @@ -88,7 +98,8 @@ async def process_envelope(topic, env): from_node_id=getattr(env.packet, "from"), to_node_id=env.packet.to, payload=env.packet.SerializeToString(), - import_time=datetime.datetime.now(), + import_time=now, + import_time_us=now_us, channel=env.channel_id, ) .on_conflict_do_nothing(index_elements=["id"]) @@ -112,6 +123,8 @@ async def process_envelope(topic, env): ) ) if not result.scalar_one_or_none(): + now = datetime.datetime.now(datetime.UTC) + now_us = int(now.timestamp() * 1_000_000) seen = PacketSeen( packet_id=env.packet.id, node_id=int(env.gateway_id[1:], 16), @@ -122,7 +135,8 @@ async def process_envelope(topic, env): hop_limit=env.packet.hop_limit, hop_start=env.packet.hop_start, topic=topic, - import_time=datetime.datetime.now(), + import_time=now, + import_time_us=now_us, ) session.add(seen) @@ -153,6 +167,9 @@ async def process_envelope(topic, env): await session.execute(select(Node).where(Node.id == user.id)) ).scalar_one_or_none() + now = datetime.datetime.now(datetime.UTC) + now_us = int(now.timestamp() * 1_000_000) + if node: node.node_id = node_id node.long_name = user.long_name @@ -160,7 +177,10 @@ async def process_envelope(topic, env): node.hw_model = hw_model node.role = role node.channel = env.channel_id - node.last_update = datetime.datetime.now() + node.last_update = now + node.last_seen_us = now_us + if node.first_seen_us is None: + node.first_seen_us = now_us else: node = Node( id=user.id, @@ -170,7 +190,9 @@ async def process_envelope(topic, env): hw_model=hw_model, role=role, channel=env.channel_id, - 
last_update=datetime.datetime.now(), + last_update=now, + first_seen_us=now_us, + last_seen_us=now_us, ) session.add(node) except Exception as e: @@ -187,29 +209,30 @@ async def process_envelope(topic, env): await session.execute(select(Node).where(Node.node_id == from_node_id)) ).scalar_one_or_none() if node: + now = datetime.datetime.now(datetime.UTC) + now_us = int(now.timestamp() * 1_000_000) node.last_lat = position.latitude_i node.last_long = position.longitude_i + node.last_update = now + node.last_seen_us = now_us + if node.first_seen_us is None: + node.first_seen_us = now_us session.add(node) # --- TRACEROUTE_APP (no conflict handling, normal insert) if env.packet.decoded.portnum == PortNum.TRACEROUTE_APP: - packet_id = None - if env.packet.decoded.want_response: - packet_id = env.packet.id - else: - result = await session.execute( - select(Packet).where(Packet.id == env.packet.decoded.request_id) - ) - if result.scalar_one_or_none(): - packet_id = env.packet.decoded.request_id + packet_id = env.packet.id if packet_id is not None: + now = datetime.datetime.now(datetime.UTC) + now_us = int(now.timestamp() * 1_000_000) session.add( Traceroute( packet_id=packet_id, route=env.packet.decoded.payload, done=not env.packet.decoded.want_response, gateway_node_id=int(env.gateway_id[1:], 16), - import_time=datetime.datetime.now(), + import_time=now, + import_time_us=now_us, ) ) diff --git a/meshview/store.py b/meshview/store.py index ef3c7ae..b50d7a7 100644 --- a/meshview/store.py +++ b/meshview/store.py @@ -1,9 +1,9 @@ from datetime import datetime, timedelta -from sqlalchemy import func, select, text +from sqlalchemy import and_, func, or_, select, text from sqlalchemy.orm import lazyload -from meshview import database +from meshview import database, models from meshview.models import Node, Packet, PacketSeen, Traceroute @@ -24,27 +24,65 @@ async def get_fuzzy_nodes(query): return result.scalars() -async def get_packets(node_id=None, portnum=None, after=None, 
before=None, limit=None): +async def get_packets( + from_node_id=None, + to_node_id=None, + node_id=None, # legacy: match either from OR to + portnum=None, + after=None, + contains=None, # NEW: SQL-level substring match + limit=50, +): + """ + SQLAlchemy 2.0 async ORM version. + Supports strict from/to/node filtering, substring payload search, + portnum, since, and limit. + """ + async with database.async_session() as session: - q = select(Packet) + stmt = select(models.Packet) + conditions = [] - if node_id: - q = q.where((Packet.from_node_id == node_id) | (Packet.to_node_id == node_id)) - if portnum: - q = q.where(Packet.portnum == portnum) - if after: - q = q.where(Packet.import_time > after) - if before: - q = q.where(Packet.import_time < before) + # Strict FROM filter + if from_node_id is not None: + conditions.append(models.Packet.from_node_id == from_node_id) - q = q.order_by(Packet.import_time.desc()) + # Strict TO filter + if to_node_id is not None: + conditions.append(models.Packet.to_node_id == to_node_id) - if limit is not None: - q = q.limit(limit) + # Legacy node ID filter: match either direction + if node_id is not None: + conditions.append( + or_(models.Packet.from_node_id == node_id, models.Packet.to_node_id == node_id) + ) - result = await session.execute(q) - packets = list(result.scalars()) - return packets + # Port filter + if portnum is not None: + conditions.append(models.Packet.portnum == portnum) + + # Timestamp filter + if after is not None: + conditions.append(models.Packet.import_time_us > after) + + # Case-insensitive substring search on UTF-8 payload (stored as BLOB) + if contains: + contains_lower = contains.lower() + conditions.append(func.lower(models.Packet.payload).like(f"%{contains_lower}%")) + + # Apply all conditions + if conditions: + stmt = stmt.where(and_(*conditions)) + + # Order newest → oldest + stmt = stmt.order_by(models.Packet.import_time_us.desc()) + + # Apply limit + stmt = stmt.limit(limit) + + # Execute query + 
result = await session.execute(stmt) + return result.scalars().all() async def get_packets_from(node_id=None, portnum=None, since=None, limit=500): @@ -68,21 +106,6 @@ async def get_packet(packet_id): return result.scalar_one_or_none() -async def get_uplinked_packets(node_id, portnum=None): - async with database.async_session() as session: - q = ( - select(Packet) - .join(PacketSeen) - .where(PacketSeen.node_id == node_id) - .order_by(Packet.import_time.desc()) - .limit(500) - ) - if portnum: - q = q.where(Packet.portnum == portnum) - result = await session.execute(q) - return result.scalars() - - async def get_packets_seen(packet_id): async with database.async_session() as session: result = await session.execute( @@ -145,23 +168,6 @@ async def get_mqtt_neighbors(since): return result -# We count the total amount of packages -# This is to be used by /stats in web.py -async def get_total_packet_count(): - async with database.async_session() as session: - q = select(func.count(Packet.id)) # Use SQLAlchemy's func to count packets - result = await session.execute(q) - return result.scalar() # Return the total count of packets - - -# We count the total amount of seen packets -async def get_total_packet_seen_count(): - async with database.async_session() as session: - q = select(func.count(PacketSeen.node_id)) # Use SQLAlchemy's func to count nodes - result = await session.execute(q) - return result.scalar() # Return the` total count of seen packets - - async def get_total_node_count(channel: str = None) -> int: try: async with database.async_session() as session: @@ -356,27 +362,155 @@ async def get_packet_stats( async def get_channels_in_period(period_type: str = "hour", length: int = 24): """ - Returns a list of distinct channels used in packets over a given period. + Returns a sorted list of distinct channels used in packets over a given period. 
period_type: "hour" or "day" length: number of hours or days to look back """ - now = datetime.now() + now_us = int(datetime.utcnow().timestamp() * 1_000_000) if period_type == "hour": - start_time = now - timedelta(hours=length) + delta_us = length * 3600 * 1_000_000 elif period_type == "day": - start_time = now - timedelta(days=length) + delta_us = length * 86400 * 1_000_000 else: raise ValueError("period_type must be 'hour' or 'day'") + start_us = now_us - delta_us + async with database.async_session() as session: - q = ( + stmt = ( select(Packet.channel) - .where(Packet.import_time >= start_time) + .where(Packet.import_time_us >= start_us) .distinct() .order_by(Packet.channel) ) - result = await session.execute(q) - channels = [row[0] for row in result if row[0] is not None] + result = await session.execute(stmt) + + channels = [ch for ch in result.scalars().all() if ch is not None] + return channels + + +async def get_total_packet_count( + period_type: str | None = None, + length: int | None = None, + channel: str | None = None, + from_node: int | None = None, + to_node: int | None = None, +): + """ + Count total packets, with ALL filters optional. + If no filters -> return ALL packets ever. + Uses import_time_us (microseconds). 
+ """ + + # CASE 1: no filters -> count everything + if ( + period_type is None + and length is None + and channel is None + and from_node is None + and to_node is None + ): + async with database.async_session() as session: + q = select(func.count(Packet.id)) + res = await session.execute(q) + return res.scalar() or 0 + + # CASE 2: filtered mode -> compute time window using import_time_us + now_us = int(datetime.now().timestamp() * 1_000_000) + + if period_type is None: + period_type = "day" + if length is None: + length = 1 + + if period_type == "hour": + start_time_us = now_us - (length * 3600 * 1_000_000) + elif period_type == "day": + start_time_us = now_us - (length * 86400 * 1_000_000) + else: + raise ValueError("period_type must be 'hour' or 'day'") + + async with database.async_session() as session: + q = select(func.count(Packet.id)).where(Packet.import_time_us >= start_time_us) + + if channel: + q = q.where(func.lower(Packet.channel) == channel.lower()) + if from_node: + q = q.where(Packet.from_node_id == from_node) + if to_node: + q = q.where(Packet.to_node_id == to_node) + + res = await session.execute(q) + return res.scalar() or 0 + + +async def get_total_packet_seen_count( + packet_id: int | None = None, + period_type: str | None = None, + length: int | None = None, + channel: str | None = None, + from_node: int | None = None, + to_node: int | None = None, +): + """ + Count total PacketSeen rows. + - If packet_id is provided -> count only that packet's seen entries. + - Otherwise match EXACT SAME FILTERS as get_total_packet_count. + Uses import_time_us for time window. 
+ """ + + # SPECIAL CASE: direct packet_id lookup + if packet_id is not None: + async with database.async_session() as session: + q = select(func.count(PacketSeen.packet_id)).where(PacketSeen.packet_id == packet_id) + res = await session.execute(q) + return res.scalar() or 0 + + # No filters -> return ALL seen entries + if ( + period_type is None + and length is None + and channel is None + and from_node is None + and to_node is None + ): + async with database.async_session() as session: + q = select(func.count(PacketSeen.packet_id)) + res = await session.execute(q) + return res.scalar() or 0 + + # Compute time window + now_us = int(datetime.now().timestamp() * 1_000_000) + + if period_type is None: + period_type = "day" + if length is None: + length = 1 + + if period_type == "hour": + start_time_us = now_us - (length * 3600 * 1_000_000) + elif period_type == "day": + start_time_us = now_us - (length * 86400 * 1_000_000) + else: + raise ValueError("period_type must be 'hour' or 'day'") + + # JOIN Packet so we can apply identical filters + async with database.async_session() as session: + q = ( + select(func.count(PacketSeen.packet_id)) + .join(Packet, Packet.id == PacketSeen.packet_id) + .where(Packet.import_time_us >= start_time_us) + ) + + if channel: + q = q.where(func.lower(Packet.channel) == channel.lower()) + if from_node: + q = q.where(Packet.from_node_id == from_node) + if to_node: + q = q.where(Packet.to_node_id == to_node) + + res = await session.execute(q) + return res.scalar() or 0 diff --git a/meshview/templates/base.html b/meshview/templates/base.html index 2cfb0be..44f3d0d 100644 --- a/meshview/templates/base.html +++ b/meshview/templates/base.html @@ -6,11 +6,7 @@ - - - - @@ -25,181 +21,182 @@ body.ready { opacity: 1; } - .htmx-indicator { opacity: 0; transition: opacity 500ms ease-in; } - .htmx-request .htmx-indicator { opacity: 1; } - #search_form { z-index: 4000; } - #details_map { width: 100%; height: 500px; } + + .htmx-indicator { + opacity: 0; 
+ transition: opacity 500ms ease-in; + } + .htmx-request .htmx-indicator { + opacity: 1; + } + + #search_form { + z-index: 4000; + } + + #details_map { + width: 100%; + height: 500px; + } + {% block css %}{% endblock %} + -
- -
- +
+ +
+ - -
-
- - - {% for option in node_options %} - - {% endfor %} - - {% set options = { - 1: "Text Message", - 3: "Position", - 4: "Node Info", - 67: "Telemetry", - 70: "Traceroute", - 71: "Neighbor Info", - } - %} - - -
-
+
- {% block body %}{% endblock %} +{% block body %}{% endblock %} -
- -
ver. unknown
-
+
+ +
ver. unknown
+
- - + // Version + document.getElementById("site-version").textContent = + "ver. " + (site.version || "unknown"); + + // Apply translations + applyTranslations(dict); + fillPortnumSelect(dict, "{{ portnum or '' }}"); + + document.body.classList.add("ready"); + } catch (err) { + console.error("Failed to initialize page:", err); + document.body.classList.add("ready"); + } +} + +document.addEventListener("DOMContentLoaded", initializePage); + + diff --git a/meshview/templates/buttons.html b/meshview/templates/buttons.html deleted file mode 100644 index 9959bff..0000000 --- a/meshview/templates/buttons.html +++ /dev/null @@ -1,16 +0,0 @@ -
- - TX/RX - - - Uplinked - -
diff --git a/meshview/templates/chat.html b/meshview/templates/chat.html index 24d333d..bfba95b 100644 --- a/meshview/templates/chat.html +++ b/meshview/templates/chat.html @@ -1,20 +1,63 @@ {% extends "base.html" %} {% block css %} -.timestamp { min-width: 10em; } +.timestamp { + min-width: 10em; + color: #ccc; +} + .chat-packet:nth-of-type(odd) { background-color: #3a3a3a; } -.chat-packet { border-bottom: 1px solid #555; padding: 8px; border-radius: 8px; } +.chat-packet { + border-bottom: 1px solid #555; + padding: 3px 6px; + border-radius: 6px; + margin: 0; +} + +/* Same column spacing as before */ +.chat-packet > [class^="col-"] { + padding-left: 10px !important; + padding-right: 10px !important; + padding-top: 1px !important; + padding-bottom: 1px !important; +} + .chat-packet:nth-of-type(even) { background-color: #333333; } -@keyframes flash { 0% { background-color: #ffe066; } 100% { background-color: inherit; } } +.channel { + font-style: italic; + color: #bbb; +} +.channel a { + font-style: normal; + color: #999; +} + +@keyframes flash { + 0% { background-color: #ffe066; } + 100% { background-color: inherit; } +} .chat-packet.flash { animation: flash 3.5s ease-out; } -.replying-to { font-size: 0.85em; color: #aaa; margin-top: 4px; padding-left: 20px; } +.replying-to { + font-size: 0.8em; + color: #aaa; + margin-top: 2px; + padding-left: 10px; +} .replying-to .reply-preview { color: #aaa; } {% endblock %} {% block body %} -
+
+ + +
+

+ 💬 Chat +

+
+
@@ -26,7 +69,19 @@ document.addEventListener("DOMContentLoaded", async () => { let lastTime = null; const renderedPacketIds = new Set(); const packetMap = new Map(); - let chatTranslations = {}; + let chatLang = {}; + + function applyTranslations(dict, root = document) { + root.querySelectorAll("[data-translate]").forEach(el => { + const key = el.dataset.translate; + const val = dict[key]; + if (!val) return; + if (el.placeholder) el.placeholder = val; + else if (el.tagName === "INPUT" && el.value) el.value = val; + else if (key === "footer") el.innerHTML = val; + else el.textContent = val; + }); + } function escapeHtml(text) { const div = document.createElement("div"); @@ -34,43 +89,50 @@ document.addEventListener("DOMContentLoaded", async () => { return div.innerHTML; } - function applyTranslations(translations, root=document) { - root.querySelectorAll("[data-translate-lang]").forEach(el => { - const key = el.dataset.translateLang; - if (translations[key]) el.textContent = translations[key]; - }); - root.querySelectorAll("[data-translate-lang-title]").forEach(el => { - const key = el.dataset.translateLangTitle; - if (translations[key]) el.title = translations[key]; - }); - } - function renderPacket(packet, highlight = false) { if (renderedPacketIds.has(packet.id)) return; renderedPacketIds.add(packet.id); packetMap.set(packet.id, packet); - const date = new Date(packet.import_time); - const formattedTime = date.toLocaleTimeString([], { hour:"numeric", minute:"2-digit", second:"2-digit", hour12:true }); - const formattedDate = `${(date.getMonth()+1).toString().padStart(2,"0")}/${date.getDate().toString().padStart(2,"0")}/${date.getFullYear()}`; + let date; + if (packet.import_time_us && packet.import_time_us > 0) { + date = new Date(packet.import_time_us / 1000); + } else if (packet.import_time) { + date = new Date(packet.import_time); + } else { + date = new Date(); + } + + const formattedTime = date.toLocaleTimeString([], { + hour:"numeric", + minute:"2-digit", 
+ second:"2-digit", + hour12:true + }); + const formattedDate = + `${(date.getMonth()+1).toString().padStart(2,"0")}/` + + `${date.getDate().toString().padStart(2,"0")}/` + + `${date.getFullYear()}`; + const formattedTimestamp = `${formattedTime} - ${formattedDate}`; let replyHtml = ""; if (packet.reply_id) { const parent = packetMap.get(packet.reply_id); + const replyPrefix = ``; if (parent) { - replyHtml = `
-
- + replyHtml = ` +
+ ${replyPrefix} ${escapeHtml((parent.long_name || "").trim() || `Node ${parent.from_node_id}`)}: ${escapeHtml(parent.payload || "")} -
-
`; +
`; } else { - replyHtml = ``; + replyHtml = ` +
+ ${replyPrefix} + ${packet.reply_id} +
`; } } @@ -78,33 +140,43 @@ document.addEventListener("DOMContentLoaded", async () => { div.className = "row chat-packet" + (highlight ? " flash" : ""); div.dataset.packetId = packet.id; div.innerHTML = ` - ${formattedTimestamp} + ${formattedTimestamp} - ✉️ - ${escapeHtml(packet.channel || "")} + 🔎 + ${escapeHtml(packet.channel || "")} - + ${escapeHtml((packet.long_name || "").trim() || `Node ${packet.from_node_id}`)} ${escapeHtml(packet.payload)}${replyHtml} `; chatContainer.prepend(div); - applyTranslations(chatTranslations, div); + applyTranslations(chatLang, div); if (highlight) setTimeout(() => div.classList.remove("flash"), 2500); } function renderPacketsEnsureDescending(packets, highlight=false) { if (!Array.isArray(packets) || packets.length===0) return; - const sortedDesc = packets.slice().sort((a,b)=>new Date(b.import_time)-new Date(a.import_time)); + const sortedDesc = packets.slice().sort((a,b)=>{ + const aTime = + (a.import_time_us && a.import_time_us > 0) + ? a.import_time_us + : (a.import_time ? new Date(a.import_time).getTime() * 1000 : 0); + const bTime = + (b.import_time_us && b.import_time_us > 0) + ? b.import_time_us + : (b.import_time ? 
new Date(b.import_time).getTime() * 1000 : 0); + return bTime - aTime; + }); for (let i=sortedDesc.length-1; i>=0; i--) renderPacket(sortedDesc[i], highlight); } async function fetchInitial() { try { - const resp = await fetch("/api/chat?limit=100"); + const resp = await fetch("/api/packets?portnum=1&limit=100"); const data = await resp.json(); if (data?.packets?.length) renderPacketsEnsureDescending(data.packets); lastTime = data?.latest_import_time || lastTime; @@ -113,7 +185,7 @@ document.addEventListener("DOMContentLoaded", async () => { async function fetchUpdates() { try { - const url = new URL("/api/chat", window.location.origin); + const url = new URL("/api/packets?portnum=1", window.location.origin); url.searchParams.set("limit","100"); if (lastTime) url.searchParams.set("since", lastTime); const resp = await fetch(url); @@ -123,18 +195,17 @@ document.addEventListener("DOMContentLoaded", async () => { } catch(err){ console.error("Fetch updates error:", err); } } - async function loadTranslations() { + async function loadChatLang() { try { const cfg = await window._siteConfigPromise; const langCode = cfg?.site?.language || "en"; const res = await fetch(`/api/lang?lang=${langCode}§ion=chat`); - chatTranslations = await res.json(); - applyTranslations(chatTranslations, document); + chatLang = await res.json(); + applyTranslations(chatLang); } catch(err){ console.error("Chat translation load failed:", err); } } - await loadTranslations(); - await fetchInitial(); + await Promise.all([loadChatLang(), fetchInitial()]); setInterval(fetchUpdates, 5000); }); diff --git a/meshview/templates/datalist.html b/meshview/templates/datalist.html deleted file mode 100644 index 5c65c4b..0000000 --- a/meshview/templates/datalist.html +++ /dev/null @@ -1,7 +0,0 @@ - - {% for option in node_options %} - - {% endfor %} - diff --git a/meshview/templates/firehose.html b/meshview/templates/firehose.html index 78bbc22..d40a1e7 100644 --- a/meshview/templates/firehose.html +++ 
b/meshview/templates/firehose.html @@ -2,7 +2,6 @@ {% block css %} .container { - max-width: 900px; margin: 0 auto; padding: 10px; } @@ -19,6 +18,7 @@ font-size: 0.85rem; color: #e4e9ee; } + .packet-table th, .packet-table td { border: 1px solid #3a3f44; padding: 6px 10px; @@ -30,6 +30,7 @@ } .packet-table tr:nth-of-type(odd) { background-color: #272b2f; } .packet-table tr:nth-of-type(even) { background-color: #212529; } + .port-tag { display: inline-block; padding: 1px 6px; @@ -38,8 +39,6 @@ font-weight: 500; color: #fff; } - -/* --- Color-coded port labels --- */ .port-0 { background-color: #6c757d; } .port-1 { background-color: #007bff; } .port-3 { background-color: #28a745; } @@ -51,16 +50,9 @@ .port-70 { background-color: #6f42c1; } .port-71 { background-color: #fd7e14; } -.to-mqtt { - font-style: italic; - color: #aaa; -} +.to-mqtt { font-style: italic; color: #aaa; } -/* --- Payload rows --- */ -.payload-row { - display: none; - background-color: #1b1e22; -} +.payload-row { display: none; background-color: #1b1e22; } .payload-cell { padding: 8px 12px; font-family: monospace; @@ -68,35 +60,43 @@ color: #b0bec5; border-top: none; } -.packet-table tr.expanded + .payload-row { - display: table-row; -} +.packet-table tr.expanded + .payload-row { display: table-row; } + .toggle-btn { cursor: pointer; color: #aaa; margin-right: 6px; font-weight: bold; } -.toggle-btn:hover { - color: #fff; +.toggle-btn:hover { color: #fff; } + +/* Link next to port tag */ +.inline-link { + margin-left: 6px; + font-weight: bold; + text-decoration: none; + color: #9fd4ff; +} +.inline-link:hover { + color: #c7e6ff; } {% endblock %} {% block body %}
-
📡 Live Feed
- +
📡 Live Feed
+
- - - - - + + + + + @@ -104,10 +104,30 @@ - -{% endblock %} \ No newline at end of file +{% endblock %} diff --git a/meshview/templates/map.html b/meshview/templates/map.html index 7bbc2e1..1c7d3d3 100644 --- a/meshview/templates/map.html +++ b/meshview/templates/map.html @@ -60,21 +60,36 @@ function hashToColor(str){ if(colorMap.has(str)) return colorMap.get(str); const function isInvalidCoord(n){ return !n||!n.lat||!n.long||n.lat===0||n.long===0||Number.isNaN(n.lat)||Number.isNaN(n.long); } // ---------------------- Packet Fetching ---------------------- -function fetchLatestPacket(){ fetch(`/api/packets?limit=1`).then(r=>r.json()).then(data=>{ lastImportTime=data.packets?.[0]?.import_time||new Date().toISOString(); }).catch(console.error); } +function fetchLatestPacket(){ + fetch(`/api/packets?limit=1`) + .then(r=>r.json()) + .then(data=>{ + lastImportTime=data.packets?.[0]?.import_time_us||0; + }) + .catch(console.error); +} + function fetchNewPackets(){ if(mapInterval <= 0) return; - if(!lastImportTime) return; - fetch(`/api/packets?since=${encodeURIComponent(lastImportTime)}`).then(r=>r.json()).then(data=>{ - if(!data.packets||data.packets.length===0) return; - let latest = lastImportTime; - data.packets.forEach(pkt=>{ - if(pkt.import_time>latest) latest=pkt.import_time; - const marker = markerById[pkt.from_node_id]; - const nodeData = nodeMap.get(pkt.from_node_id); - if(marker && nodeData) blinkNode(marker,nodeData.long_name,pkt.portnum); - }); - lastImportTime=latest; - }).catch(console.error); + if(lastImportTime===null) return; + const url = new URL(`/api/packets`, window.location.origin); + url.searchParams.set("since", lastImportTime); + url.searchParams.set("limit", 50); + + fetch(url) + .then(r=>r.json()) + .then(data=>{ + if(!data.packets || data.packets.length===0) return; + let latest = lastImportTime; + data.packets.forEach(pkt=>{ + if(pkt.import_time_us > latest) latest = pkt.import_time_us; + const marker = markerById[pkt.from_node_id]; + const 
nodeData = nodeMap.get(pkt.from_node_id); + if(marker && nodeData) blinkNode(marker,nodeData.long_name,pkt.portnum); + }); + lastImportTime = latest; + }) + .catch(console.error); } // ---------------------- Polling ---------------------- @@ -190,7 +205,7 @@ function renderNodesOnMap(){ marker.nodeId = node.key; marker.originalColor = color; markerById[node.key] = marker; - const popup = `${node.long_name} (${node.short_name})
+ const popup = `${node.long_name} (${node.short_name})
Channel: ${node.channel}
Model: ${node.hw_model}
Role: ${node.role}
diff --git a/meshview/templates/net.html b/meshview/templates/net.html index af9efe6..fb2c451 100644 --- a/meshview/templates/net.html +++ b/meshview/templates/net.html @@ -1,75 +1,184 @@ {% extends "base.html" %} {% block css %} -.timestamp { - min-width:10em; -} -.chat-packet:nth-of-type(odd){ - background-color: #3a3a3a; /* Lighter than #2a2a2a */ -} +.timestamp { min-width: 10em; color: #ccc; } + +.chat-packet:nth-of-type(odd) { background-color: #3a3a3a; } .chat-packet { border-bottom: 1px solid #555; - padding: 8px; - border-radius: 8px; /* Adjust the value to make the corners more or less rounded */ + padding: 3px 6px; + border-radius: 6px; + margin: 0; } -.chat-packet:nth-of-type(even){ - background-color: #333333; /* Slightly lighter than the previous #181818 */ + +.chat-packet > [class^="col-"] { + padding-left: 10px !important; + padding-right: 10px !important; + padding-top: 1px !important; + padding-bottom: 1px !important; } + +.chat-packet:nth-of-type(even) { background-color: #333333; } + +.channel { font-style: italic; color: #bbb; } +.channel a { font-style: normal; color: #999; } + +@keyframes flash { 0% { background-color: #ffe066; } 100% { background-color: inherit; } } +.chat-packet.flash { animation: flash 3.5s ease-out; } + +.replying-to { font-size: 0.8em; color: #aaa; margin-top: 2px; padding-left: 10px; } +.replying-to .reply-preview { color: #aaa; } + +#weekly-message { margin: 15px 0; font-weight: bold; color: #ffeb3b; } +#total-count { margin-bottom: 10px; font-style: italic; color: #ccc; } {% endblock %} {% block body %}
- {{ site_config["site"]["weekly_net_message"] }}

+
Loading weekly message...
+
Total messages: 0
-
- Number of Check-ins: {{ packets|length }} -
-
- -
- {% for packet in packets %} -
- - {{ packet.import_time.strftime('%-I:%M:%S %p - %m-%d-%Y') }} - - - ✉️ {{ packet.from_node.channel }} - - - - {{ packet.from_node.long_name or (packet.from_node_id | node_id_to_hex) }} - - - - {{ packet.payload }} - -
- {% else %} - No packets found. - {% endfor %} +
+
+
{% endblock %} diff --git a/meshview/templates/node.html b/meshview/templates/node.html index ef8ca3c..f84f31e 100644 --- a/meshview/templates/node.html +++ b/meshview/templates/node.html @@ -1,234 +1,885 @@ {% extends "base.html" %} {% block css %} - /* Styles for the node info card */ - #node_info { - height: 100%; - } +{{ super() }} - /* Styles for the map */ - #map { - height: 100%; - min-height: 400px; - } +/* --- Map --- */ +#map { + width: 100%; + height: 400px; + margin-bottom: 20px; + border-radius: 8px; + display: block; +} +.leaflet-container { + background: #1a1a1a; + z-index: 1; +} - /* Styles for packet details section */ - #packet_details { - height: 95vh; - overflow: scroll; - top: 3em; - } +/* --- Node Info (3-column compact grid) --- */ +.node-info { + background-color: #1f2226; + border: 1px solid #3a3f44; + color: #ddd; + font-size: 0.88rem; + padding: 12px 14px; + margin-bottom: 14px; + border-radius: 8px; - /* Ensure inline display for details */ - div.tab-pane > dl { - display: inline-block; - } + display: grid; + grid-template-columns: repeat(3, minmax(120px, 1fr)); + grid-column-gap: 14px; + grid-row-gap: 6px; +} - /* Set the maximum width of the page to 900px */ - .container { - max-width: 900px; - margin: 0 auto; /* Center the content horizontally */ - } +.node-info div { + padding: 2px 0; +} + +.node-info strong { + color: #9fd4ff; + font-weight: 600; +} + +/* --- Charts --- */ +.chart-container { + width: 100%; + height: 320px; + margin-bottom: 25px; + border: 1px solid #3a3f44; + border-radius: 8px; + overflow: hidden; + background-color: #16191d; +} +.chart-header { + display: flex; + justify-content: space-between; + align-items: center; + background: #1f2226; + padding: 6px 12px; + font-weight: bold; + border-bottom: 1px solid #333; + font-size: 1rem; + letter-spacing: 0.5px; +} +.chart-actions button { + background: rgba(255,255,255,0.05); + border: 1px solid #555; + border-radius: 4px; + color: #ccc; + font-size: 0.8rem; + 
padding: 2px 6px; + cursor: pointer; + transition: background 0.2s; +} +.chart-actions button:hover { + color: #fff; + background: rgba(255,255,255,0.15); + border-color: #888; +} + +/* --- Packet Table --- */ +.packet-table { + width: 100%; + border-collapse: collapse; + font-size: 0.85rem; + color: #e4e9ee; +} +.packet-table th, .packet-table td { + border: 1px solid #3a3f44; + padding: 6px 10px; + text-align: left; +} +.packet-table th { + background-color: #1f2226; + font-weight: bold; +} +.packet-table tr:nth-of-type(odd) { background-color: #272b2f; } +.packet-table tr:nth-of-type(even) { background-color: #212529; } + +.port-tag { + padding: 2px 6px; + border-radius: 6px; + font-size: 0.75rem; + color: #fff; +} +.port-1 { background-color: #007bff; } +.port-3 { background-color: #28a745; } +.port-4 { background-color: #ffc107; color:#000; } +.port-5 { background-color: #dc3545; } +.port-6 { background-color: #20c997; } +.port-67 { background-color: #17a2b8; } +.port-70 { background-color: #ff7043; } +.port-71 { background-color: #ff66cc; } +.port-0 { background-color: #6c757d; } + +.to-mqtt { font-style: italic; color: #aaa; } +.payload-row { display: none; background-color: #1b1e22; } +.payload-cell { padding: 8px 12px; font-family: monospace; white-space: pre-wrap; color: #b0bec5; border-top: none; } +.packet-table tr.expanded + .payload-row { display: table-row; } +.toggle-btn { cursor: pointer; color: #aaa; margin-right: 6px; } +.toggle-btn:hover { color: #fff; } + +/* --- Chart Modal --- */ +#chartModal { + display:none; position:fixed; top:0; left:0; width:100%; height:100%; + background:rgba(0,0,0,0.9); z-index:9999; + align-items:center; justify-content:center; +} +#chartModal > div { + background:#1b1e22; border-radius:8px; + width:90%; height:85%; padding:10px; +} + +/* Link next to port tag */ +.inline-link { + margin-left: 6px; + font-weight: bold; + text-decoration: none; + color: #9fd4ff; +} +.inline-link:hover { + color: #c7e6ff; +} {% 
endblock %} {% block body %} -
-
-
-
- -
- {% if node %} -
- {{node.short_name}} -

{{node.long_name}}

-
-
-
- {% if trace %} -
- {% endif %} -
NodeID
-
{{node.node_id|node_id_to_hex}}
-
Channel
-
{{node.channel}}
-
HW Model
-
{{node.hw_model}}
-
Role
-
{{node.role}}
- {% if node.firmware %} -
Firmware
-
{{node.firmware}}
- {% endif %} -
- Get node traffic totals - {% include "node_graphs.html" %} -
- {% else %} -
- A NodeInfo has not been seen. -
- {% endif %} -
-
-
+
-
-
- -
-
+
📡 Specifications:
-
-
- {% include 'packet_list.html' %} + +
+
Node ID:
+
Long Name:
+
Short Name:
+ +
Hardware Model:
+
Firmware:
+
Role:
+ +
Channel:
+
Latitude:
+
Longitude:
+ +
Last Update:
+
+ + +
+ + +
+
+ 🔋 Battery & Voltage +
+ +
-
+
+
+ +
+
+ 📶 Air & Channel Utilization +
+ + +
+
+
+
+ + + + + + + +
Packet IDFromToPortLinksTimePacket IDFromToPort
+ + + + + + + + + + +
TimePacket IDFromToPort
+ +
+ + +
+
+
+ +
+
+ +} -{% if trace %} - - -{% endif %} - -{% endblock %} \ No newline at end of file +{% endblock %} diff --git a/meshview/templates/node2.html b/meshview/templates/node2.html deleted file mode 100644 index cf9fd61..0000000 --- a/meshview/templates/node2.html +++ /dev/null @@ -1,58 +0,0 @@ -{% extends "base.html" %} - -{% block css %} - #node_info { - height:100%; - } - #map{ - height:100%; - min-height: 400px; - } - #packet_details{ - height: 95vh; - overflow: scroll; - top: 3em; - } - div.tab-pane > dl { - display: inline-block; - } -{% endblock %} - -{% block body %} - -{% include "search_form.html" %} - -
-
-
- {% if node %} -
- {{node.long_name}} -
-
-
-
ShortName
-
{{node.short_name}}
-
HW Model
-
{{node.hw_model}}
-
Role
-
{{node.role}}
-
-
- {% else %} -
- A NodeInfo has not been seen. -
- {% endif %} -
-
-
-
- {% include 'packet_list.html' %} -
-
-
-
-
-
-{% endblock %} diff --git a/meshview/templates/node_graphs.html b/meshview/templates/node_graphs.html deleted file mode 100644 index a068ea2..0000000 --- a/meshview/templates/node_graphs.html +++ /dev/null @@ -1,263 +0,0 @@ -{% macro graph(name) %} -
-{% endmacro %} - - -
- - -
- - - - - -
- {% for name in [ - "power", "utilization", "temperature", "humidity", "pressure", - "iaq", "wind_speed", "wind_direction", "power_metrics", "neighbors" - ] %} -
- {{ graph(name) | safe }} -
- {% endfor %} -
- - - - - - - - diff --git a/meshview/templates/node_traffic.html b/meshview/templates/node_traffic.html deleted file mode 100644 index 571114a..0000000 --- a/meshview/templates/node_traffic.html +++ /dev/null @@ -1,109 +0,0 @@ -{% extends "base.html" %} - -{% block css %} -.table-title { - font-size: 2rem; - text-align: center; - margin-bottom: 20px; - } - - .traffic-table { - width: 50%; - border-collapse: collapse; - margin: 0 auto; - font-family: Arial, sans-serif; - } - - .traffic-table th, - .traffic-table td { - padding: 10px 15px; - text-align: left; - border: 1px solid #474b4e; - } - - .traffic-table th { - background-color: #272b2f; - color: white; - } - - .traffic:nth-of-type(odd) { - background-color: #272b2f; /* Lighter than #2a2a2a */ - } - - .traffic { - border: 1px solid #474b4e; - padding: 8px; - margin-bottom: 4px; - border-radius: 8px; - } - - .traffic:nth-of-type(even) { - background-color: #212529; /* Slightly lighter than the previous #181818 */ - } - - .footer { - text-align: center; - margin-top: 20px; - } - -{% endblock %} - -{% block body %} -
-

- {% if traffic %} - {{ traffic[0].long_name }} (last 24 hours) - {% else %} - No Traffic Data Available - {% endif %} -

- - - - - - - - - {% for port in traffic %} - - - - - {% else %} - - - - {% endfor %} - -
Port NumberPacket Count
- {% if port.portnum == 1 %} - TEXT_MESSAGE_APP - {% elif port.portnum == 3 %} - POSITION_APP - {% elif port.portnum == 4 %} - NODEINFO_APP - {% elif port.portnum == 5 %} - ROUTING_APP - {% elif port.portnum == 8 %} - WAYPOINT_APP - {% elif port.portnum == 67 %} - TELEMETRY_APP - {% elif port.portnum == 70 %} - TRACEROUTE_APP - {% elif port.portnum == 71 %} - NEIGHBORINFO_APP - {% elif port.portnum == 73 %} - MAP_REPORT_APP - {% elif port.portnum == 0 %} - UNKNOWN_APP - {% else %} - {{ port.portnum }} - {% endif %} - {{ port.packet_count }}
No traffic data available for this node.
-
- - -{% endblock %} diff --git a/meshview/templates/nodegraph.html b/meshview/templates/nodegraph.html index 3d25625..fe515b1 100644 --- a/meshview/templates/nodegraph.html +++ b/meshview/templates/nodegraph.html @@ -13,11 +13,13 @@ border-radius: 10px; box-shadow: 0 4px 8px rgba(0,0,0,0.1); } + +/* Search UI */ .search-container { position: absolute; bottom: 100px; left: 10px; - z-index: 10;1 + z-index: 10; display: flex; flex-direction: column; gap: 5px; @@ -37,6 +39,8 @@ .search-container button:hover { background-color: #0056b3; } + +/* Node info box */ #node-info { position: absolute; bottom: 10px; @@ -52,6 +56,8 @@ max-height: 250px; overflow-y: auto; } + +/* Legend */ #legend { position: absolute; bottom: 10px; @@ -67,9 +73,6 @@ } .legend-category { margin-right: 10px; - code { - color: inherit; - } } .legend-box { display: inline-block; @@ -77,22 +80,23 @@ height: 12px; margin-right: 5px; border-radius: 3px; - &.circle { - border-radius: 6px; - } } +.circle { border-radius: 6px; } {% endblock %} {% block body %}
+
- + +
+
Long Name:
Short Name:
@@ -100,196 +104,280 @@ Hardware Model:
+
-
Traceroute
-
Neighbor
+
Traceroute
+
Neighbor
-
ROUTER
-
ROUTER_LATE
+
ROUTER
+
ROUTER_LATE
-
CLIENT
-
CLIENT_MUTE
+
CLIENT
+
CLIENT_MUTE
-
CLIENT_BASE
-
Other
+
CLIENT_BASE
+
Other
-
Unknown
+
Unknown
+ {% endblock %} diff --git a/meshview/templates/nodelist.html b/meshview/templates/nodelist.html index 9ef3da7..65195d3 100644 --- a/meshview/templates/nodelist.html +++ b/meshview/templates/nodelist.html @@ -120,21 +120,10 @@ select, .export-btn, .search-box, .clear-btn {
- - - - - - - + + + + @@ -149,7 +138,7 @@ select, .export-btn, .search-box, .clear-btn { - + @@ -157,52 +146,75 @@ select, .export-btn, .search-box, .clear-btn { - - + + - +
ShortShort Long Name HW Model Firmware Last Latitude Last Longitude Channel Last Update FavoriteLast Seen
Loading nodes...
Loading nodes...
+{% endblock %} diff --git a/meshview/templates/packet_details.html b/meshview/templates/packet_details.html deleted file mode 100644 index b93aeec..0000000 --- a/meshview/templates/packet_details.html +++ /dev/null @@ -1,132 +0,0 @@ -
- -{% for seen in packets_seen %} -
-
- {{seen.node.long_name}}( - - {{seen.node_id|node_id_to_hex}} - - ) -
-
-
-
-
Import Time
-
{{seen.import_time.strftime('%-I:%M:%S %p - %m-%d-%Y')}}
-
rx_time
-
{{seen.rx_time|format_timestamp}}
-
hop_limit
-
{{seen.hop_limit}}
-
hop_start
-
{{seen.hop_start}}
-
channel
-
{{seen.channel}}
-
rx_snr
-
{{seen.rx_snr}}
-
rx_rssi
-
{{seen.rx_rssi}}
-
topic
-
{{seen.topic}}
-
-
-
-
-{% endfor %} - -{% if map_center %} - - -{% endif %} diff --git a/meshview/templates/packet_index.html b/meshview/templates/packet_index.html deleted file mode 100644 index 9d1dca8..0000000 --- a/meshview/templates/packet_index.html +++ /dev/null @@ -1,24 +0,0 @@ -{% extends "base.html" %} -{% block css %} - - /* Set the maximum width of the page to 900px */ - .container { - max-width: 900px; - margin: 0 auto; /* Center the content horizontally */ - } -{% endblock %} -{% block body %} -
-
-
- {% include 'packet.html' %} -
-
-
-
-
-{% endblock %} diff --git a/meshview/templates/packet_list.html b/meshview/templates/packet_list.html deleted file mode 100644 index 24f2f8c..0000000 --- a/meshview/templates/packet_list.html +++ /dev/null @@ -1,7 +0,0 @@ -
- {% for packet in packets %} - {% include 'packet.html' %} - {% else %} - No packets found. - {% endfor %} -
diff --git a/meshview/templates/search.html b/meshview/templates/search.html deleted file mode 100644 index 8c9d2e7..0000000 --- a/meshview/templates/search.html +++ /dev/null @@ -1,21 +0,0 @@ -{% extends "base.html" %} - - -{% block body %} - -{% include "search_form.html" %} - - - -{% endblock %} diff --git a/meshview/templates/search_form.html b/meshview/templates/search_form.html deleted file mode 100644 index 94cc99c..0000000 --- a/meshview/templates/search_form.html +++ /dev/null @@ -1,44 +0,0 @@ -
-
- - {% include "datalist.html" %} - {% set options = { - 1: "Text Message", - 3: "Position", - 4: "Node Info", - 67: "Telemetry", - 70: "Traceroute", - 71: "Neighbor Info", - } - %} - - -
-
diff --git a/meshview/templates/stats.html b/meshview/templates/stats.html index e045371..29287fc 100644 --- a/meshview/templates/stats.html +++ b/meshview/templates/stats.html @@ -93,26 +93,31 @@ {% block body %}
-

Mesh Statistics - Summary (all available in Database)

+

+ Mesh Statistics - Summary (all available in Database) +

+

Total Nodes

-
{{ "{:,}".format(total_nodes) }}
+
0

Total Packets

-
{{ "{:,}".format(total_packets) }}
+
0

Total Packets Seen

-
{{ "{:,}".format(total_packets_seen) }}
+
0
-

Packets per Day - All Ports (Last 14 Days)

+

+ Packets per Day - All Ports (Last 14 Days) +

Total: 0
@@ -121,7 +126,9 @@
-

Packet Types - Last 24 Hours

+

+ Packet Types - Last 24 Hours +

@@ -131,7 +138,9 @@
-

Packets per Day - Text Messages (Port 1, Last 14 Days)

+

+ Packets per Day - Text Messages (Port 1, Last 14 Days) +

Total: 0
@@ -140,7 +149,9 @@
-

Packets per Hour - All Ports

+

+ Packets per Hour - All Ports +

Total: 0
@@ -148,7 +159,9 @@
-

Packets per Hour - Text Messages (Port 1)

+

+ Packets per Hour - Text Messages (Port 1) +

Total: 0
@@ -214,17 +227,123 @@ async function fetchStats(period_type,length,portnum=null,channel=null){ }catch{return [];} } -async function fetchNodes(){ try{ const res=await fetch("/api/nodes"); const json=await res.json(); return json.nodes||[];}catch{return [];} } -async function fetchChannels(){ try{ const res = await fetch("/api/channels"); const json = await res.json(); return json.channels || [];}catch{return [];} } +async function fetchNodes(){ + try{ + const res=await fetch("/api/nodes"); + const json=await res.json(); + return json.nodes||[]; + }catch{ + return []; + } +} -function processCountField(nodes,field){ const counts={}; nodes.forEach(n=>{ const key=n[field]||"Unknown"; counts[key]=(counts[key]||0)+1; }); return Object.entries(counts).map(([name,value])=>({name,value})); } -function updateTotalCount(domId,data){ const el=document.getElementById(domId); if(!el||!data.length) return; const total=data.reduce((acc,d)=>acc+(d.count??d.packet_count??0),0); el.textContent=`Total: ${total.toLocaleString()}`; } -function prepareTopN(data,n=20){ data.sort((a,b)=>b.value-a.value); let top=data.slice(0,n); if(data.length>n){ const otherValue=data.slice(n).reduce((sum,item)=>sum+item.value,0); top.push({name:"Other", value:otherValue}); } return top; } +async function fetchChannels(){ + try{ + const res = await fetch("/api/channels"); + const json = await res.json(); + return json.channels || []; + }catch{ + return []; + } +} + +function processCountField(nodes,field){ + const counts={}; + nodes.forEach(n=>{ + const key=n[field]||"Unknown"; + counts[key]=(counts[key]||0)+1; + }); + return Object.entries(counts).map(([name,value])=>({name,value})); +} + +function updateTotalCount(domId,data){ + const el=document.getElementById(domId); + if(!el||!data.length) return; + const total=data.reduce((acc,d)=>acc+(d.count??d.packet_count??0),0); + el.textContent=`Total: ${total.toLocaleString()}`; +} + +function prepareTopN(data,n=20){ + data.sort((a,b)=>b.value-a.value); + 
let top=data.slice(0,n); + if(data.length>n){ + const otherValue=data.slice(n).reduce((sum,item)=>sum+item.value,0); + top.push({name:"Other", value:otherValue}); + } + return top; +} // --- Chart Rendering --- -function renderChart(domId,data,type,color){ const el=document.getElementById(domId); if(!el) return; const chart=echarts.init(el); const periods=data.map(d=>(d.period??d.period===0)?d.period.toString():''); const counts=data.map(d=>d.count??d.packet_count??0); chart.setOption({backgroundColor:'#272b2f', tooltip:{trigger:'axis'}, grid:{left:'6%', right:'6%', bottom:'18%'}, xAxis:{type:'category', data:periods, axisLine:{lineStyle:{color:'#aaa'}}, axisLabel:{rotate:45,color:'#ccc'}}, yAxis:{type:'value', axisLine:{lineStyle:{color:'#aaa'}}, axisLabel:{color:'#ccc'}}, series:[{data:counts,type:type,smooth:type==='line',itemStyle:{color:color}, areaStyle:type==='line'?{}:undefined}]}); return chart; } +function renderChart(domId,data,type,color){ + const el=document.getElementById(domId); + if(!el) return; + const chart=echarts.init(el); + const periods=data.map(d=>(d.period??d.period===0)?d.period.toString():''); + const counts=data.map(d=>d.count??d.packet_count??0); + chart.setOption({ + backgroundColor:'#272b2f', + tooltip:{trigger:'axis'}, + grid:{left:'6%', right:'6%', bottom:'18%'}, + xAxis:{ + type:'category', + data:periods, + axisLine:{lineStyle:{color:'#aaa'}}, + axisLabel:{rotate:45,color:'#ccc'} + }, + yAxis:{ + type:'value', + axisLine:{lineStyle:{color:'#aaa'}}, + axisLabel:{color:'#ccc'} + }, + series:[{ + data:counts, + type:type, + smooth:type==='line', + itemStyle:{color:color}, + areaStyle:type==='line'?{}:undefined + }] + }); + return chart; +} -function renderPieChart(elId,data,name){ const el=document.getElementById(elId); if(!el) return; const chart=echarts.init(el); const top20=prepareTopN(data,20); chart.setOption({backgroundColor:"#272b2f", tooltip:{trigger:"item", formatter: params=>`${params.name}: ${Math.round(params.percent)}% 
(${params.value})`}, series:[{name:name, type:"pie", radius:["30%","70%"], center:["50%","50%"], avoidLabelOverlap:true, itemStyle:{borderRadius:6,borderColor:"#272b2f",borderWidth:2}, label:{show:true,formatter:"{b}\n{d}%", color:"#ccc", fontSize:10}, labelLine:{show:true,length:10,length2:6}, data:top20}]}); return chart; } +function renderPieChart(elId,data,name){ + const el=document.getElementById(elId); + if(!el) return; + const chart=echarts.init(el); + const top20=prepareTopN(data,20); + chart.setOption({ + backgroundColor:"#272b2f", + tooltip:{ + trigger:"item", + formatter: params=>`${params.name}: ${Math.round(params.percent)}% (${params.value})` + }, + series:[{ + name:name, + type:"pie", + radius:["30%","70%"], + center:["50%","50%"], + avoidLabelOverlap:true, + itemStyle:{ + borderRadius:6, + borderColor:"#272b2f", + borderWidth:2 + }, + label:{ + show:true, + formatter:"{b}\n{d}%", + color:"#ccc", + fontSize:10 + }, + labelLine:{ + show:true, + length:10, + length2:6 + }, + data:top20 + }] + }); + return chart; +} // --- Packet Type Pie Chart --- async function fetchPacketTypeBreakdown(channel=null) { @@ -234,8 +353,10 @@ async function fetchPacketTypeBreakdown(channel=null) { const total = (data || []).reduce((sum,d)=>sum+(d.count??d.packet_count??0),0); return {portnum: pn, count: total}; }); + const allData = await fetchStats('hour',24,null,channel); const totalAll = allData.reduce((sum,d)=>sum+(d.count??d.packet_count??0),0); + const results = await Promise.all(requests); const trackedTotal = results.reduce((sum,d)=>sum+d.count,0); const other = Math.max(totalAll - trackedTotal,0); @@ -250,40 +371,102 @@ let chartHwModel, chartRole, chartChannel; let chartPacketTypes; async function init(){ + // Channel selector const channels = await fetchChannels(); const select = document.getElementById("channelSelect"); - channels.forEach(ch=>{ const opt = document.createElement("option"); opt.value = ch; opt.textContent = ch; select.appendChild(opt); }); + 
channels.forEach(ch=>{ + const opt = document.createElement("option"); + opt.value = ch; + opt.textContent = ch; + select.appendChild(opt); + }); + // Daily all ports const dailyAllData=await fetchStats('day',14); updateTotalCount('total_daily_all',dailyAllData); chartDailyAll=renderChart('chart_daily_all',dailyAllData,'line','#66bb6a'); + // Daily port 1 const dailyPort1Data=await fetchStats('day',14,1); updateTotalCount('total_daily_portnum_1',dailyPort1Data); chartDailyPortnum1=renderChart('chart_daily_portnum_1',dailyPort1Data,'bar','#ff5722'); + // Hourly all ports const hourlyAllData=await fetchStats('hour',24); updateTotalCount('total_hourly_all',hourlyAllData); chartHourlyAll=renderChart('chart_hourly_all',hourlyAllData,'bar','#03dac6'); + // Hourly per port const portnums=[1,3,4,67,70,71]; const colors=['#ff5722','#2196f3','#9c27b0','#ffeb3b','#795548','#4caf50']; const domIds=['chart_portnum_1','chart_portnum_3','chart_portnum_4','chart_portnum_67','chart_portnum_70','chart_portnum_71']; const totalIds=['total_portnum_1','total_portnum_3','total_portnum_4','total_portnum_67','total_portnum_70','total_portnum_71']; - const allData=await Promise.all(portnums.map(pn=>fetchStats('hour',24,pn))); - for(let i=0;ifetchStats('hour',24,pn))); + for(let i=0;id.count>0).map(d=>({ name: d.portnum==="other" ? "Other" : (PORTNUM_LABELS[d.portnum]||`Port ${d.portnum}`), value: d.count })); + const formatted = packetTypesData + .filter(d=>d.count>0) + .map(d=>({ + name: d.portnum==="other" + ? 
"Other" + : (PORTNUM_LABELS[d.portnum]||`Port ${d.portnum}`), + value: d.count + })); chartPacketTypes = renderPieChart("chart_packet_types",formatted,"Packet Types (Last 24h)"); + + // Total packet + total seen from /api/stats/count + try { + const countsRes = await fetch("/api/stats/count"); + if (countsRes.ok) { + const countsJson = await countsRes.json(); + const elPackets = document.getElementById("summary_packets"); + const elSeen = document.getElementById("summary_seen"); + if (elPackets) { + elPackets.textContent = (countsJson.total_packets || 0).toLocaleString(); + } + if (elSeen) { + elSeen.textContent = (countsJson.total_seen || 0).toLocaleString(); + } + } + } catch (err) { + console.error("Failed to load /api/stats/count:", err); + } } -window.addEventListener('resize',()=>{ [chartHourlyAll,chartPortnum1,chartPortnum3,chartPortnum4,chartPortnum67,chartPortnum70,chartPortnum71, chartDailyAll,chartDailyPortnum1,chartHwModel,chartRole,chartChannel,chartPacketTypes].forEach(c=>c?.resize()); }); +window.addEventListener('resize',()=>{ + [ + chartHourlyAll, + chartPortnum1, + chartPortnum3, + chartPortnum4, + chartPortnum67, + chartPortnum70, + chartPortnum71, + chartDailyAll, + chartDailyPortnum1, + chartHwModel, + chartRole, + chartChannel, + chartPacketTypes + ].forEach(c=>c?.resize()); +}); const modal=document.getElementById("chartModal"); const modalChartEl=document.getElementById("modalChart"); @@ -345,31 +528,51 @@ document.querySelectorAll(".export-btn").forEach(btn=>{ document.getElementById("channelSelect").addEventListener("change", async (e)=>{ const channel = e.target.value; const packetTypesData = await fetchPacketTypeBreakdown(channel); - const formatted = packetTypesData.filter(d=>d.count>0).map(d=>({ name: d.portnum==="other" ? "Other" : (PORTNUM_LABELS[d.portnum]||`Port ${d.portnum}`), value: d.count })); + const formatted = packetTypesData + .filter(d=>d.count>0) + .map(d=>({ + name: d.portnum==="other" + ? 
"Other" + : (PORTNUM_LABELS[d.portnum]||`Port ${d.portnum}`), + value: d.count + })); chartPacketTypes?.dispose(); chartPacketTypes = renderPieChart("chart_packet_types",formatted,"Packet Types (Last 24h)"); }); +// Kick everything off init(); -// --- Translation Loader --- -async function loadTranslations() { - const langCode = "{{ site_config.get('site', {}).get('language','en') }}"; +// --- Load config and translations --- +async function loadConfigAndTranslations() { + let langCode = "en"; try { - const res = await fetch(`/api/lang?lang=${langCode}§ion=stats`); - window.statsTranslations = await res.json(); - } catch(err){ + const resConfig = await fetch("/api/config"); + const cfg = await resConfig.json(); + window.site_config = cfg; + langCode = cfg?.site?.language || "en"; + } catch(err) { + console.error("Failed to load /api/config:", err); + window.site_config = { site: { language: "en" } }; + } + + try { + const resLang = await fetch(`/api/lang?lang=${langCode}§ion=stats`); + window.statsTranslations = await resLang.json(); + } catch(err) { console.error("Stats translation load failed:", err); window.statsTranslations = {}; } -} -function applyTranslations() { + + // Apply translations const t = window.statsTranslations || {}; document.querySelectorAll("[data-translate-lang]").forEach(el=>{ const key = el.getAttribute("data-translate-lang"); if(t[key]) el.textContent = t[key]; }); } -loadTranslations().then(applyTranslations); + +// Call after init +loadConfigAndTranslations(); {% endblock %} diff --git a/meshview/templates/stats.html.old b/meshview/templates/stats.html.old deleted file mode 100644 index a826a0f..0000000 --- a/meshview/templates/stats.html.old +++ /dev/null @@ -1,80 +0,0 @@ -{% extends "base.html" %} - -{% block css %} - #packet_details { - height: 95vh; - overflow: auto; - } - - .main-container, .container { - max-width: 600px; - margin: 0 auto; - text-align: center; - } - - .card-section { - background-color: #272b2f; - border: 1px 
solid #474b4e; - padding: 15px 20px; - margin-bottom: 10px; - border-radius: 10px; - transition: background-color 0.2s ease; - } - - .card-section:hover { - background-color: #2f3338; - } - - .section-header { - font-size: 16px; - margin: 0; - font-weight: 500; - } - - .section-value { - font-weight: 700; - color: #03dac6; - } - - .percentage { - font-size: 12px; - color: #ffeb3b; - font-weight: 400; - } - - .main-header { - font-size: 22px; - margin-bottom: 20px; - font-weight: 600; - } -{% endblock %} - -{% block body %} -
-

Mesh Statistics

- - -
-

- Total Active Nodes (24 hours):
- {{ "{:,}".format(total_nodes) }} -

-
- - -
-

- Total Packets (14 days): - {{ "{:,}".format(total_packets) }} -

-
- - -
-

- Total MQTT Reports (14 days): - {{ "{:,}".format(total_packets_seen) }} -

-
-
-{% endblock %} diff --git a/meshview/templates/top.html b/meshview/templates/top.html index cb72ea1..9531503 100644 --- a/meshview/templates/top.html +++ b/meshview/templates/top.html @@ -2,287 +2,283 @@ {% block css %} {% endblock %} {% block body %} -

Top Traffic Nodes (last 24 hours)

- - +

Top Nodes Traffic

-
-

- This chart shows a bell curve (normal distribution) based on the total "Times Seen" values for all nodes. It helps visualize how frequently nodes are heard, relative to the average. -

-

- This "Times Seen" value is the closest that we can get to Mesh utilization by node. -

-

- Mean: - - Standard Deviation: -

+
+ +
+
+ + +
+ +
+ + +
- -
- -{% if nodes %} -
- - - - - - - - - - - - -
Long NameShort NameChannelPackets SentTimes SeenSeen % of Mean
+ + +
+ + + + + + + + + + + + +
Long NameShort NameChannelSent (24h)Seen (24h)Avg Gateways
+
+
-{% else %} -

No top traffic nodes available.

-{% endif %} - -{% if timing_data %} - -
-

⚡ Performance Metrics

-
-
- Database Query:
- {{ timing_data.db_query_ms }}ms -
-
- Data Processing:
- {{ timing_data.processing_ms }}ms -
-
- Total Time:
- {{ timing_data.total_ms }}ms -
-
- Nodes Processed:
- {{ timing_data.node_count }} -
-
- Total Packets:
- {{ "{:,}".format(timing_data.total_packets) }} -
-
- Times Seen:
- {{ "{:,}".format(timing_data.total_seen) }} -
-
-

- 📊 Use these metrics to measure performance before and after database index changes -

-
-{% endif %} - {% endblock %} diff --git a/meshview/templates/traceroute.html b/meshview/templates/traceroute.html deleted file mode 100644 index c2e329a..0000000 --- a/meshview/templates/traceroute.html +++ /dev/null @@ -1,94 +0,0 @@ -{% block head %} - -{% endblock %} - -{% block body %} -
- - -{% endblock %} diff --git a/meshview/web.py b/meshview/web.py index ea145f5..3f47415 100644 --- a/meshview/web.py +++ b/meshview/web.py @@ -1,15 +1,10 @@ import asyncio import datetime -import json import logging import os -import pathlib import re import ssl -import traceback -from collections import Counter, defaultdict from dataclasses import dataclass -from datetime import timedelta import pydot from aiohttp import web @@ -17,10 +12,13 @@ from google.protobuf import text_format from google.protobuf.message import Message from jinja2 import Environment, PackageLoader, Undefined, select_autoescape from markupsafe import Markup -from pandas import DataFrame from meshtastic.protobuf.portnums_pb2 import PortNum -from meshview import config, database, decode_payload, models, store +from meshview import config, database, decode_payload, migrations, models, store +from meshview.__version__ import ( + __version_string__, +) +from meshview.web_api import api logging.basicConfig( level=logging.INFO, @@ -30,7 +28,7 @@ logging.basicConfig( logger = logging.getLogger(__name__) SEQ_REGEX = re.compile(r"seq \d+") -SOFTWARE_RELEASE = "2.0.8 ~ 10-22-25" +SOFTWARE_RELEASE = __version_string__ # Keep for backward compatibility CONFIG = config.CONFIG env = Environment(loader=PackageLoader("meshview"), autoescape=select_autoescape()) @@ -59,11 +57,11 @@ class Packet: payload: str pretty_payload: Markup import_time: datetime.datetime + import_time_us: int @classmethod def from_model(cls, packet): mesh_packet, payload = decode_payload.decode(packet) - pretty_payload = None if mesh_packet: @@ -78,14 +76,16 @@ class Packet: text_payload = text_format.MessageToString(payload) elif packet.portnum == PortNum.TEXT_MESSAGE_APP and packet.to_node_id != 0xFFFFFFFF: text_payload = "" + elif isinstance(payload, bytes): + text_payload = payload.decode("utf-8", errors="replace") # decode bytes safely else: - text_payload = payload + text_payload = str(payload) if payload: if ( packet.portnum 
== PortNum.POSITION_APP - and payload.latitude_i - and payload.longitude_i + and getattr(payload, "latitude_i", None) + and getattr(payload, "longitude_i", None) ): pretty_payload = Markup( f'map' @@ -99,25 +99,15 @@ class Packet: to_node_id=packet.to_node_id, portnum=packet.portnum, data=text_mesh_packet, - payload=text_payload, + payload=text_payload, # now always a string pretty_payload=pretty_payload, import_time=packet.import_time, + import_time_us=packet.import_time_us, # <-- include microseconds raw_mesh_packet=mesh_packet, raw_payload=payload, ) -@dataclass -class UplinkedNode: - lat: float - long: float - long_name: str - short_name: str - hops: int - snr: float - rssi: float - - async def build_trace(node_id): trace = [] for raw_p in await store.get_packets_from( @@ -186,6 +176,10 @@ def format_timestamp(timestamp): env.filters["node_id_to_hex"] = node_id_to_hex env.filters["format_timestamp"] = format_timestamp +# Initialize API module with dependencies +api.init_api_module(Packet, SEQ_REGEX, LANG_DIR) + +# Create main routes table routes = web.RouteTableDef() @@ -200,450 +194,99 @@ async def index(request): raise web.HTTPFound(location=starting_url) -def generate_response(request, body, raw_node_id="", node=None): - if "HX-Request" in request.headers: - return web.Response(text=body, content_type="text/html") - - template = env.get_template("index.html") - response = web.Response( - text=template.render( - is_hx_request="HX-Request" in request.headers, - raw_node_id=raw_node_id, - node_html=Markup(body), - node=node, - ), - content_type="text/html", - ) - return response +# redirect for backwards compatibility +@routes.get("/packet_list/{packet_id}") +async def redirect_packet_list(request): + packet_id = request.match_info["packet_id"] + raise web.HTTPFound(location=f"/node/{packet_id}") -@routes.get("/node_search") -async def node_search(request): - def parse_int(value, base=10): - try: - return int(value, base) - except ValueError: - return None - - 
q = request.query.get("q") - if not q: - return web.Response(text="Bad node id", status=400, content_type="text/plain") - - node_id = None - fuzzy_nodes = [] - - if q == "^all": - node_id = 0xFFFFFFFF - elif q.startswith("!"): - node_id = parse_int(q[1:], 16) - else: - node_id = parse_int(q) - - if node_id is None: - fuzzy_nodes = list(await store.get_fuzzy_nodes(q)) - if len(fuzzy_nodes) == 1: - node_id = fuzzy_nodes[0].node_id - - if node_id: - return web.Response( - status=307, - headers={'Location': f'/packet_list/{node_id}?{request.query_string}'}, - ) - - template = env.get_template("search.html") +@routes.get("/net") +async def net(request): return web.Response( - text=template.render( - nodes=fuzzy_nodes, - query_string=request.query_string, - site_config=CONFIG, - ), + text=env.get_template("net.html").render(), content_type="text/html", ) -@routes.get("/node_match") -async def node_match(request): - if "q" not in request.query or not request.query["q"]: - return web.Response(text="Bad node id") - raw_node_id = request.query["q"] - node_options = await store.get_fuzzy_nodes(raw_node_id) +@routes.get("/map") +async def map(request): + template = env.get_template("map.html") + return web.Response(text=template.render(), content_type="text/html") - template = env.get_template("datalist.html") + +@routes.get("/nodelist") +async def nodelist(request): + template = env.get_template("nodelist.html") return web.Response( - text=template.render( - node_options=node_options, - ), - content_type="text/html", - ) - - -@routes.get("/packet_list/{node_id}") -async def packet_list(request): - try: - # Parse and validate node_id - try: - node_id = int(request.match_info["node_id"]) - except (KeyError, ValueError): - template = env.get_template("error.html") - rendered = template.render( - error_message="Invalid or missing node ID", - error_details=None, - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ) - return web.Response(text=rendered, status=400, 
content_type="text/html") - - # Parse and validate portnum (optional) - portnum = request.query.get("portnum") - try: - portnum = int(portnum) if portnum else None - except ValueError: - template = env.get_template("error.html") - rendered = template.render( - error_message="Invalid portnum value", - error_details=None, - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ) - return web.Response(text=rendered, status=400, content_type="text/html") - - # Run tasks concurrently - async with asyncio.TaskGroup() as tg: - node_task = tg.create_task(store.get_node(node_id)) - raw_packets_task = tg.create_task(store.get_packets(node_id, portnum, limit=200)) - trace_task = tg.create_task(build_trace(node_id)) - neighbors_task = tg.create_task(build_neighbors(node_id)) - has_telemetry_task = tg.create_task(store.has_packets(node_id, PortNum.TELEMETRY_APP)) - - # Await task results - node = await node_task - packets = [Packet.from_model(p) for p in await raw_packets_task] - trace = await trace_task - neighbors = await neighbors_task - has_telemetry = await has_telemetry_task - - if node is None: - template = env.get_template("error.html") - rendered = template.render( - error_message="Node not found", - error_details=None, - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ) - return web.Response(text=rendered, status=404, content_type="text/html") - - # Render template - template = env.get_template("node.html") - html = template.render( - raw_node_id=node_id_to_hex(node_id), - node_id=node_id, - node=node, - portnum=portnum, - packets=packets, - trace=trace, - neighbors=neighbors, - has_telemetry=has_telemetry, - query_string=request.query_string, - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ) - return web.Response(text=html, content_type="text/html") - - except asyncio.CancelledError: - raise # Let TaskGroup cancellation propagate correctly - - except Exception: - # Log full traceback for diagnostics - traceback.print_exc() - template = 
env.get_template("error.html") - rendered = template.render( - error_message="Internal server error", - error_details=traceback.format_exc(), - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ) - return web.Response(text=rendered, content_type="text/html") - - -@routes.get("/packet_details/{packet_id}") -async def packet_details(request): - packet_id = int(request.match_info["packet_id"]) - packets_seen = list(await store.get_packets_seen(packet_id)) - packet = await store.get_packet(packet_id) - - node = None - if packet and packet.from_node_id: - node = await store.get_node(packet.from_node_id) - - from_node_cord = None - if packet and packet.from_node and packet.from_node.last_lat: - from_node_cord = [ - packet.from_node.last_lat * 1e-7, - packet.from_node.last_long * 1e-7, - ] - - uplinked_nodes = [] - for p in packets_seen: - if p.node and p.node.last_lat: - if p.topic.startswith('mqtt-meshtastic-org'): - hops = 666 - else: - hops = p.hop_start - p.hop_limit - uplinked_nodes.append( - UplinkedNode( - lat=p.node.last_lat * 1e-7, - long=p.node.last_long * 1e-7, - long_name=p.node.long_name, - short_name=p.node.short_name, - hops=hops, - snr=p.rx_snr, - rssi=p.rx_rssi, - ) - ) - - map_center = None - if from_node_cord: - map_center = from_node_cord - elif uplinked_nodes: - map_center = [uplinked_nodes[0].lat, uplinked_nodes[0].long] - - # Render the template and return the response - template = env.get_template("packet_details.html") - return web.Response( - text=template.render( - packets_seen=packets_seen, - map_center=map_center, - from_node_cord=from_node_cord, - uplinked_nodes=uplinked_nodes, - node=node, - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ), + text=template.render(), content_type="text/html", ) @routes.get("/firehose") -async def packet_details_firehose(request): +async def firehose(request): return web.Response( text=env.get_template("firehose.html").render(), content_type="text/html", ) +@routes.get("/chat") +async def 
chat(request): + template = env.get_template("chat.html") + return web.Response( + text=template.render(), + content_type="text/html", + ) + + @routes.get("/packet/{packet_id}") -async def packet(request): - try: - packet_id = int(request.match_info["packet_id"]) - except (ValueError, KeyError): - template = env.get_template("error.html") - rendered = template.render( - error_message="Invalid packet ID", - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ) - return web.Response(text=rendered, content_type="text/html") - - packet = await store.get_packet(packet_id) - if not packet: - template = env.get_template("error.html") - rendered = template.render( - error_message="Packet not found", - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ) - return web.Response(text=rendered, content_type="text/html") - - node = await store.get_node(packet.from_node_id) - template = env.get_template("packet_index.html") - - rendered = template.render( - packet=Packet.from_model(packet), - node=node, - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ) - return web.Response(text=rendered, content_type="text/html") - - -@routes.get("/graph/power_json/{node_id}") -async def graph_power_json(request): - return await graph_telemetry_json( - int(request.match_info['node_id']), - 'device_metrics', - [ - {'label': 'battery level', 'fields': ['battery_level']}, - {'label': 'voltage', 'fields': ['voltage'], 'palette': 'Set2'}, - ], +async def new_packet(request): + template = env.get_template("packet.html") + return web.Response( + text=template.render(), + content_type="text/html", ) -@routes.get("/graph/utilization_json/{node_id}") -async def graph_chutil_json(request): - return await graph_telemetry_json( - int(request.match_info['node_id']), - 'device_metrics', - [{'label': 'utilization', 'fields': ['channel_utilization', 'air_util_tx']}], +@routes.get("/node/{from_node_id}") +async def firehose_node(request): + template = env.get_template("node.html") + 
return web.Response( + text=template.render(), + content_type="text/html", ) -@routes.get("/graph/wind_speed_json/{node_id}") -async def graph_wind_speed_json(request): - return await graph_telemetry_json( - int(request.match_info['node_id']), - 'environment_metrics', - [{'label': 'wind speed m/s', 'fields': ['wind_speed']}], +@routes.get("/nodegraph") +async def nodegraph(request): + template = env.get_template("nodegraph.html") + return web.Response( + text=template.render(), + content_type="text/html", ) -@routes.get("/graph/wind_direction_json/{node_id}") -async def graph_wind_direction_json(request): - return await graph_telemetry_json( - int(request.match_info['node_id']), - 'environment_metrics', - [{'label': 'wind direction', 'fields': ['wind_direction']}], +@routes.get("/top") +async def top(request): + template = env.get_template("top.html") + return web.Response( + text=template.render(), + content_type="text/html", ) -@routes.get("/graph/temperature_json/{node_id}") -async def graph_temperature_json(request): - return await graph_telemetry_json( - int(request.match_info['node_id']), - 'environment_metrics', - [{'label': 'temperature C', 'fields': ['temperature']}], - ) - - -@routes.get("/graph/humidity_json/{node_id}") -async def graph_humidity_json(request): - return await graph_telemetry_json( - int(request.match_info['node_id']), - 'environment_metrics', - [{'label': 'humidity', 'fields': ['relative_humidity']}], - ) - - -@routes.get("/graph/pressure_json/{node_id}") -async def graph_pressure_json(request): - return await graph_telemetry_json( - int(request.match_info['node_id']), - 'environment_metrics', - [{'label': 'barometric pressure', 'fields': ['barometric_pressure']}], - ) - - -@routes.get("/graph/iaq_json/{node_id}") -async def graph_iaq_json(request): - return await graph_telemetry_json( - int(request.match_info['node_id']), - 'environment_metrics', - [{'label': 'IAQ', 'fields': ['iaq']}], - ) - - 
-@routes.get("/graph/power_metrics_json/{node_id}") -async def graph_power_metrics_json(request): - return await graph_telemetry_json( - int(request.match_info['node_id']), - 'power_metrics', - [ - {'label': 'voltage', 'fields': ['ch1_voltage', 'ch2_voltage', 'ch3_voltage']}, - { - 'label': 'current', - 'fields': ['ch1_current', 'ch2_current', 'ch3_current'], - 'palette': 'Set2', - }, - ], - ) - - -async def graph_telemetry_json(node_id, payload_type, graph_config): - data = {'date': []} - fields = [] - for c in graph_config: - fields.extend(c['fields']) - - for field in fields: - data[field] = [] - - for p in await store.get_packets_from(node_id, PortNum.TELEMETRY_APP): - _, payload = decode_payload.decode(p) - if not payload or not payload.HasField(payload_type): - continue - data_field = getattr(payload, payload_type) - timestamp = p.import_time - data['date'].append(timestamp.isoformat()) # For JSON/ECharts - for field in fields: - data[field].append(getattr(data_field, field, None)) - - if not data['date']: - return web.json_response({'timestamps': [], 'series': []}, status=404) - - df = DataFrame(data) - - series = [] - for conf in graph_config: - for field in conf['fields']: - series.append( - { - 'name': f"{conf['label']} - {field}" - if len(conf['fields']) > 1 - else conf['label'], - 'data': df[field].tolist(), - } - ) - - return web.json_response( - { - 'timestamps': df['date'].tolist(), - 'series': series, - } - ) - - -@routes.get("/graph/neighbors_json/{node_id}") -async def graph_neighbors_json(request): - import datetime - - node_id = int(request.match_info['node_id']) - oldest = datetime.datetime.now() - datetime.timedelta(days=4) - - data = {} - dates = [] - for p in await store.get_packets_from(node_id, PortNum.NEIGHBORINFO_APP): - _, payload = decode_payload.decode(p) - if not payload: - continue - if p.import_time < oldest: - break - - dates.append(p.import_time.isoformat()) # format for JSON - for v in data.values(): - v.append(None) - - for n 
in payload.neighbors: - data.setdefault(n.node_id, [None] * len(dates))[-1] = n.snr - - # Resolve node short names - nodes = {} - async with asyncio.TaskGroup() as tg: - for nid in data: - nodes[nid] = tg.create_task(store.get_node(nid)) - - series = [] - for node_id, snrs in data.items(): - node = await nodes[node_id] - name = node.short_name if node else node_id_to_hex(node_id) - series.append({"name": name, "data": snrs}) - - return web.json_response( - { - "timestamps": dates, - "series": series, - } +@routes.get("/stats") +async def stats(request): + template = env.get_template("stats.html") + return web.Response( + text=template.render(), + content_type="text/html", ) +# Keep !! @routes.get("/graph/traceroute/{packet_id}") async def graph_traceroute(request): packet_id = int(request.match_info['packet_id']) @@ -751,382 +394,7 @@ async def graph_traceroute(request): ) -@routes.get("/graph/traceroute2/{packet_id}") -async def graph_traceroute2(request): - packet_id = int(request.match_info['packet_id']) - traceroutes = list(await store.get_traceroute(packet_id)) - - # Fetch the packet - packet = await store.get_packet(packet_id) - if not packet: - return web.Response(status=404) - - node_ids = set() - for tr in traceroutes: - route = decode_payload.decode_payload(PortNum.TRACEROUTE_APP, tr.route) - node_ids.add(tr.gateway_node_id) - for node_id in route.route: - node_ids.add(node_id) - node_ids.add(packet.from_node_id) - node_ids.add(packet.to_node_id) - - nodes = {} - async with asyncio.TaskGroup() as tg: - for node_id in node_ids: - nodes[node_id] = tg.create_task(store.get_node(node_id)) - - paths = set() - node_color = {} - mqtt_nodes = set() - saw_reply = set() - dest = None - node_seen_time = {} - for tr in traceroutes: - if tr.done: - saw_reply.add(tr.gateway_node_id) - if tr.done and dest: - continue - route = decode_payload.decode_payload(PortNum.TRACEROUTE_APP, tr.route) - path = [packet.from_node_id] - path.extend(route.route) - if tr.done: - dest = 
packet.to_node_id - path.append(packet.to_node_id) - elif path[-1] != tr.gateway_node_id: - path.append(tr.gateway_node_id) - - if not tr.done and tr.gateway_node_id not in node_seen_time and tr.import_time: - node_seen_time[path[-1]] = tr.import_time - - mqtt_nodes.add(tr.gateway_node_id) - node_color[path[-1]] = '#' + hex(hash(tuple(path)))[3:9] - paths.add(tuple(path)) - - used_nodes = set() - for path in paths: - used_nodes.update(path) - - import_times = [tr.import_time for tr in traceroutes if tr.import_time] - if import_times: - first_time = min(import_times) - else: - first_time = 0 - - # Prepare data for ECharts rendering - chart_nodes = [] - chart_edges = [] - for node_id in used_nodes: - node = await nodes[node_id] - if not node: - # Handle case where node is None - node_name = node_id_to_hex(node_id) - chart_nodes.append( - { - "name": str(node_id), - "value": node_name, - "symbol": 'rect', - } - ) - else: - node_name = ( - f'[{node.short_name}] {node.long_name}\n{node_id_to_hex(node_id)}\n{node.role}' - ) - if node_id in node_seen_time: - ms = (node_seen_time[node_id] - first_time).total_seconds() * 1000 - node_name += f'\n {ms:.2f}ms' - style = 'dashed' - if node_id == dest: - style = 'filled' - elif node_id in mqtt_nodes: - style = 'solid' - - if node_id in saw_reply: - style += ', diagonals' - - chart_nodes.append( - { - "name": str(node_id), - "value": node_name, - "symbol": 'rect', - "long_name": node.long_name, - "short_name": node.short_name, - "role": node.role, - "hw_model": node.hw_model, - } - ) - - # Create edges - organize by whether path is complete - incomplete_edges = [] - complete_edges = [] - - for path in paths: - color = '#' + hex(hash(tuple(path)))[3:9] - is_complete = path[-1] == dest - for src, dest_node in zip(path, path[1:], strict=False): - edge = { - "source": str(src), - "target": str(dest_node), - "originalColor": color, - } - if is_complete: - complete_edges.append(edge) - else: - incomplete_edges.append(edge) - - # Add 
incomplete edges first, then complete edges - # This ensures complete paths render on top - chart_edges.extend(incomplete_edges) - chart_edges.extend(complete_edges) - - chart_data = { - "nodes": chart_nodes, - "edges": chart_edges, - } - - template = env.get_template("traceroute.html") - # Render the page with the chart data - return web.Response( - text=template.render(chart_data=chart_data, packet_id=packet_id), - content_type="text/html", - ) - - -@routes.get("/graph/network") -async def graph_network(request): - root = request.query.get("root") - depth = int(request.query.get("depth", 5)) - hours = int(request.query.get("hours", 24)) - minutes = int(request.query.get("minutes", 0)) - since = datetime.timedelta(hours=hours, minutes=minutes) - - nodes = {} - node_ids = set() - - traceroutes = [] - async for tr in store.get_traceroutes(since): - node_ids.add(tr.gateway_node_id) - node_ids.add(tr.packet.from_node_id) - node_ids.add(tr.packet.to_node_id) - route = decode_payload.decode_payload(PortNum.TRACEROUTE_APP, tr.route) - node_ids.update(route.route) - - path = [tr.packet.from_node_id] - path.extend(route.route) - if tr.done: - path.append(tr.packet.to_node_id) - else: - if path[-1] != tr.gateway_node_id: - # It seems some nodes add them self to the list before uplinking - path.append(tr.gateway_node_id) - traceroutes.append((tr, path)) - - edges = Counter() - edge_type = {} - used_nodes = set() - - for ps, p in await store.get_mqtt_neighbors(since): - node_ids.add(ps.node_id) - node_ids.add(p.from_node_id) - used_nodes.add(ps.node_id) - used_nodes.add(p.from_node_id) - edges[(p.from_node_id, ps.node_id)] += 1 - edge_type[(p.from_node_id, ps.node_id)] = 'sni' - - for packet in await store.get_packets( - portnum=PortNum.NEIGHBORINFO_APP, - after=since, - ): - _, neighbor_info = decode_payload.decode(packet) - node_ids.add(packet.from_node_id) - used_nodes.add(packet.from_node_id) - for node in neighbor_info.neighbors: - node_ids.add(node.node_id) - 
used_nodes.add(node.node_id) - edges[(node.node_id, packet.from_node_id)] += 1 - edge_type[(node.node_id, packet.from_node_id)] = 'ni' - - async with asyncio.TaskGroup() as tg: - for node_id in node_ids: - nodes[node_id] = tg.create_task(store.get_node(node_id)) - - tr_done = set() - for tr, path in traceroutes: - if tr.done: - if tr.packet_id in tr_done: - continue - else: - tr_done.add(tr.packet_id) - - for src, dest in zip(path, path[1:], strict=False): - used_nodes.add(src) - used_nodes.add(dest) - edges[(src, dest)] += 1 - edge_type[(src, dest)] = 'tr' - - async def get_node_name(node_id): - node = await nodes[node_id] - if not node: - node_name = node_id_to_hex(node_id) - else: - node_name = f'[{node.short_name}] {node.long_name}\n{node_id_to_hex(node_id)}' - return node_name - - if root: - new_used_nodes = set() - new_edges = Counter() - edge_map = {} - for src, dest in edges: - edge_map.setdefault(dest, []).append(src) - - queue = [int(root)] - for _ in range(depth): - next_queue = [] - for node in queue: - new_used_nodes.add(node) - for dest in edge_map.get(node, []): - new_used_nodes.add(dest) - new_edges[(dest, node)] += 1 - next_queue.append(dest) - queue = next_queue - - used_nodes = new_used_nodes - edges = new_edges - # Create the graph - graph = pydot.Dot( - 'network', - graph_type="digraph", - layout="sfdp", - overlap="prism", - esep="+10", - nodesep="0.5", - ranksep="1", - ) - - for node_id in used_nodes: - node_future = nodes.get(node_id) - if not node_future: - # You could log a warning here if needed - continue - - node = await node_future - color = '#000000' - node_name = await get_node_name(node_id) - - if node and node.role in ('ROUTER', 'ROUTER_CLIENT', 'REPEATER'): - color = '#0000FF' - elif node and node.role == 'CLIENT_MUTE': - color = '#00FF00' - - graph.add_node( - pydot.Node( - str(node_id), - label=node_name, - shape='box', - color=color, - href=f"/graph/network?root={node_id}&depth={depth - 1}", - ) - ) - edge_added = set() - - for 
(src, dest), _ in edges.items(): - if edge_type[(src, dest)] in ('ni'): - color = '#FF0000' - elif edge_type[(src, dest)] in ('sni'): - color = '#00FF00' - else: - color = '#000000' - edge_dir = "forward" - if (dest, src) in edges and edge_type[(src, dest)] == edge_type[(dest, src)]: - edge_dir = "both" - edge_added.add((dest, src)) - - if (src, dest) not in edge_added: - edge_added.add((src, dest)) - graph.add_edge( - pydot.Edge( - str(src), - str(dest), - color=color, - tooltip=f'{await get_node_name(src)} -> {await get_node_name(dest)}', - penwidth=1.85, - dir=edge_dir, - ) - ) - return web.Response( - body=graph.create_svg(), - content_type="image/svg+xml", - ) - - -@routes.get("/nodelist") -async def nodelist(request): - try: - template = env.get_template("nodelist.html") - return web.Response( - text=template.render(site_config=CONFIG, SOFTWARE_RELEASE=SOFTWARE_RELEASE), - content_type="text/html", - ) - except Exception: - template = env.get_template("error.html") - rendered = template.render( - error_message="An error occurred while loading the node list page.", - error_details=traceback.format_exc(), - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ) - return web.Response(text=rendered, status=500, content_type="text/html") - - -@routes.get("/net") -async def net(request): - try: - # Fetch packets for the given node ID and port number - after_time = datetime.datetime.now() - timedelta(days=6) - packets = await store.get_packets(portnum=PortNum.TEXT_MESSAGE_APP, after=after_time) - - # Convert packets to UI packets - ui_packets = [Packet.from_model(p) for p in packets] - # Precompile regex for performance - seq_pattern = re.compile(r"seq \d+$") - - # Filter packets: exclude "seq \d+$" but include those containing Tag - filtered_packets = [ - p - for p in ui_packets - if not seq_pattern.match(p.payload) - and (CONFIG["site"]["net_tag"]).lower() in p.payload.lower() - ] - - # Render template - template = env.get_template("net.html") - return 
web.Response( - text=template.render( - packets=filtered_packets, site_config=CONFIG, SOFTWARE_RELEASE=SOFTWARE_RELEASE - ), - content_type="text/html", - ) - - except web.HTTPException: - raise # Let aiohttp handle HTTP exceptions properly - - except Exception as e: - logger.error(f"Error processing net request: {e}") - template = env.get_template("error.html") - rendered = template.render( - error_message="An internal server error occurred.", - error_details=traceback.format_exc(), - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ) - return web.Response(text=rendered, status=500, content_type="text/html") - - -@routes.get("/map") -async def map(request): - template = env.get_template("map.html") - return web.Response(text=template.render(), content_type="text/html") - - +''' @routes.get("/stats") async def stats(request): try: @@ -1139,8 +407,6 @@ async def stats(request): total_packets=total_packets, total_nodes=total_nodes, total_packets_seen=total_packets_seen, - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, ), content_type="text/html", ) @@ -1150,626 +416,28 @@ async def stats(request): status=500, content_type="text/plain", ) - - -@routes.get("/top") -async def top(request): - import time - - try: - # Check if performance metrics should be displayed - show_perf = request.query.get("perf", "").lower() in ("true", "1", "yes") - - # Start overall timing - start_time = time.perf_counter() - timing_data = None - - node_id = request.query.get("node_id") # Get node_id from the URL query parameters - - if node_id: - # If node_id is provided, fetch traffic data for the specific node - db_start = time.perf_counter() - node_traffic = await store.get_node_traffic(int(node_id)) - db_time = time.perf_counter() - db_start - - template = env.get_template("node_traffic.html") - html_content = template.render( - traffic=node_traffic, node_id=node_id, site_config=CONFIG - ) - else: - # Otherwise, fetch top traffic nodes as usual - db_start = 
time.perf_counter() - top_nodes = await store.get_top_traffic_nodes() - db_time = time.perf_counter() - db_start - - # Data processing timing - process_start = time.perf_counter() - - # Count records processed - total_packets = sum(node.get('total_packets_sent', 0) for node in top_nodes) - total_seen = sum(node.get('total_times_seen', 0) for node in top_nodes) - - process_time = time.perf_counter() - process_start - - # Calculate total time - total_time = time.perf_counter() - start_time - - # Only include timing_data if perf parameter is set - if show_perf: - timing_data = { - 'db_query_ms': f"{db_time * 1000:.2f}", - 'processing_ms': f"{process_time * 1000:.2f}", - 'total_ms': f"{total_time * 1000:.2f}", - 'node_count': len(top_nodes), - 'total_packets': total_packets, - 'total_seen': total_seen, - } - - template = env.get_template("top.html") - html_content = template.render( - nodes=top_nodes, - timing_data=timing_data, - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ) - - return web.Response( - text=html_content, - content_type="text/html", - ) - except Exception as e: - logger.error(f"Error in /top: {e}") - template = env.get_template("error.html") - rendered = template.render( - error_message="An error occurred in /top", - error_details=traceback.format_exc(), - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ) - return web.Response(text=rendered, status=500, content_type="text/html") - - -@routes.get("/chat") -async def chat(request): - try: - template = env.get_template("chat.html") - return web.Response( - text=template.render(), - content_type="text/html", - ) - except Exception as e: - logger.error(f"Error in /chat: {e}") - template = env.get_template("error.html") - rendered = template.render( - error_message="An error occurred while processing your request.", - error_details=traceback.format_exc(), - ) - return web.Response(text=rendered, status=500, content_type="text/html") - - -# Assuming the route URL structure is 
/nodegraph -@routes.get("/nodegraph") -async def nodegraph(request): - nodes = await store.get_nodes(days_active=3) # Fetch nodes for the given channel - node_ids = set() - edges_map = defaultdict( - lambda: {"weight": 0, "type": None} - ) # weight is based on the number of traceroutes and neighbor info packets - used_nodes = set() # This will track nodes involved in edges (including traceroutes) - since = datetime.timedelta(hours=48) - traceroutes = [] - - # Fetch traceroutes - async for tr in store.get_traceroutes(since): - node_ids.add(tr.gateway_node_id) - node_ids.add(tr.packet.from_node_id) - node_ids.add(tr.packet.to_node_id) - route = decode_payload.decode_payload(PortNum.TRACEROUTE_APP, tr.route) - node_ids.update(route.route) - - path = [tr.packet.from_node_id] - path.extend(route.route) - if tr.done: - path.append(tr.packet.to_node_id) - else: - if path[-1] != tr.gateway_node_id: - path.append(tr.gateway_node_id) - traceroutes.append((tr, path)) - - # Add traceroute edges with their type and update used_nodes - for i in range(len(path) - 1): - edge_pair = (path[i], path[i + 1]) - edges_map[edge_pair]["weight"] += 1 - edges_map[edge_pair]["type"] = "traceroute" - used_nodes.add(path[i]) # Add all nodes in the traceroute path - used_nodes.add(path[i + 1]) # Add all nodes in the traceroute path - - # Fetch NeighborInfo packets - for packet in await store.get_packets(portnum=PortNum.NEIGHBORINFO_APP, after=since): - try: - _, neighbor_info = decode_payload.decode(packet) - node_ids.add(packet.from_node_id) - used_nodes.add(packet.from_node_id) - for node in neighbor_info.neighbors: - node_ids.add(node.node_id) - used_nodes.add(node.node_id) - - edge_pair = (node.node_id, packet.from_node_id) - edges_map[edge_pair]["weight"] += 1 - edges_map[edge_pair]["type"] = "neighbor" - except Exception as e: - logger.error(f"Error decoding NeighborInfo packet: {e}") - - # Convert edges_map to a list of dicts with colors - max_weight = max(i['weight'] for i in 
edges_map.values()) if edges_map else 1 - edges = [ - { - "from": frm, - "to": to, - "type": info["type"], - "weight": max([info['weight'] / float(max_weight) * 10, 1]), - } - for (frm, to), info in edges_map.items() - ] - - # Filter nodes to only include those involved in edges (including traceroutes) - nodes_with_edges = [node for node in nodes if node.node_id in used_nodes] - - template = env.get_template("nodegraph.html") - return web.Response( - text=template.render( - nodes=nodes_with_edges, - edges=edges, # Pass edges with color info - site_config=CONFIG, - SOFTWARE_RELEASE=SOFTWARE_RELEASE, - ), - content_type="text/html", - ) - - -# API Section -####################################################################### -# How this works -# When your frontend calls /api/chat without since, it returns the most recent limit (default 100) messages. -# When your frontend calls /api/chat?since=ISO_TIMESTAMP, it returns only messages with import_time > since. -# The response includes "latest_import_time" for frontend to keep track of the newest message timestamp. -# The backend fetches extra packets (limit*5) to account for filtering messages like "seq N" and since filtering. 
- - -@routes.get("/api/channels") -async def api_channels(request: web.Request): - period_type = request.query.get("period_type", "hour") - length = int(request.query.get("length", 24)) - - try: - channels = await store.get_channels_in_period(period_type, length) - return web.json_response({"channels": channels}) - except Exception as e: - return web.json_response({"channels": [], "error": str(e)}) - - -@routes.get("/api/chat") -async def api_chat(request): - try: - # Parse query params - limit_str = request.query.get("limit", "20") - since_str = request.query.get("since") - - # Clamp limit between 1 and 200 - try: - limit = min(max(int(limit_str), 1), 100) - except ValueError: - limit = 50 - - # Parse "since" timestamp if provided - since = None - if since_str: - try: - since = datetime.datetime.fromisoformat(since_str) - except Exception as e: - logger.error(f"Failed to parse since '{since_str}': {e}") - - # Fetch packets from store - packets = await store.get_packets( - node_id=0xFFFFFFFF, - portnum=PortNum.TEXT_MESSAGE_APP, - limit=limit, - ) - - ui_packets = [Packet.from_model(p) for p in packets] - - # Filter out "seq N" and missing payloads - filtered_packets = [ - p for p in ui_packets if p.payload and not SEQ_REGEX.fullmatch(p.payload) - ] - - # Apply "since" filter - if since: - filtered_packets = [p for p in filtered_packets if p.import_time > since] - - # Sort by import_time descending (latest first) - filtered_packets.sort(key=lambda p: p.import_time, reverse=True) - - # Trim to requested limit - filtered_packets = filtered_packets[:limit] - - # Build response data - packets_data = [] - for p in filtered_packets: - reply_id = getattr( - getattr(getattr(p, "raw_mesh_packet", None), "decoded", None), "reply_id", None - ) - - packet_dict = { - "id": p.id, - "import_time": p.import_time.isoformat(), - "channel": getattr(p.from_node, "channel", ""), - "from_node_id": p.from_node_id, - "long_name": getattr(p.from_node, "long_name", ""), - "payload": 
p.payload, - } - - if reply_id: # ✅ only include if not None/0 - packet_dict["reply_id"] = reply_id - - packets_data.append(packet_dict) - - # Pick latest import time for clients to use in next request - if filtered_packets: - latest_import_time = filtered_packets[0].import_time.isoformat() - elif since: - latest_import_time = since.isoformat() - else: - latest_import_time = None - - return web.json_response( - { - "packets": packets_data, - "latest_import_time": latest_import_time, - } - ) - - except Exception as e: - logger.error(f"Error in /api/chat: {e}") - return web.json_response( - {"error": "Failed to fetch chat data", "details": str(e)}, status=500 - ) - - -@routes.get("/api/nodes") -async def api_nodes(request): - try: - # Optional query parameters - role = request.query.get("role") - channel = request.query.get("channel") - hw_model = request.query.get("hw_model") - days_active = request.query.get("days_active") - - if days_active: - try: - days_active = int(days_active) - except ValueError: - days_active = None - - # Fetch nodes from database using your get_nodes function - nodes = await store.get_nodes( - role=role, channel=channel, hw_model=hw_model, days_active=days_active - ) - - # Prepare the JSON response - nodes_data = [] - for n in nodes: - nodes_data.append( - { - "id": getattr(n, "id", None), - "node_id": n.node_id, - "long_name": n.long_name, - "short_name": n.short_name, - "hw_model": n.hw_model, - "firmware": n.firmware, - "role": n.role, - "last_lat": getattr(n, "last_lat", None), - "last_long": getattr(n, "last_long", None), - "channel": n.channel, - "last_update": n.last_update.isoformat(), - } - ) - - return web.json_response({"nodes": nodes_data}) - - except Exception as e: - logger.error(f"Error in /api/nodes: {e}") - return web.json_response({"error": "Failed to fetch nodes"}, status=500) - - -@routes.get("/api/packets") -async def api_packets(request): - try: - # Query parameters - limit = int(request.query.get("limit", 50)) - 
since_str = request.query.get("since") - since_time = None - - if since_str: - try: - # Robust ISO 8601 parsing (handles 'Z' for UTC) - since_time = datetime.datetime.fromisoformat(since_str.replace("Z", "+00:00")) - except Exception as e: - logger.error(f"Failed to parse 'since' timestamp '{since_str}': {e}") - - # Fetch packets from the store - packets = await store.get_packets(limit=limit, after=since_time) - packets = [Packet.from_model(p) for p in packets] - - packets_json = [] - for p in packets: - payload = (p.payload or "").strip() - - packets_json.append( - { - "id": p.id, - "from_node_id": p.from_node_id, - "to_node_id": p.to_node_id, - "portnum": int(p.portnum) if p.portnum is not None else None, - "import_time": p.import_time.isoformat(), - "payload": payload, - } - ) - - return web.json_response({"packets": packets_json}) - - except Exception as e: - logger.error(f"Error in /api/packets: {e}") - return web.json_response({"error": "Failed to fetch packets"}, status=500) - - -@routes.get("/api/stats") -async def api_stats(request): - """ - Return packet statistics for a given period type, length, - and optional filters for channel, portnum, to_node, from_node. - """ - allowed_periods = {"hour", "day"} - - # period_type validation - period_type = request.query.get("period_type", "hour").lower() - if period_type not in allowed_periods: - return web.json_response( - {"error": f"Invalid period_type. 
Must be one of {allowed_periods}"}, status=400 - ) - - # length validation - try: - length = int(request.query.get("length", 24)) - except ValueError: - return web.json_response({"error": "length must be an integer"}, status=400) - - # Optional filters - channel = request.query.get("channel") - - def parse_int_param(name): - value = request.query.get(name) - if value is not None: - try: - return int(value) - except ValueError: - raise web.HTTPBadRequest( - text=json.dumps({"error": f"{name} must be an integer"}), - content_type="application/json", - ) from None - return None - - portnum = parse_int_param("portnum") - to_node = parse_int_param("to_node") - from_node = parse_int_param("from_node") - - # Fetch stats - stats = await store.get_packet_stats( - period_type=period_type, - length=length, - channel=channel, - portnum=portnum, - to_node=to_node, - from_node=from_node, - ) - - return web.json_response(stats) - - -@routes.get("/api/edges") -async def api_edges(request): - since = datetime.datetime.now() - datetime.timedelta(hours=48) - filter_type = request.query.get("type") - - edges = {} - - # Only build traceroute edges if requested - if filter_type in (None, "traceroute"): - async for tr in store.get_traceroutes(since): - try: - route = decode_payload.decode_payload(PortNum.TRACEROUTE_APP, tr.route) - except Exception as e: - logger.error(f"Error decoding Traceroute {tr.id}: {e}") - continue - - path = [tr.packet.from_node_id] + list(route.route) - path.append(tr.packet.to_node_id if tr.done else tr.gateway_node_id) - - for a, b in zip(path, path[1:], strict=False): - edges[(a, b)] = "traceroute" - - # Only build neighbor edges if requested - if filter_type in (None, "neighbor"): - packets = await store.get_packets(portnum=PortNum.NEIGHBORINFO_APP, after=since) - for packet in packets: - try: - _, neighbor_info = decode_payload.decode(packet) - for node in neighbor_info.neighbors: - edges.setdefault((node.node_id, packet.from_node_id), "neighbor") - except 
Exception as e: - logger.error( - f"Error decoding NeighborInfo packet {getattr(packet, 'id', '?')}: {e}" - ) - - # Convert edges dict to list format for JSON response - edges_list = [ - {"from": frm, "to": to, "type": edge_type} for (frm, to), edge_type in edges.items() - ] - - return web.json_response({"edges": edges_list}) - - -@routes.get("/api/config") -async def api_config(request): - try: - # ------------------ Helpers ------------------ - def get(section, key, default=None): - """Safe getter for both dict and ConfigParser.""" - if isinstance(section, dict): - return section.get(key, default) - return section.get(key, fallback=default) - - def get_bool(section, key, default=False): - val = get(section, key, default) - if isinstance(val, bool): - return "true" if val else "false" - if isinstance(val, str): - return "true" if val.lower() in ("1", "true", "yes", "on") else "false" - return "true" if bool(val) else "false" - - def get_float(section, key, default=0.0): - try: - return float(get(section, key, default)) - except Exception: - return float(default) - - def get_int(section, key, default=0): - try: - return int(get(section, key, default)) - except Exception: - return default - - def get_str(section, key, default=""): - val = get(section, key, default) - return str(val) if val is not None else str(default) - - # ------------------ SITE ------------------ - site = CONFIG.get("site", {}) - safe_site = { - "domain": get_str(site, "domain", ""), - "language": get_str(site, "language", "en"), - "title": get_str(site, "title", ""), - "message": get_str(site, "message", ""), - "starting": get_str(site, "starting", "/chat"), - "nodes": get_bool(site, "nodes", True), - "conversations": get_bool(site, "conversations", True), - "everything": get_bool(site, "everything", True), - "graphs": get_bool(site, "graphs", True), - "stats": get_bool(site, "stats", True), - "net": get_bool(site, "net", True), - "map": get_bool(site, "map", True), - "top": get_bool(site, 
"top", True), - "map_top_left_lat": get_float(site, "map_top_left_lat", 39.0), - "map_top_left_lon": get_float(site, "map_top_left_lon", -123.0), - "map_bottom_right_lat": get_float(site, "map_bottom_right_lat", 36.0), - "map_bottom_right_lon": get_float(site, "map_bottom_right_lon", -121.0), - "map_interval": get_int(site, "map_interval", 3), - "firehose_interval": get_int(site, "firehose_interval", 3), - "weekly_net_message": get_str( - site, "weekly_net_message", "Weekly Mesh check-in message." - ), - "net_tag": get_str(site, "net_tag", "#BayMeshNet"), - "version": str(SOFTWARE_RELEASE), - } - - # ------------------ MQTT ------------------ - mqtt = CONFIG.get("mqtt", {}) - topics_raw = get(mqtt, "topics", []) - import json - - if isinstance(topics_raw, str): - try: - topics = json.loads(topics_raw) - except Exception: - topics = [topics_raw] - elif isinstance(topics_raw, list): - topics = topics_raw - else: - topics = [] - - safe_mqtt = { - "server": get_str(mqtt, "server", ""), - "topics": topics, - } - - # ------------------ CLEANUP ------------------ - cleanup = CONFIG.get("cleanup", {}) - safe_cleanup = { - "enabled": get_bool(cleanup, "enabled", False), - "days_to_keep": get_str(cleanup, "days_to_keep", "14"), - "hour": get_str(cleanup, "hour", "2"), - "minute": get_str(cleanup, "minute", "0"), - "vacuum": get_bool(cleanup, "vacuum", False), - } - - safe_config = { - "site": safe_site, - "mqtt": safe_mqtt, - "cleanup": safe_cleanup, - } - - return web.json_response(safe_config) - except Exception as e: - return web.json_response({"error": str(e)}, status=500) - - -@routes.get("/api/lang") -async def api_lang(request): - # Language from ?lang=xx, fallback to config, then to "en" - lang_code = request.query.get("lang") or CONFIG.get("site", {}).get("language", "en") - section = request.query.get("section") # new: section name - - lang_file = os.path.join(LANG_DIR, f"{lang_code}.json") - if not os.path.exists(lang_file): - lang_file = os.path.join(LANG_DIR, 
"en.json") - - # Load JSON translations - with open(lang_file, encoding="utf-8") as f: - translations = json.load(f) - - if section: - section = section.lower() - if section in translations: - return web.json_response(translations[section]) - else: - return web.json_response( - {"error": f"Section '{section}' not found in {lang_code}"}, status=404 - ) - - # if no section requested → return full translation file - return web.json_response(translations) - - -# Generic static HTML route -@routes.get("/{page}") -async def serve_page(request): - page = request.match_info["page"] - - # default to index.html if no extension - if not page.endswith(".html"): - page = f"{page}.html" - - html_file = pathlib.Path(__file__).parent / "static" / page - if not html_file.exists(): - raise web.HTTPNotFound(text=f"Page '{page}' not found") - - content = html_file.read_text(encoding="utf-8") - return web.Response(text=content, content_type="text/html") +''' async def run_server(): + # Wait for database migrations to complete before starting web server + logger.info("Checking database schema status...") + database_url = CONFIG["database"]["connection_string"] + + # Wait for migrations to complete (writer app responsibility) + migration_ready = await migrations.wait_for_migrations( + database.engine, database_url, max_retries=30, retry_delay=2 + ) + + if not migration_ready: + logger.error("Database schema is not up to date. 
Cannot start web server.") + raise RuntimeError("Database schema version mismatch - migrations not complete") + + logger.info("Database schema verified - starting web server") + app = web.Application() - app.add_routes(routes) + app.add_routes(api.routes) # Add API routes + app.add_routes(routes) # Add main web routes # Check if access logging should be disabled enable_access_log = CONFIG.get("logging", {}).get("access_log", "False").lower() == "true" diff --git a/meshview/web_api/__init__.py b/meshview/web_api/__init__.py new file mode 100644 index 0000000..da34840 --- /dev/null +++ b/meshview/web_api/__init__.py @@ -0,0 +1 @@ +"""Web submodule for MeshView API endpoints.""" diff --git a/meshview/web_api/api.py b/meshview/web_api/api.py new file mode 100644 index 0000000..8f5fb5d --- /dev/null +++ b/meshview/web_api/api.py @@ -0,0 +1,692 @@ +"""API endpoints for MeshView.""" + +import datetime +import json +import logging +import os + +from aiohttp import web +from sqlalchemy import text + +from meshtastic.protobuf.portnums_pb2 import PortNum +from meshview import database, decode_payload, store +from meshview.__version__ import __version__, _git_revision_short, get_version_info +from meshview.config import CONFIG + +logger = logging.getLogger(__name__) + +# Will be set by web.py during initialization +Packet = None +SEQ_REGEX = None +LANG_DIR = None + +# Create dedicated route table for API endpoints +routes = web.RouteTableDef() + + +def init_api_module(packet_class, seq_regex, lang_dir): + """Initialize API module with dependencies from main web module.""" + global Packet, SEQ_REGEX, LANG_DIR + Packet = packet_class + SEQ_REGEX = seq_regex + LANG_DIR = lang_dir + + +@routes.get("/api/channels") +async def api_channels(request: web.Request): + period_type = request.query.get("period_type", "hour") + length = int(request.query.get("length", 24)) + + try: + channels = await store.get_channels_in_period(period_type, length) + return web.json_response({"channels": 
channels}) + except Exception as e: + return web.json_response({"channels": [], "error": str(e)}) + + +@routes.get("/api/nodes") +async def api_nodes(request): + try: + # Optional query parameters + role = request.query.get("role") + channel = request.query.get("channel") + hw_model = request.query.get("hw_model") + days_active = request.query.get("days_active") + + if days_active: + try: + days_active = int(days_active) + except ValueError: + days_active = None + + # Fetch nodes from database + nodes = await store.get_nodes( + role=role, channel=channel, hw_model=hw_model, days_active=days_active + ) + + # Prepare the JSON response + nodes_data = [] + for n in nodes: + nodes_data.append( + { + "id": getattr(n, "id", None), + "node_id": n.node_id, + "long_name": n.long_name, + "short_name": n.short_name, + "hw_model": n.hw_model, + "firmware": n.firmware, + "role": n.role, + "last_lat": getattr(n, "last_lat", None), + "last_long": getattr(n, "last_long", None), + "channel": n.channel, + # "last_update": n.last_update.isoformat(), + "last_seen_us": n.last_seen_us, + } + ) + + return web.json_response({"nodes": nodes_data}) + + except Exception as e: + logger.error(f"Error in /api/nodes: {e}") + return web.json_response({"error": "Failed to fetch nodes"}, status=500) + + +@routes.get("/api/packets") +async def api_packets(request): + try: + # --- Parse query parameters --- + packet_id_str = request.query.get("packet_id") + limit_str = request.query.get("limit", "50") + since_str = request.query.get("since") + portnum_str = request.query.get("portnum") + contains = request.query.get("contains") + + # NEW — explicit filters + from_node_id_str = request.query.get("from_node_id") + to_node_id_str = request.query.get("to_node_id") + node_id_str = request.query.get("node_id") # legacy: match either from/to + + # --- If a packet_id is provided, return only that packet --- + if packet_id_str: + try: + packet_id = int(packet_id_str) + except ValueError: + return 
web.json_response({"error": "Invalid packet_id format"}, status=400) + + packet = await store.get_packet(packet_id) + if not packet: + return web.json_response({"packets": []}) + + p = Packet.from_model(packet) + data = { + "id": p.id, + "from_node_id": p.from_node_id, + "to_node_id": p.to_node_id, + "portnum": int(p.portnum) if p.portnum is not None else None, + "payload": (p.payload or "").strip(), + "import_time_us": p.import_time_us, + "import_time": p.import_time.isoformat() if p.import_time else None, + "channel": getattr(p.from_node, "channel", ""), + "long_name": getattr(p.from_node, "long_name", ""), + } + return web.json_response({"packets": [data]}) + + # --- Parse limit --- + try: + limit = min(max(int(limit_str), 1), 100) + except ValueError: + limit = 50 + + # --- Parse since timestamp --- + since = None + if since_str: + try: + since = int(since_str) + except ValueError: + logger.warning(f"Invalid 'since' value (expected microseconds): {since_str}") + + # --- Parse portnum --- + portnum = None + if portnum_str: + try: + portnum = int(portnum_str) + except ValueError: + logger.warning(f"Invalid portnum: {portnum_str}") + + # --- Parse node filters --- + from_node_id = None + to_node_id = None + node_id = None # legacy: match either from/to + + if from_node_id_str: + try: + from_node_id = int(from_node_id_str, 0) + except ValueError: + logger.warning(f"Invalid from_node_id: {from_node_id_str}") + + if to_node_id_str: + try: + to_node_id = int(to_node_id_str, 0) + except ValueError: + logger.warning(f"Invalid to_node_id: {to_node_id_str}") + + if node_id_str: + try: + node_id = int(node_id_str, 0) + except ValueError: + logger.warning(f"Invalid node_id: {node_id_str}") + + # --- Fetch packets using explicit filters --- + packets = await store.get_packets( + from_node_id=from_node_id, + to_node_id=to_node_id, + node_id=node_id, + portnum=portnum, + after=since, + contains=contains, + limit=limit, + ) + + ui_packets = [Packet.from_model(p) for p in 
packets] + + # --- Text message filtering --- + if portnum == PortNum.TEXT_MESSAGE_APP: + ui_packets = [p for p in ui_packets if p.payload and not SEQ_REGEX.fullmatch(p.payload)] + if contains: + ui_packets = [p for p in ui_packets if contains.lower() in p.payload.lower()] + + # --- Sort descending by import_time_us --- + ui_packets.sort( + key=lambda p: (p.import_time_us is not None, p.import_time_us or 0), reverse=True + ) + ui_packets = ui_packets[:limit] + + # --- Build JSON output --- + packets_data = [] + for p in ui_packets: + packet_dict = { + "id": p.id, + "import_time_us": p.import_time_us, + "import_time": p.import_time.isoformat() if p.import_time else None, + "channel": getattr(p.from_node, "channel", ""), + "from_node_id": p.from_node_id, + "to_node_id": p.to_node_id, + "portnum": int(p.portnum), + "long_name": getattr(p.from_node, "long_name", ""), + "payload": (p.payload or "").strip(), + } + + reply_id = getattr( + getattr(getattr(p, "raw_mesh_packet", None), "decoded", None), + "reply_id", + None, + ) + if reply_id: + packet_dict["reply_id"] = reply_id + + packets_data.append(packet_dict) + + # --- Latest import_time for incremental fetch --- + latest_import_time = None + if packets_data: + for p in packets_data: + if p.get("import_time_us") and p["import_time_us"] > 0: + latest_import_time = max(latest_import_time or 0, p["import_time_us"]) + elif p.get("import_time") and latest_import_time is None: + try: + dt = datetime.datetime.fromisoformat( + p["import_time"].replace("Z", "+00:00") + ) + latest_import_time = int(dt.timestamp() * 1_000_000) + except Exception: + pass + + response = {"packets": packets_data} + if latest_import_time is not None: + response["latest_import_time"] = latest_import_time + + return web.json_response(response) + + except Exception as e: + logger.error(f"Error in /api/packets: {e}") + return web.json_response({"error": "Failed to fetch packets"}, status=500) + + +@routes.get("/api/stats") +async def api_stats(request): 
+ """ + Enhanced stats endpoint: + - Supports global stats (existing behavior) + - Supports per-node stats using ?node= + returning both sent AND seen counts in the specified period + """ + allowed_periods = {"hour", "day"} + + period_type = request.query.get("period_type", "hour").lower() + if period_type not in allowed_periods: + return web.json_response( + {"error": f"Invalid period_type. Must be one of {allowed_periods}"}, + status=400, + ) + + try: + length = int(request.query.get("length", 24)) + except ValueError: + return web.json_response({"error": "length must be an integer"}, status=400) + + # NEW: optional combined node stats + node_str = request.query.get("node") + if node_str: + try: + node_id = int(node_str) + except ValueError: + return web.json_response({"error": "node must be an integer"}, status=400) + + # Fetch sent packets + sent = await store.get_packet_stats( + period_type=period_type, + length=length, + from_node=node_id, + ) + + # Fetch seen packets + seen = await store.get_packet_stats( + period_type=period_type, + length=length, + to_node=node_id, + ) + + return web.json_response( + { + "node_id": node_id, + "period_type": period_type, + "length": length, + "sent": sent.get("total", 0), + "seen": seen.get("total", 0), + } + ) + + # ---- Existing full stats mode (unchanged) ---- + channel = request.query.get("channel") + + def parse_int_param(name): + value = request.query.get(name) + if value is not None: + try: + return int(value) + except ValueError: + raise web.HTTPBadRequest( + text=json.dumps({"error": f"{name} must be an integer"}), + content_type="application/json", + ) from None + return None + + portnum = parse_int_param("portnum") + to_node = parse_int_param("to_node") + from_node = parse_int_param("from_node") + + stats = await store.get_packet_stats( + period_type=period_type, + length=length, + channel=channel, + portnum=portnum, + to_node=to_node, + from_node=from_node, + ) + + return web.json_response(stats) + + 
@routes.get("/api/stats/count")
async def api_stats_count(request):
    """
    Returns packet and packet_seen totals.

    Behavior:
      - If no filters -> total packets ever + total seen ever
      - If filters    -> apply window/channel/from/to + packet_id
    """

    # -------- Parse request parameters --------
    packet_id_str = request.query.get("packet_id")
    packet_id = None
    if packet_id_str:
        try:
            packet_id = int(packet_id_str)
        except ValueError:
            return web.json_response({"error": "packet_id must be integer"}, status=400)

    period_type = request.query.get("period_type")
    length_str = request.query.get("length")
    length = None
    if length_str:
        try:
            length = int(length_str)
        except ValueError:
            return web.json_response({"error": "length must be integer"}, status=400)

    channel = request.query.get("channel")

    def parse_int(name):
        # Returns None when the parameter is absent; raises a JSON 400 when present but invalid.
        value = request.query.get(name)
        if value is None:
            return None
        try:
            return int(value)
        except ValueError:
            raise web.HTTPBadRequest(
                text=json.dumps({"error": f"{name} must be integer"}),
                content_type="application/json",
            ) from None

    from_node = parse_int("from_node")
    to_node = parse_int("to_node")

    # -------- Case 1: NO FILTERS -> return global totals --------
    no_filters = (
        period_type is None
        and length is None
        and channel is None
        and from_node is None
        and to_node is None
        and packet_id is None
    )

    if no_filters:
        total_packets = await store.get_total_packet_count()
        total_seen = await store.get_total_packet_seen_count()
        return web.json_response({"total_packets": total_packets, "total_seen": total_seen})

    # -------- Case 2: Apply filters -> compute totals --------
    total_packets = await store.get_total_packet_count(
        period_type=period_type,
        length=length,
        channel=channel,
        from_node=from_node,
        to_node=to_node,
    )

    total_seen = await store.get_total_packet_seen_count(
        packet_id=packet_id,
        period_type=period_type,
        length=length,
        channel=channel,
        from_node=from_node,
        to_node=to_node,
    )

    return web.json_response({"total_packets": total_packets, "total_seen": total_seen})


@routes.get("/api/edges")
async def api_edges(request):
    """
    Return network edges observed in the last 48 hours.

    Query param ``type`` may be "traceroute" or "neighbor" to restrict the
    edge source; when absent, both sources are merged (traceroute wins on
    conflicts because it is inserted first and neighbor uses setdefault).
    """
    since = datetime.datetime.now() - datetime.timedelta(hours=48)
    filter_type = request.query.get("type")

    edges = {}

    # Only build traceroute edges if requested
    if filter_type in (None, "traceroute"):
        async for tr in store.get_traceroutes(since):
            try:
                route = decode_payload.decode_payload(PortNum.TRACEROUTE_APP, tr.route)
            except Exception as e:
                logger.error(f"Error decoding Traceroute {tr.id}: {e}")
                continue

            # Full hop path: origin -> intermediate hops -> destination
            # (or the gateway that last heard it if the traceroute is unfinished).
            path = [tr.packet.from_node_id] + list(route.route)
            path.append(tr.packet.to_node_id if tr.done else tr.gateway_node_id)

            for a, b in zip(path, path[1:], strict=False):
                edges[(a, b)] = "traceroute"

    # Only build neighbor edges if requested
    if filter_type in (None, "neighbor"):
        packets = await store.get_packets(portnum=PortNum.NEIGHBORINFO_APP, after=since)
        for packet in packets:
            try:
                _, neighbor_info = decode_payload.decode(packet)
                for node in neighbor_info.neighbors:
                    edges.setdefault((node.node_id, packet.from_node_id), "neighbor")
            except Exception as e:
                logger.error(
                    f"Error decoding NeighborInfo packet {getattr(packet, 'id', '?')}: {e}"
                )

    # Convert edges dict to list format for JSON response
    edges_list = [
        {"from": frm, "to": to, "type": edge_type} for (frm, to), edge_type in edges.items()
    ]

    return web.json_response({"edges": edges_list})


@routes.get("/api/config")
async def api_config(request):
    """
    Return a sanitized subset of the server configuration (site, mqtt,
    cleanup). Secrets such as MQTT credentials are deliberately excluded.
    """
    try:
        # ------------------ Helpers ------------------
        def get(section, key, default=None):
            """Safe getter for both dict and ConfigParser."""
            if isinstance(section, dict):
                return section.get(key, default)
            return section.get(key, fallback=default)

        def get_bool(section, key, default=False):
            # Note: returns the STRINGS "true"/"false" (not Python bools) —
            # the frontend consumes these as string flags.
            val = get(section, key, default)
            if isinstance(val, bool):
                return "true" if val else "false"
            if isinstance(val, str):
                return "true" if val.lower() in ("1", "true", "yes", "on") else "false"
            return "true" if bool(val) else "false"

        def get_float(section, key, default=0.0):
            try:
                return float(get(section, key, default))
            except Exception:
                return float(default)

        def get_int(section, key, default=0):
            try:
                return int(get(section, key, default))
            except Exception:
                return default

        def get_str(section, key, default=""):
            val = get(section, key, default)
            return str(val) if val is not None else str(default)

        # ------------------ SITE ------------------
        site = CONFIG.get("site", {})
        safe_site = {
            "domain": get_str(site, "domain", ""),
            "language": get_str(site, "language", "en"),
            "title": get_str(site, "title", ""),
            "message": get_str(site, "message", ""),
            "starting": get_str(site, "starting", "/chat"),
            "nodes": get_bool(site, "nodes", True),
            "chat": get_bool(site, "chat", True),
            "everything": get_bool(site, "everything", True),
            "graphs": get_bool(site, "graphs", True),
            "stats": get_bool(site, "stats", True),
            "net": get_bool(site, "net", True),
            "map": get_bool(site, "map", True),
            "top": get_bool(site, "top", True),
            "map_top_left_lat": get_float(site, "map_top_left_lat", 39.0),
            "map_top_left_lon": get_float(site, "map_top_left_lon", -123.0),
            "map_bottom_right_lat": get_float(site, "map_bottom_right_lat", 36.0),
            "map_bottom_right_lon": get_float(site, "map_bottom_right_lon", -121.0),
            "map_interval": get_int(site, "map_interval", 3),
            "firehose_interval": get_int(site, "firehose_interval", 3),
            "weekly_net_message": get_str(
                site, "weekly_net_message", "Weekly Mesh check-in message."
            ),
            "net_tag": get_str(site, "net_tag", "#BayMeshNet"),
            "version": str(__version__),
        }

        # ------------------ MQTT ------------------
        mqtt = CONFIG.get("mqtt", {})
        topics_raw = get(mqtt, "topics", [])

        # "topics" may arrive as a JSON string (from config.ini) or as a list.
        if isinstance(topics_raw, str):
            try:
                topics = json.loads(topics_raw)
            except Exception:
                topics = [topics_raw]
        elif isinstance(topics_raw, list):
            topics = topics_raw
        else:
            topics = []

        safe_mqtt = {
            "server": get_str(mqtt, "server", ""),
            "topics": topics,
        }

        # ------------------ CLEANUP ------------------
        cleanup = CONFIG.get("cleanup", {})
        safe_cleanup = {
            "enabled": get_bool(cleanup, "enabled", False),
            "days_to_keep": get_str(cleanup, "days_to_keep", "14"),
            "hour": get_str(cleanup, "hour", "2"),
            "minute": get_str(cleanup, "minute", "0"),
            "vacuum": get_bool(cleanup, "vacuum", False),
        }

        safe_config = {
            "site": safe_site,
            "mqtt": safe_mqtt,
            "cleanup": safe_cleanup,
        }

        return web.json_response(safe_config)
    except Exception as e:
        return web.json_response({"error": str(e)}, status=500)


@routes.get("/api/lang")
async def api_lang(request):
    """
    Return UI translations as JSON.

    Language comes from ?lang=xx, falling back to the configured site
    language, then "en". An optional ?section= narrows the response to one
    top-level section of the translation file.
    """
    # Language from ?lang=xx, fallback to config, then to "en"
    lang_code = request.query.get("lang") or CONFIG.get("site", {}).get("language", "en")

    # SECURITY: lang_code is user-controlled and is used to build a file
    # path. Restrict it to a simple token (letters/digits/-/_) so a request
    # like ?lang=../../secret cannot traverse outside LANG_DIR.
    if not lang_code or len(lang_code) > 16 or not all(
        ch.isalnum() or ch in "-_" for ch in lang_code
    ):
        lang_code = "en"

    section = request.query.get("section")

    lang_file = os.path.join(LANG_DIR, f"{lang_code}.json")
    if not os.path.exists(lang_file):
        lang_file = os.path.join(LANG_DIR, "en.json")

    # Load JSON translations; a missing/corrupt fallback file should yield a
    # JSON error response rather than an unhandled 500.
    try:
        with open(lang_file, encoding="utf-8") as f:
            translations = json.load(f)
    except (OSError, ValueError) as e:
        logger.error(f"Failed to load translations from {lang_file}: {e}")
        return web.json_response({"error": "Translations unavailable"}, status=500)

    if section:
        section = section.lower()
        if section in translations:
            return web.json_response(translations[section])
        else:
            return web.json_response(
                {"error": f"Section '{section}' not found in {lang_code}"}, status=404
            )

    # if no section requested -> return full translation file
    return web.json_response(translations)


@routes.get("/health")
async def health_check(request):
    """Health check endpoint for monitoring and load balancers."""
    health_status = {
        "status": "healthy",
        "timestamp": datetime.datetime.now(datetime.UTC).isoformat(),
        "version": __version__,
        "git_revision": _git_revision_short,
    }

    # Check database connectivity
    try:
        async with database.async_session() as session:
            await session.execute(text("SELECT 1"))
        health_status["database"] = "connected"
    except Exception as e:
        logger.error(f"Database health check failed: {e}")
        health_status["database"] = "disconnected"
        health_status["status"] = "unhealthy"
        return web.json_response(health_status, status=503)

    # Get database file size (best-effort; never fails the health check)
    try:
        db_url = CONFIG.get("database", {}).get("connection_string", "")
        # Extract file path from SQLite connection string (e.g., "sqlite+aiosqlite:///packets.db")
        if "sqlite" in db_url.lower():
            db_path = db_url.split("///")[-1].split("?")[0]
            if os.path.exists(db_path):
                db_size_bytes = os.path.getsize(db_path)
                # Convert to human-readable format
                if db_size_bytes < 1024:
                    health_status["database_size"] = f"{db_size_bytes} B"
                elif db_size_bytes < 1024 * 1024:
                    health_status["database_size"] = f"{db_size_bytes / 1024:.2f} KB"
                elif db_size_bytes < 1024 * 1024 * 1024:
                    health_status["database_size"] = f"{db_size_bytes / (1024 * 1024):.2f} MB"
                else:
                    health_status["database_size"] = (
                        f"{db_size_bytes / (1024 * 1024 * 1024):.2f} GB"
                    )
                health_status["database_size_bytes"] = db_size_bytes
    except Exception as e:
        logger.warning(f"Failed to get database size: {e}")
        # Don't fail health check if we can't get size

    return web.json_response(health_status)


@routes.get("/version")
async def version_endpoint(request):
    """Return version information including semver and git revision."""
    try:
        version_info = get_version_info()
        return web.json_response(version_info)
    except Exception as e:
        logger.error(f"Error in /version: {e}")
        return web.json_response({"error": "Failed to fetch version info"}, status=500)
+ + +@routes.get("/api/packets_seen/{packet_id}") +async def api_packets_seen(request): + try: + # --- Validate packet_id --- + try: + packet_id = int(request.match_info["packet_id"]) + except (KeyError, ValueError): + return web.json_response( + {"error": "Invalid or missing packet_id"}, + status=400, + ) + + # --- Fetch list using your helper --- + rows = await store.get_packets_seen(packet_id) + + items = [] + for row in rows: # <-- FIX: normal for-loop + items.append( + { + "packet_id": row.packet_id, + "node_id": row.node_id, + "rx_time": row.rx_time, + "hop_limit": row.hop_limit, + "hop_start": row.hop_start, + "channel": row.channel, + "rx_snr": row.rx_snr, + "rx_rssi": row.rx_rssi, + "topic": row.topic, + "import_time": (row.import_time.isoformat() if row.import_time else None), + "import_time_us": row.import_time_us, + } + ) + + return web.json_response({"seen": items}) + + except Exception: + logger.exception("Error in /api/packets_seen") + return web.json_response( + {"error": "Internal server error"}, + status=500, + ) diff --git a/mvrun.py b/mvrun.py index 0b9e7e4..469d9f4 100644 --- a/mvrun.py +++ b/mvrun.py @@ -59,14 +59,11 @@ def signal_handler(sig, frame): # Run python in subprocess -def run_script(script_name, pid_file, *args): +def run_script(python_executable, script_name, pid_file, *args): process = None try: - # Path to the Python interpreter inside the virtual environment - python_executable = './env/bin/python' - # Combine the script name and arguments - command = [python_executable, script_name] + list(args) + command = [python_executable, '-u', script_name] + list(args) # Run the subprocess (output goes directly to console for real-time viewing) process = subprocess.Popen(command) @@ -101,11 +98,13 @@ def main(): # Add --config runtime argument parser.add_argument('--config', help="Path to the configuration file.", default='config.ini') + parser.add_argument('--pid_dir', help="PID files path.", default='.') + 
parser.add_argument('--py_exec', help="Path to the Python executable.", default=sys.executable) args = parser.parse_args() # PID file paths - db_pid_file = 'meshview-db.pid' - web_pid_file = 'meshview-web.pid' + db_pid_file = os.path.join(args.pid_dir, 'meshview-db.pid') + web_pid_file = os.path.join(args.pid_dir, 'meshview-web.pid') # Track PID files globally for cleanup pid_files.append(db_pid_file) @@ -113,12 +112,12 @@ def main(): # Database Thread dbthrd = threading.Thread( - target=run_script, args=('startdb.py', db_pid_file, '--config', args.config) + target=run_script, args=(args.py_exec, 'startdb.py', db_pid_file, '--config', args.config) ) # Web server thread webthrd = threading.Thread( - target=run_script, args=('main.py', web_pid_file, '--config', args.config) + target=run_script, args=(args.py_exec, 'main.py', web_pid_file, '--config', args.config) ) # Start Meshview subprocess threads diff --git a/pyproject.toml b/pyproject.toml index b879c85..6d9aeb1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,49 @@ +[project] +name = "meshview" +version = "3.0.0" +description = "Real-time monitoring and diagnostic tool for the Meshtastic mesh network" +readme = "README.md" +requires-python = ">=3.10" +dependencies = [ + # Core async + networking + "aiohttp>=3.11.12,<4.0.0", + "aiohttp-sse", + "aiodns>=3.2.0,<4.0.0", + "aiomqtt>=2.3.0,<3.0.0", + "asyncpg>=0.30.0,<0.31.0", + "aiosqlite>=0.21.0,<0.22.0", + # Database + ORM + "sqlalchemy[asyncio]>=2.0.38,<3.0.0", + "alembic>=1.14.0,<2.0.0", + # Serialization / security + "protobuf>=5.29.3,<6.0.0", + "cryptography>=44.0.1,<45.0.0", + # Templates + "Jinja2>=3.1.5,<4.0.0", + "MarkupSafe>=3.0.2,<4.0.0", + # Graphs / diagrams + "pydot>=3.0.4,<4.0.0", +] + +[project.optional-dependencies] +dev = [ + # Data science stack + "numpy>=2.2.3,<3.0.0", + "pandas>=2.2.3,<3.0.0", + "matplotlib>=3.10.0,<4.0.0", + "seaborn>=0.13.2,<1.0.0", + "plotly>=6.0.0,<7.0.0", + # Image support + "pillow>=11.1.0,<12.0.0", + # 
Debugging / profiling + "psutil>=7.0.0,<8.0.0", + "objgraph>=3.6.2,<4.0.0", + # Testing + "pytest>=8.3.4,<9.0.0", + "pytest-aiohttp>=1.0.5,<2.0.0", + "pytest-asyncio>=0.24.0,<1.0.0", +] + [tool.ruff] # Linting target-version = "py313" diff --git a/requirements.txt b/requirements.txt index 4ce4803..5265cb5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,6 +12,7 @@ aiosqlite~=0.21.0 # Database + ORM sqlalchemy[asyncio]~=2.0.38 +alembic~=1.14.0 # Serialization / security protobuf~=5.29.3 @@ -42,3 +43,8 @@ pillow~=11.1.0 # Debugging / profiling psutil~=7.0.0 objgraph~=3.6.2 + +# Testing +pytest~=8.3.4 +pytest-aiohttp~=1.0.5 +pytest-asyncio~=0.24.0 \ No newline at end of file diff --git a/sample.config.ini b/sample.config.ini index 1931931..b8a1dd4 100644 --- a/sample.config.ini +++ b/sample.config.ini @@ -99,6 +99,15 @@ minute = 00 # Run VACUUM after cleanup vacuum = False +# Enable database backups (independent of cleanup) +backup_enabled = False +# Directory to store database backups (relative or absolute path) +backup_dir = ./backups +# Time to run daily backup (24-hour format) +# If not specified, uses cleanup hour/minute +backup_hour = 2 +backup_minute = 00 + # ------------------------- # Logging Configuration @@ -109,3 +118,5 @@ vacuum = False # Application logs (errors, startup messages, etc.) are unaffected # Set to True to enable, False to disable (default: False) access_log = False +# Database cleanup logfile +db_cleanup_logfile = dbcleanup.log diff --git a/setup-dev.sh b/setup-dev.sh new file mode 100755 index 0000000..d8edbf9 --- /dev/null +++ b/setup-dev.sh @@ -0,0 +1,84 @@ +#!/bin/bash +# +# setup-dev.sh +# +# Development environment setup script for MeshView +# This script sets up the Python virtual environment and installs development tools + +set -e + +echo "Setting up MeshView development environment..." +echo "" + +# Check if uv is installed +if ! command -v uv &> /dev/null; then + echo "Error: 'uv' is not installed." 
+ echo "Install it with: curl -LsSf https://astral.sh/uv/install.sh | sh" + exit 1 +fi + +# Create virtual environment if it doesn't exist +if [ ! -d "env" ]; then + echo "Creating Python virtual environment with uv..." + uv venv env + echo "✓ Virtual environment created" +else + echo "✓ Virtual environment already exists" +fi + +# Install requirements +echo "" +echo "Installing requirements..." +uv pip install -r requirements.txt +echo "✓ Requirements installed" + +# Install development tools +echo "" +echo "Installing development tools..." +uv pip install pre-commit pytest pytest-asyncio pytest-aiohttp +echo "✓ Development tools installed" + +# Install pre-commit hooks +echo "" +echo "Installing pre-commit hooks..." +./env/bin/pre-commit install +echo "✓ Pre-commit hooks installed" + +# Install graphviz check +echo "" +if command -v dot &> /dev/null; then + echo "✓ graphviz is installed" +else + echo "⚠ Warning: graphviz is not installed" + echo " Install it with:" + echo " macOS: brew install graphviz" + echo " Debian: sudo apt-get install graphviz" +fi + +# Create config.ini if it doesn't exist +echo "" +if [ ! -f "config.ini" ]; then + echo "Creating config.ini from sample..." + cp sample.config.ini config.ini + echo "✓ config.ini created" + echo " Edit config.ini to configure your MQTT and site settings" +else + echo "✓ config.ini already exists" +fi + +echo "" +echo "==========================================" +echo "Development environment setup complete!" +echo "==========================================" +echo "" +echo "Next steps:" +echo " 1. Edit config.ini with your MQTT settings" +echo " 2. Run: ./env/bin/python mvrun.py" +echo " 3. 
Open: http://localhost:8081" +echo "" +echo "Pre-commit hooks are now active:" +echo " - Ruff will auto-format and fix issues before each commit" +echo " - If files are changed, you'll need to git add and commit again" +echo "" +echo "Run tests with: ./env/bin/pytest tests/" +echo "" diff --git a/startdb.py b/startdb.py index 06ecd25..e733875 100644 --- a/startdb.py +++ b/startdb.py @@ -1,19 +1,32 @@ import asyncio import datetime +import gzip import json import logging +import shutil +from pathlib import Path from sqlalchemy import delete -from meshview import models, mqtt_database, mqtt_reader, mqtt_store +from meshview import migrations, models, mqtt_database, mqtt_reader, mqtt_store from meshview.config import CONFIG +# ------------------------- +# Basic logging configuration +# ------------------------- +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s %(filename)s:%(lineno)d [pid:%(process)d] %(levelname)s - %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", +) + # ------------------------- # Logging for cleanup # ------------------------- cleanup_logger = logging.getLogger("dbcleanup") cleanup_logger.setLevel(logging.INFO) -file_handler = logging.FileHandler("dbcleanup.log") +cleanup_logfile = CONFIG.get("logging", {}).get("db_cleanup_logfile", "dbcleanup.log") +file_handler = logging.FileHandler(cleanup_logfile) file_handler.setLevel(logging.INFO) formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s') file_handler.setFormatter(formatter) @@ -40,11 +53,91 @@ def get_int(config, section, key, default=0): db_lock = asyncio.Lock() +# ------------------------- +# Database backup function +# ------------------------- +async def backup_database(database_url: str, backup_dir: str = ".") -> None: + """ + Create a compressed backup of the database file. 
+ + Args: + database_url: SQLAlchemy connection string + backup_dir: Directory to store backups (default: current directory) + """ + try: + # Extract database file path from connection string + # Format: sqlite+aiosqlite:///path/to/db.db + if not database_url.startswith("sqlite"): + cleanup_logger.warning("Backup only supported for SQLite databases") + return + + db_path = database_url.split("///", 1)[1] if "///" in database_url else None + if not db_path: + cleanup_logger.error("Could not extract database path from connection string") + return + + db_file = Path(db_path) + if not db_file.exists(): + cleanup_logger.error(f"Database file not found: {db_file}") + return + + # Create backup directory if it doesn't exist + backup_path = Path(backup_dir) + backup_path.mkdir(parents=True, exist_ok=True) + + # Generate backup filename with timestamp + timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + backup_filename = f"{db_file.stem}_backup_{timestamp}.db.gz" + backup_file = backup_path / backup_filename + + cleanup_logger.info(f"Creating backup: {backup_file}") + + # Copy and compress the database file + with open(db_file, 'rb') as f_in: + with gzip.open(backup_file, 'wb', compresslevel=9) as f_out: + shutil.copyfileobj(f_in, f_out) + + # Get file sizes for logging + original_size = db_file.stat().st_size / (1024 * 1024) # MB + compressed_size = backup_file.stat().st_size / (1024 * 1024) # MB + compression_ratio = (1 - compressed_size / original_size) * 100 if original_size > 0 else 0 + + cleanup_logger.info( + f"Backup created successfully: {backup_file.name} " + f"({original_size:.2f} MB -> {compressed_size:.2f} MB, " + f"{compression_ratio:.1f}% compression)" + ) + + except Exception as e: + cleanup_logger.error(f"Error creating database backup: {e}") + + +# ------------------------- +# Database backup scheduler +# ------------------------- +async def daily_backup_at(hour: int = 2, minute: int = 0, backup_dir: str = "."): + while True: + now = 
datetime.datetime.now() + next_run = now.replace(hour=hour, minute=minute, second=0, microsecond=0) + if next_run <= now: + next_run += datetime.timedelta(days=1) + delay = (next_run - now).total_seconds() + cleanup_logger.info(f"Next backup scheduled at {next_run}") + await asyncio.sleep(delay) + + database_url = CONFIG["database"]["connection_string"] + await backup_database(database_url, backup_dir) + + # ------------------------- # Database cleanup using ORM # ------------------------- async def daily_cleanup_at( - hour: int = 2, minute: int = 0, days_to_keep: int = 14, vacuum_db: bool = True + hour: int = 2, + minute: int = 0, + days_to_keep: int = 14, + vacuum_db: bool = True, + wait_for_backup: bool = False, ): while True: now = datetime.datetime.now() @@ -55,6 +148,11 @@ async def daily_cleanup_at( cleanup_logger.info(f"Next cleanup scheduled at {next_run}") await asyncio.sleep(delay) + # If backup is enabled, wait a bit to let backup complete first + if wait_for_backup: + cleanup_logger.info("Waiting 60 seconds for backup to complete...") + await asyncio.sleep(60) + # Local-time cutoff as string for SQLite DATETIME comparison cutoff = (datetime.datetime.now() - datetime.timedelta(days=days_to_keep)).strftime( "%Y-%m-%d %H:%M:%S" @@ -134,9 +232,39 @@ async def load_database_from_mqtt( # Main function # ------------------------- async def main(): + logger = logging.getLogger(__name__) + # Initialize database - mqtt_database.init_database(CONFIG["database"]["connection_string"]) - await mqtt_database.create_tables() + database_url = CONFIG["database"]["connection_string"] + mqtt_database.init_database(database_url) + + # Create migration status table + await migrations.create_migration_status_table(mqtt_database.engine) + + # Set migration in progress flag + await migrations.set_migration_in_progress(mqtt_database.engine, True) + logger.info("Migration status set to 'in progress'") + + try: + # Check if migrations are needed before running them + 
logger.info("Checking for pending database migrations...") + if await migrations.is_database_up_to_date(mqtt_database.engine, database_url): + logger.info("Database schema is already up to date, skipping migrations") + else: + logger.info("Database schema needs updating, running migrations...") + migrations.run_migrations(database_url) + logger.info("Database migrations completed") + + # Create tables if needed (for backwards compatibility) + logger.info("Creating database tables...") + await mqtt_database.create_tables() + logger.info("Database tables created") + + finally: + # Clear migration in progress flag + logger.info("Clearing migration status...") + await migrations.set_migration_in_progress(mqtt_database.engine, False) + logger.info("Migration status cleared - database ready") mqtt_user = CONFIG["mqtt"].get("username") or None mqtt_passwd = CONFIG["mqtt"].get("password") or None @@ -148,6 +276,21 @@ async def main(): cleanup_hour = get_int(CONFIG, "cleanup", "hour", 2) cleanup_minute = get_int(CONFIG, "cleanup", "minute", 0) + backup_enabled = get_bool(CONFIG, "cleanup", "backup_enabled", False) + backup_dir = CONFIG.get("cleanup", {}).get("backup_dir", "./backups") + backup_hour = get_int(CONFIG, "cleanup", "backup_hour", cleanup_hour) + backup_minute = get_int(CONFIG, "cleanup", "backup_minute", cleanup_minute) + + logger.info(f"Starting MQTT ingestion from {CONFIG['mqtt']['server']}:{CONFIG['mqtt']['port']}") + if cleanup_enabled: + logger.info( + f"Daily cleanup enabled: keeping {cleanup_days} days of data at {cleanup_hour:02d}:{cleanup_minute:02d}" + ) + if backup_enabled: + logger.info( + f"Daily backups enabled: storing in {backup_dir} at {backup_hour:02d}:{backup_minute:02d}" + ) + async with asyncio.TaskGroup() as tg: tg.create_task( load_database_from_mqtt( @@ -159,10 +302,25 @@ async def main(): ) ) + # Start backup task if enabled + if backup_enabled: + tg.create_task(daily_backup_at(backup_hour, backup_minute, backup_dir)) + + # Start cleanup 
task if enabled (waits for backup if both run at same time) if cleanup_enabled: - tg.create_task(daily_cleanup_at(cleanup_hour, cleanup_minute, cleanup_days, vacuum_db)) - else: - cleanup_logger.info("Daily cleanup is disabled by configuration.") + wait_for_backup = ( + backup_enabled + and (backup_hour == cleanup_hour) + and (backup_minute == cleanup_minute) + ) + tg.create_task( + daily_cleanup_at( + cleanup_hour, cleanup_minute, cleanup_days, vacuum_db, wait_for_backup + ) + ) + + if not cleanup_enabled and not backup_enabled: + cleanup_logger.info("Daily cleanup and backups are both disabled by configuration.") # -------------------------