Mirror of https://github.com/jkingsman/Remote-Terminal-for-MeshCore.git, synced 2026-05-07 22:05:14 +02:00
Compare commits
67 Commits
| SHA1 | | | |
|---|---|---|---|
| 3b7e2737ee | |||
| 01158ac69f | |||
| 485df05372 | |||
| e5e9eab935 | |||
| 33b2d3c260 | |||
| eccbd0bac5 | |||
| 4f54ec2c93 | |||
| eed38337c8 | |||
| e1ee7fcd24 | |||
| 2756b1ae8d | |||
| ef1d6a5a1a | |||
| 14f42c59fe | |||
| b9414e84ee | |||
| 95a17ca8ee | |||
| e6cedfbd0b | |||
| c3d0af1473 | |||
| c24e291017 | |||
| d2d009ae79 | |||
| d09166df84 | |||
| f2762ab495 | |||
| a411562ca7 | |||
| cde4d1744e | |||
| 4e73cd39c8 | |||
| 53b341d6fb | |||
| 76ac97010e | |||
| 53a4d8186a | |||
| 70e1669113 | |||
| 3b1a292507 | |||
| 4f19e1ec9a | |||
| 59601bb98e | |||
| f6b0fd21fb | |||
| 8a4858a313 | |||
| 442c2fad20 | |||
| 8cc542ce23 | |||
| a7258c120e | |||
| 8752320f52 | |||
| f9f046a05f | |||
| 390c0624ea | |||
| 2f55d11b0b | |||
| fa0be24990 | |||
| 1e22a21445 | |||
| e09a3a01f7 | |||
| 3bd756ee4e | |||
| 43c5e0f67d | |||
| c0fc5fbba2 | |||
| c7248222dd | |||
| 1e18a91f12 | |||
| 18db6e4dd8 | |||
| 2393dadf1b | |||
| fd26576e0d | |||
| cb5a76eb5f | |||
| 7f5dde119f | |||
| 799a721761 | |||
| 152a584f35 | |||
| 5cc0476426 | |||
| e468c6c161 | |||
| e33537018b | |||
| 0727793560 | |||
| 5c4e04e024 | |||
| 967269ef7d | |||
| 1903797d0d | |||
| bb5af5ba82 | |||
| 424da7e232 | |||
| 159df1ec5b | |||
| 8e2e039985 | |||
| 01c86a486e | |||
| 7d5cfdec26 |
@@ -11,7 +11,7 @@ jobs:

    steps:
      - name: Check out repository
        uses: actions/checkout@v5
        uses: actions/checkout@v6

      - name: Set up Python
        uses: actions/setup-python@v6

@@ -44,7 +44,7 @@ jobs:

    steps:
      - name: Check out repository
        uses: actions/checkout@v5
        uses: actions/checkout@v6

      - name: Set up Node.js
        uses: actions/setup-node@v6
@@ -0,0 +1,73 @@
name: Publish AUR package

# Pushes the contents of pkg/aur/ to the remoteterm-meshcore AUR repository
# whenever a GitHub release is published. Can also be triggered manually for
# testing or out-of-band republishes.
#
# Required secrets:
#   AUR_SSH_PRIVATE_KEY  Private SSH key registered with the AUR maintainer
#                        account that owns the remoteterm-meshcore package.
#   AUR_COMMIT_EMAIL     Email used for the AUR git commit identity.

on:
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      version:
        description: 'Version to publish (no v prefix, e.g. 3.9.1)'
        required: true

concurrency:
  # Serialize publishes so a fast back-to-back release sequence cannot race
  # two pushes against the AUR repo. The later one wins by virtue of being
  # the final state.
  group: publish-aur
  cancel-in-progress: false

jobs:
  publish-aur:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v6

      - name: Resolve version from event
        id: version
        run: |
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            VERSION="${{ inputs.version }}"
          else
            VERSION="${{ github.event.release.tag_name }}"
          fi
          VERSION="${VERSION#v}"
          echo "version=$VERSION" >> "$GITHUB_OUTPUT"
          echo "Publishing AUR package for version $VERSION"

      - name: Stamp pkgver into PKGBUILD
        run: |
          sed -i "s/^pkgver=.*/pkgver=${{ steps.version.outputs.version }}/" pkg/aur/PKGBUILD
          sed -i "s/^pkgrel=.*/pkgrel=1/" pkg/aur/PKGBUILD

      - name: Publish to AUR
        uses: KSXGitHub/github-actions-deploy-aur@v4.1.2
        with:
          pkgname: remoteterm-meshcore
          pkgbuild: pkg/aur/PKGBUILD
          assets: |
            pkg/aur/remoteterm-meshcore.install
            pkg/aur/remoteterm-meshcore.service
            pkg/aur/remoteterm-meshcore.sysusers
            pkg/aur/remoteterm-meshcore.tmpfiles
            pkg/aur/remoteterm.env
          commit_username: jackkingsman
          commit_email: ${{ secrets.AUR_COMMIT_EMAIL }}
          ssh_private_key: ${{ secrets.AUR_SSH_PRIVATE_KEY }}
          commit_message: "Update to ${{ steps.version.outputs.version }}"
          # Recompute sha256sums from the live release tarball + the bundled
          # service/env files. The committed PKGBUILD has SKIP placeholders.
          updpkgsums: true
          # Validate the PKGBUILD parses and sources download, but skip the
          # actual build (which would run uv sync + npm install for several
          # minutes of CI time on every release).
          test: true
          test_flags: --clean --cleanbuild --nodeps --nobuild
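The stamping step can be reproduced locally to sanity-check the PKGBUILD before cutting a release. This is a minimal sketch (the version is an example value; `makepkg --printsrcinfo` only checks that the stamped file still parses, it does not build anything):

```bash
# Example local dry-run of the same stamping commands used by the workflow.
VERSION=3.9.1
sed -i "s/^pkgver=.*/pkgver=${VERSION}/" pkg/aur/PKGBUILD
sed -i "s/^pkgrel=.*/pkgrel=1/" pkg/aur/PKGBUILD
(cd pkg/aur && makepkg --printsrcinfo)   # fails loudly if the PKGBUILD no longer parses
```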
@@ -30,3 +30,6 @@ references/
docker-compose.yml
docker-compose.yaml
.docker-certs/

# HA test environment (created by scripts/setup/start_ha_test_env.sh)
ha_test_config/
@@ -209,6 +209,7 @@ This message-layer echo/path handling is independent of raw-packet storage dedup
│   │   ├── MapView.tsx          # Leaflet map showing node locations
│   │   └── ...
│   └── vite.config.ts
├── pkg/aur/                     # AUR package files (PKGBUILD, systemd service, env, install hooks)
├── scripts/                     # Quality / release helpers (listing below is representative, not exhaustive)
│   ├── build/
│   │   ├── collect_licenses.sh  # Gather third-party license attributions
@@ -216,7 +217,8 @@ This message-layer echo/path handling is independent of raw-packet storage dedup
│   ├── quality/
│   │   ├── all_quality.sh       # Repo-standard autofix + validate gate
│   │   ├── e2e.sh               # End-to-end test runner
│   │   └── extended_quality.sh  # Quality gate plus e2e and Docker matrix
│   │   ├── extended_quality.sh  # Quality gate plus e2e and Docker matrix
│   │   └── test_aur_package.sh  # Build + install AUR package in Arch Docker containers
│   └── setup/
│       ├── fetch_prebuilt_frontend.py  # Download release frontend fallback
│       └── install_service.sh          # Install/configure Linux systemd service
@@ -371,7 +373,7 @@ All endpoints are prefixed with `/api` (e.g., `/api/health`).
| POST | `/api/settings/favorites/toggle` | Toggle favorite status |
| POST | `/api/settings/blocked-keys/toggle` | Toggle blocked key |
| POST | `/api/settings/blocked-names/toggle` | Toggle blocked name |
| POST | `/api/settings/migrate` | One-time migration from frontend localStorage |
| POST | `/api/settings/tracked-telemetry/toggle` | Toggle tracked telemetry repeater |
| GET | `/api/fanout` | List all fanout configs |
| POST | `/api/fanout` | Create new fanout config |
| PATCH | `/api/fanout/{id}` | Update fanout config (triggers module reload) |
@@ -478,7 +480,7 @@ mc.subscribe(EventType.ACK, handler)
| `MESHCORE_ENABLE_MESSAGE_POLL_FALLBACK` | `false` | Switch the always-on radio audit task from hourly checks to aggressive 10-second polling; the audit checks both missed message drift and channel-slot cache drift |
| `MESHCORE_FORCE_CHANNEL_SLOT_RECONFIGURE` | `false` | Disable channel-slot reuse and force `set_channel(...)` before every channel send, even on serial/BLE |

**Note:** Runtime app settings are stored in the database (`app_settings` table), not environment variables. These include `max_radio_contacts`, `auto_decrypt_dm_on_advert`, `advert_interval`, `last_advert_time`, `favorites`, `last_message_times`, `flood_scope`, `blocked_keys`, `blocked_names`, and `discovery_blocked_types`. `max_radio_contacts` is the configured radio contact capacity baseline used by background maintenance: favorites reload first, non-favorite fill targets about 80% of that value, and full offload/reload triggers around 95% occupancy. They are configured via `GET/PATCH /api/settings`. MQTT, bot, webhook, Apprise, and SQS configs are stored in the `fanout_configs` table, managed via `/api/fanout`. If the radio's channel slots appear unstable or another client is mutating them underneath this app, operators can force the old always-reconfigure send path with `MESHCORE_FORCE_CHANNEL_SLOT_RECONFIGURE=true`.
**Note:** Runtime app settings are stored in the database (`app_settings` table), not environment variables. These include `max_radio_contacts`, `auto_decrypt_dm_on_advert`, `advert_interval`, `last_advert_time`, `last_message_times`, `flood_scope`, `blocked_keys`, `blocked_names`, `discovery_blocked_types`, `tracked_telemetry_repeaters`, and `auto_resend_channel`. `max_radio_contacts` is the configured radio contact capacity baseline used by background maintenance: favorites reload first, non-favorite fill targets about 80% of that value, and full offload/reload triggers around 95% occupancy. They are configured via `GET/PATCH /api/settings`. MQTT, bot, webhook, Apprise, and SQS configs are stored in the `fanout_configs` table, managed via `/api/fanout`. If the radio's channel slots appear unstable or another client is mutating them underneath this app, operators can force the old always-reconfigure send path with `MESHCORE_FORCE_CHANNEL_SLOT_RECONFIGURE=true`.

Byte-perfect channel retries are user-triggered via `POST /api/messages/channel/{message_id}/resend` and are allowed for 30 seconds after the original send.
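As a rough illustration of how those runtime settings are read and changed, the settings API can be exercised directly. This is a hedged sketch: the field name comes from the list above, the host and port assume a default local install, and the exact PATCH payload shape may differ.

```bash
# Read one runtime setting, then update it via PATCH /api/settings.
curl -s http://localhost:8000/api/settings | jq '.max_radio_contacts'
curl -s -X PATCH http://localhost:8000/api/settings \
  -H 'Content-Type: application/json' \
  -d '{"max_radio_contacts": 150}'
```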
+41 -7
@@ -1,3 +1,41 @@
## [3.11.3] - 2026-04-12

* Bugfix: Add icons and screenshots for webmanifest
* Bugfix: Use incoming DMs, not just outgoing, for recency ranking for preferential radio contact load

## [3.11.2] - 2026-04-12

* Feature: Unread DMs are always at the top of the DM list no matter what
* Bugfix: Webmanifest needs withCredentials

## [3.11.1] - 2026-04-12

* Feature: Home Assistant MQTT fanout
* Feature: Add dummy service worker to enable PWA
* Bugfix: DB connection plurality issues
* Misc: Migration improvements
* Misc: Search keys from beginning

## [3.11.0] - 2026-04-10

* Feature: Radio health and contact data accessible on fanout bus
* Feature: Local node radio stats (voltage etc.) on WS health bus
* Feature: Battery indicator optional in status bar (configured in Local Settings)
* Bugfix: Fix same-second same-message collision in room servers
* Bugfix: Don't consume DM resend attempt if the radio was just busy
* Bugfix: Assume that a same-second same-message same-first-byte-key DM is more likely an echo than them sending the same message
* Bugfix: Multi-retry for flood scope restoration
* Misc: Testing & documentation improvements

## [3.10.0] - 2026-04-10

* Feature: Add Arch AUR package
* Feature: 72hr packet density view in statistics
* Feature: Add warnings for event loop selection for MQTT on Windows startup
* Bugfix: Bump Apprise to 1.9.9 to fix Matrix bug
* Misc: More memory-conscious on recent contact fetch
* Misc: Fix statistics pane e2e test

## [3.9.0] - 2026-04-06

* Feature: Add hop counts to hop-width selection options
@@ -136,7 +174,7 @@
* Bugfix: Fix Apprise duplicate names
* Bugfix: Be better about identity resolution in the stats pane
* Misc: Docs, test, and performance enhancements
* Misc: Don't prompt "Are you sure" when leaving an unedited interation
* Misc: Don't prompt "Are you sure" when leaving an unedited integration
* Misc: Log node time on startup
* Misc: Improve community MQTT error bubble-up
* Misc: Unread DMs always have a red unread counter
@@ -163,7 +201,7 @@
## [3.3.0] - 2026-03-13

* Feature: Use dashed lines to show collapsed ambiguous router results
* Feature: Jump to unred
* Feature: Jump to unread
* Feature: Local channel management to prevent need to reload channel every time
* Feature: Debug endpoint
* Feature: Force-singleton channel management
@@ -226,7 +264,7 @@
* Feature: Massive codebase refactor and overhaul
* Bugfix: Fix packet parsing for trace packets
* Bugfix: Refetch channels on reconnect
* Bugfix: Load All on repeater pane on mobile doesn't etend into lower text
* Bugfix: Load All on repeater pane on mobile doesn't extend into lower text
* Bugfix: Timestamps in logs
* Bugfix: Correct wrong clock sync command
* Misc: Improve bot error bubble up
@@ -243,10 +281,6 @@

* Bugfix: Don't obscure new integration dropdown on session boundary

## [2.7.8] - 2026-03-08


## [2.7.8] - 2026-03-08

* Bugfix: Improve frontend asset resolution and fixup the build/push script
+98 -4
@@ -70,17 +70,111 @@ npm run test:run
npm run build
```

## Quality + Publishing Scripts

<details>
<summary>scripts/quality/</summary>

| Script | Purpose |
|--------|---------|
| `all_quality.sh` | Repo-standard gate: autofix (ruff, eslint, prettier), then pyright, pytest, vitest, and frontend build. Run before finishing any code change. |
| `extended_quality.sh` | `all_quality.sh` plus e2e tests and Docker build matrix. Used for release validation. |
| `e2e.sh` | Thin wrapper that runs Playwright e2e tests from `tests/e2e/`. |
| `docker_ci.sh` | Builds the Docker image and runs a smoke test against it. |
| `test_aur_package.sh` | Builds the AUR package in an Arch container, then installs and boots it in a second container with port 8000 exposed (hang finish). |
| `run_aur_with_radio.sh` | Like `test_aur_package.sh` but passes through the host serial device for testing with a real radio (hang finish). |

</details>

<details>
<summary>scripts/build/</summary>

| Script | Purpose |
|--------|---------|
| `publish.sh` | Full release ceremony: quality gate, version bump, changelog, frontend build, Docker multi-arch push, GitHub release. |
| `release_common.sh` | Shared shell helpers (version validation, formatting) sourced by other build scripts. |
| `package_release_artifact.sh` | Builds the prebuilt-frontend release zip attached to GitHub releases. |
| `push_docker_multiarch.sh` | Builds and pushes multi-arch Docker images (amd64 + arm64). |
| `create_github_release.sh` | Creates a GitHub release with changelog notes and the release artifact. |
| `extract_release_notes.sh` | Extracts the latest version's notes from `CHANGELOG.md` for the release body. |
| `collect_licenses.sh` | Gathers third-party license attributions into `LICENSES.md`. |
| `print_frontend_licenses.cjs` | Helper that extracts frontend npm dependency licenses. |
| `dump_api_specs.py` | Dumps the OpenAPI spec from the running backend (developer utility). |

</details>
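In practice the two gates above are invoked directly from the repo root; no wrapper is assumed, the script paths come from the tables above:

```bash
# Standard pre-change gate, then the heavier release-validation gate.
bash scripts/quality/all_quality.sh
bash scripts/quality/extended_quality.sh   # adds e2e tests and the Docker matrix
```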
## E2E Testing

E2E coverage exists, but it is intentionally not part of the normal development path.
E2E tests exercise the full stack (backend + frontend + real radio hardware) via Playwright.

These tests are only guaranteed to run correctly in a narrow subset of environments; they require a busy mesh with messages arriving constantly, an available autodetect-able radio, and a contact in the test database (which you can provide in `tests/e2e/.tmp/e2e-test.db` after an initial run). E2E tests are generally not necessary to run for normal development work.
> [!WARNING]
> E2E tests are **not part of the normal development path** — most contributors will never need to run them. They exist to catch integration issues that unit tests can't and generally only need to be run by maintainers.

### Hardware requirements

- A MeshCore radio connected via serial (auto-detected, or set `MESHCORE_SERIAL_PORT`)
- The radio must be powered on and past its startup sequence before tests begin

### Running

```bash
cd tests/e2e
npm install
npx playwright test              # headless
npx playwright test --headed     # you can probably guess
npx playwright install chromium  # first time only
npx playwright test              # headless
npx playwright test --headed     # watch it run
```

The test harness starts its own uvicorn instance on port 8001 with a fresh temporary database. Your development server (port 8000) is unaffected.

### Test tiers

**Most tests (22 of 28) are fully self-contained.** They seed their own data via API calls or direct DB writes and need only a connected radio. These cover messaging, pagination, search, favorites, settings, fanout integrations, historical decryption, and all UI-only views.

**Mesh-traffic tests (tagged `@mesh-traffic`)** wait up to 3 minutes for an incoming message from another node on the network. If no traffic arrives, they fail with an advisory that the failure may be RF conditions, not a bug. These are: `incoming-message` and `packet-feed` (second test only).

**The partner-radio DM ACK test (tagged `@partner-radio`)** validates direct-route learning by sending a DM and waiting for an ACK. It requires a second radio in range that has your test radio in its contacts. Configure the partner node's public key and name via `E2E_PARTNER_RADIO_PUBKEY` and `E2E_PARTNER_RADIO_NAME`.
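Because the tiers are distinguished by tag, a subset can be selected with Playwright's standard grep filters. A sketch, using the tags listed above:

```bash
# Run only the mesh-traffic tests, or skip the partner-radio test entirely.
npx playwright test --grep "@mesh-traffic"
npx playwright test --grep-invert "@partner-radio"
```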
### Making mesh-traffic tests reliable: the echo bot

The most practical way to guarantee incoming traffic is to run an **echo bot on a second radio** monitoring a known channel. When the test suite starts a `@mesh-traffic` test, it sends a trigger message to that channel. If a bot on another radio is listening, it replies — generating the incoming RF packet the test needs within seconds instead of waiting for organic mesh traffic.

The test suite sends `!echo please give incoming message` to the echo channel (default `#flightless`) at the start of each `@mesh-traffic` test. The trigger message is configurable via `E2E_ECHO_TRIGGER_MESSAGE`.

Setup:
1. Set up a second MeshCore radio within RF range of your test radio
2. Run a RemoteTerm instance on the second radio
3. Configure a bot on the second radio that monitors the echo channel and replies when it sees the trigger. Example bot code:
```python
def bot(sender_name, sender_key, message_text, is_dm,
        channel_key, channel_name, sender_timestamp, path):
    if "!echo" in message_text.lower():
        return f"[ECHO] {message_text}"
    return None
```
4. The test suite calls `nudgeEchoBot()` automatically — no manual intervention needed

Without the echo bot, `@mesh-traffic` tests rely on organic traffic from other nodes. In a quiet RF environment they will time out.

### Environment variables

All E2E environment configuration is centralized in `tests/e2e/helpers/env.ts` with defaults that work for the maintainer's test rig. Override via environment variables:

| Variable | Default | Purpose |
|----------|---------|---------|
| `MESHCORE_SERIAL_PORT` | auto-detect | Serial port for the test radio |
| `E2E_ECHO_CHANNEL` | `#flightless` | Channel the echo bot monitors for traffic generation |
| `E2E_ECHO_TRIGGER_MESSAGE` | `!echo please give incoming message` | Message sent to nudge the echo bot |
| `E2E_PARTNER_RADIO_PUBKEY` | *(maintainer's test node)* | 64-char hex public key of a node that will ACK DMs from your radio |
| `E2E_PARTNER_RADIO_NAME` | *(maintainer's test node)* | Display name of that node (used in UI assertions) |

Example for a contributor with their own two-radio setup:

```bash
E2E_ECHO_CHANNEL="#mytest" \
E2E_PARTNER_RADIO_PUBKEY="abcd1234...full64charhexkey..." \
E2E_PARTNER_RADIO_NAME="MyTestNode" \
npx playwright test
```

## Pull Request Expectations
+2 -2
@@ -56,7 +56,7 @@ SOFTWARE.

</details>

### apprise (1.9.7) — BSD-2-Clause
### apprise (1.9.9) — BSD-2-Clause

<details>
<summary>Full license text</summary>
@@ -64,7 +64,7 @@ SOFTWARE.
```
BSD 2-Clause License

Copyright (c) 2025, Chris Caron <lead2gold@gmail.com>
Copyright (c) 2026, Chris Caron <lead2gold@gmail.com>
All rights reserved.

Redistribution and use in source and binary forms, with or without
@@ -23,7 +23,7 @@ For advanced setup and troubleshooting see [README_ADVANCED.md](README_ADVANCED.

## Requirements

- Python 3.10+
- Python 3.11+
- Node.js LTS or current (20, 22, 24, 25) if you're not using a prebuilt release
- [UV](https://astral.sh/uv) package manager: `curl -LsSf https://astral.sh/uv/install.sh | sh`
- MeshCore radio connected via USB serial, TCP, or BLE
@@ -116,6 +116,8 @@ cp docker-compose.example.yml docker-compose.yml
bash scripts/setup/install_docker.sh
```

> The interactive generator enables a self-signed (snakeoil) TLS certificate by default. If you accept the default, the app will be served over HTTPS and the generated compose file will include certificate mounts and an SSL command override. Decline if you prefer plain HTTP or plan to terminate TLS externally.

Your local `docker-compose.yml` is gitignored so future pulls do not overwrite your Docker settings.

The guided Docker flow can collect BLE settings, but BLE access from Docker still needs manual compose customization such as Bluetooth passthrough and possibly privileged mode or host networking. If you want the simpler path for BLE, use the regular Python launch flow instead.
@@ -135,6 +137,8 @@ sudo docker compose pull
sudo docker compose up -d
```

> If you switched to a local build (`build: .` instead of `image:`), use `sudo docker compose up -d --build` instead — `pull` only fetches remote images.

The example file and setup script default to the published Docker Hub image. To build locally from your checkout instead, replace:

```yaml
@@ -161,6 +165,29 @@ To stop:
sudo docker compose down
```

## Install Path 3: Arch Linux (AUR)

A [`remoteterm-meshcore`](https://aur.archlinux.org/packages/remoteterm-meshcore) package is available in the AUR. Install it with an AUR helper or build it manually:

```bash
# with an AUR helper
yay -S remoteterm-meshcore

# or manually
git clone https://aur.archlinux.org/remoteterm-meshcore.git
cd remoteterm-meshcore
makepkg -si
```

Configure your radio connection, then start the service:

```bash
sudo vi /etc/remoteterm-meshcore/remoteterm.env
sudo systemctl enable --now remoteterm-meshcore
```

Access the app at http://localhost:8000.
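If the app doesn't come up, the usual systemd checks apply (unit name as installed by the package above):

```bash
# Check service state and follow its logs.
systemctl status remoteterm-meshcore
journalctl -u remoteterm-meshcore -f
```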
## Standard Environment Variables

Only one transport may be active at a time. If multiple are set, the server will refuse to start.
@@ -199,6 +226,15 @@ $env:MESHCORE_SERIAL_PORT="COM8" # or your COM port
uv run uvicorn app.main:app --host 0.0.0.0 --port 8000
```

> [!WARNING]
> **Windows + MQTT fanout:** Python's default Windows event loop (ProactorEventLoop) is not compatible with the MQTT libraries used by RemoteTerm. If you configure any MQTT integration, add `--loop none` to your uvicorn command:
>
> ```powershell
> uv run uvicorn app.main:app --host 0.0.0.0 --port 8000 --loop none
> ```
>
> If you forget, the app will start normally but MQTT connections will fail and you'll see a toast in the UI with this same guidance.

If you enable Basic Auth, protect the app with HTTPS. HTTP Basic credentials are not safe on plain HTTP. Also note that the app's permissive CORS policy is a deliberate trusted-network tradeoff, so cross-origin browser JavaScript is not a reliable way to use that Basic Auth gate.

## Where To Go Next
+305
@@ -0,0 +1,305 @@
# Home Assistant Integration

RemoteTerm can publish mesh network data to Home Assistant via MQTT Discovery. Devices and entities appear automatically in HA -- no custom component or HACS install needed.

## Prerequisites

- Home Assistant with the [MQTT integration](https://www.home-assistant.io/integrations/mqtt/) configured
- An MQTT broker (e.g. Mosquitto) accessible to both HA and RemoteTerm
- RemoteTerm running and connected to a radio

## Setup

1. In RemoteTerm, go to **Settings > Integrations > Add > Home Assistant MQTT Discovery**
2. Enter your MQTT broker host and port (same broker HA is connected to)
3. Optionally enter broker username/password and TLS settings
4. Select contacts for GPS tracking and repeaters for telemetry (see below)
5. Configure which messages should fire events (scope selector at the bottom)
6. Save and enable

Devices will appear in HA under **Settings > Devices & Services > MQTT** within a few seconds.
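If devices don't show up, it can help to confirm the broker itself is reachable before debugging further. A quick round-trip with the Mosquitto CLI tools works (substitute your broker host and any credentials; the topic here is just a scratch value):

```bash
# Terminal 1: subscribe to a scratch topic.
mosquitto_sub -h <broker> -t 'remoteterm/selftest' -v

# Terminal 2: publish to it; the message should appear in terminal 1.
mosquitto_pub -h <broker> -t 'remoteterm/selftest' -m 'hello'
```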
## What Gets Created

### Local Radio Device

Always created. Updates every 60 seconds.

| Entity | Type | Description |
|--------|------|-------------|
| `binary_sensor.meshcore_*_connected` | Connectivity | Radio online/offline |
| `sensor.meshcore_*_noise_floor` | Signal strength | Radio noise floor (dBm) |

### Repeater Devices

One device per tracked repeater (the repeater must be opted in to telemetry tracking). Updates when telemetry is collected: on the auto-collect cycle (~8 hours) or when you manually fetch from the repeater dashboard.

Repeaters must first be added to the auto-telemetry tracking list in RemoteTerm's Radio settings section. Only auto-tracked repeaters appear in the HA integration's repeater picker.

| Entity | Type | Unit | Description |
|--------|------|------|-------------|
| `sensor.meshcore_*_battery_voltage` | Voltage | V | Battery level |
| `sensor.meshcore_*_noise_floor` | Signal strength | dBm | Local noise floor |
| `sensor.meshcore_*_last_rssi` | Signal strength | dBm | Last received signal strength |
| `sensor.meshcore_*_last_snr` | -- | dB | Last signal-to-noise ratio |
| `sensor.meshcore_*_packets_received` | -- | count | Total packets received |
| `sensor.meshcore_*_packets_sent` | -- | count | Total packets sent |
| `sensor.meshcore_*_uptime` | Duration | s | Uptime since last reboot |

### Contact Device Trackers

One `device_tracker` per tracked contact. Updates passively whenever RemoteTerm hears an advertisement with GPS coordinates from that contact. No radio commands are sent -- it piggybacks on normal mesh traffic.

| Entity | Description |
|--------|-------------|
| `device_tracker.meshcore_*` | GPS position (latitude/longitude) |

### Message Event Entity

A single `event.meshcore_messages` entity that fires for each message matching your configured scope. Each event carries these attributes:

| Attribute | Example | Description |
|-----------|---------|-------------|
| `event_type` | `message_received` | Always `message_received` |
| `sender_name` | `Alice` | Display name of the sender |
| `sender_key` | `aabbccdd...` | Sender's public key |
| `text` | `hello` | Message body |
| `message_type` | `PRIV` or `CHAN` | Direct message or channel |
| `channel_name` | `#general` | Channel name (null for DMs) |
| `conversation_key` | `aabbccdd...` | Contact key (DM) or channel key |
| `outgoing` | `false` | Whether you sent this message |

## Entity Naming

Entity IDs use the first 12 characters of the node's public key as an identifier. For example, a contact with public key `ae92577bae6c...` gets entity ID `device_tracker.meshcore_ae92577bae6c`. You can rename entities in HA's UI without affecting the integration.

## Common Automations

### Low repeater battery alert

Notify when a tracked repeater's battery drops below a threshold.

**GUI:** Settings > Automations > Create > Numeric state trigger on `sensor.meshcore_*_battery_voltage`, below `3.8`, action: notification.

**YAML:**
```yaml
automation:
  - alias: "Repeater battery low"
    trigger:
      - platform: numeric_state
        entity_id: sensor.meshcore_aabbccddeeff_battery_voltage
        below: 3.8
    action:
      - service: notify.mobile_app_your_phone
        data:
          title: "Repeater Battery Low"
          message: >-
            {{ state_attr('sensor.meshcore_aabbccddeeff_battery_voltage', 'friendly_name') }}
            is at {{ states('sensor.meshcore_aabbccddeeff_battery_voltage') }}V
```

### Radio offline alert

Notify if the radio has been disconnected for more than 5 minutes.

**GUI:** Settings > Automations > Create > State trigger on `binary_sensor.meshcore_*_connected`, to `off`, for `00:05:00`, action: notification.

**YAML:**
```yaml
automation:
  - alias: "Radio offline"
    trigger:
      - platform: state
        entity_id: binary_sensor.meshcore_aabbccddeeff_connected
        to: "off"
        for: "00:05:00"
    action:
      - service: notify.mobile_app_your_phone
        data:
          title: "MeshCore Radio Offline"
          message: "Radio has been disconnected for 5 minutes"
```

### Alert on any message from a specific room

Trigger when a message arrives in a specific channel. Two approaches:

#### Option A: Scope filtering (fully GUI, no template)

If you only care about one room, configure the HA integration's message scope to "Only listed channels" and select that room. Then every event the entity fires comes from that room.

**GUI:** Settings > Automations > Create > State trigger on `event.meshcore_messages`, action: notification.

**YAML:**
```yaml
automation:
  - alias: "Emergency channel alert"
    trigger:
      - platform: state
        entity_id: event.meshcore_messages
    action:
      - service: notify.mobile_app_your_phone
        data:
          title: "Message in #emergency"
          message: >-
            {{ trigger.to_state.attributes.sender_name }}:
            {{ trigger.to_state.attributes.text }}
```

#### Option B: Template condition (multiple rooms, one integration)

Keep scope as "All messages" and filter in the automation. The trigger is GUI, but the condition uses a one-line template.

**GUI:** Settings > Automations > Create > State trigger on `event.meshcore_messages` > Add condition > Template > enter the template below.

**YAML:**
```yaml
automation:
  - alias: "Emergency channel alert"
    trigger:
      - platform: state
        entity_id: event.meshcore_messages
    condition:
      - condition: template
        value_template: >-
          {{ trigger.to_state.attributes.channel_name == '#emergency' }}
    action:
      - service: notify.mobile_app_your_phone
        data:
          title: "Message in #emergency"
          message: >-
            {{ trigger.to_state.attributes.sender_name }}:
            {{ trigger.to_state.attributes.text }}
```

### Alert on DM from a specific contact

**YAML:**
```yaml
automation:
  - alias: "DM from Alice"
    trigger:
      - platform: state
        entity_id: event.meshcore_messages
    condition:
      - condition: template
        value_template: >-
          {{ trigger.to_state.attributes.message_type == 'PRIV'
             and trigger.to_state.attributes.sender_name == 'Alice' }}
    action:
      - service: notify.mobile_app_your_phone
        data:
          title: "DM from Alice"
          message: "{{ trigger.to_state.attributes.text }}"
```

### Alert on messages containing a keyword

**YAML:**
```yaml
automation:
  - alias: "Keyword alert"
    trigger:
      - platform: state
        entity_id: event.meshcore_messages
    condition:
      - condition: template
        value_template: >-
          {{ 'emergency' in trigger.to_state.attributes.text | lower }}
    action:
      - service: notify.mobile_app_your_phone
        data:
          title: "Emergency keyword detected"
          message: >-
            {{ trigger.to_state.attributes.sender_name }} in
            {{ trigger.to_state.attributes.channel_name or 'DM' }}:
            {{ trigger.to_state.attributes.text }}
```

### Track a contact on the HA map

No automation needed. Once a contact is selected for GPS tracking, their `device_tracker` entity automatically appears on the HA map. Go to **Settings > Dashboards > Map** (or add a Map card to any dashboard) and the tracked contact will show up when they advertise their GPS position.

### Dashboard card showing repeater battery

Add a sensor card to any dashboard:

```yaml
type: sensor
entity: sensor.meshcore_aabbccddeeff_battery_voltage
name: "Hilltop Repeater Battery"
```

Or an entities card for multiple repeaters:

```yaml
type: entities
title: "Repeater Status"
entities:
  - entity: sensor.meshcore_aabbccddeeff_battery_voltage
    name: "Hilltop"
  - entity: sensor.meshcore_ccdd11223344_battery_voltage
    name: "Valley"
  - entity: sensor.meshcore_eeff55667788_battery_voltage
    name: "Ridge"
```

## Troubleshooting

### Devices don't appear in HA

- Verify the MQTT integration is configured in HA (**Settings > Devices & Services > MQTT**) and shows "Connected"
- Verify RemoteTerm's HA integration shows "Connected" (green dot)
- Check that both HA and RemoteTerm are using the same MQTT broker
- Subscribe to discovery topics to verify messages are flowing:
```
mosquitto_sub -h <broker> -t 'homeassistant/#' -v
```

### Stale or duplicate devices

If you see unexpected devices (e.g. a generic "MeshCore Radio" alongside your named radio), clear the stale retained messages:
```
mosquitto_pub -h <broker> -t 'homeassistant/binary_sensor/meshcore_unknown/connected/config' -r -n
mosquitto_pub -h <broker> -t 'homeassistant/sensor/meshcore_unknown/noise_floor/config' -r -n
```

### Repeater sensors show "Unknown" or "Unavailable"

Repeater telemetry only updates when collected. Trigger a manual fetch by opening the repeater's dashboard in RemoteTerm and clicking "Status", or wait for the next auto-collect cycle (~8 hours). Sensors show "Unknown" until the first telemetry reading arrives.

### Contact device tracker shows "Unknown"

The contact's GPS position only updates when RemoteTerm hears an advertisement from that node that includes GPS coordinates. If the contact's device doesn't broadcast GPS or hasn't advertised recently, the tracker will show as unknown.

### Entity is "Unavailable"

Radio health entities have a 120-second expiry. If RemoteTerm stops sending health updates (e.g. it's shut down or loses connection to the broker), HA marks the entities as unavailable after 2 minutes. Restart RemoteTerm or check the broker connection.

## Removing the Integration

Disabling or deleting the HA integration in RemoteTerm's settings publishes empty retained messages to all discovery topics, which removes the devices and entities from HA automatically.

## MQTT Topics Reference

State topics (where data is published):

| Topic | Content | Update frequency |
|-------|---------|-----------------|
| `meshcore/{node_id}/health` | `{"connected": bool, "noise_floor_dbm": int}` | Every 60s |
| `meshcore/{node_id}/telemetry` | `{"battery_volts": float, ...}` | ~8h or manual |
| `meshcore/{node_id}/gps` | `{"latitude": float, "longitude": float, ...}` | On advert |
| `meshcore/events/message` | `{"event_type": "message_received", ...}` | On message |

Discovery topics (entity registration, under `homeassistant/`):

| Pattern | Entity type |
|---------|------------|
| `homeassistant/binary_sensor/meshcore_*/connected/config` | Radio connectivity |
| `homeassistant/sensor/meshcore_*/noise_floor/config` | Noise floor sensor |
| `homeassistant/sensor/meshcore_*/battery_voltage/config` | Repeater battery |
| `homeassistant/sensor/meshcore_*/*/config` | Other repeater sensors |
| `homeassistant/device_tracker/meshcore_*/config` | Contact GPS tracker |
| `homeassistant/event/meshcore_messages/config` | Message event entity |

The `{node_id}` is always the first 12 characters of the node's public key, lowercased.
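To watch what RemoteTerm is actually publishing, subscribe to the `meshcore/` state tree directly; this mirrors the troubleshooting commands above (the broker host is a placeholder):

```bash
# Dump all RemoteTerm state topics as they are published.
mosquitto_sub -h <broker> -t 'meshcore/#' -v
```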
+11
-7
@@ -40,7 +40,7 @@ app/
|
||||
│ ├── contact_reconciliation.py # Prefix-claim, sender-key backfill, name-history wiring
|
||||
│ ├── radio_lifecycle.py # Post-connect setup and reconnect/setup helpers
|
||||
│ ├── radio_commands.py # Radio config/private-key command workflows
|
||||
│ ├── radio_noise_floor.py # In-memory local radio noise-floor sampling/history
|
||||
│ ├── radio_stats.py # In-memory local radio stats sampling and noise-floor history
|
||||
│ └── radio_runtime.py # Router/dependency seam over the global RadioManager
|
||||
├── radio.py # RadioManager transport/session state + lock management
|
||||
├── radio_sync.py # Polling, sync, periodic advertisement loop
|
||||
@@ -161,10 +161,12 @@ app/
|
||||
|
||||
- All external integrations (MQTT, bots, webhooks, Apprise, SQS) are managed through the fanout bus (`app/fanout/`).
|
||||
- Configs stored in `fanout_configs` table, managed via `GET/POST/PATCH/DELETE /api/fanout`.
|
||||
- `broadcast_event()` in `websocket.py` dispatches to the fanout manager for `message` and `raw_packet` events.
|
||||
- Each integration is a `FanoutModule` with scope-based filtering.
|
||||
- `broadcast_event()` in `websocket.py` dispatches to the fanout manager for `message`, `raw_packet`, and `contact` events.
|
||||
- `on_message` and `on_raw` are scope-gated. `on_contact`, `on_telemetry`, and `on_health` are dispatched to all modules unconditionally (modules filter internally).
|
||||
- Repeater telemetry broadcasts are emitted after `RepeaterTelemetryRepository.record()` in both `radio_sync.py` (auto-collect) and `routers/repeaters.py` (manual fetch).
|
||||
- The 60-second radio stats sampling loop in `radio_stats.py` dispatches an enriched health snapshot (radio identity + full stats) to all fanout modules after each sample.
|
||||
- Community MQTT publishes raw packets only, but its derived `path` field for direct packets is emitted as comma-separated hop identifiers, not flat path bytes.
|
||||
- See `app/fanout/AGENTS_fanout.md` for full architecture details.
|
||||
- See `app/fanout/AGENTS_fanout.md` for full architecture details and event payload shapes.
|
||||
|
||||
## API Surface (all under `/api`)
|
||||
|
||||
@@ -244,7 +246,7 @@ app/
|
||||
- `POST /settings/favorites/toggle`
|
||||
- `POST /settings/blocked-keys/toggle`
|
||||
- `POST /settings/blocked-names/toggle`
|
||||
- `POST /settings/migrate`
|
||||
- `POST /settings/tracked-telemetry/toggle`
|
||||
|
||||
### Fanout
|
||||
- `GET /fanout` — list all fanout configs
|
||||
@@ -286,6 +288,8 @@ Main tables:
|
||||
- `raw_packets`
|
||||
- `contact_advert_paths` (recent unique advertisement paths per contact, keyed by contact + path bytes + hop count)
|
||||
- `contact_name_history` (tracks name changes over time)
|
||||
- `repeater_telemetry_history` (time-series telemetry snapshots for tracked repeaters)
|
||||
- `fanout_configs` (MQTT, bot, webhook, Apprise, SQS integration configs)
|
||||
- `app_settings`
|
||||
|
||||
Contact route state is canonicalized on the backend:
|
||||
@@ -301,14 +305,14 @@ Repository writes should prefer typed models such as `ContactUpsert` over ad hoc
|
||||
|
||||
`app_settings` fields in active model:
|
||||
- `max_radio_contacts`
|
||||
- `favorites`
|
||||
- `auto_decrypt_dm_on_advert`
|
||||
- `last_message_times`
|
||||
- `preferences_migrated`
|
||||
- `advert_interval`
|
||||
- `last_advert_time`
|
||||
- `flood_scope`
|
||||
- `blocked_keys`, `blocked_names`, `discovery_blocked_types`
|
||||
- `tracked_telemetry_repeaters`
|
||||
- `auto_resend_channel`
|
||||
|
||||
Note: MQTT, community MQTT, and bot configs were migrated to the `fanout_configs` table (migrations 36-38).
|
||||
|
||||
|
||||
+31
-5
@@ -7,7 +7,7 @@ from app.config import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
SCHEMA = """
|
||||
SCHEMA_TABLES = """
|
||||
CREATE TABLE IF NOT EXISTS contacts (
|
||||
public_key TEXT PRIMARY KEY,
|
||||
name TEXT,
|
||||
@@ -130,13 +130,18 @@ CREATE TABLE IF NOT EXISTS repeater_telemetry_history (
|
||||
data TEXT NOT NULL,
|
||||
FOREIGN KEY (public_key) REFERENCES contacts(public_key) ON DELETE CASCADE
|
||||
);
|
||||
"""
|
||||
|
||||
# Indexes are created after migrations so that legacy databases have all
|
||||
# required columns (e.g. sender_key, added by migration 25) before index
|
||||
# creation runs.
|
||||
SCHEMA_INDEXES = """
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_received ON messages(received_at);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_messages_dedup_null_safe
|
||||
ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))
|
||||
WHERE type = 'CHAN';
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_messages_incoming_priv_dedup
|
||||
ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))
|
||||
ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0), COALESCE(sender_key, ''))
|
||||
WHERE type = 'PRIV' AND outgoing = 0;
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_sender_key ON messages(sender_key);
|
||||
CREATE INDEX IF NOT EXISTS idx_messages_pagination
|
||||
@@ -173,6 +178,22 @@ class Database:
|
||||
# Persists in the DB file but we set it explicitly on every connection.
|
||||
await self._connection.execute("PRAGMA journal_mode = WAL")
|
||||
|
||||
# synchronous = NORMAL is safe with WAL — only the most recent
|
||||
# transaction can be lost on an OS crash (no corruption risk).
|
||||
# Reduces fsync overhead vs. the default FULL.
|
||||
await self._connection.execute("PRAGMA synchronous = NORMAL")
|
||||
|
||||
# Retry for up to 5s on lock contention instead of failing instantly.
|
||||
# Matters when a second connection (e.g. VACUUM) touches the DB.
|
||||
await self._connection.execute("PRAGMA busy_timeout = 5000")
|
||||
|
||||
# Bump page cache to ~64 MB (negative value = KB). Keeps hot pages
|
||||
# in memory for read-heavy queries (unreads, pagination, search).
|
||||
await self._connection.execute("PRAGMA cache_size = -64000")
|
||||
|
||||
# Keep temp tables and sort spills in memory instead of on disk.
|
||||
await self._connection.execute("PRAGMA temp_store = MEMORY")
|
||||
|
||||
# Incremental auto-vacuum: freed pages are reclaimable via
|
||||
# PRAGMA incremental_vacuum without a full VACUUM. Must be set before
|
||||
# the first table is created (for new databases); for existing databases
|
||||
@@ -185,15 +206,20 @@ class Database:
|
||||
# constraints, then re-enabled for all subsequent application queries.
|
||||
await self._connection.execute("PRAGMA foreign_keys = OFF")
|
||||
|
||||
await self._connection.executescript(SCHEMA)
|
||||
await self._connection.executescript(SCHEMA_TABLES)
|
||||
await self._connection.commit()
|
||||
logger.debug("Database schema initialized")
|
||||
logger.debug("Database tables initialized")
|
||||
|
||||
# Run any pending migrations
|
||||
# Run any pending migrations before creating indexes, so that
|
||||
# legacy databases have all required columns first.
|
||||
from app.migrations import run_migrations
|
||||
|
||||
await run_migrations(self._connection)
|
||||
|
||||
await self._connection.executescript(SCHEMA_INDEXES)
|
||||
await self._connection.commit()
|
||||
logger.debug("Database indexes initialized")
|
||||
|
||||
# Enable FK enforcement for all application queries from this point on.
|
||||
await self._connection.execute("PRAGMA foreign_keys = ON")
|
||||
logger.debug("Foreign key enforcement enabled")
|
||||
|
||||
+2 -2
@@ -2,10 +2,10 @@

import json
import logging
from typing import Any, Literal
from typing import Any, Literal, NotRequired

from pydantic import TypeAdapter
from typing_extensions import NotRequired, TypedDict
from typing_extensions import TypedDict

from app.models import Channel, Contact, Message, MessagePath, RawPacketBroadcast
from app.routers.health import HealthResponse
@@ -1,6 +1,6 @@
|
||||
# Fanout Bus Architecture
|
||||
|
||||
The fanout bus is a unified system for dispatching mesh radio events (decoded messages and raw packets) to external integrations. It replaces the previous scattered singleton MQTT publishers with a modular, configurable framework.
|
||||
The fanout bus is a unified system for dispatching mesh radio events to external integrations. It replaces the previous scattered singleton MQTT publishers with a modular, configurable framework.
|
||||
|
||||
## Core Concepts
|
||||
|
||||
@@ -8,10 +8,15 @@ The fanout bus is a unified system for dispatching mesh radio events (decoded me
|
||||
Base class that all integration modules extend:
|
||||
- `__init__(config_id, config, *, name="")` — constructor; receives the config UUID, the type-specific config dict, and the user-assigned name
|
||||
- `start()` / `stop()` — async lifecycle (e.g. open/close connections)
|
||||
- `on_message(data)` — receive decoded messages (DM/channel)
|
||||
- `on_raw(data)` — receive raw RF packets
|
||||
- `on_message(data)` — receive decoded messages (scope-gated)
|
||||
- `on_raw(data)` — receive raw RF packets (scope-gated)
|
||||
- `on_contact(data)` — receive contact upserts; dispatched to all modules
|
||||
- `on_telemetry(data)` — receive repeater telemetry snapshots; dispatched to all modules
|
||||
- `on_health(data)` — receive periodic radio health snapshots; dispatched to all modules
|
||||
- `status` property (**must override**) — return `"connected"`, `"disconnected"`, or `"error"`
|
||||
|
||||
All five event hooks are no-ops by default; modules override only the ones they care about.
|
||||
|
||||
### FanoutManager (manager.py)
|
||||
Singleton that owns all active modules and dispatches events:
|
||||
- `load_from_db()` — startup: load enabled configs, instantiate modules
|
||||
@@ -19,6 +24,9 @@ Singleton that owns all active modules and dispatches events:
|
||||
- `remove_config(id)` — delete: stop and remove
|
||||
- `broadcast_message(data)` — scope-check + dispatch `on_message`
|
||||
- `broadcast_raw(data)` — scope-check + dispatch `on_raw`
|
||||
- `broadcast_contact(data)` — dispatch `on_contact` to all modules
|
||||
- `broadcast_telemetry(data)` — dispatch `on_telemetry` to all modules
|
||||
- `broadcast_health_fanout(data)` — dispatch `on_health` to all modules
|
||||
- `stop_all()` — shutdown
|
||||
- `get_statuses()` — health endpoint data
|
||||
|
||||
@@ -33,19 +41,65 @@ Each config has a `scope` JSON blob controlling what events reach it:
|
||||
```
|
||||
Community MQTT always enforces `{"messages": "none", "raw_packets": "all"}`.
|
||||
|
||||
Scope only gates `on_message` and `on_raw`. The `on_contact`, `on_telemetry`, and `on_health` hooks are dispatched to all modules unconditionally — modules that care about specific contacts or repeaters filter internally based on their own config.
|
||||
|
||||
## Event Flow
|
||||
|
||||
```
|
||||
Radio Event -> packet_processor / event_handler
|
||||
-> broadcast_event("message"|"raw_packet", data, realtime=True)
|
||||
-> broadcast_event("message"|"raw_packet"|"contact", data, realtime=True)
|
||||
-> WebSocket broadcast (always)
|
||||
-> FanoutManager.broadcast_message/raw (only if realtime=True)
|
||||
-> scope check per module
|
||||
-> module.on_message / on_raw
|
||||
-> FanoutManager.broadcast_message/raw/contact (only if realtime=True)
|
||||
-> scope check per module (message/raw only)
|
||||
-> module.on_message / on_raw / on_contact
|
||||
|
||||
Telemetry collect (radio_sync.py / routers/repeaters.py)
|
||||
-> RepeaterTelemetryRepository.record(...)
|
||||
-> FanoutManager.broadcast_telemetry(data)
|
||||
-> module.on_telemetry (all modules, unconditional)
|
||||
|
||||
Health fanout (radio_stats.py, piggybacks on 60s stats sampling loop)
|
||||
-> FanoutManager.broadcast_health_fanout(data)
|
||||
-> module.on_health (all modules, unconditional)
|
||||
```
|
||||
|
||||
Setting `realtime=False` (used during historical decryption) skips fanout dispatch entirely.
|
||||
|
||||
## Event Payloads
|
||||
|
||||
### on_message(data)
|
||||
`Message.model_dump()` — the full Pydantic message model. Key fields:
|
||||
- `type` (`"PRIV"` | `"CHAN"`), `conversation_key`, `text`, `sender_name`, `sender_key`
|
||||
- `outgoing`, `acked`, `paths`, `sender_timestamp`, `received_at`
|
||||
|
||||
### on_raw(data)
|
||||
Raw packet dict from `packet_processor.py`. Key fields:
|
||||
- `id` (storage row ID), `observation_id` (per-arrival), `raw` (hex), `timestamp`
|
||||
- `decrypted_info` (optional: `channel_key`, `contact_key`, `text`)
|
||||
|
||||
### on_contact(data)
|
||||
`Contact.model_dump()` — the full Pydantic contact model. Key fields:
|
||||
- `public_key`, `name`, `type` (0=unknown, 1=client, 2=repeater, 3=room, 4=sensor)
|
||||
- `lat`, `lon`, `last_seen`, `first_seen`, `on_radio`
|
||||
|
||||
### on_telemetry(data)
|
||||
Repeater telemetry snapshot, broadcast after successful `RepeaterTelemetryRepository.record()`.
|
||||
Identical shape from both auto-collect (`radio_sync.py`) and manual fetch (`routers/repeaters.py`):
|
||||
- `public_key`, `name`, `timestamp`
|
||||
- `battery_volts`, `noise_floor_dbm`, `last_rssi_dbm`, `last_snr_db`
|
||||
- `packets_received`, `packets_sent`, `airtime_seconds`, `rx_airtime_seconds`
|
||||
- `uptime_seconds`, `sent_flood`, `sent_direct`, `recv_flood`, `recv_direct`
|
||||
- `flood_dups`, `direct_dups`, `full_events`, `tx_queue_len`
|
||||
|
||||
### on_health(data)
|
||||
Radio health + stats snapshot, broadcast every 60s by the stats sampling loop in `radio_stats.py`:
|
||||
- `connected` (bool), `connection_info` (str | None)
|
||||
- `public_key` (str | None), `name` (str | None)
|
||||
- `noise_floor_dbm`, `battery_mv`, `uptime_secs` (int | None)
|
||||
- `last_rssi` (int | None), `last_snr` (float | None)
|
||||
- `tx_air_secs`, `rx_air_secs` (int | None)
|
||||
- `packets_recv`, `packets_sent`, `flood_tx`, `direct_tx`, `flood_rx`, `direct_rx` (int | None)
|
||||
|
||||
## Current Module Types
|
||||
|
||||
### mqtt_private (mqtt_private.py)
|
||||
|
||||
@@ -38,6 +38,15 @@ class FanoutModule:
    async def on_raw(self, data: dict) -> None:
        """Called for raw RF packets. Override if needed."""

    async def on_contact(self, data: dict) -> None:
        """Called for contact upserts (adverts, sync). Override if needed."""

    async def on_telemetry(self, data: dict) -> None:
        """Called for repeater telemetry snapshots. Override if needed."""

    async def on_health(self, data: dict) -> None:
        """Called for periodic radio health snapshots. Override if needed."""

    @property
    def status(self) -> str:
        """Return 'connected', 'disconnected', or 'error'."""
+1 -1
@@ -164,7 +164,7 @@ class BotModule(FanoutModule):
),
timeout=BOT_EXECUTION_TIMEOUT,
)
except asyncio.TimeoutError:
except TimeoutError:
logger.warning("Bot '%s' execution timed out", self.name)
return
except Exception:

@@ -538,7 +538,7 @@ class CommunityMqttPublisher(BaseMqttPublisher):
self._version_event.clear()
try:
await asyncio.wait_for(self._version_event.wait(), timeout=30)
except asyncio.TimeoutError:
except TimeoutError:
pass
return False
return True
+35
-1
@@ -31,12 +31,14 @@ def _register_module_types() -> None:
|
||||
from app.fanout.bot import BotModule
|
||||
from app.fanout.map_upload import MapUploadModule
|
||||
from app.fanout.mqtt_community import MqttCommunityModule
|
||||
from app.fanout.mqtt_ha import MqttHaModule
|
||||
from app.fanout.mqtt_private import MqttPrivateModule
|
||||
from app.fanout.sqs import SqsModule
|
||||
from app.fanout.webhook import WebhookModule
|
||||
|
||||
_MODULE_TYPES["mqtt_private"] = MqttPrivateModule
|
||||
_MODULE_TYPES["mqtt_community"] = MqttCommunityModule
|
||||
_MODULE_TYPES["mqtt_ha"] = MqttHaModule
|
||||
_MODULE_TYPES["bot"] = BotModule
|
||||
_MODULE_TYPES["webhook"] = WebhookModule
|
||||
_MODULE_TYPES["apprise"] = AppriseModule
|
||||
@@ -86,6 +88,11 @@ def _scope_matches_raw(scope: dict, _data: dict) -> bool:
|
||||
return scope.get("raw_packets", "none") == "all"
|
||||
|
||||
|
||||
def _always_match(_scope: dict, _data: dict) -> bool:
|
||||
"""Match all modules unconditionally (filtering is module-internal)."""
|
||||
return True
|
||||
|
||||
|
||||
class FanoutManager:
|
||||
"""Owns all active fanout modules and dispatches events."""
|
||||
|
||||
@@ -220,7 +227,7 @@ class FanoutManager:
|
||||
handler = getattr(module, handler_name)
|
||||
await asyncio.wait_for(handler(data), timeout=_DISPATCH_TIMEOUT_SECONDS)
|
||||
self._clear_module_error(config_id)
|
||||
except asyncio.TimeoutError:
|
||||
except TimeoutError:
|
||||
timeout_error = f"{handler_name} timed out after {_DISPATCH_TIMEOUT_SECONDS:.1f}s"
|
||||
self._set_module_error(config_id, timeout_error)
|
||||
logger.error(
|
||||
@@ -270,6 +277,33 @@ class FanoutManager:
|
||||
log_label="on_raw",
|
||||
)
|
||||
|
||||
async def broadcast_contact(self, data: dict) -> None:
|
||||
"""Dispatch a contact upsert to all modules."""
|
||||
await self._dispatch_matching(
|
||||
data,
|
||||
matcher=_always_match,
|
||||
handler_name="on_contact",
|
||||
log_label="on_contact",
|
||||
)
|
||||
|
||||
async def broadcast_telemetry(self, data: dict) -> None:
|
||||
"""Dispatch a repeater telemetry snapshot to all modules."""
|
||||
await self._dispatch_matching(
|
||||
data,
|
||||
matcher=_always_match,
|
||||
handler_name="on_telemetry",
|
||||
log_label="on_telemetry",
|
||||
)
|
||||
|
||||
async def broadcast_health_fanout(self, data: dict) -> None:
|
||||
"""Dispatch a radio health snapshot to all modules."""
|
||||
await self._dispatch_matching(
|
||||
data,
|
||||
matcher=_always_match,
|
||||
handler_name="on_health",
|
||||
log_label="on_health",
|
||||
)
|
||||
|
||||
async def stop_all(self) -> None:
|
||||
"""Shutdown all modules."""
|
||||
for config_id, (module, _) in list(self._modules.items()):
|
||||
|
||||
+31 -2
@@ -12,6 +12,7 @@ from __future__ import annotations
import asyncio
import json
import logging
import sys
import time
from abc import ABC, abstractmethod
from typing import Any
@@ -195,7 +196,7 @@ class BaseMqttPublisher(ABC):
                    self._version_event.wait(),
                    timeout=self._not_configured_timeout,
                )
            except asyncio.TimeoutError:
            except TimeoutError:
                continue
            except asyncio.CancelledError:
                return
@@ -230,7 +231,7 @@ class BaseMqttPublisher(ABC):
            self._version_event.clear()
            try:
                await asyncio.wait_for(self._version_event.wait(), timeout=60)
            except asyncio.TimeoutError:
            except TimeoutError:
                elapsed = time.monotonic() - connect_time
                await self._on_periodic_wake(elapsed)
                if self._should_break_wait(elapsed):
@@ -252,6 +253,34 @@ class BaseMqttPublisher(ABC):
                self._client = None
                self._last_error = _format_error_detail(e)

                # Windows ProactorEventLoop does not implement add_reader /
                # add_writer, which paho-mqtt requires. The failure can
                # surface as a direct NotImplementedError (add_writer in
                # __aenter__) or as a generic timeout (add_reader fails
                # inside an event-loop callback, so paho never hears back).
                # Either way, if we're on Windows with Proactor the root
                # cause is the same and retrying won't help.
                _on_proactor = (
                    sys.platform == "win32"
                    and type(asyncio.get_event_loop()).__name__ == "ProactorEventLoop"
                )
                if _on_proactor:
                    broadcast_error(
                        "MQTT unavailable — Windows event loop incompatible",
                        "The default Windows event loop (ProactorEventLoop) does "
                        "not support MQTT. Add --loop none to your uvicorn "
                        "command and restart. See README.md for details.",
                    )
                    _broadcast_health()
                    logger.error(
                        "%s cannot run: Windows ProactorEventLoop does not "
                        "implement add_reader/add_writer required by paho-mqtt. "
                        "Restart uvicorn with '--loop none' to use "
                        "SelectorEventLoop instead. Giving up (will not retry).",
                        self._integration_label(),
                    )
                    return

                title, detail = self._on_error()
                broadcast_error(title, detail)
                _broadcast_health()
@@ -0,0 +1,757 @@
"""Home Assistant MQTT Discovery fanout module.

Publishes HA-compatible discovery configs and state updates so that mesh
network devices appear natively in Home Assistant via its built-in MQTT
integration. No custom HA component is needed.

Entity types created:
- Local radio: binary_sensor (connectivity) + sensors (noise floor, battery,
  uptime, RSSI, SNR, airtime, packet counts)
- Per tracked repeater: sensor entities for telemetry fields
- Per tracked contact: device_tracker for GPS position
- Messages: event entity for scope-matched messages
"""

from __future__ import annotations

import logging
import ssl
from types import SimpleNamespace
from typing import Any

from app.fanout.base import FanoutModule, get_fanout_message_text
from app.fanout.mqtt_base import BaseMqttPublisher

logger = logging.getLogger(__name__)

# ── Repeater telemetry sensor definitions ─────────────────────────────────

_REPEATER_SENSORS: list[dict[str, Any]] = [
    {
        "field": "battery_volts",
        "name": "Battery Voltage",
        "object_id": "battery_voltage",
        "device_class": "voltage",
        "state_class": "measurement",
        "unit": "V",
        "precision": 2,
    },
    {
        "field": "noise_floor_dbm",
        "name": "Noise Floor",
        "object_id": "noise_floor",
        "device_class": "signal_strength",
        "state_class": "measurement",
        "unit": "dBm",
        "precision": 0,
    },
    {
        "field": "last_rssi_dbm",
        "name": "Last RSSI",
        "object_id": "last_rssi",
        "device_class": "signal_strength",
        "state_class": "measurement",
        "unit": "dBm",
        "precision": 0,
    },
    {
        "field": "last_snr_db",
        "name": "Last SNR",
        "object_id": "last_snr",
        "device_class": None,
        "state_class": "measurement",
        "unit": "dB",
        "precision": 1,
    },
    {
        "field": "packets_received",
        "name": "Packets Received",
        "object_id": "packets_received",
        "device_class": None,
        "state_class": "total_increasing",
        "unit": None,
        "precision": 0,
    },
    {
        "field": "packets_sent",
        "name": "Packets Sent",
        "object_id": "packets_sent",
        "device_class": None,
        "state_class": "total_increasing",
        "unit": None,
        "precision": 0,
    },
    {
        "field": "uptime_seconds",
        "name": "Uptime",
        "object_id": "uptime",
        "device_class": "duration",
        "state_class": None,
        "unit": "s",
        "precision": 0,
    },
]

# ── LPP sensor metadata ─────────────────────────────────────────────────

_LPP_HA_META: dict[str, dict[str, Any]] = {
    "temperature": {"device_class": "temperature", "unit": "°C", "precision": 1},
    "humidity": {"device_class": "humidity", "unit": "%", "precision": 1},
    "barometer": {"device_class": "atmospheric_pressure", "unit": "hPa", "precision": 1},
    "voltage": {"device_class": "voltage", "unit": "V", "precision": 2},
    "current": {"device_class": "current", "unit": "mA", "precision": 1},
    "luminosity": {"device_class": "illuminance", "unit": "lux", "precision": 0},
    "power": {"device_class": "power", "unit": "W", "precision": 1},
    "energy": {"device_class": "energy", "unit": "kWh", "precision": 2},
    "distance": {"device_class": "distance", "unit": "mm", "precision": 0},
    "concentration": {"device_class": None, "unit": "ppm", "precision": 0},
    "direction": {"device_class": None, "unit": "°", "precision": 0},
    "altitude": {"device_class": None, "unit": "m", "precision": 1},
}


def _lpp_sensor_key(type_name: str, channel: int) -> str:
    """Build the flat telemetry-payload key for an LPP sensor."""
    return f"lpp_{type_name}_ch{channel}"


def _lpp_discovery_configs(
    prefix: str,
    pub_key: str,
    device: dict,
    lpp_sensors: list[dict],
    state_topic: str,
) -> list[tuple[str, dict]]:
    """Build HA discovery configs for a repeater's LPP sensors."""
    configs: list[tuple[str, dict]] = []
    for sensor in lpp_sensors:
        type_name = sensor.get("type_name", "unknown")
        channel = sensor.get("channel", 0)
        field = _lpp_sensor_key(type_name, channel)
        meta = _LPP_HA_META.get(type_name, {})

        nid = _node_id(pub_key)
        object_id = field
        display = type_name.replace("_", " ").title()
        name = f"{display} (Ch {channel})"

        cfg: dict[str, Any] = {
            "name": name,
            "unique_id": f"meshcore_{nid}_{object_id}",
            "device": device,
            "state_topic": state_topic,
            "value_template": "{{ value_json." + field + " }}",
            "state_class": "measurement",
            "expire_after": 36000,
        }
        if meta.get("device_class"):
            cfg["device_class"] = meta["device_class"]
        if meta.get("unit"):
            cfg["unit_of_measurement"] = meta["unit"]
        if meta.get("precision") is not None:
            cfg["suggested_display_precision"] = meta["precision"]

        topic = f"homeassistant/sensor/meshcore_{nid}/{object_id}/config"
        configs.append((topic, cfg))

    return configs


# ── Local radio sensor definitions ────────────────────────────────────────

_RADIO_SENSORS: list[dict[str, Any]] = [
    {
        "field": "noise_floor_dbm",
        "name": "Noise Floor",
        "object_id": "noise_floor",
        "device_class": "signal_strength",
        "state_class": "measurement",
        "unit": "dBm",
        "precision": 0,
    },
    {
        "field": "battery_volts",
        "name": "Battery",
        "object_id": "battery",
        "device_class": "voltage",
        "state_class": "measurement",
        "unit": "V",
        "precision": 2,
    },
    {
        "field": "uptime_secs",
        "name": "Uptime",
        "object_id": "uptime",
        "device_class": "duration",
        "state_class": None,
        "unit": "s",
        "precision": 0,
    },
    {
        "field": "last_rssi",
        "name": "Last RSSI",
        "object_id": "last_rssi",
        "device_class": "signal_strength",
        "state_class": "measurement",
        "unit": "dBm",
        "precision": 0,
    },
    {
        "field": "last_snr",
        "name": "Last SNR",
        "object_id": "last_snr",
        "device_class": None,
        "state_class": "measurement",
        "unit": "dB",
        "precision": 1,
    },
    {
        "field": "tx_air_secs",
        "name": "TX Airtime",
        "object_id": "tx_airtime",
        "device_class": "duration",
        "state_class": "total_increasing",
        "unit": "s",
        "precision": 0,
    },
    {
        "field": "rx_air_secs",
        "name": "RX Airtime",
        "object_id": "rx_airtime",
        "device_class": "duration",
        "state_class": "total_increasing",
        "unit": "s",
        "precision": 0,
    },
    {
        "field": "packets_recv",
        "name": "Packets Received",
        "object_id": "packets_received",
        "device_class": None,
        "state_class": "total_increasing",
        "unit": None,
        "precision": 0,
    },
    {
        "field": "packets_sent",
        "name": "Packets Sent",
        "object_id": "packets_sent",
        "device_class": None,
        "state_class": "total_increasing",
        "unit": None,
        "precision": 0,
    },
]


def _node_id(public_key: str) -> str:
    """Derive a stable, MQTT-safe node identifier from a public key."""
    return public_key[:12].lower()
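    # Illustrative only: a hypothetical public key "A1B2C3D4E5F6..." maps to
    # node id "a1b2c3d4e5f6"; the 12-char prefix is assumed unique enough
    # within one mesh to key HA unique_ids and MQTT topics.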


def _device_payload(
    public_key: str,
    name: str,
    model: str,
    *,
    via_device_key: str | None = None,
) -> dict[str, Any]:
    """Build an HA device registry fragment."""
    dev: dict[str, Any] = {
        "identifiers": [f"meshcore_{_node_id(public_key)}"],
        "name": name or public_key[:12],
        "manufacturer": "MeshCore",
        "model": model,
    }
    if via_device_key:
        dev["via_device"] = f"meshcore_{_node_id(via_device_key)}"
    return dev


# ── MQTT publisher subclass ───────────────────────────────────────────────


class _HaMqttPublisher(BaseMqttPublisher):
    """Thin MQTT lifecycle wrapper for the HA discovery module."""

    _backoff_max = 30
    _log_prefix = "HA-MQTT"

    def __init__(self) -> None:
        super().__init__()
        self._on_connected_callback: Any = None

    def _is_configured(self) -> bool:
        s = self._settings
        return bool(s and s.broker_host)

    def _build_client_kwargs(self, settings: object) -> dict[str, Any]:
        s: Any = settings
        kw: dict[str, Any] = {
            "hostname": s.broker_host,
            "port": s.broker_port,
            "username": s.username or None,
            "password": s.password or None,
        }
        if s.use_tls:
            ctx = ssl.create_default_context()
            if s.tls_insecure:
                ctx.check_hostname = False
                ctx.verify_mode = ssl.CERT_NONE
            kw["tls_context"] = ctx
        return kw

    def _on_connected(self, settings: object) -> tuple[str, str]:
        s: Any = settings
        return ("HA MQTT connected", f"{s.broker_host}:{s.broker_port}")

    def _on_error(self) -> tuple[str, str]:
        return ("HA MQTT connection failure", "Please correct the settings or disable.")

    async def _on_connected_async(self, settings: object) -> None:
        if self._on_connected_callback:
            await self._on_connected_callback()


# ── Discovery config builders ─────────────────────────────────────────────


def _radio_discovery_configs(
    prefix: str,
    radio_key: str,
    radio_name: str,
) -> list[tuple[str, dict]]:
    """Build HA discovery config payloads for the local radio device."""
    nid = _node_id(radio_key)
    device = _device_payload(radio_key, radio_name, "Radio")
    state_topic = f"{prefix}/{nid}/health"
    configs: list[tuple[str, dict]] = []

    # binary_sensor: connected
    configs.append(
        (
            f"homeassistant/binary_sensor/meshcore_{nid}/connected/config",
            {
                "name": "Connected",
                "unique_id": f"meshcore_{nid}_connected",
                "device": device,
                "state_topic": state_topic,
                "value_template": "{{ 'ON' if value_json.connected else 'OFF' }}",
                "device_class": "connectivity",
                "payload_on": "ON",
                "payload_off": "OFF",
                "expire_after": 120,
            },
        )
    )

    # sensors from _RADIO_SENSORS (noise floor, battery, uptime, RSSI, etc.)
    for sensor in _RADIO_SENSORS:
        cfg: dict[str, Any] = {
            "name": sensor["name"],
            "unique_id": f"meshcore_{nid}_{sensor['object_id']}",
            "device": device,
            "state_topic": state_topic,
            "value_template": "{{ value_json." + sensor["field"] + " }}",  # type: ignore[operator]
            "expire_after": 120,
        }
        if sensor["device_class"]:
            cfg["device_class"] = sensor["device_class"]
        if sensor["state_class"]:
            cfg["state_class"] = sensor["state_class"]
        if sensor["unit"]:
            cfg["unit_of_measurement"] = sensor["unit"]
        if sensor.get("precision") is not None:
            cfg["suggested_display_precision"] = sensor["precision"]

        topic = f"homeassistant/sensor/meshcore_{nid}/{sensor['object_id']}/config"
        configs.append((topic, cfg))

    return configs


def _repeater_discovery_configs(
    prefix: str,
    pub_key: str,
    name: str,
    radio_key: str | None,
) -> list[tuple[str, dict]]:
    """Build HA discovery config payloads for a tracked repeater."""
    nid = _node_id(pub_key)
    device = _device_payload(pub_key, name, "Repeater", via_device_key=radio_key)
    state_topic = f"{prefix}/{nid}/telemetry"
    configs: list[tuple[str, dict]] = []

    for sensor in _REPEATER_SENSORS:
        cfg: dict[str, Any] = {
            "name": sensor["name"],
            "unique_id": f"meshcore_{nid}_{sensor['object_id']}",
            "device": device,
            "state_topic": state_topic,
            "value_template": "{{ value_json." + sensor["field"] + " }}",  # type: ignore[operator]
        }
        if sensor["device_class"]:
            cfg["device_class"] = sensor["device_class"]
        if sensor["state_class"]:
            cfg["state_class"] = sensor["state_class"]
        if sensor["unit"]:
            cfg["unit_of_measurement"] = sensor["unit"]
        if sensor.get("precision") is not None:
            cfg["suggested_display_precision"] = sensor["precision"]
        # 10 hours — margin over the 8-hour auto-collect cycle
        cfg["expire_after"] = 36000

        topic = f"homeassistant/sensor/meshcore_{nid}/{sensor['object_id']}/config"
        configs.append((topic, cfg))

    return configs


def _contact_tracker_discovery_config(
    prefix: str,
    pub_key: str,
    name: str,
    radio_key: str | None,
) -> tuple[str, dict]:
    """Build HA discovery config for a tracked contact's device_tracker."""
    nid = _node_id(pub_key)
    device = _device_payload(pub_key, name, "Node", via_device_key=radio_key)
    topic = f"homeassistant/device_tracker/meshcore_{nid}/config"
    cfg: dict[str, Any] = {
        "name": name or pub_key[:12],
        "unique_id": f"meshcore_{nid}_tracker",
        "device": device,
        "json_attributes_topic": f"{prefix}/{nid}/gps",
        "source_type": "gps",
    }
    return topic, cfg


def _message_event_discovery_config(
    prefix: str, radio_key: str, radio_name: str
) -> tuple[str, dict]:
    """Build HA discovery config for the message event entity."""
    nid = _node_id(radio_key)
    device = _device_payload(radio_key, radio_name, "Radio")
    topic = f"homeassistant/event/meshcore_{nid}/messages/config"
    cfg: dict[str, Any] = {
        "name": "MeshCore Messages",
        "unique_id": f"meshcore_{nid}_messages",
        "device": device,
        "state_topic": f"{prefix}/{nid}/events/message",
        "event_types": ["message_received"],
    }
    return topic, cfg
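    # Illustrative only: with the default "meshcore" prefix and a hypothetical
    # node id "a1b2c3d4e5f6", the discovery config lands on
    # "homeassistant/event/meshcore_a1b2c3d4e5f6/messages/config" and events
    # are published to "meshcore/a1b2c3d4e5f6/events/message".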


# ── Module class ──────────────────────────────────────────────────────────


def _config_to_settings(config: dict) -> SimpleNamespace:
    return SimpleNamespace(
        broker_host=config.get("broker_host", ""),
        broker_port=config.get("broker_port", 1883),
        username=config.get("username", ""),
        password=config.get("password", ""),
        use_tls=config.get("use_tls", False),
        tls_insecure=config.get("tls_insecure", False),
    )
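
# Note: this namespace mirrors the attribute access BaseMqttPublisher performs
# on its settings object (broker_host, broker_port, username, password,
# use_tls, tls_insecure), so a per-module config dict can be handed to the
# shared publisher unchanged.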


class MqttHaModule(FanoutModule):
    """Home Assistant MQTT Discovery fanout module."""

    def __init__(self, config_id: str, config: dict, *, name: str = "") -> None:
        super().__init__(config_id, config, name=name)
        self._publisher = _HaMqttPublisher()
        self._publisher.set_integration_name(name or config_id)
        self._publisher._on_connected_callback = self._publish_discovery
        self._discovery_topics: list[str] = []
        self._radio_key: str | None = None
        self._radio_name: str | None = None

    @property
    def _prefix(self) -> str:
        return self.config.get("topic_prefix", "meshcore")

    @property
    def _tracked_contacts(self) -> list[str]:
        return self.config.get("tracked_contacts") or []

    @property
    def _tracked_repeaters(self) -> list[str]:
        return self.config.get("tracked_repeaters") or []

    # ── Lifecycle ──────────────────────────────────────────────────────

    async def start(self) -> None:
        self._seed_radio_identity_from_runtime()
        settings = _config_to_settings(self.config)
        await self._publisher.start(settings)

    async def stop(self) -> None:
        await self._remove_discovery()
        await self._publisher.stop()
        self._discovery_topics.clear()

    # ── Discovery publishing ──────────────────────────────────────────

    async def _publish_discovery(self) -> None:
        """Publish all HA discovery configs with retain=True."""
        if not self._radio_key:
            # Don't publish discovery until we know the radio identity —
            # the first health heartbeat will provide it and trigger this.
            return

        configs: list[tuple[str, dict]] = []

        radio_name = self._radio_name or "MeshCore Radio"
        configs.extend(_radio_discovery_configs(self._prefix, self._radio_key, radio_name))

        # Tracked repeaters — resolve names and LPP sensors from DB best-effort
        for pub_key in self._tracked_repeaters:
            rname = await self._resolve_contact_name(pub_key)
            configs.extend(
                _repeater_discovery_configs(self._prefix, pub_key, rname, self._radio_key)
            )
            # Dynamic LPP sensor entities from last known telemetry snapshot
            lpp_sensors = await self._resolve_lpp_sensors(pub_key)
            if lpp_sensors:
                nid = _node_id(pub_key)
                device = _device_payload(pub_key, rname, "Repeater", via_device_key=self._radio_key)
                state_topic = f"{self._prefix}/{nid}/telemetry"
                configs.extend(
                    _lpp_discovery_configs(self._prefix, pub_key, device, lpp_sensors, state_topic)
                )

        # Tracked contacts — resolve names from DB best-effort
        for pub_key in self._tracked_contacts:
            cname = await self._resolve_contact_name(pub_key)
            configs.append(
                _contact_tracker_discovery_config(self._prefix, pub_key, cname, self._radio_key)
            )

        # Message event entity (namespaced to this radio)
        configs.append(_message_event_discovery_config(self._prefix, self._radio_key, radio_name))

        self._discovery_topics = [topic for topic, _ in configs]

        for topic, payload in configs:
            await self._publisher.publish(topic, payload, retain=True)

        logger.info(
            "HA MQTT: published %d discovery configs (%d repeaters, %d contacts)",
            len(configs),
            len(self._tracked_repeaters),
            len(self._tracked_contacts),
        )

    async def _clear_retained_topics(self, topics: list[str]) -> None:
        """Publish empty retained payloads to remove entries from broker."""
        for topic in topics:
            try:
                if self._publisher._client:
                    await self._publisher._client.publish(topic, b"", retain=True)
            except Exception:
                pass  # best-effort cleanup

    async def _remove_discovery(self) -> None:
        """Publish empty retained payloads to remove all HA entities."""
        if not self._publisher.connected or not self._discovery_topics:
            return
        await self._clear_retained_topics(self._discovery_topics)

    @staticmethod
    async def _resolve_contact_name(pub_key: str) -> str:
        """Look up a contact's display name, falling back to 12-char prefix."""
        try:
            from app.repository.contacts import ContactRepository

            contact = await ContactRepository.get_by_key(pub_key)
            if contact and contact.name:
                return contact.name
        except Exception:
            pass
        return pub_key[:12]

    @staticmethod
    async def _resolve_lpp_sensors(pub_key: str) -> list[dict]:
        """Return the LPP sensor list from the most recent telemetry snapshot, or []."""
        try:
            from app.repository.repeater_telemetry import RepeaterTelemetryRepository

            latest = await RepeaterTelemetryRepository.get_latest(pub_key)
            if latest:
                return latest.get("data", {}).get("lpp_sensors", [])
        except Exception:
            pass
        return []

    def _seed_radio_identity_from_runtime(self) -> None:
        """Best-effort bootstrap from the currently connected radio session."""
        try:
            from app.services.radio_runtime import radio_runtime

            if not radio_runtime.is_connected:
                return

            mc = radio_runtime.meshcore
            self_info = mc.self_info if mc is not None else None
            if not isinstance(self_info, dict):
                return

            pub_key = self_info.get("public_key")
            if isinstance(pub_key, str) and pub_key.strip():
                self._radio_key = pub_key.strip().lower()

            name = self_info.get("name")
            if isinstance(name, str) and name.strip():
                self._radio_name = name.strip()
        except Exception:
            logger.debug("HA MQTT: failed to seed radio identity from runtime", exc_info=True)

    # ── Event handlers ────────────────────────────────────────────────

    async def on_health(self, data: dict) -> None:
        if not self._publisher.connected:
            return

        # Cache radio identity for discovery config generation
        pub_key = data.get("public_key")
        if pub_key:
            new_name = data.get("name")
            key_changed = pub_key != self._radio_key
            name_changed = new_name and new_name != self._radio_name

            if key_changed:
                old_key = self._radio_key
                old_topics = list(self._discovery_topics)
                if old_topics:
                    await self._clear_retained_topics(old_topics)
                    self._discovery_topics.clear()
                self._radio_key = pub_key
                self._radio_name = new_name
                # Remove stale discovery entries from the old identity (e.g.
                # "unknown" placeholder from before the radio key was known),
                # then re-publish with the real identity.
                if old_key is not None and not old_topics:
                    await self._clear_retained_topics(
                        [t for t, _ in _radio_discovery_configs(self._prefix, old_key, "")]
                    )
                await self._publish_discovery()
            elif name_changed:
                self._radio_name = new_name
                await self._publish_discovery()

        # Don't publish health state until we know the radio identity —
        # otherwise we create a stale "unknown" device in HA.
        if not self._radio_key:
            return

        nid = _node_id(self._radio_key)
        payload: dict[str, Any] = {"connected": data.get("connected", False)}
        for sensor in _RADIO_SENSORS:
            field = sensor["field"]
            if field is not None:
                payload[field] = data.get(field)

        # Normalize battery from millivolts to volts for consistency with
        # repeater battery and the discovery config (unit: V, precision: 2).
        battery_mv = data.get("battery_mv")
        if battery_mv is not None:
            payload["battery_volts"] = battery_mv / 1000.0

        await self._publisher.publish(f"{self._prefix}/{nid}/health", payload)

    async def on_contact(self, data: dict) -> None:
        if not self._publisher.connected:
            return

        pub_key = data.get("public_key", "")
        if pub_key not in self._tracked_contacts:
            return

        lat = data.get("lat")
        lon = data.get("lon")
        if lat is None or lon is None or (lat == 0.0 and lon == 0.0):
            return

        nid = _node_id(pub_key)
        await self._publisher.publish(
            f"{self._prefix}/{nid}/gps",
            {
                "latitude": lat,
                "longitude": lon,
                "gps_accuracy": 0,
                "source_type": "gps",
            },
        )

    async def on_telemetry(self, data: dict) -> None:
        if not self._publisher.connected:
            return

        pub_key = data.get("public_key", "")
        if pub_key not in self._tracked_repeaters:
            return

        nid = _node_id(pub_key)
        # Publish the full telemetry dict — HA sensors use value_template
        # to extract individual fields
        payload: dict[str, Any] = {}
        for s in _REPEATER_SENSORS:
            field = s["field"]
            if field is not None:
                payload[field] = data.get(field)

        # Flatten LPP sensors into the same payload so HA value_templates work
        lpp_sensors: list[dict] = data.get("lpp_sensors", [])
        rediscover = False
        for sensor in lpp_sensors:
            key = _lpp_sensor_key(sensor.get("type_name", "unknown"), sensor.get("channel", 0))
            payload[key] = sensor.get("value")
            # Check if discovery for this sensor has been published yet
            expected_topic = f"homeassistant/sensor/meshcore_{nid}/{key}/config"
            if expected_topic not in self._discovery_topics:
                rediscover = True

        # If new LPP sensor types appeared, re-publish discovery *before*
        # the state payload so HA already knows the entity when the value arrives.
        if rediscover:
            await self._publish_discovery()

        await self._publisher.publish(f"{self._prefix}/{nid}/telemetry", payload)

    async def on_message(self, data: dict) -> None:
        if not self._publisher.connected or not self._radio_key:
            return

        text = get_fanout_message_text(data)
        nid = _node_id(self._radio_key)
        await self._publisher.publish(
            f"{self._prefix}/{nid}/events/message",
            {
                "event_type": "message_received",
                "sender_name": data.get("sender_name", ""),
                "sender_key": data.get("sender_key", ""),
                "text": text,
                "conversation_key": data.get("conversation_key", ""),
                "message_type": data.get("type", ""),
                "channel_name": data.get("channel_name"),
                "outgoing": data.get("outgoing", False),
            },
        )

    # ── Status ────────────────────────────────────────────────────────

    @property
    def status(self) -> str:
        if not self.config.get("broker_host"):
            return "disconnected"
        if self._publisher.last_error:
            return "error"
        return "connected" if self._publisher.connected else "disconnected"

    @property
    def last_error(self) -> str | None:
        return self._publisher.last_error
@@ -148,6 +148,39 @@ def register_frontend_static_routes(app: FastAPI, frontend_dir: Path) -> bool:
                "type": "image/png",
                "purpose": "maskable",
            },
            {
                "src": f"{base}favicon.svg",
                "sizes": "any",
                "type": "image/svg+xml",
                "purpose": "any",
            },
            {
                "src": f"{base}favicon-256x256.png",
                "sizes": "256x256",
                "type": "image/png",
                "purpose": "any",
            },
        ],
        "screenshots": [
            {
                "src": f"{base}screenshot-wide.png",
                "sizes": "1367x909",
                "type": "image/png",
                "form_factor": "wide",
                "label": "RemoteTerm desktop view",
            },
            {
                "src": f"{base}screenshot-mobile.png",
                "sizes": "1170x2532",
                "type": "image/png",
                "label": "RemoteTerm mobile view",
            },
            {
                "src": f"{base}screenshot-mobile-2.png",
                "sizes": "750x1334",
                "type": "image/png",
                "label": "RemoteTerm mobile conversation",
            },
        ],
    }
    return JSONResponse(
+1 -1
@@ -24,7 +24,7 @@ logger = logging.getLogger(__name__)

NO_EVENT_RECEIVED_GUIDANCE = (
    "Radio command channel is unresponsive (no_event_received). Ensure that your firmware is not "
    "incompatible, outdated, or wrong-mode (e.g. repeater, not client), and that"
    "incompatible, outdated, or wrong-mode (e.g. repeater, not client), and that "
    "serial/TCP/BLE connectivity is successful (try another app and see if that one works?). The app cannot proceed because it cannot "
    "issue commands to the radio."
)

+40 -4
@@ -1,5 +1,41 @@
import asyncio
import logging
import sys

# ---------------------------------------------------------------------------
# Windows event-loop advisory for MQTT fanout
# ---------------------------------------------------------------------------
# On Windows, uvicorn's default event loop (ProactorEventLoop) does not
# implement add_reader()/add_writer(), which paho-mqtt (via aiomqtt) requires.
# We cannot fix this from inside the app — the loop is already created by the
# time this module is imported. Log a prominent warning so Windows operators
# who want MQTT know to add ``--loop none`` to their uvicorn command.
# ---------------------------------------------------------------------------
if sys.platform == "win32":
    import asyncio as _asyncio

    _loop = _asyncio.get_event_loop()
    _is_proactor = type(_loop).__name__ == "ProactorEventLoop"
    if _is_proactor:
        print(
            "\n" + "!" * 78 + "\n"
            " NOTE FOR WINDOWS USERS\n" + "!" * 78 + "\n"
            "\n"
            " The running event loop is ProactorEventLoop, which is not\n"
            " compatible with MQTT fanout (aiomqtt / paho-mqtt).\n"
            "\n"
            " If you use MQTT integrations, restart with --loop none:\n"
            "\n"
            " uv run uvicorn app.main:app \033[1m--loop none\033[0m"
            " [... other options ...]\n"
            "\n"
            " Everything else works fine as-is.\n"
            "\n" + "!" * 78 + "\n",
            file=sys.stderr,
            flush=True,
        )
    del _loop, _is_proactor

import asyncio
from contextlib import asynccontextmanager
from pathlib import Path

@@ -40,8 +76,8 @@ from app.routers import (
    ws,
)
from app.security import add_optional_basic_auth_middleware
from app.services.radio_noise_floor import start_noise_floor_sampling, stop_noise_floor_sampling
from app.services.radio_runtime import radio_runtime as radio_manager
from app.services.radio_stats import start_radio_stats_sampling, stop_radio_stats_sampling
from app.version_info import get_app_build_info

setup_logging()
@@ -72,7 +108,7 @@ async def lifespan(app: FastAPI):
    from app.radio_sync import ensure_default_channels

    await ensure_default_channels()
    await start_noise_floor_sampling()
    await start_radio_stats_sampling()

    # Always start connection monitor (even if initial connection failed)
    await radio_manager.start_connection_monitor()
@@ -101,7 +137,7 @@ async def lifespan(app: FastAPI):
    await radio_manager.stop_connection_monitor()
    await stop_background_contact_reconciliation()
    await stop_message_polling()
    await stop_noise_floor_sampling()
    await stop_radio_stats_sampling()
    await stop_periodic_advert()
    await stop_periodic_sync()
    await stop_telemetry_collect()

-3309 File diff suppressed because it is too large
@@ -0,0 +1,38 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add last_read_at column to contacts and channels tables.

    This enables server-side read state tracking, replacing the localStorage
    approach for consistent read state across devices.

    ALTER TABLE ADD COLUMN is safe - it preserves existing data, and the
    migration handles the "column already exists" case gracefully.
    """
    # Add to contacts table
    try:
        await conn.execute("ALTER TABLE contacts ADD COLUMN last_read_at INTEGER")
        logger.debug("Added last_read_at to contacts table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("contacts.last_read_at already exists, skipping")
        else:
            raise

    # Add to channels table
    try:
        await conn.execute("ALTER TABLE channels ADD COLUMN last_read_at INTEGER")
        logger.debug("Added last_read_at to channels table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("channels.last_read_at already exists, skipping")
        else:
            raise

    await conn.commit()
@@ -0,0 +1,32 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Drop unused decrypt_attempts and last_attempt columns from raw_packets.

    These columns were added for a retry-limiting feature that was never implemented.
    They are written to but never read, so we can safely remove them.

    SQLite 3.35.0+ supports ALTER TABLE DROP COLUMN. For older versions,
    we silently skip (the columns will remain but are harmless).
    """
    for column in ["decrypt_attempts", "last_attempt"]:
        try:
            await conn.execute(f"ALTER TABLE raw_packets DROP COLUMN {column}")
            logger.debug("Dropped %s from raw_packets table", column)
        except aiosqlite.OperationalError as e:
            error_msg = str(e).lower()
            if "no such column" in error_msg:
                logger.debug("raw_packets.%s already dropped, skipping", column)
            elif "syntax error" in error_msg or "drop column" in error_msg:
                # SQLite version doesn't support DROP COLUMN - harmless, column stays
                logger.debug("SQLite doesn't support DROP COLUMN, %s column will remain", column)
            else:
                raise

    await conn.commit()
@@ -0,0 +1,49 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Drop the decrypted column and update indexes.

    The decrypted column is redundant with message_id - a packet is decrypted
    iff message_id IS NOT NULL. We replace the decrypted index with a message_id index.

    SQLite 3.35.0+ supports ALTER TABLE DROP COLUMN. For older versions,
    we silently skip the column drop but still update the index.
    """
    # First, drop the old index on decrypted (safe even if it doesn't exist)
    try:
        await conn.execute("DROP INDEX IF EXISTS idx_raw_packets_decrypted")
        logger.debug("Dropped idx_raw_packets_decrypted index")
    except aiosqlite.OperationalError:
        pass  # Index didn't exist

    # Create new index on message_id for efficient undecrypted packet queries
    try:
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_raw_packets_message_id ON raw_packets(message_id)"
        )
        logger.debug("Created idx_raw_packets_message_id index")
    except aiosqlite.OperationalError as e:
        if "already exists" not in str(e).lower():
            raise

    # Try to drop the decrypted column
    try:
        await conn.execute("ALTER TABLE raw_packets DROP COLUMN decrypted")
        logger.debug("Dropped decrypted from raw_packets table")
    except aiosqlite.OperationalError as e:
        error_msg = str(e).lower()
        if "no such column" in error_msg:
            logger.debug("raw_packets.decrypted already dropped, skipping")
        elif "syntax error" in error_msg or "drop column" in error_msg:
            # SQLite version doesn't support DROP COLUMN - harmless, column stays
            logger.debug("SQLite doesn't support DROP COLUMN, decrypted column will remain")
        else:
            raise

    await conn.commit()
@@ -0,0 +1,24 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add payload_hash column to raw_packets for deduplication.

    This column stores the SHA-256 hash of the packet payload (excluding routing/path info).
    It will be used with a unique index to prevent duplicate packets from being stored.
    """
    try:
        await conn.execute("ALTER TABLE raw_packets ADD COLUMN payload_hash TEXT")
        logger.debug("Added payload_hash column to raw_packets table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("raw_packets.payload_hash already exists, skipping")
        else:
            raise

    await conn.commit()
@@ -0,0 +1,126 @@
import logging
from hashlib import sha256

import aiosqlite

logger = logging.getLogger(__name__)


def _extract_payload_for_hash(raw_packet: bytes) -> bytes | None:
    """
    Extract payload from a raw packet for hashing using canonical framing validation.

    Returns the payload bytes, or None if packet is malformed.
    """
    from app.path_utils import parse_packet_envelope

    envelope = parse_packet_envelope(raw_packet)
    return envelope.payload if envelope is not None else None
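    # Hedged note: hashing only the parsed payload (not the full envelope)
    # means the same packet heard again via a different routing path produces
    # the same hash, which is what lets the unique index added at the end of
    # this migration collapse duplicates.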


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Backfill payload_hash for existing packets and remove duplicates.

    This may take a while for large databases. Progress is logged.
    After backfilling, a unique index is created to prevent future duplicates.
    """
    # Get count first
    cursor = await conn.execute("SELECT COUNT(*) FROM raw_packets WHERE payload_hash IS NULL")
    row = await cursor.fetchone()
    total = row[0] if row else 0

    if total == 0:
        logger.debug("No packets need hash backfill")
    else:
        logger.info("Backfilling payload hashes for %d packets. This may take a while...", total)

        # Process in batches to avoid memory issues
        batch_size = 1000
        processed = 0
        duplicates_deleted = 0

        # Track seen hashes to identify duplicates (keep oldest = lowest ID)
        seen_hashes: dict[str, int] = {}  # hash -> oldest packet ID

        # First pass: compute hashes and identify duplicates
        cursor = await conn.execute("SELECT id, data FROM raw_packets ORDER BY id ASC")

        packets_to_update: list[tuple[str, int]] = []  # (hash, id)
        ids_to_delete: list[int] = []

        while True:
            rows = await cursor.fetchmany(batch_size)
            if not rows:
                break

            for row in rows:
                packet_id = row[0]
                packet_data = bytes(row[1])

                # Extract payload and compute hash
                payload = _extract_payload_for_hash(packet_data)
                if payload:
                    payload_hash = sha256(payload).hexdigest()
                else:
                    # For malformed packets, hash the full data
                    payload_hash = sha256(packet_data).hexdigest()

                if payload_hash in seen_hashes:
                    # Duplicate - mark for deletion (we keep the older one)
                    ids_to_delete.append(packet_id)
                    duplicates_deleted += 1
                else:
                    # New hash - keep this packet
                    seen_hashes[payload_hash] = packet_id
                    packets_to_update.append((payload_hash, packet_id))

                processed += 1

                if processed % 10000 == 0:
                    logger.info("Processed %d/%d packets...", processed, total)

        # Second pass: update hashes for packets we're keeping
        total_updates = len(packets_to_update)
        logger.info("Updating %d packets with hashes...", total_updates)
        for idx, (payload_hash, packet_id) in enumerate(packets_to_update, 1):
            await conn.execute(
                "UPDATE raw_packets SET payload_hash = ? WHERE id = ?",
                (payload_hash, packet_id),
            )
            if idx % 10000 == 0:
                logger.info("Updated %d/%d packets...", idx, total_updates)

        # Third pass: delete duplicates
        if ids_to_delete:
            total_deletes = len(ids_to_delete)
            logger.info("Removing %d duplicate packets...", total_deletes)
            deleted_count = 0
            # Delete in batches to avoid "too many SQL variables" error
            for i in range(0, len(ids_to_delete), 500):
                batch = ids_to_delete[i : i + 500]
                placeholders = ",".join("?" * len(batch))
                await conn.execute(f"DELETE FROM raw_packets WHERE id IN ({placeholders})", batch)
                deleted_count += len(batch)
                if deleted_count % 10000 < 500:  # Log roughly every 10k
                    logger.info("Removed %d/%d duplicates...", deleted_count, total_deletes)

        await conn.commit()
        logger.info(
            "Hash backfill complete: %d packets updated, %d duplicates removed",
            len(packets_to_update),
            duplicates_deleted,
        )

    # Create unique index on payload_hash (this enforces uniqueness going forward)
    try:
        await conn.execute(
            "CREATE UNIQUE INDEX IF NOT EXISTS idx_raw_packets_payload_hash "
            "ON raw_packets(payload_hash)"
        )
        logger.debug("Created unique index on payload_hash")
    except aiosqlite.OperationalError as e:
        if "already exists" not in str(e).lower():
            raise

    await conn.commit()
@@ -0,0 +1,42 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Replace path_len INTEGER column with path TEXT column in messages table.

    The path column stores the hex-encoded routing path bytes. Path length can
    be derived from the hex string (2 chars per byte = 1 hop).

    SQLite 3.35.0+ supports ALTER TABLE DROP COLUMN. For older versions,
    we silently skip the drop (the column will remain but is unused).
    """
    # First, add the new path column
    try:
        await conn.execute("ALTER TABLE messages ADD COLUMN path TEXT")
        logger.debug("Added path column to messages table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("messages.path already exists, skipping")
        else:
            raise

    # Try to drop the old path_len column
    try:
        await conn.execute("ALTER TABLE messages DROP COLUMN path_len")
        logger.debug("Dropped path_len from messages table")
    except aiosqlite.OperationalError as e:
        error_msg = str(e).lower()
        if "no such column" in error_msg:
            logger.debug("messages.path_len already dropped, skipping")
        elif "syntax error" in error_msg or "drop column" in error_msg:
            # SQLite version doesn't support DROP COLUMN - harmless, column stays
            logger.debug("SQLite doesn't support DROP COLUMN, path_len column will remain")
        else:
            raise

    await conn.commit()
@@ -0,0 +1,96 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


def _extract_path_from_packet(raw_packet: bytes) -> str | None:
    """
    Extract path hex string from a raw packet using canonical framing validation.

    Returns the path as a hex string, or None if packet is malformed.
    """
    from app.path_utils import parse_packet_envelope

    envelope = parse_packet_envelope(raw_packet)
    return envelope.path.hex() if envelope is not None else None
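    # Illustrative only: a two-hop path of raw bytes 0x1A 0x2B serializes to
    # "1a2b", matching the 2-hex-chars-per-hop convention the later paths
    # migration documents.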


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Backfill path column for messages that have linked raw_packets.

    For each message with a linked raw_packet (via message_id), extract the
    path from the raw packet and update the message.

    Only updates incoming messages (outgoing=0) since outgoing messages
    don't have meaningful path data.
    """
    # Get count of messages that need backfill
    cursor = await conn.execute(
        """
        SELECT COUNT(*)
        FROM messages m
        JOIN raw_packets rp ON rp.message_id = m.id
        WHERE m.path IS NULL AND m.outgoing = 0
        """
    )
    row = await cursor.fetchone()
    total = row[0] if row else 0

    if total == 0:
        logger.debug("No messages need path backfill")
        return

    logger.info("Backfilling path for %d messages. This may take a while...", total)

    # Process in batches
    batch_size = 1000
    processed = 0
    updated = 0

    cursor = await conn.execute(
        """
        SELECT m.id, rp.data
        FROM messages m
        JOIN raw_packets rp ON rp.message_id = m.id
        WHERE m.path IS NULL AND m.outgoing = 0
        ORDER BY m.id ASC
        """
    )

    updates: list[tuple[str, int]] = []  # (path, message_id)

    while True:
        rows = await cursor.fetchmany(batch_size)
        if not rows:
            break

        for row in rows:
            message_id = row[0]
            packet_data = bytes(row[1])

            path_hex = _extract_path_from_packet(packet_data)
            if path_hex is not None:
                updates.append((path_hex, message_id))

            processed += 1

            if processed % 10000 == 0:
                logger.info("Processed %d/%d messages...", processed, total)

    # Apply updates in batches
    if updates:
        logger.info("Updating %d messages with path data...", len(updates))
        for idx, (path_hex, message_id) in enumerate(updates, 1):
            await conn.execute(
                "UPDATE messages SET path = ? WHERE id = ?",
                (path_hex, message_id),
            )
            updated += 1
            if idx % 10000 == 0:
                logger.info("Updated %d/%d messages...", idx, len(updates))

    await conn.commit()
    logger.info("Path backfill complete: %d messages updated", updated)
@@ -0,0 +1,66 @@
import json
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Convert path TEXT column to paths TEXT column storing JSON array.

    The new format stores multiple paths as a JSON array of objects:
    [{"path": "1A2B", "received_at": 1234567890}, ...]

    This enables tracking multiple delivery paths for the same message
    (e.g., when a message is received via different repeater routes).
    """

    # First, add the new paths column
    try:
        await conn.execute("ALTER TABLE messages ADD COLUMN paths TEXT")
        logger.debug("Added paths column to messages table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("messages.paths already exists, skipping column add")
        else:
            raise

    # Migrate existing path data to paths array format
    cursor = await conn.execute(
        "SELECT id, path, received_at FROM messages WHERE path IS NOT NULL AND paths IS NULL"
    )
    rows = list(await cursor.fetchall())

    if rows:
        logger.info("Converting %d messages from path to paths array format...", len(rows))
        for row in rows:
            message_id = row[0]
            old_path = row[1]
            received_at = row[2]

            # Convert single path to array format
            paths_json = json.dumps([{"path": old_path, "received_at": received_at}])
            await conn.execute(
                "UPDATE messages SET paths = ? WHERE id = ?",
                (paths_json, message_id),
            )

        logger.info("Converted %d messages to paths array format", len(rows))

    # Try to drop the old path column (SQLite 3.35.0+ only)
    try:
        await conn.execute("ALTER TABLE messages DROP COLUMN path")
        logger.debug("Dropped path column from messages table")
    except aiosqlite.OperationalError as e:
        error_msg = str(e).lower()
        if "no such column" in error_msg:
            logger.debug("messages.path already dropped, skipping")
        elif "syntax error" in error_msg or "drop column" in error_msg:
            # SQLite version doesn't support DROP COLUMN - harmless, column stays
            logger.debug("SQLite doesn't support DROP COLUMN, path column will remain")
        else:
            raise

    await conn.commit()
@@ -0,0 +1,41 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Create app_settings table for persistent application preferences.

    This table stores:
    - max_radio_contacts: Configured radio contact capacity baseline for maintenance thresholds
    - favorites: JSON array of favorite conversations [{type, id}, ...]
    - auto_decrypt_dm_on_advert: Whether to attempt historical DM decryption on new contact
    - sidebar_sort_order: 'recent' or 'alpha' for sidebar sorting
    - last_message_times: JSON object mapping conversation keys to timestamps
    - preferences_migrated: Flag to track if localStorage has been migrated

    The table uses a single-row pattern (id=1) for simplicity.
    """
    await conn.execute(
        """
        CREATE TABLE IF NOT EXISTS app_settings (
            id INTEGER PRIMARY KEY CHECK (id = 1),
            max_radio_contacts INTEGER DEFAULT 200,
            favorites TEXT DEFAULT '[]',
            auto_decrypt_dm_on_advert INTEGER DEFAULT 1,
            sidebar_sort_order TEXT DEFAULT 'recent',
            last_message_times TEXT DEFAULT '{}',
            preferences_migrated INTEGER DEFAULT 0
        )
        """
    )

    # Initialize with default row (use only the id column so this works
    # regardless of which columns exist — defaults fill the rest).
    await conn.execute("INSERT OR IGNORE INTO app_settings (id) VALUES (1)")

    await conn.commit()
    logger.debug("Created app_settings table with default values")
@@ -0,0 +1,23 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add advert_interval column to app_settings table.

    This enables a configurable periodic advertisement interval (default 0 = disabled).
    """
    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN advert_interval INTEGER DEFAULT 0")
        logger.debug("Added advert_interval column to app_settings")
    except aiosqlite.OperationalError as e:
        if "duplicate column" in str(e).lower():
            logger.debug("advert_interval column already exists, skipping")
        else:
            raise

    await conn.commit()
@@ -0,0 +1,24 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add last_advert_time column to app_settings table.

    This tracks when the last advertisement was sent, ensuring we never
    advertise faster than the configured advert_interval.
    """
    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN last_advert_time INTEGER DEFAULT 0")
        logger.debug("Added last_advert_time column to app_settings")
    except aiosqlite.OperationalError as e:
        if "duplicate column" in str(e).lower():
            logger.debug("last_advert_time column already exists, skipping")
        else:
            raise

    await conn.commit()
@@ -0,0 +1,33 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add bot_enabled and bot_code columns to app_settings table.

    This enables user-defined Python code to be executed when messages are received,
    allowing for custom bot responses.
    """
    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN bot_enabled INTEGER DEFAULT 0")
        logger.debug("Added bot_enabled column to app_settings")
    except aiosqlite.OperationalError as e:
        if "duplicate column" in str(e).lower():
            logger.debug("bot_enabled column already exists, skipping")
        else:
            raise

    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN bot_code TEXT DEFAULT ''")
        logger.debug("Added bot_code column to app_settings")
    except aiosqlite.OperationalError as e:
        if "duplicate column" in str(e).lower():
            logger.debug("bot_code column already exists, skipping")
        else:
            raise

    await conn.commit()
@@ -0,0 +1,76 @@
|
||||
import json
|
||||
import logging
|
||||
import uuid
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Convert single bot_enabled/bot_code to multi-bot format.
|
||||
|
||||
Adds a 'bots' TEXT column storing a JSON array of bot configs:
|
||||
[{"id": "uuid", "name": "Bot 1", "enabled": true, "code": "..."}]
|
||||
|
||||
If existing bot_code is non-empty OR bot_enabled is true, migrates
|
||||
to a single bot named "Bot 1". Otherwise, creates empty array.
|
||||
|
||||
Attempts to drop the old bot_enabled and bot_code columns.
|
||||
"""
|
||||
|
||||
# Add new bots column
|
||||
try:
|
||||
await conn.execute("ALTER TABLE app_settings ADD COLUMN bots TEXT DEFAULT '[]'")
|
||||
logger.debug("Added bots column to app_settings")
|
||||
except aiosqlite.OperationalError as e:
|
||||
if "duplicate column" in str(e).lower():
|
||||
logger.debug("bots column already exists, skipping")
|
||||
else:
|
||||
raise
|
||||
|
||||
# Migrate existing bot data
|
||||
cursor = await conn.execute("SELECT bot_enabled, bot_code FROM app_settings WHERE id = 1")
|
||||
row = await cursor.fetchone()
|
||||
|
||||
if row:
|
||||
bot_enabled = bool(row[0]) if row[0] is not None else False
|
||||
bot_code = row[1] or ""
|
||||
|
||||
# If there's existing bot data, migrate it
|
||||
if bot_code.strip() or bot_enabled:
|
||||
bots = [
|
||||
{
|
||||
"id": str(uuid.uuid4()),
|
||||
"name": "Bot 1",
|
||||
"enabled": bot_enabled,
|
||||
"code": bot_code,
|
||||
}
|
||||
]
|
||||
bots_json = json.dumps(bots)
|
||||
logger.info("Migrating existing bot to multi-bot format: enabled=%s", bot_enabled)
|
||||
else:
|
||||
bots_json = "[]"
|
||||
|
||||
await conn.execute(
|
||||
"UPDATE app_settings SET bots = ? WHERE id = 1",
|
||||
(bots_json,),
|
||||
)
|
||||
|
||||
# Try to drop old columns (SQLite 3.35.0+ only)
|
||||
for column in ["bot_enabled", "bot_code"]:
|
||||
try:
|
||||
await conn.execute(f"ALTER TABLE app_settings DROP COLUMN {column}")
|
||||
logger.debug("Dropped %s column from app_settings", column)
|
||||
except aiosqlite.OperationalError as e:
|
||||
error_msg = str(e).lower()
|
||||
if "no such column" in error_msg:
|
||||
logger.debug("app_settings.%s already dropped, skipping", column)
|
||||
elif "syntax error" in error_msg or "drop column" in error_msg:
|
||||
# SQLite version doesn't support DROP COLUMN - harmless, column stays
|
||||
logger.debug("SQLite doesn't support DROP COLUMN, %s column will remain", column)
|
||||
else:
|
||||
raise
|
||||
|
||||
await conn.commit()
|
||||
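A sketch of reading the migrated bots JSON back out of app_settings; the Bot dataclass and helper name are illustrative only, not types defined by this codebase.

# Illustrative reader for the migrated bots column; the Bot dataclass is an
# assumption for the example, not something the project defines.
import json
from dataclasses import dataclass

import aiosqlite


@dataclass
class Bot:
    id: str
    name: str
    enabled: bool
    code: str


async def load_bots(conn: aiosqlite.Connection) -> list[Bot]:
    cursor = await conn.execute("SELECT bots FROM app_settings WHERE id = 1")
    row = await cursor.fetchone()
    raw = row[0] if row and row[0] else "[]"
    try:
        entries = json.loads(raw)
    except (json.JSONDecodeError, TypeError):
        entries = []
    return [
        Bot(
            id=entry.get("id", ""),
            name=entry.get("name", ""),
            enabled=bool(entry.get("enabled", False)),
            code=entry.get("code", ""),
        )
        for entry in entries
    ]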
@@ -0,0 +1,152 @@
|
||||
import json
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Lowercase all contact public keys and related data for case-insensitive matching.
|
||||
|
||||
Updates:
|
||||
- contacts.public_key (PRIMARY KEY) via temp table swap
|
||||
- messages.conversation_key for PRIV messages
|
||||
- app_settings.favorites (contact IDs)
|
||||
- app_settings.last_message_times (contact- prefixed keys)
|
||||
|
||||
Handles case collisions by keeping the most-recently-seen contact.
|
||||
"""
|
||||
|
||||
# 1. Lowercase message conversation keys for private messages
|
||||
try:
|
||||
await conn.execute(
|
||||
"UPDATE messages SET conversation_key = lower(conversation_key) WHERE type = 'PRIV'"
|
||||
)
|
||||
logger.debug("Lowercased PRIV message conversation_keys")
|
||||
except aiosqlite.OperationalError as e:
|
||||
if "no such table" in str(e).lower():
|
||||
logger.debug("messages table does not exist yet, skipping conversation_key lowercase")
|
||||
else:
|
||||
raise
|
||||
|
||||
# 2. Check if contacts table exists before proceeding
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
logger.debug("contacts table does not exist yet, skipping key lowercase")
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
# 3. Handle contacts table - check for case collisions first
|
||||
cursor = await conn.execute(
|
||||
"SELECT lower(public_key) as lk, COUNT(*) as cnt "
|
||||
"FROM contacts GROUP BY lower(public_key) HAVING COUNT(*) > 1"
|
||||
)
|
||||
collisions = list(await cursor.fetchall())
|
||||
|
||||
if collisions:
|
||||
logger.warning(
|
||||
"Found %d case-colliding contact groups, keeping most-recently-seen",
|
||||
len(collisions),
|
||||
)
|
||||
for row in collisions:
|
||||
lower_key = row[0]
|
||||
# Delete all but the most recently seen
|
||||
await conn.execute(
|
||||
"""DELETE FROM contacts WHERE public_key IN (
|
||||
SELECT public_key FROM contacts
|
||||
WHERE lower(public_key) = ?
|
||||
ORDER BY COALESCE(last_seen, 0) DESC
|
||||
LIMIT -1 OFFSET 1
|
||||
)""",
|
||||
(lower_key,),
|
||||
)
|
||||
|
||||
# 3. Rebuild contacts with lowercased keys
|
||||
# Get the actual column names from the table (handles different schema versions)
|
||||
cursor = await conn.execute("PRAGMA table_info(contacts)")
|
||||
columns_info = await cursor.fetchall()
|
||||
all_columns = [col[1] for col in columns_info] # col[1] is column name
|
||||
|
||||
# Build column lists, lowering public_key
|
||||
select_cols = ", ".join(f"lower({c})" if c == "public_key" else c for c in all_columns)
|
||||
col_defs = []
|
||||
for col in columns_info:
|
||||
name, col_type, _notnull, default, pk = col[1], col[2], col[3], col[4], col[5]
|
||||
parts = [name, col_type or "TEXT"]
|
||||
if pk:
|
||||
parts.append("PRIMARY KEY")
|
||||
if default is not None:
|
||||
parts.append(f"DEFAULT {default}")
|
||||
col_defs.append(" ".join(parts))
|
||||
|
||||
create_sql = f"CREATE TABLE contacts_new ({', '.join(col_defs)})"
|
||||
await conn.execute(create_sql)
|
||||
await conn.execute(f"INSERT INTO contacts_new SELECT {select_cols} FROM contacts")
|
||||
await conn.execute("DROP TABLE contacts")
|
||||
await conn.execute("ALTER TABLE contacts_new RENAME TO contacts")
|
||||
|
||||
# Recreate the on_radio index (if column exists)
|
||||
if "on_radio" in all_columns:
|
||||
await conn.execute("CREATE INDEX IF NOT EXISTS idx_contacts_on_radio ON contacts(on_radio)")
|
||||
|
||||
# 4. Lowercase contact IDs in favorites JSON (if app_settings exists)
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='app_settings'"
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
await conn.commit()
|
||||
logger.info("Lowercased all contact public keys (no app_settings table)")
|
||||
return
|
||||
|
||||
cursor = await conn.execute("SELECT favorites FROM app_settings WHERE id = 1")
|
||||
row = await cursor.fetchone()
|
||||
if row and row[0]:
|
||||
try:
|
||||
favorites = json.loads(row[0])
|
||||
updated = False
|
||||
for fav in favorites:
|
||||
if fav.get("type") == "contact" and fav.get("id"):
|
||||
new_id = fav["id"].lower()
|
||||
if new_id != fav["id"]:
|
||||
fav["id"] = new_id
|
||||
updated = True
|
||||
if updated:
|
||||
await conn.execute(
|
||||
"UPDATE app_settings SET favorites = ? WHERE id = 1",
|
||||
(json.dumps(favorites),),
|
||||
)
|
||||
logger.debug("Lowercased contact IDs in favorites")
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
pass
|
||||
|
||||
# 5. Lowercase contact keys in last_message_times JSON
|
||||
cursor = await conn.execute("SELECT last_message_times FROM app_settings WHERE id = 1")
|
||||
row = await cursor.fetchone()
|
||||
if row and row[0]:
|
||||
try:
|
||||
times = json.loads(row[0])
|
||||
new_times = {}
|
||||
updated = False
|
||||
for key, val in times.items():
|
||||
if key.startswith("contact-"):
|
||||
new_key = "contact-" + key[8:].lower()
|
||||
if new_key != key:
|
||||
updated = True
|
||||
new_times[new_key] = val
|
||||
else:
|
||||
new_times[key] = val
|
||||
if updated:
|
||||
await conn.execute(
|
||||
"UPDATE app_settings SET last_message_times = ? WHERE id = 1",
|
||||
(json.dumps(new_times),),
|
||||
)
|
||||
logger.debug("Lowercased contact keys in last_message_times")
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
pass
|
||||
|
||||
await conn.commit()
|
||||
logger.info("Lowercased all contact public keys")
|
||||
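The LIMIT -1 OFFSET 1 pattern above keeps only the newest row per case-colliding key; a self-contained sqlite3 check of that behaviour, with the contacts table trimmed to the two relevant columns:

# Standalone check of the LIMIT -1 OFFSET 1 collision handling, using the
# stdlib sqlite3 module and a table trimmed to the two relevant columns.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE contacts (public_key TEXT PRIMARY KEY, last_seen INTEGER)")
conn.executemany(
    "INSERT INTO contacts VALUES (?, ?)",
    [("ABCD", 100), ("abcd", 200), ("AbCd", 50)],
)
conn.execute(
    """DELETE FROM contacts WHERE public_key IN (
           SELECT public_key FROM contacts
           WHERE lower(public_key) = ?
           ORDER BY COALESCE(last_seen, 0) DESC
           LIMIT -1 OFFSET 1
       )""",
    ("abcd",),
)
print(conn.execute("SELECT public_key, last_seen FROM contacts").fetchall())
# -> [('abcd', 200)]  (the most recently seen spelling survives)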
@@ -0,0 +1,44 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Fix NULL sender_timestamp values and add null-safe dedup index.
|
||||
|
||||
1. Set sender_timestamp = received_at for any messages with NULL sender_timestamp
|
||||
2. Create a null-safe unique index as belt-and-suspenders protection
|
||||
"""
|
||||
# Check if messages table exists
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
logger.debug("messages table does not exist yet, skipping NULL sender_timestamp fix")
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
# Backfill NULL sender_timestamps with received_at
|
||||
cursor = await conn.execute(
|
||||
"UPDATE messages SET sender_timestamp = received_at WHERE sender_timestamp IS NULL"
|
||||
)
|
||||
if cursor.rowcount > 0:
|
||||
logger.info("Backfilled %d messages with NULL sender_timestamp", cursor.rowcount)
|
||||
|
||||
# Try to create null-safe dedup index (may fail if existing duplicates exist)
|
||||
try:
|
||||
await conn.execute(
|
||||
"""CREATE UNIQUE INDEX IF NOT EXISTS idx_messages_dedup_null_safe
|
||||
ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))"""
|
||||
)
|
||||
logger.debug("Created null-safe dedup index")
|
||||
except aiosqlite.IntegrityError:
|
||||
logger.warning(
|
||||
"Could not create null-safe dedup index due to existing duplicates - "
|
||||
"the application-level dedup will handle these"
|
||||
)
|
||||
|
||||
await conn.commit()
|
||||
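Because the new index is UNIQUE, INSERT OR IGNORE drops exact duplicates at write time; a minimal demonstration with the table trimmed to the indexed columns plus received_at:

# Minimal illustration that INSERT OR IGNORE defers to the unique index;
# the schema here is trimmed to the indexed columns plus received_at.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    """CREATE TABLE messages (
           id INTEGER PRIMARY KEY AUTOINCREMENT,
           type TEXT NOT NULL,
           conversation_key TEXT NOT NULL,
           text TEXT NOT NULL,
           sender_timestamp INTEGER,
           received_at INTEGER NOT NULL
       )"""
)
conn.execute(
    """CREATE UNIQUE INDEX idx_messages_dedup_null_safe
       ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))"""
)
row = ("PRIV", "abcd", "hello", 1700000000, 1700000001)
insert_sql = (
    "INSERT OR IGNORE INTO messages "
    "(type, conversation_key, text, sender_timestamp, received_at) VALUES (?, ?, ?, ?, ?)"
)
conn.execute(insert_sql, row)
conn.execute(insert_sql, row)  # exact duplicate, silently ignored
print(conn.execute("SELECT COUNT(*) FROM messages").fetchone()[0])  # -> 1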
@@ -0,0 +1,26 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add experimental_channel_double_send column to app_settings table.

    When enabled, channel sends perform an immediate byte-perfect duplicate send
    using the same timestamp bytes.
    """
    try:
        await conn.execute(
            "ALTER TABLE app_settings ADD COLUMN experimental_channel_double_send INTEGER DEFAULT 0"
        )
        logger.debug("Added experimental_channel_double_send column to app_settings")
    except aiosqlite.OperationalError as e:
        if "duplicate column" in str(e).lower():
            logger.debug("experimental_channel_double_send column already exists, skipping")
        else:
            raise

    await conn.commit()
@@ -0,0 +1,31 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Drop experimental_channel_double_send column from app_settings.
|
||||
|
||||
This feature is replaced by a user-triggered resend button.
|
||||
SQLite 3.35.0+ supports ALTER TABLE DROP COLUMN. For older versions,
|
||||
we silently skip (the column will remain but is unused).
|
||||
"""
|
||||
try:
|
||||
await conn.execute("ALTER TABLE app_settings DROP COLUMN experimental_channel_double_send")
|
||||
logger.debug("Dropped experimental_channel_double_send from app_settings")
|
||||
except aiosqlite.OperationalError as e:
|
||||
error_msg = str(e).lower()
|
||||
if "no such column" in error_msg:
|
||||
logger.debug("app_settings.experimental_channel_double_send already dropped, skipping")
|
||||
elif "syntax error" in error_msg or "drop column" in error_msg:
|
||||
logger.debug(
|
||||
"SQLite doesn't support DROP COLUMN, "
|
||||
"experimental_channel_double_send column will remain"
|
||||
)
|
||||
else:
|
||||
raise
|
||||
|
||||
await conn.commit()
|
||||
@@ -0,0 +1,64 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Drop the UNIQUE constraint on raw_packets.data via table rebuild.
|
||||
|
||||
This constraint creates a large autoindex (~30 MB on a 340K-row database) that
|
||||
stores a complete copy of every raw packet BLOB in a B-tree. Deduplication is
|
||||
already handled by the unique index on payload_hash, making the data UNIQUE
|
||||
constraint pure storage overhead.
|
||||
|
||||
Requires table recreation since SQLite doesn't support DROP CONSTRAINT.
|
||||
"""
|
||||
# Check if the autoindex exists (indicates UNIQUE constraint on data)
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='index' "
|
||||
"AND name='sqlite_autoindex_raw_packets_1'"
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
logger.debug("raw_packets.data UNIQUE constraint already absent, skipping rebuild")
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
logger.info("Rebuilding raw_packets table to remove UNIQUE(data) constraint...")
|
||||
|
||||
# Get current columns from the existing table
|
||||
cursor = await conn.execute("PRAGMA table_info(raw_packets)")
|
||||
old_cols = {col[1] for col in await cursor.fetchall()}
|
||||
|
||||
# Target schema without UNIQUE on data
|
||||
await conn.execute("""
|
||||
CREATE TABLE raw_packets_new (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
timestamp INTEGER NOT NULL,
|
||||
data BLOB NOT NULL,
|
||||
message_id INTEGER,
|
||||
payload_hash TEXT,
|
||||
FOREIGN KEY (message_id) REFERENCES messages(id)
|
||||
)
|
||||
""")
|
||||
|
||||
# Copy only columns that exist in both old and new tables
|
||||
new_cols = {"id", "timestamp", "data", "message_id", "payload_hash"}
|
||||
copy_cols = ", ".join(sorted(c for c in new_cols if c in old_cols))
|
||||
|
||||
await conn.execute(
|
||||
f"INSERT INTO raw_packets_new ({copy_cols}) SELECT {copy_cols} FROM raw_packets"
|
||||
)
|
||||
await conn.execute("DROP TABLE raw_packets")
|
||||
await conn.execute("ALTER TABLE raw_packets_new RENAME TO raw_packets")
|
||||
|
||||
# Recreate indexes
|
||||
await conn.execute(
|
||||
"CREATE UNIQUE INDEX idx_raw_packets_payload_hash ON raw_packets(payload_hash)"
|
||||
)
|
||||
await conn.execute("CREATE INDEX idx_raw_packets_message_id ON raw_packets(message_id)")
|
||||
|
||||
await conn.commit()
|
||||
logger.info("raw_packets table rebuilt without UNIQUE(data) constraint")
|
||||
@@ -0,0 +1,83 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Drop the UNIQUE(type, conversation_key, text, sender_timestamp) constraint on messages.
|
||||
|
||||
This constraint creates a large autoindex (~13 MB on a 112K-row database) that
|
||||
stores the full message text in a B-tree. The idx_messages_dedup_null_safe unique
|
||||
index already provides identical dedup protection — no rows have NULL
|
||||
sender_timestamp since migration 15 backfilled them all.
|
||||
|
||||
INSERT OR IGNORE still works correctly because it checks all unique constraints,
|
||||
including unique indexes like idx_messages_dedup_null_safe.
|
||||
|
||||
Requires table recreation since SQLite doesn't support DROP CONSTRAINT.
|
||||
"""
|
||||
# Check if the autoindex exists (indicates UNIQUE constraint)
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='index' AND name='sqlite_autoindex_messages_1'"
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
logger.debug("messages UNIQUE constraint already absent, skipping rebuild")
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
logger.info("Rebuilding messages table to remove UNIQUE constraint...")
|
||||
|
||||
# Get current columns from the existing table
|
||||
cursor = await conn.execute("PRAGMA table_info(messages)")
|
||||
old_cols = {col[1] for col in await cursor.fetchall()}
|
||||
|
||||
# Target schema without the UNIQUE table constraint
|
||||
await conn.execute("""
|
||||
CREATE TABLE messages_new (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
type TEXT NOT NULL,
|
||||
conversation_key TEXT NOT NULL,
|
||||
text TEXT NOT NULL,
|
||||
sender_timestamp INTEGER,
|
||||
received_at INTEGER NOT NULL,
|
||||
txt_type INTEGER DEFAULT 0,
|
||||
signature TEXT,
|
||||
outgoing INTEGER DEFAULT 0,
|
||||
acked INTEGER DEFAULT 0,
|
||||
paths TEXT
|
||||
)
|
||||
""")
|
||||
|
||||
# Copy only columns that exist in both old and new tables
|
||||
new_cols = {
|
||||
"id",
|
||||
"type",
|
||||
"conversation_key",
|
||||
"text",
|
||||
"sender_timestamp",
|
||||
"received_at",
|
||||
"txt_type",
|
||||
"signature",
|
||||
"outgoing",
|
||||
"acked",
|
||||
"paths",
|
||||
}
|
||||
copy_cols = ", ".join(sorted(c for c in new_cols if c in old_cols))
|
||||
|
||||
await conn.execute(f"INSERT INTO messages_new ({copy_cols}) SELECT {copy_cols} FROM messages")
|
||||
await conn.execute("DROP TABLE messages")
|
||||
await conn.execute("ALTER TABLE messages_new RENAME TO messages")
|
||||
|
||||
# Recreate indexes
|
||||
await conn.execute("CREATE INDEX idx_messages_conversation ON messages(type, conversation_key)")
|
||||
await conn.execute("CREATE INDEX idx_messages_received ON messages(received_at)")
|
||||
await conn.execute(
|
||||
"""CREATE UNIQUE INDEX idx_messages_dedup_null_safe
|
||||
ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))"""
|
||||
)
|
||||
|
||||
await conn.commit()
|
||||
logger.info("messages table rebuilt without UNIQUE constraint")
|
||||
@@ -0,0 +1,45 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Enable WAL journal mode and incremental auto-vacuum.
|
||||
|
||||
WAL (Write-Ahead Logging):
|
||||
- Faster writes: appends to a WAL file instead of rewriting the main DB
|
||||
- Concurrent reads during writes (readers don't block writers)
|
||||
- No journal file create/delete churn on every commit
|
||||
|
||||
Incremental auto-vacuum:
|
||||
- Pages freed by DELETE become reclaimable without a full VACUUM
|
||||
- Call PRAGMA incremental_vacuum to reclaim on demand
|
||||
- Less overhead than FULL auto-vacuum (which reorganizes on every commit)
|
||||
|
||||
auto_vacuum mode change requires a VACUUM to restructure the file.
|
||||
The VACUUM is performed before switching to WAL so it runs under the
|
||||
current journal mode; WAL is then set as the final step.
|
||||
"""
|
||||
# Check current auto_vacuum mode
|
||||
cursor = await conn.execute("PRAGMA auto_vacuum")
|
||||
row = await cursor.fetchone()
|
||||
current_auto_vacuum = row[0] if row else 0
|
||||
|
||||
if current_auto_vacuum != 2: # 2 = INCREMENTAL
|
||||
logger.info("Switching auto_vacuum to INCREMENTAL (requires VACUUM)...")
|
||||
await conn.execute("PRAGMA auto_vacuum = INCREMENTAL")
|
||||
await conn.execute("VACUUM")
|
||||
logger.info("VACUUM complete, auto_vacuum set to INCREMENTAL")
|
||||
else:
|
||||
logger.debug("auto_vacuum already INCREMENTAL, skipping VACUUM")
|
||||
|
||||
# Enable WAL mode (idempotent — returns current mode)
|
||||
cursor = await conn.execute("PRAGMA journal_mode = WAL")
|
||||
row = await cursor.fetchone()
|
||||
mode = row[0] if row else "unknown"
|
||||
logger.info("Journal mode set to %s", mode)
|
||||
|
||||
await conn.commit()
|
||||
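Note that INCREMENTAL auto-vacuum only marks freed pages as reclaimable; something still has to run PRAGMA incremental_vacuum. A sketch of a periodic reclaim task follows; the interval and the 1000-page budget per pass are arbitrary choices for the example, not project settings.

# Sketch of a periodic free-page reclaim loop under INCREMENTAL auto-vacuum.
import asyncio

import aiosqlite


async def vacuum_loop(conn: aiosqlite.Connection, interval_s: int = 3600) -> None:
    while True:
        await asyncio.sleep(interval_s)
        # Reclaim up to 1000 free-list pages per pass; a no-op when none are free.
        await conn.execute("PRAGMA incremental_vacuum(1000)")
        await conn.commit()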
@@ -0,0 +1,29 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Enforce minimum 1-hour advert interval.

    Any advert_interval between 1 and 3599 is clamped up to 3600 (1 hour).
    Zero (disabled) is left unchanged.
    """
    # Guard: app_settings table may not exist if running against a very old schema
    # (it's created in migration 9). The UPDATE is harmless if the table exists
    # but has no rows; it only errors if the table itself is missing.
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='app_settings'"
    )
    if await cursor.fetchone() is None:
        logger.debug("app_settings table does not exist yet, skipping advert_interval clamp")
        return

    await conn.execute(
        "UPDATE app_settings SET advert_interval = 3600 WHERE advert_interval > 0 AND advert_interval < 3600"
    )
    await conn.commit()
    logger.debug("Clamped advert_interval to minimum 3600 seconds")
@@ -0,0 +1,33 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Create table for recent unique advert paths per repeater.
|
||||
|
||||
This keeps path diversity for repeater advertisements without changing the
|
||||
existing payload-hash raw packet dedup policy.
|
||||
"""
|
||||
await conn.execute("""
|
||||
CREATE TABLE IF NOT EXISTS repeater_advert_paths (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
repeater_key TEXT NOT NULL,
|
||||
path_hex TEXT NOT NULL,
|
||||
path_len INTEGER NOT NULL,
|
||||
first_seen INTEGER NOT NULL,
|
||||
last_seen INTEGER NOT NULL,
|
||||
heard_count INTEGER NOT NULL DEFAULT 1,
|
||||
UNIQUE(repeater_key, path_hex),
|
||||
FOREIGN KEY (repeater_key) REFERENCES contacts(public_key)
|
||||
)
|
||||
""")
|
||||
await conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_repeater_advert_paths_recent "
|
||||
"ON repeater_advert_paths(repeater_key, last_seen DESC)"
|
||||
)
|
||||
await conn.commit()
|
||||
logger.debug("Ensured repeater_advert_paths table and indexes exist")
|
||||
@@ -0,0 +1,60 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Add first_seen column to contacts table.
|
||||
|
||||
Backfill strategy:
|
||||
1. Set first_seen = last_seen for all contacts (baseline).
|
||||
2. For contacts with PRIV messages, set first_seen = MIN(messages.received_at)
|
||||
if that timestamp is earlier.
|
||||
"""
|
||||
# Guard: skip if contacts table doesn't exist (e.g. partial test schemas)
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
return
|
||||
|
||||
try:
|
||||
await conn.execute("ALTER TABLE contacts ADD COLUMN first_seen INTEGER")
|
||||
logger.debug("Added first_seen to contacts table")
|
||||
except aiosqlite.OperationalError as e:
|
||||
if "duplicate column name" in str(e).lower():
|
||||
logger.debug("contacts.first_seen already exists, skipping")
|
||||
else:
|
||||
raise
|
||||
|
||||
# Baseline: set first_seen = last_seen for all contacts
|
||||
# Check if last_seen column exists (should in production, may not in minimal test schemas)
|
||||
cursor = await conn.execute("PRAGMA table_info(contacts)")
|
||||
columns = {row[1] for row in await cursor.fetchall()}
|
||||
if "last_seen" in columns:
|
||||
await conn.execute("UPDATE contacts SET first_seen = last_seen WHERE first_seen IS NULL")
|
||||
|
||||
# Refine: for contacts with PRIV messages, use earliest message timestamp if earlier
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
|
||||
)
|
||||
if await cursor.fetchone():
|
||||
await conn.execute(
|
||||
"""
|
||||
UPDATE contacts SET first_seen = (
|
||||
SELECT MIN(m.received_at) FROM messages m
|
||||
WHERE m.type = 'PRIV' AND m.conversation_key = contacts.public_key
|
||||
)
|
||||
WHERE EXISTS (
|
||||
SELECT 1 FROM messages m
|
||||
WHERE m.type = 'PRIV' AND m.conversation_key = contacts.public_key
|
||||
AND m.received_at < COALESCE(contacts.first_seen, 9999999999)
|
||||
)
|
||||
"""
|
||||
)
|
||||
|
||||
await conn.commit()
|
||||
logger.debug("Added and backfilled first_seen column")
|
||||
@@ -0,0 +1,53 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Create contact_name_history table and seed with current contact names.
|
||||
"""
|
||||
await conn.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS contact_name_history (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
public_key TEXT NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
first_seen INTEGER NOT NULL,
|
||||
last_seen INTEGER NOT NULL,
|
||||
UNIQUE(public_key, name),
|
||||
FOREIGN KEY (public_key) REFERENCES contacts(public_key)
|
||||
)
|
||||
"""
|
||||
)
|
||||
await conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_contact_name_history_key "
|
||||
"ON contact_name_history(public_key, last_seen DESC)"
|
||||
)
|
||||
|
||||
# Seed: one row per contact from current data (skip if contacts table doesn't exist
|
||||
# or lacks needed columns)
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
|
||||
)
|
||||
if await cursor.fetchone():
|
||||
cursor = await conn.execute("PRAGMA table_info(contacts)")
|
||||
cols = {row[1] for row in await cursor.fetchall()}
|
||||
if "name" in cols and "public_key" in cols:
|
||||
first_seen_expr = "first_seen" if "first_seen" in cols else "0"
|
||||
last_seen_expr = "last_seen" if "last_seen" in cols else "0"
|
||||
await conn.execute(
|
||||
f"""
|
||||
INSERT OR IGNORE INTO contact_name_history (public_key, name, first_seen, last_seen)
|
||||
SELECT public_key, name,
|
||||
COALESCE({first_seen_expr}, {last_seen_expr}, 0),
|
||||
COALESCE({last_seen_expr}, 0)
|
||||
FROM contacts
|
||||
WHERE name IS NOT NULL AND name != ''
|
||||
"""
|
||||
)
|
||||
|
||||
await conn.commit()
|
||||
logger.debug("Created contact_name_history table and seeded from contacts")
|
||||
@@ -0,0 +1,124 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Add sender_name and sender_key columns to messages table.
|
||||
|
||||
Backfill:
|
||||
- sender_name for CHAN messages: extract from "Name: message" format
|
||||
- sender_key for CHAN messages: match name to contact (skip ambiguous)
|
||||
- sender_key for incoming PRIV messages: set to conversation_key
|
||||
"""
|
||||
# Guard: skip if messages table doesn't exist
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
return
|
||||
|
||||
for column in ["sender_name", "sender_key"]:
|
||||
try:
|
||||
await conn.execute(f"ALTER TABLE messages ADD COLUMN {column} TEXT")
|
||||
logger.debug("Added %s to messages table", column)
|
||||
except aiosqlite.OperationalError as e:
|
||||
if "duplicate column name" in str(e).lower():
|
||||
logger.debug("messages.%s already exists, skipping", column)
|
||||
else:
|
||||
raise
|
||||
|
||||
# Check which columns the messages table has (may be minimal in test environments)
|
||||
cursor = await conn.execute("PRAGMA table_info(messages)")
|
||||
msg_cols = {row[1] for row in await cursor.fetchall()}
|
||||
|
||||
# Only backfill if the required columns exist
|
||||
if "type" in msg_cols and "text" in msg_cols:
|
||||
# Count messages to backfill for progress reporting
|
||||
cursor = await conn.execute(
|
||||
"SELECT COUNT(*) FROM messages WHERE type = 'CHAN' AND sender_name IS NULL"
|
||||
)
|
||||
row = await cursor.fetchone()
|
||||
chan_count = row[0] if row else 0
|
||||
if chan_count > 0:
|
||||
logger.info("Backfilling sender_name for %d channel messages...", chan_count)
|
||||
|
||||
# Backfill sender_name for CHAN messages from "Name: message" format
|
||||
# Only extract if colon position is valid (> 1 and < 51, i.e. name is 1-50 chars)
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
UPDATE messages SET sender_name = SUBSTR(text, 1, INSTR(text, ': ') - 1)
|
||||
WHERE type = 'CHAN' AND sender_name IS NULL
|
||||
AND INSTR(text, ': ') > 1 AND INSTR(text, ': ') < 52
|
||||
"""
|
||||
)
|
||||
if cursor.rowcount > 0:
|
||||
logger.info("Backfilled sender_name for %d channel messages", cursor.rowcount)
|
||||
|
||||
# Backfill sender_key for incoming PRIV messages
|
||||
if "outgoing" in msg_cols and "conversation_key" in msg_cols:
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
UPDATE messages SET sender_key = conversation_key
|
||||
WHERE type = 'PRIV' AND outgoing = 0 AND sender_key IS NULL
|
||||
"""
|
||||
)
|
||||
if cursor.rowcount > 0:
|
||||
logger.info("Backfilled sender_key for %d DM messages", cursor.rowcount)
|
||||
|
||||
# Backfill sender_key for CHAN messages: match sender_name to contacts
|
||||
# Build name->key map, skip ambiguous names (multiple contacts with same name)
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
|
||||
)
|
||||
if await cursor.fetchone():
|
||||
cursor = await conn.execute(
|
||||
"SELECT public_key, name FROM contacts WHERE name IS NOT NULL AND name != ''"
|
||||
)
|
||||
rows = await cursor.fetchall()
|
||||
|
||||
name_to_keys: dict[str, list[str]] = {}
|
||||
for row in rows:
|
||||
name = row["name"]
|
||||
key = row["public_key"]
|
||||
if name not in name_to_keys:
|
||||
name_to_keys[name] = []
|
||||
name_to_keys[name].append(key)
|
||||
|
||||
# Only use unambiguous names (single contact per name)
|
||||
unambiguous = {n: ks[0] for n, ks in name_to_keys.items() if len(ks) == 1}
|
||||
if unambiguous:
|
||||
logger.info(
|
||||
"Matching sender_key for %d unique contact names...",
|
||||
len(unambiguous),
|
||||
)
|
||||
# Use a temp table for a single bulk UPDATE instead of N individual queries
|
||||
await conn.execute(
|
||||
"CREATE TEMP TABLE _name_key_map (name TEXT PRIMARY KEY, public_key TEXT NOT NULL)"
|
||||
)
|
||||
await conn.executemany(
|
||||
"INSERT INTO _name_key_map (name, public_key) VALUES (?, ?)",
|
||||
list(unambiguous.items()),
|
||||
)
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
UPDATE messages SET sender_key = (
|
||||
SELECT public_key FROM _name_key_map WHERE _name_key_map.name = messages.sender_name
|
||||
)
|
||||
WHERE type = 'CHAN' AND sender_key IS NULL
|
||||
AND sender_name IN (SELECT name FROM _name_key_map)
|
||||
"""
|
||||
)
|
||||
updated = cursor.rowcount
|
||||
await conn.execute("DROP TABLE _name_key_map")
|
||||
if updated > 0:
|
||||
logger.info("Backfilled sender_key for %d channel messages", updated)
|
||||
|
||||
# Create index on sender_key for per-contact channel message counts
|
||||
await conn.execute("CREATE INDEX IF NOT EXISTS idx_messages_sender_key ON messages(sender_key)")
|
||||
|
||||
await conn.commit()
|
||||
logger.debug("Added sender_name and sender_key columns with backfill")
|
||||
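The SUBSTR/INSTR backfill above is equivalent to this Python split, with the same 1-50 character bound on the name part; the helper name is illustrative only.

# Python equivalent of the SQL backfill: split "Name: message" only when the
# name part is 1-50 characters long, otherwise leave sender_name unset.
def extract_sender_name(text: str) -> str | None:
    idx = text.find(": ")
    if 1 <= idx <= 50:
        return text[:idx]
    return None


assert extract_sender_name("Alice: hi there") == "Alice"
assert extract_sender_name("no separator here") is None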
@@ -0,0 +1,81 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Rename repeater_advert_paths to contact_advert_paths with column
|
||||
repeater_key -> public_key.
|
||||
|
||||
Uses table rebuild since ALTER TABLE RENAME COLUMN may not be available
|
||||
in older SQLite versions.
|
||||
"""
|
||||
# Check if old table exists
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='repeater_advert_paths'"
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
# Already renamed or doesn't exist — ensure new table exists
|
||||
await conn.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS contact_advert_paths (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
public_key TEXT NOT NULL,
|
||||
path_hex TEXT NOT NULL,
|
||||
path_len INTEGER NOT NULL,
|
||||
first_seen INTEGER NOT NULL,
|
||||
last_seen INTEGER NOT NULL,
|
||||
heard_count INTEGER NOT NULL DEFAULT 1,
|
||||
UNIQUE(public_key, path_hex, path_len),
|
||||
FOREIGN KEY (public_key) REFERENCES contacts(public_key)
|
||||
)
|
||||
"""
|
||||
)
|
||||
await conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_contact_advert_paths_recent "
|
||||
"ON contact_advert_paths(public_key, last_seen DESC)"
|
||||
)
|
||||
await conn.commit()
|
||||
logger.debug("contact_advert_paths already exists or old table missing, skipping rename")
|
||||
return
|
||||
|
||||
# Create new table (IF NOT EXISTS in case SCHEMA already created it)
|
||||
await conn.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS contact_advert_paths (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
public_key TEXT NOT NULL,
|
||||
path_hex TEXT NOT NULL,
|
||||
path_len INTEGER NOT NULL,
|
||||
first_seen INTEGER NOT NULL,
|
||||
last_seen INTEGER NOT NULL,
|
||||
heard_count INTEGER NOT NULL DEFAULT 1,
|
||||
UNIQUE(public_key, path_hex, path_len),
|
||||
FOREIGN KEY (public_key) REFERENCES contacts(public_key)
|
||||
)
|
||||
"""
|
||||
)
|
||||
|
||||
# Copy data (INSERT OR IGNORE in case of duplicates)
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT OR IGNORE INTO contact_advert_paths (public_key, path_hex, path_len, first_seen, last_seen, heard_count)
|
||||
SELECT repeater_key, path_hex, path_len, first_seen, last_seen, heard_count
|
||||
FROM repeater_advert_paths
|
||||
"""
|
||||
)
|
||||
|
||||
# Drop old table
|
||||
await conn.execute("DROP TABLE repeater_advert_paths")
|
||||
|
||||
# Create index
|
||||
await conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_contact_advert_paths_recent "
|
||||
"ON contact_advert_paths(public_key, last_seen DESC)"
|
||||
)
|
||||
|
||||
await conn.commit()
|
||||
logger.info("Renamed repeater_advert_paths to contact_advert_paths")
|
||||
@@ -0,0 +1,36 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Backfill contacts.first_seen from contact_advert_paths where advert path
|
||||
first_seen is earlier than the contact's current first_seen.
|
||||
"""
|
||||
# Guard: skip if either table doesn't exist
|
||||
for table in ("contacts", "contact_advert_paths"):
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table,)
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
return
|
||||
|
||||
await conn.execute(
|
||||
"""
|
||||
UPDATE contacts SET first_seen = (
|
||||
SELECT MIN(cap.first_seen) FROM contact_advert_paths cap
|
||||
WHERE cap.public_key = contacts.public_key
|
||||
)
|
||||
WHERE EXISTS (
|
||||
SELECT 1 FROM contact_advert_paths cap
|
||||
WHERE cap.public_key = contacts.public_key
|
||||
AND cap.first_seen < COALESCE(contacts.first_seen, 9999999999)
|
||||
)
|
||||
"""
|
||||
)
|
||||
|
||||
await conn.commit()
|
||||
logger.debug("Backfilled first_seen from contact_advert_paths")
|
||||
@@ -0,0 +1,107 @@
|
||||
import logging
|
||||
from hashlib import sha256
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""
|
||||
Convert payload_hash from 64-char hex TEXT to 32-byte BLOB.
|
||||
|
||||
Halves storage for both the column data and its UNIQUE index.
|
||||
Uses Python bytes.fromhex() for the conversion since SQLite's unhex()
|
||||
requires 3.41.0+ which may not be available on all deployments.
|
||||
"""
|
||||
# Guard: skip if raw_packets table doesn't exist
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='raw_packets'"
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
logger.debug("raw_packets table does not exist, skipping payload_hash conversion")
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
# Check column types — skip if payload_hash doesn't exist or is already BLOB
|
||||
cursor = await conn.execute("PRAGMA table_info(raw_packets)")
|
||||
cols = {row[1]: row[2] for row in await cursor.fetchall()}
|
||||
if "payload_hash" not in cols:
|
||||
logger.debug("payload_hash column does not exist, skipping conversion")
|
||||
await conn.commit()
|
||||
return
|
||||
if cols["payload_hash"].upper() == "BLOB":
|
||||
logger.debug("payload_hash is already BLOB, skipping conversion")
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
logger.info("Rebuilding raw_packets to convert payload_hash TEXT → BLOB...")
|
||||
|
||||
# Create new table with BLOB type
|
||||
await conn.execute("""
|
||||
CREATE TABLE raw_packets_new (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
timestamp INTEGER NOT NULL,
|
||||
data BLOB NOT NULL,
|
||||
message_id INTEGER,
|
||||
payload_hash BLOB,
|
||||
FOREIGN KEY (message_id) REFERENCES messages(id)
|
||||
)
|
||||
""")
|
||||
|
||||
# Batch-convert rows: read TEXT hashes, convert to bytes, insert into new table
|
||||
batch_size = 5000
|
||||
cursor = await conn.execute(
|
||||
"SELECT id, timestamp, data, message_id, payload_hash FROM raw_packets ORDER BY id"
|
||||
)
|
||||
|
||||
total = 0
|
||||
while True:
|
||||
rows = await cursor.fetchmany(batch_size)
|
||||
if not rows:
|
||||
break
|
||||
|
||||
batch: list[tuple[int, int, bytes, int | None, bytes | None]] = []
|
||||
for row in rows:
|
||||
rid, ts, data, mid, ph = row[0], row[1], row[2], row[3], row[4]
|
||||
if ph is not None and isinstance(ph, str):
|
||||
try:
|
||||
ph = bytes.fromhex(ph)
|
||||
except ValueError:
|
||||
# Not a valid hex string — hash the value to produce a valid BLOB
|
||||
ph = sha256(ph.encode()).digest()
|
||||
batch.append((rid, ts, data, mid, ph))
|
||||
|
||||
await conn.executemany(
|
||||
"INSERT INTO raw_packets_new (id, timestamp, data, message_id, payload_hash) "
|
||||
"VALUES (?, ?, ?, ?, ?)",
|
||||
batch,
|
||||
)
|
||||
total += len(batch)
|
||||
|
||||
if total % 50000 == 0:
|
||||
logger.info("Converted %d rows...", total)
|
||||
|
||||
# Preserve autoincrement sequence
|
||||
cursor = await conn.execute("SELECT seq FROM sqlite_sequence WHERE name = 'raw_packets'")
|
||||
seq_row = await cursor.fetchone()
|
||||
if seq_row is not None:
|
||||
await conn.execute(
|
||||
"INSERT OR REPLACE INTO sqlite_sequence (name, seq) VALUES ('raw_packets_new', ?)",
|
||||
(seq_row[0],),
|
||||
)
|
||||
|
||||
await conn.execute("DROP TABLE raw_packets")
|
||||
await conn.execute("ALTER TABLE raw_packets_new RENAME TO raw_packets")
|
||||
|
||||
# Clean up the sqlite_sequence entry for the old temp name
|
||||
await conn.execute("DELETE FROM sqlite_sequence WHERE name = 'raw_packets_new'")
|
||||
|
||||
# Recreate indexes
|
||||
await conn.execute(
|
||||
"CREATE UNIQUE INDEX idx_raw_packets_payload_hash ON raw_packets(payload_hash)"
|
||||
)
|
||||
await conn.execute("CREATE INDEX idx_raw_packets_message_id ON raw_packets(message_id)")
|
||||
|
||||
await conn.commit()
|
||||
logger.info("Converted %d payload_hash values from TEXT to BLOB", total)
|
||||
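Once payload_hash is a BLOB, the write path should store the raw 32-byte digest rather than hex text; a sketch of that insert side follows. The helper name is illustrative, and hashing the full packet bytes is an assumption about what the hash covers.

# Illustrative insert path once payload_hash is a 32-byte BLOB. The function
# name is an assumption, as is using the full packet data as the hash input.
from hashlib import sha256

import aiosqlite


async def store_raw_packet(conn: aiosqlite.Connection, timestamp: int, data: bytes) -> None:
    payload_hash = sha256(data).digest()  # 32 raw bytes, half the size of hex text
    await conn.execute(
        "INSERT OR IGNORE INTO raw_packets (timestamp, data, payload_hash) VALUES (?, ?, ?)",
        (timestamp, data, payload_hash),
    )
    await conn.commit()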
@@ -0,0 +1,27 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add a covering index for the unread counts query.

    The /api/read-state/unreads endpoint runs three queries against messages.
    The last-message-times query (GROUP BY type, conversation_key + MAX(received_at))
    was doing a full table scan. This covering index lets SQLite resolve the
    grouping and MAX entirely from the index without touching the table.
    It also improves the unread count queries which filter on outgoing and received_at.
    """
    # Guard: table or columns may not exist in partial-schema test setups
    cursor = await conn.execute("PRAGMA table_info(messages)")
    columns = {row[1] for row in await cursor.fetchall()}
    required = {"type", "conversation_key", "outgoing", "received_at"}
    if required <= columns:
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_messages_unread_covering "
            "ON messages(type, conversation_key, outgoing, received_at)"
        )
    await conn.commit()
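One way to verify the index is actually covering is EXPLAIN QUERY PLAN; the query shape below is inferred from the docstring, not copied from the endpoint.

# Sanity check that the grouped MAX(received_at) query uses the covering index.
import aiosqlite


async def explain_unreads(conn: aiosqlite.Connection) -> None:
    cursor = await conn.execute(
        """EXPLAIN QUERY PLAN
           SELECT type, conversation_key, MAX(received_at)
           FROM messages
           GROUP BY type, conversation_key"""
    )
    for row in await cursor.fetchall():
        # Expect the plan detail to mention COVERING INDEX idx_messages_unread_covering.
        print(row)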
@@ -0,0 +1,31 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """
    Add a composite index for message pagination and drop the now-redundant
    idx_messages_conversation.

    The pagination query (ORDER BY received_at DESC, id DESC LIMIT N) hits a
    temp B-tree sort without this index. With it, SQLite walks the index in
    order and stops after N rows — critical for channels with 30K+ messages.

    idx_messages_conversation(type, conversation_key) is a strict prefix of
    both this index and idx_messages_unread_covering, so SQLite never picks it.
    Dropping it saves ~6 MB and one index to maintain per INSERT.
    """
    # Guard: table or columns may not exist in partial-schema test setups
    cursor = await conn.execute("PRAGMA table_info(messages)")
    columns = {row[1] for row in await cursor.fetchall()}
    required = {"type", "conversation_key", "received_at", "id"}
    if required <= columns:
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_messages_pagination "
            "ON messages(type, conversation_key, received_at DESC, id DESC)"
        )
        await conn.execute("DROP INDEX IF EXISTS idx_messages_conversation")
    await conn.commit()
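The query shape this index serves, written as a small helper; the function name and column selection are illustrative only.

# The pagination shape the index serves: walk (received_at DESC, id DESC) in
# index order and stop after `limit` rows.
import aiosqlite


async def page_messages(
    conn: aiosqlite.Connection, msg_type: str, conversation_key: str, limit: int = 50
) -> list:
    cursor = await conn.execute(
        """SELECT * FROM messages
           WHERE type = ? AND conversation_key = ?
           ORDER BY received_at DESC, id DESC
           LIMIT ?""",
        (msg_type, conversation_key, limit),
    )
    return list(await cursor.fetchall())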
@@ -0,0 +1,37 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""Add MQTT configuration columns to app_settings."""
|
||||
# Guard: app_settings may not exist in partial-schema test setups
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='app_settings'"
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
cursor = await conn.execute("PRAGMA table_info(app_settings)")
|
||||
columns = {row[1] for row in await cursor.fetchall()}
|
||||
|
||||
new_columns = [
|
||||
("mqtt_broker_host", "TEXT DEFAULT ''"),
|
||||
("mqtt_broker_port", "INTEGER DEFAULT 1883"),
|
||||
("mqtt_username", "TEXT DEFAULT ''"),
|
||||
("mqtt_password", "TEXT DEFAULT ''"),
|
||||
("mqtt_use_tls", "INTEGER DEFAULT 0"),
|
||||
("mqtt_tls_insecure", "INTEGER DEFAULT 0"),
|
||||
("mqtt_topic_prefix", "TEXT DEFAULT 'meshcore'"),
|
||||
("mqtt_publish_messages", "INTEGER DEFAULT 0"),
|
||||
("mqtt_publish_raw_packets", "INTEGER DEFAULT 0"),
|
||||
]
|
||||
|
||||
for col_name, col_def in new_columns:
|
||||
if col_name not in columns:
|
||||
await conn.execute(f"ALTER TABLE app_settings ADD COLUMN {col_name} {col_def}")
|
||||
|
||||
await conn.commit()
|
||||
@@ -0,0 +1,33 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""Add community MQTT configuration columns to app_settings."""
|
||||
# Guard: app_settings may not exist in partial-schema test setups
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='app_settings'"
|
||||
)
|
||||
if not await cursor.fetchone():
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
cursor = await conn.execute("PRAGMA table_info(app_settings)")
|
||||
columns = {row[1] for row in await cursor.fetchall()}
|
||||
|
||||
new_columns = [
|
||||
("community_mqtt_enabled", "INTEGER DEFAULT 0"),
|
||||
("community_mqtt_iata", "TEXT DEFAULT ''"),
|
||||
("community_mqtt_broker_host", "TEXT DEFAULT 'mqtt-us-v1.letsmesh.net'"),
|
||||
("community_mqtt_broker_port", "INTEGER DEFAULT 443"),
|
||||
("community_mqtt_email", "TEXT DEFAULT ''"),
|
||||
]
|
||||
|
||||
for col_name, col_def in new_columns:
|
||||
if col_name not in columns:
|
||||
await conn.execute(f"ALTER TABLE app_settings ADD COLUMN {col_name} {col_def}")
|
||||
|
||||
await conn.commit()
|
||||
@@ -0,0 +1,23 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Seed the #remoteterm hashtag channel so new installs have it by default.

    Uses INSERT OR IGNORE so it's a no-op if the channel already exists
    (e.g. existing users who already added it manually). The channels table
    is created by the base schema before migrations run, so it always exists
    in production.
    """
    try:
        await conn.execute(
            "INSERT OR IGNORE INTO channels (key, name, is_hashtag, on_radio) VALUES (?, ?, ?, ?)",
            ("8959AE053F2201801342A1DBDDA184F6", "#remoteterm", 1, 0),
        )
        await conn.commit()
    except Exception:
        logger.debug("Skipping #remoteterm seed (channels table not ready)")
@@ -0,0 +1,23 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add flood_scope column to app_settings for outbound region tagging.

    Empty string means disabled (no scope set, messages sent unscoped).
    """
    try:
        await conn.execute("ALTER TABLE app_settings ADD COLUMN flood_scope TEXT DEFAULT ''")
        await conn.commit()
    except Exception as e:
        error_msg = str(e).lower()
        if "duplicate column" in error_msg:
            logger.debug("flood_scope column already exists, skipping")
        elif "no such table" in error_msg:
            logger.debug("app_settings table not ready, skipping flood_scope migration")
        else:
            raise
@@ -0,0 +1,36 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""Add blocked_keys and blocked_names columns to app_settings.
|
||||
|
||||
These store JSON arrays of blocked public keys and display names.
|
||||
Blocking hides messages from the UI but does not affect MQTT or bots.
|
||||
"""
|
||||
try:
|
||||
await conn.execute("ALTER TABLE app_settings ADD COLUMN blocked_keys TEXT DEFAULT '[]'")
|
||||
except Exception as e:
|
||||
error_msg = str(e).lower()
|
||||
if "duplicate column" in error_msg:
|
||||
logger.debug("blocked_keys column already exists, skipping")
|
||||
elif "no such table" in error_msg:
|
||||
logger.debug("app_settings table not ready, skipping blocked_keys migration")
|
||||
else:
|
||||
raise
|
||||
|
||||
try:
|
||||
await conn.execute("ALTER TABLE app_settings ADD COLUMN blocked_names TEXT DEFAULT '[]'")
|
||||
except Exception as e:
|
||||
error_msg = str(e).lower()
|
||||
if "duplicate column" in error_msg:
|
||||
logger.debug("blocked_names column already exists, skipping")
|
||||
elif "no such table" in error_msg:
|
||||
logger.debug("app_settings table not ready, skipping blocked_names migration")
|
||||
else:
|
||||
raise
|
||||
|
||||
await conn.commit()
|
||||
@@ -0,0 +1,143 @@
|
||||
import json
|
||||
import logging
|
||||
import uuid
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""Create fanout_configs table and migrate existing MQTT settings.
|
||||
|
||||
Reads existing MQTT settings from app_settings and creates corresponding
|
||||
fanout_configs rows. Old columns are NOT dropped (rollback safety).
|
||||
"""
|
||||
|
||||
# 1. Create fanout_configs table
|
||||
await conn.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS fanout_configs (
|
||||
id TEXT PRIMARY KEY,
|
||||
type TEXT NOT NULL,
|
||||
name TEXT NOT NULL,
|
||||
enabled INTEGER DEFAULT 0,
|
||||
config TEXT NOT NULL DEFAULT '{}',
|
||||
scope TEXT NOT NULL DEFAULT '{}',
|
||||
sort_order INTEGER DEFAULT 0,
|
||||
created_at INTEGER NOT NULL
|
||||
)
|
||||
"""
|
||||
)
|
||||
|
||||
# 2. Read existing MQTT settings
|
||||
try:
|
||||
cursor = await conn.execute(
|
||||
"""
|
||||
SELECT mqtt_broker_host, mqtt_broker_port, mqtt_username, mqtt_password,
|
||||
mqtt_use_tls, mqtt_tls_insecure, mqtt_topic_prefix,
|
||||
mqtt_publish_messages, mqtt_publish_raw_packets,
|
||||
community_mqtt_enabled, community_mqtt_iata,
|
||||
community_mqtt_broker_host, community_mqtt_broker_port,
|
||||
community_mqtt_email
|
||||
FROM app_settings WHERE id = 1
|
||||
"""
|
||||
)
|
||||
row = await cursor.fetchone()
|
||||
except Exception:
|
||||
row = None
|
||||
|
||||
if row is None:
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
import time
|
||||
|
||||
now = int(time.time())
|
||||
sort_order = 0
|
||||
|
||||
# 3. Migrate private MQTT if configured
|
||||
broker_host = row["mqtt_broker_host"] or ""
|
||||
if broker_host:
|
||||
publish_messages = bool(row["mqtt_publish_messages"])
|
||||
publish_raw = bool(row["mqtt_publish_raw_packets"])
|
||||
enabled = publish_messages or publish_raw
|
||||
|
||||
config = {
|
||||
"broker_host": broker_host,
|
||||
"broker_port": row["mqtt_broker_port"] or 1883,
|
||||
"username": row["mqtt_username"] or "",
|
||||
"password": row["mqtt_password"] or "",
|
||||
"use_tls": bool(row["mqtt_use_tls"]),
|
||||
"tls_insecure": bool(row["mqtt_tls_insecure"]),
|
||||
"topic_prefix": row["mqtt_topic_prefix"] or "meshcore",
|
||||
}
|
||||
|
||||
scope = {
|
||||
"messages": "all" if publish_messages else "none",
|
||||
"raw_packets": "all" if publish_raw else "none",
|
||||
}
|
||||
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO fanout_configs (id, type, name, enabled, config, scope, sort_order, created_at)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
str(uuid.uuid4()),
|
||||
"mqtt_private",
|
||||
"Private MQTT",
|
||||
1 if enabled else 0,
|
||||
json.dumps(config),
|
||||
json.dumps(scope),
|
||||
sort_order,
|
||||
now,
|
||||
),
|
||||
)
|
||||
sort_order += 1
|
||||
logger.info("Migrated private MQTT settings to fanout_configs (enabled=%s)", enabled)
|
||||
|
||||
# 4. Migrate community MQTT if enabled OR configured (preserve disabled-but-configured)
|
||||
community_enabled = bool(row["community_mqtt_enabled"])
|
||||
community_iata = row["community_mqtt_iata"] or ""
|
||||
community_host = row["community_mqtt_broker_host"] or ""
|
||||
community_email = row["community_mqtt_email"] or ""
|
||||
community_has_config = bool(
|
||||
community_iata
|
||||
or community_email
|
||||
or (community_host and community_host != "mqtt-us-v1.letsmesh.net")
|
||||
)
|
||||
if community_enabled or community_has_config:
|
||||
config = {
|
||||
"broker_host": community_host or "mqtt-us-v1.letsmesh.net",
|
||||
"broker_port": row["community_mqtt_broker_port"] or 443,
|
||||
"iata": community_iata,
|
||||
"email": community_email,
|
||||
}
|
||||
|
||||
scope = {
|
||||
"messages": "none",
|
||||
"raw_packets": "all",
|
||||
}
|
||||
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO fanout_configs (id, type, name, enabled, config, scope, sort_order, created_at)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
str(uuid.uuid4()),
|
||||
"mqtt_community",
|
||||
"Community MQTT",
|
||||
1 if community_enabled else 0,
|
||||
json.dumps(config),
|
||||
json.dumps(scope),
|
||||
sort_order,
|
||||
now,
|
||||
),
|
||||
)
|
||||
logger.info(
|
||||
"Migrated community MQTT settings to fanout_configs (enabled=%s)", community_enabled
|
||||
)
|
||||
|
||||
await conn.commit()
|
||||
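A sketch of consuming the new table: each row carries JSON config and scope blobs keyed by the type column. The returned tuple shape and helper name are assumptions for the example.

# Illustrative consumer of fanout_configs: decode the JSON config/scope blobs
# per row, ordered the same way the UI would list them.
import json

import aiosqlite


async def load_fanout_configs(conn: aiosqlite.Connection) -> list[tuple]:
    cursor = await conn.execute(
        "SELECT id, type, name, enabled, config, scope FROM fanout_configs ORDER BY sort_order"
    )
    configs = []
    for row in await cursor.fetchall():
        configs.append(
            (
                row[1],  # type: 'mqtt_private', 'mqtt_community', 'bot', ...
                row[2],  # display name
                bool(row[3]),  # enabled flag
                json.loads(row[4] or "{}"),  # per-type connection/config blob
                json.loads(row[5] or "{}"),  # scope: which messages/raw packets to fan out
            )
        )
    return configs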
@@ -0,0 +1,63 @@
|
||||
import json
|
||||
import logging
|
||||
import uuid
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""Migrate bots from app_settings.bots JSON to fanout_configs rows."""
|
||||
|
||||
try:
|
||||
cursor = await conn.execute("SELECT bots FROM app_settings WHERE id = 1")
|
||||
row = await cursor.fetchone()
|
||||
except Exception:
|
||||
row = None
|
||||
|
||||
if row is None:
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
bots_json = row["bots"] or "[]"
|
||||
try:
|
||||
bots = json.loads(bots_json)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
bots = []
|
||||
|
||||
if not bots:
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
import time
|
||||
|
||||
now = int(time.time())
|
||||
|
||||
# Use sort_order starting at 200 to place bots after MQTT configs (0-99)
|
||||
for i, bot in enumerate(bots):
|
||||
bot_name = bot.get("name") or f"Bot {i + 1}"
|
||||
bot_enabled = bool(bot.get("enabled", False))
|
||||
bot_code = bot.get("code", "")
|
||||
|
||||
config_blob = json.dumps({"code": bot_code})
|
||||
scope = json.dumps({"messages": "all", "raw_packets": "none"})
|
||||
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO fanout_configs (id, type, name, enabled, config, scope, sort_order, created_at)
|
||||
VALUES (?, 'bot', ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
str(uuid.uuid4()),
|
||||
bot_name,
|
||||
1 if bot_enabled else 0,
|
||||
config_blob,
|
||||
scope,
|
||||
200 + i,
|
||||
now,
|
||||
),
|
||||
)
|
||||
logger.info("Migrated bot '%s' to fanout_configs (enabled=%s)", bot_name, bot_enabled)
|
||||
|
||||
await conn.commit()
|
||||
@@ -0,0 +1,54 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""Drop legacy MQTT, community MQTT, and bots columns from app_settings.
|
||||
|
||||
These columns were migrated to fanout_configs in migrations 36 and 37.
|
||||
SQLite 3.35.0+ supports ALTER TABLE DROP COLUMN. For older versions,
|
||||
the columns remain but are harmless (no longer read or written).
|
||||
"""
|
||||
# Check if app_settings table exists (some test DBs may not have it)
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='app_settings'"
|
||||
)
|
||||
if await cursor.fetchone() is None:
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
columns_to_drop = [
|
||||
"bots",
|
||||
"mqtt_broker_host",
|
||||
"mqtt_broker_port",
|
||||
"mqtt_username",
|
||||
"mqtt_password",
|
||||
"mqtt_use_tls",
|
||||
"mqtt_tls_insecure",
|
||||
"mqtt_topic_prefix",
|
||||
"mqtt_publish_messages",
|
||||
"mqtt_publish_raw_packets",
|
||||
"community_mqtt_enabled",
|
||||
"community_mqtt_iata",
|
||||
"community_mqtt_broker_host",
|
||||
"community_mqtt_broker_port",
|
||||
"community_mqtt_email",
|
||||
]
|
||||
|
||||
for column in columns_to_drop:
|
||||
try:
|
||||
await conn.execute(f"ALTER TABLE app_settings DROP COLUMN {column}")
|
||||
logger.debug("Dropped %s from app_settings", column)
|
||||
except aiosqlite.OperationalError as e:
|
||||
error_msg = str(e).lower()
|
||||
if "no such column" in error_msg:
|
||||
logger.debug("app_settings.%s already dropped, skipping", column)
|
||||
elif "syntax error" in error_msg or "drop column" in error_msg:
|
||||
logger.debug("SQLite doesn't support DROP COLUMN, %s column will remain", column)
|
||||
else:
|
||||
raise
|
||||
|
||||
await conn.commit()
|
||||
@@ -0,0 +1,65 @@
|
||||
import logging
|
||||
|
||||
import aiosqlite
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
async def migrate(conn: aiosqlite.Connection) -> None:
|
||||
"""Add contacts.out_path_hash_mode and backfill legacy rows.
|
||||
|
||||
Historical databases predate multibyte routing support. Backfill rules:
|
||||
- contacts with last_path_len = -1 are flood routes -> out_path_hash_mode = -1
|
||||
- all other existing contacts default to 0 (1-byte legacy hop identifiers)
|
||||
"""
|
||||
cursor = await conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
|
||||
)
|
||||
if await cursor.fetchone() is None:
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
column_cursor = await conn.execute("PRAGMA table_info(contacts)")
|
||||
columns = {row[1] for row in await column_cursor.fetchall()}
|
||||
|
||||
added_column = False
|
||||
|
||||
try:
|
||||
await conn.execute(
|
||||
"ALTER TABLE contacts ADD COLUMN out_path_hash_mode INTEGER NOT NULL DEFAULT 0"
|
||||
)
|
||||
added_column = True
|
||||
logger.debug("Added out_path_hash_mode to contacts table")
|
||||
except aiosqlite.OperationalError as e:
|
||||
if "duplicate column name" in str(e).lower():
|
||||
logger.debug("contacts.out_path_hash_mode already exists, skipping add")
|
||||
else:
|
||||
raise
|
||||
|
||||
if "last_path_len" not in columns:
|
||||
await conn.commit()
|
||||
return
|
||||
|
||||
if added_column:
|
||||
await conn.execute(
|
||||
"""
|
||||
UPDATE contacts
|
||||
SET out_path_hash_mode = CASE
|
||||
WHEN last_path_len = -1 THEN -1
|
||||
ELSE 0
|
||||
END
|
||||
"""
|
||||
)
|
||||
else:
|
||||
await conn.execute(
|
||||
"""
|
||||
UPDATE contacts
|
||||
SET out_path_hash_mode = CASE
|
||||
WHEN last_path_len = -1 THEN -1
|
||||
ELSE 0
|
||||
END
|
||||
WHERE out_path_hash_mode NOT IN (-1, 0, 1, 2)
|
||||
OR (last_path_len = -1 AND out_path_hash_mode != -1)
|
||||
"""
|
||||
)
|
||||
await conn.commit()
|
||||
@@ -0,0 +1,82 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(
    conn: aiosqlite.Connection,
) -> None:
    """Rebuild contact_advert_paths so uniqueness includes path_len.

    Multi-byte routing can produce the same path_hex bytes with a different hop count,
    which changes the hop boundaries and therefore the semantic next-hop identity.
    """
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='contact_advert_paths'"
    )
    if await cursor.fetchone() is None:
        await conn.execute(
            """
            CREATE TABLE IF NOT EXISTS contact_advert_paths (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                public_key TEXT NOT NULL,
                path_hex TEXT NOT NULL,
                path_len INTEGER NOT NULL,
                first_seen INTEGER NOT NULL,
                last_seen INTEGER NOT NULL,
                heard_count INTEGER NOT NULL DEFAULT 1,
                UNIQUE(public_key, path_hex, path_len),
                FOREIGN KEY (public_key) REFERENCES contacts(public_key)
            )
            """
        )
        await conn.execute("DROP INDEX IF EXISTS idx_contact_advert_paths_recent")
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_contact_advert_paths_recent "
            "ON contact_advert_paths(public_key, last_seen DESC)"
        )
        await conn.commit()
        return

    await conn.execute(
        """
        CREATE TABLE contact_advert_paths_new (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            public_key TEXT NOT NULL,
            path_hex TEXT NOT NULL,
            path_len INTEGER NOT NULL,
            first_seen INTEGER NOT NULL,
            last_seen INTEGER NOT NULL,
            heard_count INTEGER NOT NULL DEFAULT 1,
            UNIQUE(public_key, path_hex, path_len),
            FOREIGN KEY (public_key) REFERENCES contacts(public_key)
        )
        """
    )

    await conn.execute(
        """
        INSERT INTO contact_advert_paths_new
            (public_key, path_hex, path_len, first_seen, last_seen, heard_count)
        SELECT
            public_key,
            path_hex,
            path_len,
            MIN(first_seen),
            MAX(last_seen),
            SUM(heard_count)
        FROM contact_advert_paths
        GROUP BY public_key, path_hex, path_len
        """
    )

    await conn.execute("DROP TABLE contact_advert_paths")
    await conn.execute("ALTER TABLE contact_advert_paths_new RENAME TO contact_advert_paths")
    await conn.execute("DROP INDEX IF EXISTS idx_contact_advert_paths_recent")
    await conn.execute(
        "CREATE INDEX IF NOT EXISTS idx_contact_advert_paths_recent "
        "ON contact_advert_paths(public_key, last_seen DESC)"
    )
    await conn.commit()
@@ -0,0 +1,31 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add nullable routing-override columns to contacts."""
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    for column_name, column_type in (
        ("route_override_path", "TEXT"),
        ("route_override_len", "INTEGER"),
        ("route_override_hash_mode", "INTEGER"),
    ):
        try:
            await conn.execute(f"ALTER TABLE contacts ADD COLUMN {column_name} {column_type}")
            logger.debug("Added %s to contacts table", column_name)
        except aiosqlite.OperationalError as e:
            if "duplicate column name" in str(e).lower():
                logger.debug("contacts.%s already exists, skipping", column_name)
            else:
                raise

    await conn.commit()
@@ -0,0 +1,26 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add nullable per-channel flood-scope override column."""
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='channels'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    try:
        await conn.execute("ALTER TABLE channels ADD COLUMN flood_scope_override TEXT")
        logger.debug("Added flood_scope_override to channels table")
    except aiosqlite.OperationalError as e:
        if "duplicate column name" in str(e).lower():
            logger.debug("channels.flood_scope_override already exists, skipping")
        else:
            raise

    await conn.commit()
@@ -0,0 +1,31 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Restrict the message dedup index to channel messages."""
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    cursor = await conn.execute("PRAGMA table_info(messages)")
    columns = {row[1] for row in await cursor.fetchall()}
    required_columns = {"type", "conversation_key", "text", "sender_timestamp"}
    if not required_columns.issubset(columns):
        logger.debug("messages table missing dedup-index columns, skipping migration 43")
        await conn.commit()
        return

    await conn.execute("DROP INDEX IF EXISTS idx_messages_dedup_null_safe")
    await conn.execute(
        """CREATE UNIQUE INDEX IF NOT EXISTS idx_messages_dedup_null_safe
        ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))
        WHERE type = 'CHAN'"""
    )
    await conn.commit()
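What the partial unique index buys, in miniature (plain sqlite3 for brevity; table reduced to the indexed columns): duplicate CHAN rows are rejected, NULL timestamps fold to 0 via the COALESCE expression, and PRIV rows are untouched because they fall outside the WHERE clause.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE messages (type TEXT, conversation_key TEXT, text TEXT, sender_timestamp INTEGER)"
)
conn.execute(
    """CREATE UNIQUE INDEX idx_messages_dedup_null_safe
    ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))
    WHERE type = 'CHAN'"""
)
conn.execute("INSERT OR IGNORE INTO messages VALUES ('CHAN', 'k', 'hi', NULL)")
conn.execute("INSERT OR IGNORE INTO messages VALUES ('CHAN', 'k', 'hi', 0)")  # ignored: NULL folded to 0
conn.execute("INSERT OR IGNORE INTO messages VALUES ('PRIV', 'k', 'hi', 0)")
conn.execute("INSERT OR IGNORE INTO messages VALUES ('PRIV', 'k', 'hi', 0)")  # kept: PRIV is outside the index
print(conn.execute("SELECT COUNT(*) FROM messages").fetchone()[0])  # prints 3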
@@ -0,0 +1,157 @@
import json
import logging

import aiosqlite

logger = logging.getLogger(__name__)


def _merge_message_paths(paths_json_values: list[str | None]) -> str | None:
    """Merge multiple message path arrays into one exact-observation list."""
    merged: list[dict[str, object]] = []
    seen: set[tuple[object | None, object | None, object | None]] = set()

    for paths_json in paths_json_values:
        if not paths_json:
            continue
        try:
            parsed = json.loads(paths_json)
        except (TypeError, json.JSONDecodeError):
            continue
        if not isinstance(parsed, list):
            continue
        for entry in parsed:
            if not isinstance(entry, dict):
                continue
            key = (
                entry.get("path"),
                entry.get("received_at"),
                entry.get("path_len"),
            )
            if key in seen:
                continue
            seen.add(key)
            merged.append(entry)

    return json.dumps(merged) if merged else None


async def migrate(conn: aiosqlite.Connection) -> None:
    """Collapse same-contact same-text same-second incoming DMs into one row."""
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    cursor = await conn.execute("PRAGMA table_info(messages)")
    columns = {row[1] for row in await cursor.fetchall()}
    required_columns = {
        "id",
        "type",
        "conversation_key",
        "text",
        "sender_timestamp",
        "received_at",
        "paths",
        "txt_type",
        "signature",
        "outgoing",
        "acked",
        "sender_name",
        "sender_key",
    }
    if not required_columns.issubset(columns):
        logger.debug("messages table missing incoming-DM dedup columns, skipping migration 44")
        await conn.commit()
        return

    raw_packets_cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='raw_packets'"
    )
    raw_packets_exists = await raw_packets_cursor.fetchone() is not None

    duplicate_groups_cursor = await conn.execute(
        """
        SELECT conversation_key, text,
               COALESCE(sender_timestamp, 0) AS normalized_sender_timestamp,
               COUNT(*) AS duplicate_count
        FROM messages
        WHERE type = 'PRIV' AND outgoing = 0
        GROUP BY conversation_key, text, COALESCE(sender_timestamp, 0)
        HAVING COUNT(*) > 1
        """
    )
    duplicate_groups = await duplicate_groups_cursor.fetchall()

    for group in duplicate_groups:
        normalized_sender_timestamp = group["normalized_sender_timestamp"]
        rows_cursor = await conn.execute(
            """
            SELECT *
            FROM messages
            WHERE type = 'PRIV' AND outgoing = 0
              AND conversation_key = ? AND text = ?
              AND COALESCE(sender_timestamp, 0) = ?
            ORDER BY id ASC
            """,
            (
                group["conversation_key"],
                group["text"],
                normalized_sender_timestamp,
            ),
        )
        rows = list(await rows_cursor.fetchall())
        if len(rows) < 2:
            continue

        keeper = rows[0]
        duplicate_ids = [row["id"] for row in rows[1:]]
        merged_paths = _merge_message_paths([row["paths"] for row in rows])
        merged_received_at = min(row["received_at"] for row in rows)
        merged_txt_type = next((row["txt_type"] for row in rows if row["txt_type"] != 0), 0)
        merged_signature = next((row["signature"] for row in rows if row["signature"]), None)
        merged_sender_name = next((row["sender_name"] for row in rows if row["sender_name"]), None)
        merged_sender_key = next((row["sender_key"] for row in rows if row["sender_key"]), None)
        merged_acked = max(int(row["acked"] or 0) for row in rows)

        await conn.execute(
            """
            UPDATE messages
            SET received_at = ?, paths = ?, txt_type = ?, signature = ?,
                acked = ?, sender_name = ?, sender_key = ?
            WHERE id = ?
            """,
            (
                merged_received_at,
                merged_paths,
                merged_txt_type,
                merged_signature,
                merged_acked,
                merged_sender_name,
                merged_sender_key,
                keeper["id"],
            ),
        )

        if raw_packets_exists:
            for duplicate_id in duplicate_ids:
                await conn.execute(
                    "UPDATE raw_packets SET message_id = ? WHERE message_id = ?",
                    (keeper["id"], duplicate_id),
                )

        placeholders = ",".join("?" for _ in duplicate_ids)
        await conn.execute(
            f"DELETE FROM messages WHERE id IN ({placeholders})",
            duplicate_ids,
        )

    await conn.execute("DROP INDEX IF EXISTS idx_messages_incoming_priv_dedup")
    await conn.execute(
        """CREATE UNIQUE INDEX IF NOT EXISTS idx_messages_incoming_priv_dedup
        ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0))
        WHERE type = 'PRIV' AND outgoing = 0"""
    )
    await conn.commit()
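The merge helper's exact-observation semantics, illustrated (assumes _merge_message_paths from the file above is in scope): duplicate (path, received_at, path_len) triples collapse to one entry, and malformed inputs are skipped.

merged = _merge_message_paths([
    '[{"path": "ab", "received_at": 1, "path_len": 1}]',
    '[{"path": "ab", "received_at": 1, "path_len": 1},'
    ' {"path": "cd", "received_at": 2, "path_len": 1}]',
    None,          # empty values are skipped
    "not json",    # unparseable values are skipped
])
# -> '[{"path": "ab", "received_at": 1, "path_len": 1}, {"path": "cd", "received_at": 2, "path_len": 1}]'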
@@ -0,0 +1,136 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Replace legacy contact route columns with canonical direct-route columns."""
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='contacts'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    cursor = await conn.execute("PRAGMA table_info(contacts)")
    columns = {row[1] for row in await cursor.fetchall()}

    target_columns = {
        "public_key",
        "name",
        "type",
        "flags",
        "direct_path",
        "direct_path_len",
        "direct_path_hash_mode",
        "direct_path_updated_at",
        "route_override_path",
        "route_override_len",
        "route_override_hash_mode",
        "last_advert",
        "lat",
        "lon",
        "last_seen",
        "on_radio",
        "last_contacted",
        "first_seen",
        "last_read_at",
    }
    if (
        target_columns.issubset(columns)
        and "last_path" not in columns
        and "out_path_hash_mode" not in columns
    ):
        await conn.commit()
        return

    await conn.execute(
        """
        CREATE TABLE contacts_new (
            public_key TEXT PRIMARY KEY,
            name TEXT,
            type INTEGER DEFAULT 0,
            flags INTEGER DEFAULT 0,
            direct_path TEXT,
            direct_path_len INTEGER,
            direct_path_hash_mode INTEGER,
            direct_path_updated_at INTEGER,
            route_override_path TEXT,
            route_override_len INTEGER,
            route_override_hash_mode INTEGER,
            last_advert INTEGER,
            lat REAL,
            lon REAL,
            last_seen INTEGER,
            on_radio INTEGER DEFAULT 0,
            last_contacted INTEGER,
            first_seen INTEGER,
            last_read_at INTEGER
        )
        """
    )

    select_expr = {
        "public_key": "public_key",
        "name": "NULL",
        "type": "0",
        "flags": "0",
        "direct_path": "NULL",
        "direct_path_len": "NULL",
        "direct_path_hash_mode": "NULL",
        "direct_path_updated_at": "NULL",
        "route_override_path": "NULL",
        "route_override_len": "NULL",
        "route_override_hash_mode": "NULL",
        "last_advert": "NULL",
        "lat": "NULL",
        "lon": "NULL",
        "last_seen": "NULL",
        "on_radio": "0",
        "last_contacted": "NULL",
        "first_seen": "NULL",
        "last_read_at": "NULL",
    }
    for name in ("name", "type", "flags"):
        if name in columns:
            select_expr[name] = name

    if "direct_path" in columns:
        select_expr["direct_path"] = "direct_path"

    if "direct_path_len" in columns:
        select_expr["direct_path_len"] = "direct_path_len"

    if "direct_path_hash_mode" in columns:
        select_expr["direct_path_hash_mode"] = "direct_path_hash_mode"

    for name in (
        "route_override_path",
        "route_override_len",
        "route_override_hash_mode",
        "last_advert",
        "lat",
        "lon",
        "last_seen",
        "on_radio",
        "last_contacted",
        "first_seen",
        "last_read_at",
    ):
        if name in columns:
            select_expr[name] = name

    ordered_columns = list(select_expr.keys())
    await conn.execute(
        f"""
        INSERT INTO contacts_new ({", ".join(ordered_columns)})
        SELECT {", ".join(select_expr[name] for name in ordered_columns)}
        FROM contacts
        """
    )

    await conn.execute("DROP TABLE contacts")
    await conn.execute("ALTER TABLE contacts_new RENAME TO contacts")
    await conn.commit()
@@ -0,0 +1,93 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Move uniquely resolvable orphan contact child rows onto full contacts, drop the rest."""
    existing_tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    existing_tables = {row[0] for row in await existing_tables_cursor.fetchall()}
    if "contacts" not in existing_tables:
        await conn.commit()
        return

    child_tables = [
        table
        for table in ("contact_name_history", "contact_advert_paths")
        if table in existing_tables
    ]
    if not child_tables:
        await conn.commit()
        return

    orphan_keys: set[str] = set()

    for table in child_tables:
        cursor = await conn.execute(
            f"""
            SELECT DISTINCT child.public_key
            FROM {table} child
            LEFT JOIN contacts c ON c.public_key = child.public_key
            WHERE c.public_key IS NULL
            """
        )
        orphan_keys.update(row[0] for row in await cursor.fetchall())

    for orphan_key in sorted(orphan_keys, key=len, reverse=True):
        match_cursor = await conn.execute(
            """
            SELECT public_key
            FROM contacts
            WHERE length(public_key) = 64
              AND public_key LIKE ? || '%'
            ORDER BY public_key
            """,
            (orphan_key.lower(),),
        )
        matches = [row[0] for row in await match_cursor.fetchall()]
        resolved_key = matches[0] if len(matches) == 1 else None

        if resolved_key is not None:
            if "contact_name_history" in child_tables:
                await conn.execute(
                    """
                    INSERT INTO contact_name_history (public_key, name, first_seen, last_seen)
                    SELECT ?, name, first_seen, last_seen
                    FROM contact_name_history
                    WHERE public_key = ?
                    ON CONFLICT(public_key, name) DO UPDATE SET
                        first_seen = MIN(contact_name_history.first_seen, excluded.first_seen),
                        last_seen = MAX(contact_name_history.last_seen, excluded.last_seen)
                    """,
                    (resolved_key, orphan_key),
                )
            if "contact_advert_paths" in child_tables:
                await conn.execute(
                    """
                    INSERT INTO contact_advert_paths
                        (public_key, path_hex, path_len, first_seen, last_seen, heard_count)
                    SELECT ?, path_hex, path_len, first_seen, last_seen, heard_count
                    FROM contact_advert_paths
                    WHERE public_key = ?
                    ON CONFLICT(public_key, path_hex, path_len) DO UPDATE SET
                        first_seen = MIN(contact_advert_paths.first_seen, excluded.first_seen),
                        last_seen = MAX(contact_advert_paths.last_seen, excluded.last_seen),
                        heard_count = contact_advert_paths.heard_count + excluded.heard_count
                    """,
                    (resolved_key, orphan_key),
                )

        if "contact_name_history" in child_tables:
            await conn.execute(
                "DELETE FROM contact_name_history WHERE public_key = ?",
                (orphan_key,),
            )
        if "contact_advert_paths" in child_tables:
            await conn.execute(
                "DELETE FROM contact_advert_paths WHERE public_key = ?",
                (orphan_key,),
            )

    await conn.commit()
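The prefix-resolution rule above in miniature: a truncated orphan key is adopted only when exactly one full 64-character contact key starts with it. A plain-Python sketch mirroring the LIKE ? || '%' query:

def resolve(orphan_key: str, full_keys: list[str]) -> str | None:
    matches = [
        key for key in full_keys
        if len(key) == 64 and key.startswith(orphan_key.lower())
    ]
    return matches[0] if len(matches) == 1 else None

full = ["a" * 64, "a" * 63 + "b"]
print(resolve("a" * 8, full))         # None: two candidates, ambiguous
print(resolve("a" * 63 + "b", full))  # unique match, adopted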
@@ -0,0 +1,39 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add indexes used by the statistics endpoint's time-windowed scans."""
    cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    tables = {row[0] for row in await cursor.fetchall()}

    if "raw_packets" in tables:
        cursor = await conn.execute("PRAGMA table_info(raw_packets)")
        raw_packet_columns = {row[1] for row in await cursor.fetchall()}
        if "timestamp" in raw_packet_columns:
            await conn.execute(
                "CREATE INDEX IF NOT EXISTS idx_raw_packets_timestamp ON raw_packets(timestamp)"
            )

    if "contacts" in tables:
        cursor = await conn.execute("PRAGMA table_info(contacts)")
        contact_columns = {row[1] for row in await cursor.fetchall()}
        if {"type", "last_seen"}.issubset(contact_columns):
            await conn.execute(
                "CREATE INDEX IF NOT EXISTS idx_contacts_type_last_seen ON contacts(type, last_seen)"
            )

    if "messages" in tables:
        cursor = await conn.execute("PRAGMA table_info(messages)")
        message_columns = {row[1] for row in await cursor.fetchall()}
        if {"type", "received_at", "conversation_key"}.issubset(message_columns):
            await conn.execute(
                """
                CREATE INDEX IF NOT EXISTS idx_messages_type_received_conversation
                ON messages(type, received_at, conversation_key)
                """
            )
    await conn.commit()
@@ -0,0 +1,27 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add discovery_blocked_types column to app_settings.

    Stores a JSON array of integer contact type codes (1=Client, 2=Repeater,
    3=Room, 4=Sensor) whose advertisements should not create new contacts.
    Empty list means all types are accepted.
    """
    try:
        await conn.execute(
            "ALTER TABLE app_settings ADD COLUMN discovery_blocked_types TEXT DEFAULT '[]'"
        )
    except Exception as e:
        error_msg = str(e).lower()
        if "duplicate column" in error_msg:
            logger.debug("discovery_blocked_types column already exists, skipping")
        elif "no such table" in error_msg:
            logger.debug("app_settings table not ready, skipping discovery_blocked_types migration")
        else:
            raise
    await conn.commit()
@@ -0,0 +1,158 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Rebuild FK tables with CASCADE/SET NULL and clean orphaned rows.

    SQLite cannot ALTER existing FK constraints, so each table is rebuilt.
    Orphaned child rows are cleaned up before the rebuild to ensure the
    INSERT...SELECT into the new table (which has enforced FKs) succeeds.
    """
    import shutil
    from pathlib import Path

    # Back up the database before table rebuilds (skip for in-memory DBs).
    cursor = await conn.execute("PRAGMA database_list")
    db_row = await cursor.fetchone()
    db_path = db_row[2] if db_row else ""
    if db_path and db_path != ":memory:" and Path(db_path).exists():
        backup_path = db_path + ".pre-fk-migration.bak"
        for suffix in ("", "-wal", "-shm"):
            src = Path(db_path + suffix)
            if src.exists():
                shutil.copy2(str(src), backup_path + suffix)
        logger.info("Database backed up to %s before FK migration", backup_path)

    # --- Phase 1: clean orphans (guard each table's existence) ---
    tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    existing_tables = {row[0] for row in await tables_cursor.fetchall()}

    if "contact_advert_paths" in existing_tables and "contacts" in existing_tables:
        await conn.execute(
            "DELETE FROM contact_advert_paths "
            "WHERE public_key NOT IN (SELECT public_key FROM contacts)"
        )
    if "contact_name_history" in existing_tables and "contacts" in existing_tables:
        await conn.execute(
            "DELETE FROM contact_name_history "
            "WHERE public_key NOT IN (SELECT public_key FROM contacts)"
        )
    if "raw_packets" in existing_tables and "messages" in existing_tables:
        # Guard: message_id column may not exist on very old schemas
        col_cursor = await conn.execute("PRAGMA table_info(raw_packets)")
        raw_cols = {row[1] for row in await col_cursor.fetchall()}
        if "message_id" in raw_cols:
            await conn.execute(
                "UPDATE raw_packets SET message_id = NULL WHERE message_id IS NOT NULL "
                "AND message_id NOT IN (SELECT id FROM messages)"
            )
    await conn.commit()
    logger.debug("Cleaned orphaned child rows before FK rebuild")

    # --- Phase 2: rebuild raw_packets with ON DELETE SET NULL ---
    # Skip if raw_packets doesn't have message_id (pre-migration-18 schema)
    raw_has_message_id = False
    if "raw_packets" in existing_tables:
        col_cursor2 = await conn.execute("PRAGMA table_info(raw_packets)")
        raw_has_message_id = "message_id" in {row[1] for row in await col_cursor2.fetchall()}

    if raw_has_message_id:
        # Dynamically build column list based on what the old table actually has,
        # since very old schemas may lack payload_hash (added in migration 28).
        col_cursor3 = await conn.execute("PRAGMA table_info(raw_packets)")
        old_cols = [row[1] for row in await col_cursor3.fetchall()]

        new_col_defs = [
            "id INTEGER PRIMARY KEY AUTOINCREMENT",
            "timestamp INTEGER NOT NULL",
            "data BLOB NOT NULL",
            "message_id INTEGER",
        ]
        copy_cols = ["id", "timestamp", "data", "message_id"]
        if "payload_hash" in old_cols:
            new_col_defs.append("payload_hash BLOB")
            copy_cols.append("payload_hash")
        new_col_defs.append("FOREIGN KEY (message_id) REFERENCES messages(id) ON DELETE SET NULL")

        cols_sql = ", ".join(new_col_defs)
        copy_sql = ", ".join(copy_cols)
        await conn.execute(f"CREATE TABLE raw_packets_fk ({cols_sql})")
        await conn.execute(
            f"INSERT INTO raw_packets_fk ({copy_sql}) SELECT {copy_sql} FROM raw_packets"
        )
        await conn.execute("DROP TABLE raw_packets")
        await conn.execute("ALTER TABLE raw_packets_fk RENAME TO raw_packets")
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_raw_packets_message_id ON raw_packets(message_id)"
        )
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_raw_packets_timestamp ON raw_packets(timestamp)"
        )
        if "payload_hash" in old_cols:
            await conn.execute(
                "CREATE UNIQUE INDEX IF NOT EXISTS idx_raw_packets_payload_hash ON raw_packets(payload_hash)"
            )
        await conn.commit()
        logger.debug("Rebuilt raw_packets with ON DELETE SET NULL")

    # --- Phase 3: rebuild contact_advert_paths with ON DELETE CASCADE ---
    if "contact_advert_paths" in existing_tables:
        await conn.execute(
            """
            CREATE TABLE contact_advert_paths_fk (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                public_key TEXT NOT NULL,
                path_hex TEXT NOT NULL,
                path_len INTEGER NOT NULL,
                first_seen INTEGER NOT NULL,
                last_seen INTEGER NOT NULL,
                heard_count INTEGER NOT NULL DEFAULT 1,
                UNIQUE(public_key, path_hex, path_len),
                FOREIGN KEY (public_key) REFERENCES contacts(public_key) ON DELETE CASCADE
            )
            """
        )
        await conn.execute(
            "INSERT INTO contact_advert_paths_fk (id, public_key, path_hex, path_len, first_seen, last_seen, heard_count) "
            "SELECT id, public_key, path_hex, path_len, first_seen, last_seen, heard_count FROM contact_advert_paths"
        )
        await conn.execute("DROP TABLE contact_advert_paths")
        await conn.execute("ALTER TABLE contact_advert_paths_fk RENAME TO contact_advert_paths")
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_contact_advert_paths_recent "
            "ON contact_advert_paths(public_key, last_seen DESC)"
        )
        await conn.commit()
        logger.debug("Rebuilt contact_advert_paths with ON DELETE CASCADE")

    # --- Phase 4: rebuild contact_name_history with ON DELETE CASCADE ---
    if "contact_name_history" in existing_tables:
        await conn.execute(
            """
            CREATE TABLE contact_name_history_fk (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                public_key TEXT NOT NULL,
                name TEXT NOT NULL,
                first_seen INTEGER NOT NULL,
                last_seen INTEGER NOT NULL,
                UNIQUE(public_key, name),
                FOREIGN KEY (public_key) REFERENCES contacts(public_key) ON DELETE CASCADE
            )
            """
        )
        await conn.execute(
            "INSERT INTO contact_name_history_fk (id, public_key, name, first_seen, last_seen) "
            "SELECT id, public_key, name, first_seen, last_seen FROM contact_name_history"
        )
        await conn.execute("DROP TABLE contact_name_history")
        await conn.execute("ALTER TABLE contact_name_history_fk RENAME TO contact_name_history")
        await conn.execute(
            "CREATE INDEX IF NOT EXISTS idx_contact_name_history_key "
            "ON contact_name_history(public_key, last_seen DESC)"
        )
        await conn.commit()
        logger.debug("Rebuilt contact_name_history with ON DELETE CASCADE")
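One caveat worth keeping in mind: the rebuilt ON DELETE CASCADE and SET NULL actions only fire when foreign-key enforcement is enabled, and SQLite leaves it off per connection by default. A connection-setup sketch (assuming the application turns it on at open time):

import aiosqlite

async def open_db(path: str) -> aiosqlite.Connection:
    conn = await aiosqlite.connect(path)
    conn.row_factory = aiosqlite.Row
    # Without this pragma the CASCADE / SET NULL clauses above are inert.
    await conn.execute("PRAGMA foreign_keys = ON")
    return conn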
@@ -0,0 +1,27 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Create repeater_telemetry_history table for JSON-blob telemetry snapshots."""
    await conn.execute(
        """
        CREATE TABLE IF NOT EXISTS repeater_telemetry_history (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            public_key TEXT NOT NULL,
            timestamp INTEGER NOT NULL,
            data TEXT NOT NULL,
            FOREIGN KEY (public_key) REFERENCES contacts(public_key) ON DELETE CASCADE
        )
        """
    )
    await conn.execute(
        """
        CREATE INDEX IF NOT EXISTS idx_repeater_telemetry_pk_ts
        ON repeater_telemetry_history (public_key, timestamp)
        """
    )
    await conn.commit()
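Intended write path for the new table, as a sketch: one JSON blob per snapshot, read back newest-first through the (public_key, timestamp) index above (function name hypothetical):

import json
import time

async def record_snapshot(conn, public_key: str, snapshot: dict) -> None:
    await conn.execute(
        "INSERT INTO repeater_telemetry_history (public_key, timestamp, data) "
        "VALUES (?, ?, ?)",
        (public_key, int(time.time()), json.dumps(snapshot)),
    )
    await conn.commit()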
@@ -0,0 +1,24 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Remove vestigial sidebar_sort_order column from app_settings."""
    col_cursor = await conn.execute("PRAGMA table_info(app_settings)")
    columns = {row[1] for row in await col_cursor.fetchall()}
    if "sidebar_sort_order" in columns:
        try:
            await conn.execute("ALTER TABLE app_settings DROP COLUMN sidebar_sort_order")
            await conn.commit()
        except Exception as e:
            error_msg = str(e).lower()
            if "syntax error" in error_msg or "drop column" in error_msg:
                logger.debug(
                    "SQLite doesn't support DROP COLUMN, sidebar_sort_order column will remain"
                )
                await conn.commit()
            else:
                raise
@@ -0,0 +1,21 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add nullable per-channel path hash mode override column."""
    tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    if "channels" not in {row[0] for row in await tables_cursor.fetchall()}:
        await conn.commit()
        return
    try:
        await conn.execute("ALTER TABLE channels ADD COLUMN path_hash_mode_override INTEGER")
        await conn.commit()
    except Exception as e:
        if "duplicate column" in str(e).lower():
            await conn.commit()
        else:
            raise
@@ -0,0 +1,20 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add tracked_telemetry_repeaters JSON list column to app_settings."""
    tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    if "app_settings" not in {row[0] for row in await tables_cursor.fetchall()}:
        await conn.commit()
        return
    col_cursor = await conn.execute("PRAGMA table_info(app_settings)")
    columns = {row[1] for row in await col_cursor.fetchall()}
    if "tracked_telemetry_repeaters" not in columns:
        await conn.execute(
            "ALTER TABLE app_settings ADD COLUMN tracked_telemetry_repeaters TEXT DEFAULT '[]'"
        )
    await conn.commit()
@@ -0,0 +1,20 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add auto_resend_channel boolean column to app_settings."""
    tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    if "app_settings" not in {row[0] for row in await tables_cursor.fetchall()}:
        await conn.commit()
        return
    col_cursor = await conn.execute("PRAGMA table_info(app_settings)")
    columns = {row[1] for row in await col_cursor.fetchall()}
    if "auto_resend_channel" not in columns:
        await conn.execute(
            "ALTER TABLE app_settings ADD COLUMN auto_resend_channel INTEGER DEFAULT 0"
        )
    await conn.commit()
@@ -0,0 +1,93 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Move favorites from app_settings JSON blob to per-entity boolean columns.

    1. Add ``favorite`` column to contacts and channels tables.
    2. Backfill from the ``app_settings.favorites`` JSON array.
    3. Drop the ``favorites`` column from app_settings.
    """
    import json as _json

    # --- Add columns ---
    tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    existing_tables = {row[0] for row in await tables_cursor.fetchall()}
    for table in ("contacts", "channels"):
        if table not in existing_tables:
            continue
        col_cursor = await conn.execute(f"PRAGMA table_info({table})")
        columns = {row[1] for row in await col_cursor.fetchall()}
        if "favorite" not in columns:
            await conn.execute(f"ALTER TABLE {table} ADD COLUMN favorite INTEGER DEFAULT 0")
    await conn.commit()

    # --- Backfill from JSON ---
    tables_cursor = await conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
    if "app_settings" not in {row[0] for row in await tables_cursor.fetchall()}:
        await conn.commit()
        return

    col_cursor = await conn.execute("PRAGMA table_info(app_settings)")
    settings_columns = {row[1] for row in await col_cursor.fetchall()}
    if "favorites" not in settings_columns:
        await conn.commit()
        return

    cursor = await conn.execute("SELECT favorites FROM app_settings WHERE id = 1")
    row = await cursor.fetchone()
    if row and row[0]:
        try:
            favorites = _json.loads(row[0])
        except (ValueError, TypeError):
            favorites = []

        contact_keys = []
        channel_keys = []
        for fav in favorites:
            if not isinstance(fav, dict):
                continue
            fav_type = fav.get("type")
            fav_id = fav.get("id")
            if not fav_id:
                continue
            if fav_type == "contact":
                contact_keys.append(fav_id)
            elif fav_type == "channel":
                channel_keys.append(fav_id)

        if contact_keys:
            placeholders = ",".join("?" for _ in contact_keys)
            await conn.execute(
                f"UPDATE contacts SET favorite = 1 WHERE public_key IN ({placeholders})",
                contact_keys,
            )
        if channel_keys:
            placeholders = ",".join("?" for _ in channel_keys)
            await conn.execute(
                f"UPDATE channels SET favorite = 1 WHERE key IN ({placeholders})",
                channel_keys,
            )
        if contact_keys or channel_keys:
            logger.info(
                "Backfilled %d contact favorite(s) and %d channel favorite(s) from app_settings",
                len(contact_keys),
                len(channel_keys),
            )
        await conn.commit()

    # --- Drop the JSON column ---
    try:
        await conn.execute("ALTER TABLE app_settings DROP COLUMN favorites")
        await conn.commit()
    except Exception as e:
        error_msg = str(e).lower()
        if "syntax error" in error_msg or "drop column" in error_msg:
            logger.debug("SQLite doesn't support DROP COLUMN; favorites column will remain unused")
            await conn.commit()
        else:
            raise
@@ -0,0 +1,43 @@
import logging

import aiosqlite

logger = logging.getLogger(__name__)


async def migrate(conn: aiosqlite.Connection) -> None:
    """Add sender_key to the incoming PRIV dedup index.

    Room-server posts are stored as PRIV messages sharing one conversation_key
    (the room contact). Without sender_key in the uniqueness constraint, two
    different room participants sending identical text in the same clock second
    collide and the second message is silently dropped.

    Adding COALESCE(sender_key, '') is strictly more permissive — no existing
    rows can conflict — so the migration only needs to rebuild the index.
    """
    cursor = await conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table' AND name='messages'"
    )
    if await cursor.fetchone() is None:
        await conn.commit()
        return

    # The index references type, conversation_key, sender_timestamp, outgoing,
    # and sender_key. Some migration tests create minimal messages tables that
    # lack these columns. Skip gracefully when the schema is too old.
    col_cursor = await conn.execute("PRAGMA table_info(messages)")
    columns = {row[1] for row in await col_cursor.fetchall()}
    required = {"type", "conversation_key", "sender_timestamp", "outgoing", "sender_key"}
    if not required.issubset(columns):
        await conn.commit()
        return

    await conn.execute("DROP INDEX IF EXISTS idx_messages_incoming_priv_dedup")
    await conn.execute(
        """CREATE UNIQUE INDEX IF NOT EXISTS idx_messages_incoming_priv_dedup
        ON messages(type, conversation_key, text, COALESCE(sender_timestamp, 0),
                    COALESCE(sender_key, ''))
        WHERE type = 'PRIV' AND outgoing = 0"""
    )
    await conn.commit()
@@ -0,0 +1,66 @@
"""
Database migrations using SQLite's user_version pragma.

Migrations run automatically on startup. The user_version pragma tracks
which migrations have been applied (defaults to 0 for existing databases).

Each migration lives in its own file: ``_NNN_description.py``, exposing an
``async def migrate(conn)`` entry point. The runner auto-discovers files by
numeric prefix and executes them in order.

This approach is safe for existing users - their databases have user_version=0,
so all migrations run in order on first startup after upgrade.
"""

import importlib
import logging
import pkgutil
import re

import aiosqlite

logger = logging.getLogger(__name__)


async def get_version(conn: aiosqlite.Connection) -> int:
    """Get current schema version from SQLite user_version pragma."""
    cursor = await conn.execute("PRAGMA user_version")
    row = await cursor.fetchone()
    return row[0] if row else 0


async def set_version(conn: aiosqlite.Connection, version: int) -> None:
    """Set schema version using SQLite user_version pragma."""
    await conn.execute(f"PRAGMA user_version = {version}")


async def run_migrations(conn: aiosqlite.Connection) -> int:
    """
    Run all pending migrations.

    Returns the number of migrations applied.
    """
    version = await get_version(conn)
    applied = 0

    for module_info in sorted(pkgutil.iter_modules(__path__), key=lambda m: m.name):
        match = re.match(r"_(\d+)_", module_info.name)
        if not match:
            continue
        num = int(match.group(1))
        if num <= version:
            continue
        logger.info("Applying migration %d: %s", num, module_info.name)
        mod = importlib.import_module(f"{__name__}.{module_info.name}")
        await mod.migrate(conn)
        await set_version(conn, num)
        applied += 1

    if applied > 0:
        logger.info(
            "Applied %d migration(s), schema now at version %d", applied, await get_version(conn)
        )
    else:
        logger.debug("Schema up to date at version %d", version)

    return applied
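Startup wiring for the runner, as a sketch (database path illustrative; run_migrations is the function above):

import asyncio

import aiosqlite

async def startup(db_path: str = "app.db") -> None:
    conn = await aiosqlite.connect(db_path)
    conn.row_factory = aiosqlite.Row
    try:
        applied = await run_migrations(conn)
        print(f"{applied} migration(s) applied")
    finally:
        await conn.close()

asyncio.run(startup())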
@@ -877,10 +877,6 @@ class NoiseFloorHistoryStats(BaseModel):
    latest_timestamp: int | None = Field(
        default=None, description="Unix timestamp of the most recent sample"
    )
    supported: bool | None = Field(
        default=None,
        description="Whether the connected radio appears to support radio stats sampling",
    )
    samples: list[NoiseFloorSample] = Field(default_factory=list)
+24 -3
@@ -39,6 +39,7 @@ from app.repository import (
    ChannelRepository,
    ContactAdvertPathRepository,
    ContactRepository,
    MessageRepository,
    RawPacketRepository,
)
from app.services.contact_reconciliation import (
@@ -645,10 +646,30 @@ async def _process_direct_message(
    )

    if result is not None:
        # Successfully decrypted!
        # In the ambiguous direction case (both first bytes match), we
        # defaulted to incoming. Check if a matching outgoing message
        # already exists — if so, this is actually our own outgoing echo
        # and should be treated as such instead of creating a duplicate
        # incoming row.
        effective_outgoing = is_outgoing
        if not is_outgoing and dest_hash == src_hash:
            existing_outgoing = await MessageRepository.get_by_content(
                msg_type="PRIV",
                conversation_key=contact.public_key.lower(),
                text=result.message,
                sender_timestamp=result.timestamp,
                outgoing=True,
            )
            if existing_outgoing is not None:
                effective_outgoing = True
                logger.debug(
                    "Ambiguous DM resolved as outgoing echo (matched existing sent msg %d)",
                    existing_outgoing.id,
                )

        logger.debug(
            "Decrypted DM %s contact %s: %s",
            "to" if is_outgoing else "from",
            "to" if effective_outgoing else "from",
            contact.name or contact.public_key[:12],
            result.message[:50] if result.message else "",
        )
@@ -664,7 +685,7 @@ async def _process_direct_message(
            path_len=packet_info.path_length if packet_info else None,
            rssi=rssi,
            snr=snr,
            outgoing=is_outgoing,
            outgoing=effective_outgoing,
        )

    return {
+13 -16
@@ -9,6 +9,7 @@ The path_len wire byte is packed as [hash_mode:2][hop_count:6]:
Mode 3 (hash_size=4) is reserved and rejected.
"""

from collections.abc import Iterable
from dataclasses import dataclass

MAX_PATH_SIZE = 64
@@ -246,30 +247,26 @@ def parse_explicit_hop_route(route_text: str) -> tuple[str, int, int]:
    return "".join(hops), len(hops), hash_size - 1


async def bucket_path_hash_widths(cursor, *, batch_size: int = 500) -> dict[str, int | float]:
def bucket_path_hash_widths(rows: Iterable) -> dict[str, int | float]:
    """Bucket raw packet rows by hop hash width and return counts + percentages.

    *cursor* must be an already-executed async cursor whose rows have a ``data``
    *rows* must be an already-fetched list whose elements have a ``data``
    column containing raw packet bytes.
    """
    single_byte = 0
    double_byte = 0
    triple_byte = 0

    while True:
        rows = await cursor.fetchmany(batch_size)
        if not rows:
            break
        for row in rows:
            envelope = parse_packet_envelope(bytes(row["data"]))
            if envelope is None:
                continue
            if envelope.hash_size == 1:
                single_byte += 1
            elif envelope.hash_size == 2:
                double_byte += 1
            elif envelope.hash_size == 3:
                triple_byte += 1
    for row in rows:
        envelope = parse_packet_envelope(bytes(row["data"]))
        if envelope is None:
            continue
        if envelope.hash_size == 1:
            single_byte += 1
        elif envelope.hash_size == 2:
            double_byte += 1
        elif envelope.hash_size == 3:
            triple_byte += 1

    total = single_byte + double_byte + triple_byte
    if total == 0:
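Call-site shape after this refactor: fetch the rows first, then bucket synchronously. A sketch (cutoff and connection names illustrative):

cursor = await conn.execute(
    "SELECT data FROM raw_packets WHERE timestamp >= ?", (cutoff,)
)
rows = await cursor.fetchall()
stats = bucket_path_hash_widths(rows)  # counts and percentages per hop hash width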
+4 -1
@@ -118,7 +118,7 @@ async def test_serial_device(port: str, baudrate: int, timeout: float = 3.0) ->
                return True

        return False
    except asyncio.TimeoutError:
    except TimeoutError:
        logger.debug("Device %s timed out", port)
        return False
    except Exception as e:
@@ -192,6 +192,9 @@ class RadioManager:
        if not blocking:
            if self._operation_lock.locked():
                raise RadioOperationBusyError(f"Radio is busy (operation: {name})")
            # In single-threaded asyncio the lock cannot be acquired between the
            # check above and the await below (no other coroutine runs until we
            # yield). The await returns immediately for an uncontested lock.
            await self._operation_lock.acquire()
        else:
            await self._operation_lock.acquire()
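The single-threaded guarantee the new comment relies on, demonstrated in isolation: there is no await between the .locked() check and the .acquire() call, so no other coroutine can take the lock in between.

import asyncio

async def main() -> None:
    lock = asyncio.Lock()
    if not lock.locked():
        await lock.acquire()  # returns immediately: the lock was free
    assert lock.locked()
    lock.release()

asyncio.run(main())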
+60 -9
@@ -480,7 +480,7 @@ async def drain_pending_messages(mc: MeshCore) -> int:
            # Small delay between fetches
            await asyncio.sleep(0.1)

        except asyncio.TimeoutError:
        except TimeoutError:
            break
        except Exception as e:
            logger.warning("Error draining messages: %s", e, exc_info=True)
@@ -518,7 +518,7 @@ async def poll_for_messages(mc: MeshCore) -> int:
            # If we got a message, there might be more - drain them
            count += await drain_pending_messages(mc)

        except asyncio.TimeoutError:
        except TimeoutError:
            pass
        except Exception as e:
            logger.warning("Message poll exception: %s", e, exc_info=True)
@@ -1295,7 +1295,13 @@ async def stop_background_contact_reconciliation() -> None:


async def get_contacts_selected_for_radio_sync() -> list[Contact]:
    """Return the contacts that would be loaded onto the radio right now."""
    """Return the contacts that would be loaded onto the radio right now.

    Fill order:
    1. Favorites (up to full capacity)
    2. Most recently DM-active non-repeaters (sent or received, up to 80% refill target)
    3. Most recently advertised non-repeaters (up to 80% refill target)
    """
    app_settings = await AppSettingsRepository.get()
    max_contacts = _effective_radio_capacity(app_settings.max_radio_contacts)
    refill_target, _full_sync_trigger = _compute_radio_contact_limits(max_contacts)
@@ -1315,7 +1321,7 @@ async def get_contacts_selected_for_radio_sync() -> list[Contact]:
            break

    if len(selected_contacts) < refill_target:
        for contact in await ContactRepository.get_recently_contacted_non_repeaters(
        for contact in await ContactRepository.get_recently_dm_active_non_repeaters(
            limit=max_contacts
        ):
            key = contact.public_key.lower()
@@ -1354,8 +1360,8 @@ async def _sync_contacts_to_radio_inner(mc: MeshCore) -> dict:

    Fill order is:
    1. Favorite contacts
    2. Most recently interacted-with non-repeaters
    3. Most recently advert-heard non-repeaters without interaction history
    2. Most recently DM-active non-repeaters (sent or received)
    3. Most recently advert-heard non-repeaters

    Favorite contacts are always reloaded first, up to the configured capacity.
    Additional non-favorite fill stops at the refill target (80% of capacity).
@@ -1489,8 +1495,8 @@ async def sync_recent_contacts_to_radio(force: bool = False, mc: MeshCore | None
    """
    Load contacts to the radio for DM ACK support.

    Fill order is favorites, then recently contacted non-repeaters,
    then recently advert-heard non-repeaters. Favorites are always reloaded
    Fill order is favorites, then recently DM-active non-repeaters (sent or
    received), then recently advert-heard non-repeaters. Favorites are always reloaded
    up to the configured capacity; additional non-favorite fill stops at the
    80% refill target.
    Only runs at most once every CONTACT_SYNC_THROTTLE_SECONDS unless forced.
@@ -1584,10 +1590,40 @@ async def _collect_repeater_telemetry(mc: MeshCore, contact: Contact) -> bool:
        "full_events": status.get("full_evts", 0),
    }

    # Best-effort LPP sensor fetch — failure here does not fail the overall
    # collection; status telemetry is still recorded without sensor data.
    try:
        lpp_raw = await mc.commands.req_telemetry_sync(
            contact.public_key, timeout=10, min_timeout=5
        )
        if lpp_raw:
            lpp_sensors = []
            for entry in lpp_raw:
                value = entry.get("value", 0)
                # Skip multi-value sensors (GPS, accelerometer, etc.)
                if isinstance(value, dict):
                    continue
                lpp_sensors.append(
                    {
                        "channel": entry.get("channel", 0),
                        "type_name": str(entry.get("type", "unknown")),
                        "value": value,
                    }
                )
            if lpp_sensors:
                data["lpp_sensors"] = lpp_sensors
    except Exception as e:
        logger.debug(
            "Telemetry collect: LPP sensor fetch failed for %s (non-fatal): %s",
            contact.public_key[:12],
            e,
        )

    try:
        timestamp = int(time.time())
        await RepeaterTelemetryRepository.record(
            public_key=contact.public_key,
            timestamp=int(time.time()),
            timestamp=timestamp,
            data=data,
        )
        logger.info(
@@ -1595,6 +1631,21 @@ async def _collect_repeater_telemetry(mc: MeshCore, contact: Contact) -> bool:
            contact.name or contact.public_key[:12],
            contact.public_key[:12],
        )

        # Dispatch to fanout modules (e.g. HA MQTT discovery)
        from app.fanout.manager import fanout_manager

        asyncio.create_task(
            fanout_manager.broadcast_telemetry(
                {
                    "public_key": contact.public_key,
                    "name": contact.name or contact.public_key[:12],
                    "timestamp": timestamp,
                    **data,
                }
            )
        )

        return True
    except Exception as e:
        logger.warning(
@@ -294,6 +294,28 @@ class ContactRepository:
        rows = await cursor.fetchall()
        return [ContactRepository._row_to_contact(row) for row in rows]

    @staticmethod
    async def get_recently_dm_active_non_repeaters(limit: int = 200) -> list[Contact]:
        """Get non-repeater contacts with the most recent DM activity (sent or received)."""
        cursor = await db.conn.execute(
            """
            SELECT c.*
            FROM contacts c
            INNER JOIN (
                SELECT conversation_key, MAX(received_at) AS last_dm
                FROM messages
                WHERE type = 'PRIV'
                GROUP BY conversation_key
            ) m ON c.public_key = m.conversation_key
            WHERE c.type != 2 AND length(c.public_key) = 64
            ORDER BY m.last_dm DESC
            LIMIT ?
            """,
            (limit,),
        )
        rows = await cursor.fetchall()
        return [ContactRepository._row_to_contact(row) for row in rows]

    @staticmethod
    async def get_recently_advertised_non_repeaters(limit: int = 200) -> list[Contact]:
        """Get recently advert-heard non-repeater contacts."""

@@ -557,10 +557,11 @@ class MessageRepository:
    @staticmethod
    async def increment_ack_count(message_id: int) -> int:
        """Increment ack count and return the new value."""
        await db.conn.execute("UPDATE messages SET acked = acked + 1 WHERE id = ?", (message_id,))
        await db.conn.commit()
        cursor = await db.conn.execute("SELECT acked FROM messages WHERE id = ?", (message_id,))
        cursor = await db.conn.execute(
            "UPDATE messages SET acked = acked + 1 WHERE id = ? RETURNING acked", (message_id,)
        )
        row = await cursor.fetchone()
        await db.conn.commit()
        return row["acked"] if row else 1

    @staticmethod
@@ -867,7 +868,8 @@ class MessageRepository:
            """,
            (conversation_key, t_24h),
        )
        path_hash_width_24h = await bucket_path_hash_widths(cursor3)
        rows3 = await cursor3.fetchall()
        path_hash_width_24h = bucket_path_hash_widths(rows3)

        return {
            "message_counts": message_counts,

@@ -74,41 +74,52 @@ class RawPacketRepository:
    async def stream_all_undecrypted(
        batch_size: int = UNDECRYPTED_PACKET_BATCH_SIZE,
    ) -> AsyncIterator[tuple[int, bytes, int]]:
        """Yield all undecrypted packets as (id, data, timestamp) in bounded batches."""
        cursor = await db.conn.execute(
            "SELECT id, data, timestamp FROM raw_packets WHERE message_id IS NULL ORDER BY timestamp ASC"
        )
        try:
            while True:
                rows = await cursor.fetchmany(batch_size)
                if not rows:
                    break
                for row in rows:
                    yield (row["id"], bytes(row["data"]), row["timestamp"])
        finally:
        """Yield all undecrypted packets as (id, data, timestamp) in bounded batches.

        Uses keyset pagination so each batch is a fresh query with a fully
        consumed cursor — no open statement held across yield boundaries.
        """
        last_id = -1
        while True:
            cursor = await db.conn.execute(
                "SELECT id, data, timestamp FROM raw_packets "
                "WHERE message_id IS NULL AND id > ? ORDER BY id ASC LIMIT ?",
                (last_id, batch_size),
            )
            rows = await cursor.fetchall()
            await cursor.close()
            if not rows:
                break
            for row in rows:
                last_id = row["id"]
                yield (row["id"], bytes(row["data"]), row["timestamp"])

    @staticmethod
    async def stream_undecrypted_text_messages(
        batch_size: int = UNDECRYPTED_PACKET_BATCH_SIZE,
    ) -> AsyncIterator[tuple[int, bytes, int]]:
        """Yield undecrypted TEXT_MESSAGE packets in bounded-size batches."""
        cursor = await db.conn.execute(
            "SELECT id, data, timestamp FROM raw_packets WHERE message_id IS NULL ORDER BY timestamp ASC"
        )
        try:
            while True:
                rows = await cursor.fetchmany(batch_size)
                if not rows:
                    break
                for row in rows:
                    data = bytes(row["data"])
                    payload_type = get_packet_payload_type(data)
                    if payload_type == PayloadType.TEXT_MESSAGE:
                        yield (row["id"], data, row["timestamp"])
        finally:
        """Yield undecrypted TEXT_MESSAGE packets in bounded-size batches.

        Uses keyset pagination so each batch is a fresh query with a fully
        consumed cursor — no open statement held across yield boundaries.
        """
        last_id = -1
        while True:
            cursor = await db.conn.execute(
                "SELECT id, data, timestamp FROM raw_packets "
                "WHERE message_id IS NULL AND id > ? ORDER BY id ASC LIMIT ?",
                (last_id, batch_size),
            )
            rows = await cursor.fetchall()
            await cursor.close()
            if not rows:
                break
            for row in rows:
                last_id = row["id"]
                data = bytes(row["data"])
                payload_type = get_packet_payload_type(data)
                if payload_type == PayloadType.TEXT_MESSAGE:
                    yield (row["id"], data, row["timestamp"])

    @staticmethod
    async def count_undecrypted_text_messages(

@@ -73,3 +73,24 @@ class RepeaterTelemetryRepository:
            }
            for row in rows
        ]

    @staticmethod
    async def get_latest(public_key: str) -> dict | None:
        """Return the most recent telemetry row for a repeater, or None."""
        cursor = await db.conn.execute(
            """
            SELECT timestamp, data
            FROM repeater_telemetry_history
            WHERE public_key = ?
            ORDER BY timestamp DESC
            LIMIT 1
            """,
            (public_key,),
        )
        row = await cursor.fetchone()
        if row is None:
            return None
        return {
            "timestamp": row["timestamp"],
            "data": json.loads(row["data"]),
        }

@@ -13,7 +13,6 @@ SECONDS_1H = 3600
SECONDS_24H = 86400
SECONDS_72H = 259200
SECONDS_7D = 604800
RAW_PACKET_STATS_BATCH_SIZE = 500


class AppSettingsRepository:
@@ -302,7 +301,8 @@ class StatisticsRepository:
            "SELECT data FROM raw_packets WHERE timestamp >= ?",
            (now - SECONDS_24H,),
        )
        return await bucket_path_hash_widths(cursor, batch_size=RAW_PACKET_STATS_BATCH_SIZE)
        rows = await cursor.fetchall()
        return bucket_path_hash_widths(rows)

    @staticmethod
    async def get_all() -> dict:

@@ -4,7 +4,7 @@ import os
import platform
import struct
import sys
from datetime import datetime, timezone
from datetime import UTC, datetime
from typing import Any, Literal

from fastapi import APIRouter
@@ -390,7 +390,7 @@ async def debug_support_snapshot() -> DebugSnapshotResponse:
        is_reconnecting=is_reconnecting,
    )
    return DebugSnapshotResponse(
        captured_at=datetime.now(timezone.utc).isoformat(),
        captured_at=datetime.now(UTC).isoformat(),
        system=_build_system_info(),
        application=_build_application_info(),
        health=_build_debug_health_summary(health_data, radio_state=radio_state),

+26 -2
@@ -16,7 +16,16 @@ from app.repository.fanout import FanoutConfigRepository
|
||||
logger = logging.getLogger(__name__)
|
||||
router = APIRouter(prefix="/fanout", tags=["fanout"])
|
||||
|
||||
_VALID_TYPES = {"mqtt_private", "mqtt_community", "bot", "webhook", "apprise", "sqs", "map_upload"}
|
||||
_VALID_TYPES = {
|
||||
"mqtt_private",
|
||||
"mqtt_community",
|
||||
"mqtt_ha",
|
||||
"bot",
|
||||
"webhook",
|
||||
"apprise",
|
||||
"sqs",
|
||||
"map_upload",
|
||||
}
|
||||
|
||||
_IATA_RE = re.compile(r"^[A-Z]{3}$")
|
||||
_DEFAULT_COMMUNITY_MQTT_TOPIC_TEMPLATE = "meshcore/{IATA}/{PUBLIC_KEY}/packets"
|
||||
@@ -96,6 +105,8 @@ def _validate_and_normalize_config(config_type: str, config: dict) -> dict:
|
||||
_validate_sqs_config(normalized)
|
||||
elif config_type == "map_upload":
|
||||
_validate_map_upload_config(normalized)
|
||||
elif config_type == "mqtt_ha":
|
||||
_validate_mqtt_ha_config(normalized)
|
||||
|
||||
return normalized
|
||||
|
||||
@@ -318,6 +329,19 @@ def _validate_map_upload_config(config: dict) -> None:
|
||||
config["geofence_radius_km"] = radius
|
||||
|
||||
|
||||
def _validate_mqtt_ha_config(config: dict) -> None:
|
||||
"""Validate mqtt_ha config blob."""
|
||||
if not config.get("broker_host"):
|
||||
raise HTTPException(status_code=400, detail="broker_host is required for mqtt_ha")
|
||||
port = config.get("broker_port", 1883)
|
||||
if not isinstance(port, int) or port < 1 or port > 65535:
|
||||
raise HTTPException(status_code=400, detail="broker_port must be between 1 and 65535")
|
||||
for field in ("tracked_contacts", "tracked_repeaters"):
|
||||
value = config.get(field)
|
||||
if value is not None and not isinstance(value, list):
|
||||
raise HTTPException(status_code=400, detail=f"{field} must be a list of public keys")
|
||||
|
||||
|
||||
def _enforce_scope(config_type: str, scope: dict) -> dict:
|
||||
"""Enforce type-specific scope constraints. Returns normalized scope."""
|
||||
if config_type == "mqtt_community":
|
||||
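For reference, a config blob along these lines should satisfy _validate_mqtt_ha_config as written above; the broker hostname, port, and public key are made-up placeholders:

config = {
    "broker_host": "homeassistant.local",   # required; placeholder hostname
    "broker_port": 1883,                    # optional; must be 1-65535 when set
    "tracked_repeaters": [                  # optional; must be a list when present
        "f00dfacef00dfacef00dfacef00dface",  # made-up public key
    ],
}

# The same checks, stripped of FastAPI, as a quick self-test:
assert config.get("broker_host")
port = config.get("broker_port", 1883)
assert isinstance(port, int) and 1 <= port <= 65535
for field in ("tracked_contacts", "tracked_repeaters"):
    value = config.get(field)
    assert value is None or isinstance(value, list)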
@@ -326,7 +350,7 @@ def _enforce_scope(config_type: str, scope: dict) -> dict:
        return {"messages": "none", "raw_packets": "all"}
    if config_type == "bot":
        return {"messages": "all", "raw_packets": "none"}
    if config_type in ("webhook", "apprise"):
    if config_type in ("webhook", "apprise", "mqtt_ha"):
        messages = scope.get("messages", "all")
        if messages not in ("all", "none") and not isinstance(messages, dict):
            raise HTTPException(

@@ -7,6 +7,7 @@ from pydantic import BaseModel, Field
from app.config import settings
from app.repository import RawPacketRepository
from app.services.radio_runtime import radio_runtime as radio_manager
from app.services.radio_stats import get_latest_radio_stats
from app.version_info import get_app_build_info

router = APIRouter(tags=["health"])

@@ -32,6 +33,28 @@ class FanoutStatusResponse(BaseModel):
    last_error: str | None = None


class RadioStatsSnapshot(BaseModel):
    """Latest cached stats from the local radio's periodic 60s poll."""

    timestamp: int | None = None
    # Core stats
    battery_mv: int | None = None
    uptime_secs: int | None = None
    # Radio stats
    noise_floor: int | None = None
    last_rssi: int | None = None
    last_snr: float | None = None
    tx_air_secs: int | None = None
    rx_air_secs: int | None = None
    # Packet stats
    packets_recv: int | None = None
    packets_sent: int | None = None
    flood_tx: int | None = None
    direct_tx: int | None = None
    flood_rx: int | None = None
    direct_rx: int | None = None


class HealthResponse(BaseModel):
    status: str
    radio_connected: bool

@@ -40,6 +63,7 @@ class HealthResponse(BaseModel):
    connection_info: str | None
    app_info: AppInfoResponse | None = None
    radio_device_info: RadioDeviceInfoResponse | None = None
    radio_stats: RadioStatsSnapshot | None = None
    database_size_mb: float
    oldest_undecrypted_timestamp: int | None
    fanout_statuses: dict[str, FanoutStatusResponse] = Field(default_factory=dict)
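Since every field on the snapshot model is optional with a None default, a partially populated stats dict validates cleanly. A trimmed, self-contained sketch (field subset chosen for brevity; values are made up, and model_dump is pydantic v2):

from pydantic import BaseModel

class RadioStatsSnapshot(BaseModel):
    timestamp: int | None = None
    battery_mv: int | None = None
    noise_floor: int | None = None
    last_snr: float | None = None

# Only the keys the sampler actually produced need to be present.
snap = RadioStatsSnapshot(timestamp=1717000000, battery_mv=4012)
assert snap.noise_floor is None
print(snap.model_dump())  # absent fields serialize as None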
@@ -122,6 +146,28 @@ async def build_health_data(radio_connected: bool, connection_info: str | None)
        "max_channels": getattr(radio_manager, "max_channels", None),
    }

    # Local radio stats from the 60s background sampler
    raw_stats = get_latest_radio_stats()
    radio_stats = None
    if raw_stats:
        packets = raw_stats.get("packets") or {}
        radio_stats = {
            "timestamp": raw_stats.get("timestamp"),
            "battery_mv": raw_stats.get("battery_mv"),
            "uptime_secs": raw_stats.get("uptime_secs"),
            "noise_floor": raw_stats.get("noise_floor"),
            "last_rssi": raw_stats.get("last_rssi"),
            "last_snr": raw_stats.get("last_snr"),
            "tx_air_secs": raw_stats.get("tx_air_secs"),
            "rx_air_secs": raw_stats.get("rx_air_secs"),
            "packets_recv": packets.get("recv"),
            "packets_sent": packets.get("sent"),
            "flood_tx": packets.get("flood_tx"),
            "direct_tx": packets.get("direct_tx"),
            "flood_rx": packets.get("flood_rx"),
            "direct_rx": packets.get("direct_rx"),
        }

    return {
        "status": "ok" if radio_connected and not radio_initializing else "degraded",
        "radio_connected": radio_connected,

@@ -133,6 +179,7 @@ async def build_health_data(radio_connected: bool, connection_info: str | None)
            "commit_hash": app_build_info.commit_hash,
        },
        "radio_device_info": radio_device_info,
        "radio_stats": radio_stats,
        "database_size_mb": db_size_mb,
        "oldest_undecrypted_timestamp": oldest_ts,
        "fanout_statuses": fanout_statuses,

@@ -473,7 +473,7 @@ async def discover_mesh(request: RadioDiscoveryRequest) -> RadioDiscoveryRespons
            break
        try:
            event = await asyncio.wait_for(events.get(), timeout=remaining)
        except asyncio.TimeoutError:
        except TimeoutError:
            break

        merged = _merge_discovery_result(
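The asyncio.TimeoutError to TimeoutError swap here (and in the hunks below) leans on the Python 3.11 change that made asyncio.TimeoutError an alias of the builtin, so nothing slips through the narrower-looking except clause:

import asyncio

# On Python 3.11+ these are the same class, so catching the builtin
# TimeoutError also catches asyncio.wait_for() timeouts.
assert asyncio.TimeoutError is TimeoutError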
@@ -536,7 +536,7 @@ async def trace_path(request: RadioTraceRequest) -> RadioTraceResponse:
    timeout_seconds = _trace_timeout_seconds(send_result)
    try:
        event = await asyncio.wait_for(response_task, timeout=timeout_seconds)
    except asyncio.TimeoutError as exc:
    except TimeoutError as exc:
        raise HTTPException(status_code=504, detail="No trace response heard") from exc
    finally:
        if not response_task.done():

@@ -1,3 +1,4 @@
import asyncio
import logging
import time

@@ -93,6 +94,7 @@ async def repeater_status(public_key: str) -> RepeaterStatusResponse:
    contact = await _resolve_contact_or_404(public_key)
    _require_repeater(contact)

    lpp_raw = None
    async with radio_manager.radio_operation(
        "repeater_status", pause_polling=True, suspend_auto_fetch=True
    ) as mc:

@@ -101,6 +103,15 @@ async def repeater_status(public_key: str) -> RepeaterStatusResponse:

        status = await mc.commands.req_status_sync(contact.public_key, timeout=10, min_timeout=5)

        # Best-effort LPP sensor fetch while we still hold the lock
        if status is not None:
            try:
                lpp_raw = await mc.commands.req_telemetry_sync(
                    contact.public_key, timeout=10, min_timeout=5
                )
            except Exception as e:
                logger.debug("LPP sensor fetch failed for %s (non-fatal): %s", public_key[:12], e)

    if status is None:
        raise HTTPException(status_code=504, detail="No status response from repeater")

@@ -127,12 +138,44 @@ async def repeater_status(public_key: str) -> RepeaterStatusResponse:
    # Record to telemetry history as a JSON blob (best-effort)
    now = int(time.time())
    status_dict = response.model_dump(exclude={"telemetry_history"})

    # Attach scalar LPP sensors to the stored snapshot (same logic as auto-collect)
    if lpp_raw:
        lpp_sensors = []
        for entry in lpp_raw:
            value = entry.get("value", 0)
            if isinstance(value, dict):
                continue
            lpp_sensors.append(
                {
                    "channel": entry.get("channel", 0),
                    "type_name": str(entry.get("type", "unknown")),
                    "value": value,
                }
            )
        if lpp_sensors:
            status_dict["lpp_sensors"] = lpp_sensors

    try:
        await RepeaterTelemetryRepository.record(
            public_key=contact.public_key,
            timestamp=now,
            data=status_dict,
        )

        # Dispatch to fanout modules (e.g. HA MQTT discovery)
        from app.fanout.manager import fanout_manager

        asyncio.create_task(
            fanout_manager.broadcast_telemetry(
                {
                    "public_key": contact.public_key,
                    "name": contact.name or contact.public_key[:12],
                    "timestamp": now,
                    **status_dict,
                }
            )
        )
    except Exception as e:
        logger.warning("Failed to record telemetry history: %s", e)
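The scalar-only filter above drops composite LPP values (for example a GPS fix decoded as a dict) and keeps plain numbers. A small self-test of the same logic with made-up entries:

lpp_raw = [
    {"channel": 1, "type": "temperature", "value": 21.5},
    {"channel": 2, "type": "gps", "value": {"lat": 48.1, "lon": 11.6}},
]

lpp_sensors = []
for entry in lpp_raw:
    value = entry.get("value", 0)
    if isinstance(value, dict):
        continue  # composite sensor: skipped, exactly as above
    lpp_sensors.append(
        {
            "channel": entry.get("channel", 0),
            "type_name": str(entry.get("type", "unknown")),
            "value": value,
        }
    )

assert lpp_sensors == [{"channel": 1, "type_name": "temperature", "value": 21.5}]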
@@ -94,7 +94,7 @@ async def fetch_contact_cli_response(
    while _monotonic() < deadline:
        try:
            result = await mc.commands.get_msg(timeout=2.0)
        except asyncio.TimeoutError:
        except TimeoutError:
            continue
        except Exception as exc:
            logger.debug("get_msg() exception: %s", exc)

@@ -196,7 +196,7 @@ async def prepare_authenticated_contact_connection(
            login_future,
            timeout=response_timeout,
        )
    except asyncio.TimeoutError:
    except TimeoutError:
        logger.warning(
            "No login response from %s %s within %.1fs",
            contact_label,

@@ -2,7 +2,7 @@ from fastapi import APIRouter

from app.models import StatisticsResponse
from app.repository import StatisticsRepository
from app.services.radio_noise_floor import get_noise_floor_history
from app.services.radio_stats import get_noise_floor_history

router = APIRouter(prefix="/statistics", tags=["statistics"])

@@ -10,5 +10,5 @@ router = APIRouter(prefix="/statistics", tags=["statistics"])
@router.get("", response_model=StatisticsResponse)
async def get_statistics() -> StatisticsResponse:
    data = await StatisticsRepository.get_all()
    data["noise_floor_24h"] = await get_noise_floor_history()
    data["noise_floor_24h"] = get_noise_floor_history()
    return StatisticsResponse(**data)
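get_noise_floor_history() now comes from radio_stats and is a plain synchronous lookup, so the await is dropped. Assuming the new function preserves the snapshot shape of the removed radio_noise_floor module (the deleted file appears at the end of this diff), the endpoint's noise_floor_24h payload would look roughly like this, with made-up values:

noise_floor_24h = {
    "sample_interval_seconds": 300,   # one sample per 5 minutes
    "coverage_seconds": 7200,         # made-up: 2h of history so far
    "latest_noise_floor_dbm": -108,
    "latest_timestamp": 1717000000,   # made-up epoch seconds
    "supported": True,                # None until the first successful poll
    "samples": [
        {"timestamp": 1717000000, "noise_floor_dbm": -108},
    ],
}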
@@ -264,38 +264,43 @@ async def send_channel_message_with_effective_scope(
            return send_result
        finally:
            if override_scope and override_scope != baseline_scope:
                try:
                    restore_result = await mc.commands.set_flood_scope(
                        baseline_scope if baseline_scope else ""
                    )
                    if restore_result is not None and restore_result.type == EventType.ERROR:
                        logger.error(
                            "Failed to restore baseline flood_scope after sending to %s: %s",
                restored = False
                for attempt in range(3):
                    try:
                        restore_result = await mc.commands.set_flood_scope(
                            baseline_scope if baseline_scope else ""
                        )
                        if restore_result is not None and restore_result.type == EventType.ERROR:
                            logger.warning(
                                "Attempt %d/3: failed to restore flood_scope after sending to %s: %s",
                                attempt + 1,
                                channel.name,
                                restore_result.payload,
                            )
                        else:
                            logger.debug(
                                "Restored baseline flood_scope after channel send: %r",
                                baseline_scope or "(disabled)",
                            )
                            restored = True
                            break
                    except Exception:
                        logger.exception(
                            "Attempt %d/3: exception restoring flood_scope after sending to %s",
                            attempt + 1,
                            channel.name,
                            restore_result.payload,
                        )
                        error_broadcast_fn(
                            "Regional override restore failed",
                            (
                                f"Sent to {channel.name}, but restoring flood scope failed. "
                                "The radio may still be region-scoped. Consider rebooting the radio."
                            ),
                        )
                    else:
                        logger.debug(
                            "Restored baseline flood_scope after channel send: %r",
                            baseline_scope or "(disabled)",
                        )
                except Exception:
                    logger.exception(
                        "Failed to restore baseline flood_scope after sending to %s",
                if not restored:
                    logger.error(
                        "All 3 attempts to restore flood_scope failed for %s",
                        channel.name,
                    )
                    error_broadcast_fn(
                        "Regional override restore failed",
                        (
                            f"Sent to {channel.name}, but restoring flood scope failed. "
                            "The radio may still be region-scoped. Consider rebooting the radio."
                            f"Sent to {channel.name}, but restoring flood scope failed "
                            f"after 3 attempts. The radio may still be region-scoped. "
                            f"Consider rebooting the radio."
                        ),
                    )
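The restore path now retries up to three times, logging a warning per attempt and escalating to a single error plus a user-facing broadcast only when every attempt fails. A stripped-down sketch of that escalation shape, with stand-in names for the radio call and the broadcast hook:

import logging

logger = logging.getLogger(__name__)

def restore_with_retries(restore, notify_user, attempts: int = 3) -> bool:
    restored = False
    for attempt in range(attempts):
        try:
            restore()
            restored = True
            break
        except Exception:
            # Per-attempt noise stays at warning level.
            logger.warning("Attempt %d/%d: restore failed", attempt + 1, attempts)
    if not restored:
        # One error plus one user-facing notification on exhaustion.
        logger.error("All %d restore attempts failed", attempts)
        notify_user("Restore failed; device may be left in the overridden state")
    return restored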
@@ -421,7 +426,8 @@ async def _retry_direct_message_until_acked(
    message_repository,
) -> None:
    next_wait_timeout_ms = wait_timeout_ms
    for attempt in range(1, DM_SEND_MAX_ATTEMPTS):
    attempt = 1
    while attempt < DM_SEND_MAX_ATTEMPTS:
        await sleep_fn((next_wait_timeout_ms / 1000) * DM_RETRY_WAIT_MARGIN)
        if await _is_message_acked(message_id=message_id, message_repository=message_repository):
            return

@@ -463,6 +469,14 @@ async def _retry_direct_message_until_acked(
                timestamp=sender_timestamp,
                attempt=attempt,
            )
        except RadioOperationBusyError:
            logger.debug(
                "Radio busy during DM retry attempt %d/%d for %s, will retry without consuming attempt",
                attempt + 1,
                DM_SEND_MAX_ATTEMPTS,
                contact.public_key[:12],
            )
            continue
        except Exception:
            logger.exception(
                "Background DM retry attempt %d/%d failed for %s",

@@ -470,6 +484,7 @@ async def _retry_direct_message_until_acked(
                DM_SEND_MAX_ATTEMPTS,
                contact.public_key[:12],
            )
            attempt += 1
            continue

        if result is None:

@@ -479,6 +494,7 @@ async def _retry_direct_message_until_acked(
                DM_SEND_MAX_ATTEMPTS,
                contact.public_key[:12],
            )
            attempt += 1
            continue

        if result.type == EventType.ERROR:

@@ -489,6 +505,7 @@ async def _retry_direct_message_until_acked(
                contact.public_key[:12],
                result.payload,
            )
            attempt += 1
            continue

        if await _is_message_acked(message_id=message_id, message_repository=message_repository):

@@ -516,6 +533,8 @@ async def _retry_direct_message_until_acked(
        if ack_count > 0:
            return

        attempt += 1


async def send_direct_message_to_contact(
    *,
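Switching from "for attempt in range(...)" to a manually advanced while counter is what lets the busy branch continue without burning an attempt. A self-contained sketch of the policy, with stand-in names for the radio calls (the real code also throttles each pass via the awaited sleep at the top of the loop, which is what keeps a persistently busy radio from spinning):

import asyncio

MAX_ATTEMPTS = 4  # stand-in for DM_SEND_MAX_ATTEMPTS

class BusyError(Exception):
    """Stand-in for RadioOperationBusyError."""

async def retry_until_acked(send, is_acked) -> bool:
    attempt = 1
    while attempt < MAX_ATTEMPTS:
        await asyncio.sleep(0.05)  # placeholder for the ack-wait margin
        if await is_acked():
            return True
        try:
            await send(attempt)
        except BusyError:
            continue        # transient: retry without consuming an attempt
        except Exception:
            attempt += 1    # hard failure: this attempt is spent
            continue
        attempt += 1
    return await is_acked()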
@@ -1,6 +1,6 @@
import asyncio
import logging
from datetime import datetime, timezone
from datetime import UTC, datetime

logger = logging.getLogger(__name__)

@@ -193,7 +193,7 @@ async def run_post_connect_setup(radio_manager) -> None:
        logger.info(
            "Radio clock at connect: epoch=%d utc=%s",
            radio_time,
            datetime.fromtimestamp(radio_time, timezone.utc).strftime(
            datetime.fromtimestamp(radio_time, UTC).strftime(
                "%Y-%m-%d %H:%M:%S UTC"
            ),
        )

@@ -274,7 +274,7 @@ async def prepare_connected_radio(radio_manager, *, broadcast_on_success: bool =
        try:
            await radio_manager.post_connect_setup()
            break
        except asyncio.TimeoutError as exc:
        except TimeoutError as exc:
            if attempt < POST_CONNECT_SETUP_MAX_ATTEMPTS:
                logger.warning(
                    "Post-connect setup timed out after %ds on attempt %d/%d; retrying once",

@@ -1,119 +0,0 @@
"""In-memory local-radio noise floor history sampling."""

import asyncio
import logging
import time
from collections import deque

from meshcore import EventType

from app.radio import RadioDisconnectedError, RadioOperationBusyError
from app.services.radio_runtime import radio_runtime as radio_manager

logger = logging.getLogger(__name__)

NOISE_FLOOR_SAMPLE_INTERVAL_SECONDS = 300
NOISE_FLOOR_WINDOW_SECONDS = 24 * 60 * 60
MAX_NOISE_FLOOR_SAMPLES = 300

_noise_floor_task: asyncio.Task | None = None
_noise_floor_samples: deque[tuple[int, int]] = deque(maxlen=MAX_NOISE_FLOOR_SAMPLES)
_noise_floor_supported: bool | None = None
_samples_lock = asyncio.Lock()


async def _append_sample(timestamp: int, noise_floor_dbm: int) -> None:
    async with _samples_lock:
        _noise_floor_samples.append((timestamp, noise_floor_dbm))


async def sample_noise_floor_once(*, blocking: bool = False) -> None:
    """Fetch the current radio noise floor once and record it when available."""
    global _noise_floor_supported

    if not radio_manager.is_connected:
        return

    try:
        async with radio_manager.radio_operation("noise_floor_sample", blocking=blocking) as mc:
            event = await mc.commands.get_stats_radio()
    except (RadioDisconnectedError, RadioOperationBusyError):
        return
    except Exception as exc:
        logger.debug("Noise floor sampling failed: %s", exc)
        return

    if event.type == EventType.ERROR:
        _noise_floor_supported = False
        return

    if event.type != EventType.STATS_RADIO:
        return

    noise_floor = event.payload.get("noise_floor")
    if not isinstance(noise_floor, int):
        return

    _noise_floor_supported = True
    await _append_sample(int(time.time()), noise_floor)


async def _noise_floor_sampling_loop() -> None:
    while True:
        try:
            await sample_noise_floor_once()
        except asyncio.CancelledError:
            raise
        except Exception:
            logger.exception("Noise floor sampling loop crashed during sample")

        try:
            await asyncio.sleep(NOISE_FLOOR_SAMPLE_INTERVAL_SECONDS)
        except asyncio.CancelledError:
            raise


async def start_noise_floor_sampling() -> None:
    global _noise_floor_task
    if _noise_floor_task is not None and not _noise_floor_task.done():
        return
    _noise_floor_task = asyncio.create_task(_noise_floor_sampling_loop())


async def stop_noise_floor_sampling() -> None:
    global _noise_floor_task
    if _noise_floor_task is None:
        return
    if not _noise_floor_task.done():
        _noise_floor_task.cancel()
        try:
            await _noise_floor_task
        except asyncio.CancelledError:
            pass
    _noise_floor_task = None


async def get_noise_floor_history() -> dict:
    """Return the current 24-hour in-memory noise floor history snapshot."""
    now = int(time.time())
    cutoff = now - NOISE_FLOOR_WINDOW_SECONDS

    async with _samples_lock:
        samples = [
            {"timestamp": timestamp, "noise_floor_dbm": noise_floor_dbm}
            for timestamp, noise_floor_dbm in _noise_floor_samples
            if timestamp >= cutoff
        ]

    latest = samples[-1] if samples else None
    oldest_timestamp = samples[0]["timestamp"] if samples else None
    coverage_seconds = 0 if oldest_timestamp is None else max(0, now - oldest_timestamp)

    return {
        "sample_interval_seconds": NOISE_FLOOR_SAMPLE_INTERVAL_SECONDS,
        "coverage_seconds": coverage_seconds,
        "latest_noise_floor_dbm": latest["noise_floor_dbm"] if latest else None,
        "latest_timestamp": latest["timestamp"] if latest else None,
        "supported": _noise_floor_supported,
        "samples": samples,
    }