Compare commits

..

1 Commits

Author SHA1 Message Date
l5y bb7a09cb6f web: decryption confidence scoring 2026-01-11 08:38:24 +01:00
45 changed files with 448 additions and 3150 deletions
+5 -26
View File
@@ -252,36 +252,15 @@ services.potato-mesh = {
## Docker
Docker images are published on GitHub Container Registry for each release.
Image names and tags follow the workflow format:
`${IMAGE_PREFIX}-${service}-${architecture}:${tag}` (see `.github/workflows/docker.yml`).
Docker images are published on Github for each release:
```bash
docker pull ghcr.io/l5yth/potato-mesh-web-linux-amd64:latest
docker pull ghcr.io/l5yth/potato-mesh-web-linux-arm64:latest
docker pull ghcr.io/l5yth/potato-mesh-web-linux-armv7:latest
docker pull ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:latest
docker pull ghcr.io/l5yth/potato-mesh-ingestor-linux-arm64:latest
docker pull ghcr.io/l5yth/potato-mesh-ingestor-linux-armv7:latest
docker pull ghcr.io/l5yth/potato-mesh-matrix-bridge-linux-amd64:latest
docker pull ghcr.io/l5yth/potato-mesh-matrix-bridge-linux-arm64:latest
docker pull ghcr.io/l5yth/potato-mesh-matrix-bridge-linux-armv7:latest
# version-pinned examples
docker pull ghcr.io/l5yth/potato-mesh-web-linux-amd64:v0.5.5
docker pull ghcr.io/l5yth/potato-mesh-ingestor-linux-amd64:v0.5.5
docker pull ghcr.io/l5yth/potato-mesh-matrix-bridge-linux-amd64:v0.5.5
docker pull ghcr.io/l5yth/potato-mesh/web:latest # newest release
docker pull ghcr.io/l5yth/potato-mesh/web:v0.5.5 # pinned historical release
docker pull ghcr.io/l5yth/potato-mesh/ingestor:latest
docker pull ghcr.io/l5yth/potato-mesh/matrix-bridge:latest
```
Note: `latest` is only published for non-prerelease versions. Pre-release tags
such as `-rc`, `-beta`, `-alpha`, or `-dev` are version-tagged only.
When using Compose, set `POTATOMESH_IMAGE_ARCH` in `docker-compose.yml` (or via
environment) so service images resolve to the correct architecture variant and
you avoid manual tag mistakes.
Feel free to run the [configure.sh](./configure.sh) script to set up your
environment. See the [Docker guide](DOCKER.md) for more details and custom
deployment instructions.
+2 -2
View File
@@ -15,11 +15,11 @@
<key>CFBundlePackageType</key>
<string>FMWK</string>
<key>CFBundleShortVersionString</key>
<string>0.5.11</string>
<string>0.5.10</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>0.5.11</string>
<string>0.5.10</string>
<key>MinimumOSVersion</key>
<string>14.0</string>
</dict>
+1 -1
View File
@@ -1,7 +1,7 @@
name: potato_mesh_reader
description: Meshtastic Reader — read-only view for PotatoMesh messages.
publish_to: "none"
version: 0.5.11
version: 0.5.10
environment:
sdk: ">=3.4.0 <4.0.0"
+1 -1
View File
@@ -18,7 +18,7 @@ The ``data.mesh`` module exposes helpers for reading Meshtastic node and
message information before forwarding it to the accompanying web application.
"""
VERSION = "0.5.11"
VERSION = "0.5.10"
"""Semantic version identifier shared with the dashboard and front-end."""
__version__ = VERSION
+1
View File
@@ -29,6 +29,7 @@ if SCRIPT_DIR in sys.path:
from google.protobuf.json_format import MessageToDict
from meshtastic.protobuf import mesh_pb2, telemetry_pb2
PORTNUM_MAP: Dict[int, Tuple[str, Any]] = {
3: ("POSITION_APP", mesh_pb2.Position),
4: ("NODEINFO_APP", mesh_pb2.NodeInfo),
-5
View File
@@ -424,7 +424,6 @@ def store_position_packet(packet: Mapping, decoded: Mapping) -> None:
"hop_limit": hop_limit,
"bitfield": bitfield,
"payload_b64": payload_b64,
"ingestor": host_node_id(),
}
if raw_payload:
position_payload["raw"] = raw_payload
@@ -569,7 +568,6 @@ def store_traceroute_packet(packet: Mapping, decoded: Mapping) -> None:
"rssi": rssi,
"snr": snr,
"elapsed_ms": elapsed_ms,
"ingestor": host_node_id(),
}
_queue_post_json(
@@ -937,7 +935,6 @@ def store_telemetry_packet(packet: Mapping, decoded: Mapping) -> None:
"rssi": rssi,
"hop_limit": hop_limit,
"payload_b64": payload_b64,
"ingestor": host_node_id(),
}
if battery_level is not None:
@@ -1266,7 +1263,6 @@ def store_neighborinfo_packet(packet: Mapping, decoded: Mapping) -> None:
"neighbors": neighbor_entries,
"rx_time": rx_time,
"rx_iso": _iso(rx_time),
"ingestor": host_node_id(),
}
if node_broadcast_interval is not None:
@@ -1524,7 +1520,6 @@ def store_packet_dict(packet: Mapping) -> None:
"hop_limit": int(hop) if hop is not None else None,
"reply_id": reply_id,
"emoji": emoji,
"ingestor": host_node_id(),
}
if not encrypted_flag and channel_name_value:
+2 -1
View File
@@ -30,7 +30,8 @@ CREATE TABLE IF NOT EXISTS messages (
channel_name TEXT,
reply_id INTEGER,
emoji TEXT,
ingestor TEXT
decrypted INTEGER NOT NULL DEFAULT 0,
decryption_confidence REAL
);
CREATE INDEX IF NOT EXISTS idx_messages_rx_time ON messages(rx_time);
-1
View File
@@ -17,7 +17,6 @@ CREATE TABLE IF NOT EXISTS neighbors (
neighbor_id TEXT NOT NULL,
snr REAL,
rx_time INTEGER NOT NULL,
ingestor TEXT,
PRIMARY KEY (node_id, neighbor_id),
FOREIGN KEY (node_id) REFERENCES nodes(node_id) ON DELETE CASCADE,
FOREIGN KEY (neighbor_id) REFERENCES nodes(node_id) ON DELETE CASCADE
+1 -2
View File
@@ -33,8 +33,7 @@ CREATE TABLE IF NOT EXISTS positions (
rssi INTEGER,
hop_limit INTEGER,
bitfield INTEGER,
payload_b64 TEXT,
ingestor TEXT
payload_b64 TEXT
);
CREATE INDEX IF NOT EXISTS idx_positions_rx_time ON positions(rx_time);
+1 -2
View File
@@ -53,8 +53,7 @@ CREATE TABLE IF NOT EXISTS telemetry (
rainfall_1h REAL,
rainfall_24h REAL,
soil_moisture INTEGER,
soil_temperature REAL,
ingestor TEXT
soil_temperature REAL
);
CREATE INDEX IF NOT EXISTS idx_telemetry_rx_time ON telemetry(rx_time);
+1 -2
View File
@@ -21,8 +21,7 @@ CREATE TABLE IF NOT EXISTS traces (
rx_iso TEXT NOT NULL,
rssi INTEGER,
snr REAL,
elapsed_ms INTEGER,
ingestor TEXT
elapsed_ms INTEGER
);
CREATE TABLE IF NOT EXISTS trace_hops (
+1 -8
View File
@@ -81,12 +81,7 @@ x-matrix-bridge-base: &matrix-bridge-base
image: ghcr.io/l5yth/potato-mesh-matrix-bridge-${POTATOMESH_IMAGE_ARCH:-linux-amd64}:${POTATOMESH_IMAGE_TAG:-latest}
volumes:
- potatomesh_matrix_bridge_state:/app
- type: bind
source: ./matrix/Config.toml
target: /app/Config.toml
read_only: true
bind:
create_host_path: false
- ./matrix/Config.toml:/app/Config.toml:ro
restart: unless-stopped
deploy:
resources:
@@ -133,8 +128,6 @@ services:
matrix-bridge:
<<: *matrix-bridge-base
network_mode: host
profiles:
- matrix
depends_on:
- web
extra_hosts:
+3 -3
View File
@@ -169,9 +169,9 @@ checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510"
[[package]]
name = "bytes"
version = "1.11.1"
version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"
checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3"
[[package]]
name = "cc"
@@ -969,7 +969,7 @@ checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
[[package]]
name = "potatomesh-matrix-bridge"
version = "0.5.11"
version = "0.5.10"
dependencies = [
"anyhow",
"axum",
+1 -1
View File
@@ -14,7 +14,7 @@
[package]
name = "potatomesh-matrix-bridge"
version = "0.5.11"
version = "0.5.10"
edition = "2021"
[dependencies]
-32
View File
@@ -146,38 +146,6 @@ Container detection checks `POTATOMESH_CONTAINER`, `CONTAINER`, and `/proc/1/cgr
Set `POTATOMESH_CONTAINER=0` or `--no-container` to opt out of container defaults.
### Docker Compose First Run
Before starting Compose, complete this preflight checklist:
1. Ensure `matrix/Config.toml` exists as a regular file on the host (not a directory).
2. Fill required Matrix values in `matrix/Config.toml`:
- `matrix.as_token`
- `matrix.hs_token`
- `matrix.server_name`
- `matrix.room_id`
- `matrix.homeserver`
This is required because the shared Compose anchor `x-matrix-bridge-base` mounts `./matrix/Config.toml` to `/app/Config.toml`.
Then follow the token and namespace requirements in [Matrix Appservice Setup (Synapse example)](#matrix-appservice-setup-synapse-example).
#### Troubleshooting
| Symptom | Likely cause | What to check |
| --- | --- | --- |
| `Is a directory (os error 21)` | Host mount source became a directory | `matrix/Config.toml` was missing at mount time and got created as a directory on host. |
| `M_UNKNOWN_TOKEN` / `401 Unauthorized` | Matrix appservice token mismatch | Verify `matrix.as_token` matches your appservice registration and setup in [Matrix Appservice Setup (Synapse example)](#matrix-appservice-setup-synapse-example). |
#### Recovery from accidental `Config.toml` directory creation
```bash
# from repo root
rm -rf matrix/Config.toml
touch matrix/Config.toml
# then edit matrix/Config.toml and set valid matrix.as_token, matrix.hs_token,
# matrix.server_name, matrix.room_id, and matrix.homeserver before starting compose
```
### PotatoMesh API
The bridge assumes:
-10
View File
@@ -788,7 +788,6 @@ def test_store_packet_dict_posts_text_message(mesh_module, monkeypatch):
mesh.config.LORA_FREQ = 868
mesh.config.MODEM_PRESET = "MediumFast"
mesh.register_host_node_id("!f00dbabe")
packet = {
"id": 123,
@@ -824,7 +823,6 @@ def test_store_packet_dict_posts_text_message(mesh_module, monkeypatch):
assert payload["rssi"] == -70
assert payload["reply_id"] is None
assert payload["emoji"] is None
assert payload["ingestor"] == "!f00dbabe"
assert payload["lora_freq"] == 868
assert payload["modem_preset"] == "MediumFast"
assert priority == mesh._MESSAGE_POST_PRIORITY
@@ -881,7 +879,6 @@ def test_store_packet_dict_posts_position(mesh_module, monkeypatch):
mesh.config.LORA_FREQ = 868
mesh.config.MODEM_PRESET = "MediumFast"
mesh.register_host_node_id("!f00dbabe")
packet = {
"id": 200498337,
@@ -949,7 +946,6 @@ def test_store_packet_dict_posts_position(mesh_module, monkeypatch):
)
assert payload["lora_freq"] == 868
assert payload["modem_preset"] == "MediumFast"
assert payload["ingestor"] == "!f00dbabe"
assert payload["raw"]["time"] == 1_758_624_189
@@ -964,7 +960,6 @@ def test_store_packet_dict_posts_neighborinfo(mesh_module, monkeypatch):
mesh.config.LORA_FREQ = 868
mesh.config.MODEM_PRESET = "MediumFast"
mesh.register_host_node_id("!f00dbabe")
packet = {
"id": 2049886869,
@@ -1009,7 +1004,6 @@ def test_store_packet_dict_posts_neighborinfo(mesh_module, monkeypatch):
assert neighbors[2]["neighbor_num"] == 0x0BAD_C0DE
assert payload["lora_freq"] == 868
assert payload["modem_preset"] == "MediumFast"
assert payload["ingestor"] == "!f00dbabe"
def test_store_packet_dict_handles_nodeinfo_packet(mesh_module, monkeypatch):
@@ -2288,7 +2282,6 @@ def test_store_packet_dict_handles_telemetry_packet(mesh_module, monkeypatch):
mesh.config.LORA_FREQ = 868
mesh.config.MODEM_PRESET = "MediumFast"
mesh.register_host_node_id("!f00dbabe")
packet = {
"id": 1_256_091_342,
@@ -2341,7 +2334,6 @@ def test_store_packet_dict_handles_telemetry_packet(mesh_module, monkeypatch):
assert payload["current"] == pytest.approx(0.0715)
assert payload["lora_freq"] == 868
assert payload["modem_preset"] == "MediumFast"
assert payload["ingestor"] == "!f00dbabe"
def test_store_packet_dict_handles_environment_telemetry(mesh_module, monkeypatch):
@@ -2485,7 +2477,6 @@ def test_store_packet_dict_handles_traceroute_packet(mesh_module, monkeypatch):
mesh.config.LORA_FREQ = 915
mesh.config.MODEM_PRESET = "LongFast"
mesh.register_host_node_id("!f00dbabe")
packet = {
"id": 2_934_054_466,
@@ -2527,7 +2518,6 @@ def test_store_packet_dict_handles_traceroute_packet(mesh_module, monkeypatch):
assert "elapsed_ms" in payload
assert payload["lora_freq"] == 915
assert payload["modem_preset"] == "LongFast"
assert payload["ingestor"] == "!f00dbabe"
def test_traceroute_hop_normalization_supports_mappings(mesh_module, monkeypatch):
+2 -40
View File
@@ -55,38 +55,8 @@ def _javascript_package_version() -> str:
raise AssertionError("package.json does not expose a string version")
def _flutter_package_version() -> str:
pubspec_path = REPO_ROOT / "app" / "pubspec.yaml"
for line in pubspec_path.read_text(encoding="utf-8").splitlines():
if line.startswith("version:"):
version = line.split(":", 1)[1].strip()
if version:
return version
break
raise AssertionError("pubspec.yaml does not expose a version")
def _rust_package_version() -> str:
cargo_path = REPO_ROOT / "matrix" / "Cargo.toml"
inside_package = False
for line in cargo_path.read_text(encoding="utf-8").splitlines():
stripped = line.strip()
if stripped == "[package]":
inside_package = True
continue
if inside_package and stripped.startswith("[") and stripped.endswith("]"):
break
if inside_package:
literal = re.match(
r'version\s*=\s*["\'](?P<version>[^"\']+)["\']', stripped
)
if literal:
return literal.group("version")
raise AssertionError("Cargo.toml does not expose a package version")
def test_version_identifiers_match_across_languages() -> None:
"""Guard against version drift between Python, Ruby, JavaScript, Flutter, and Rust."""
"""Guard against version drift between Python, Ruby, and JavaScript."""
python_version = getattr(data, "__version__", None)
assert (
@@ -95,13 +65,5 @@ def test_version_identifiers_match_across_languages() -> None:
ruby_version = _ruby_fallback_version()
javascript_version = _javascript_package_version()
flutter_version = _flutter_package_version()
rust_version = _rust_package_version()
assert (
python_version
== ruby_version
== javascript_version
== flutter_version
== rust_version
)
assert python_version == ruby_version == javascript_version
-3
View File
@@ -139,10 +139,7 @@ module PotatoMesh
set :public_folder, File.expand_path("../../public", __dir__)
set :views, File.expand_path("../../views", __dir__)
set :federation_thread, nil
set :initial_federation_thread, nil
set :federation_worker_pool, nil
set :federation_shutdown_requested, false
set :federation_shutdown_hook_installed, false
set :port, resolve_port
set :bind, DEFAULT_BIND_ADDRESS
@@ -616,7 +616,6 @@ module PotatoMesh
payload_b64 = string_or_nil(payload["payload_b64"] || payload["payload"])
payload_b64 ||= string_or_nil(position_section.dig("payload", "__bytes_b64__"))
ingestor = string_or_nil(payload["ingestor"])
row = [
pos_id,
@@ -640,14 +639,13 @@ module PotatoMesh
hop_limit,
bitfield,
payload_b64,
ingestor,
]
with_busy_retry do
db.execute <<~SQL, row
INSERT INTO positions(id,node_id,node_num,rx_time,rx_iso,position_time,to_id,latitude,longitude,altitude,location_source,
precision_bits,sats_in_view,pdop,ground_speed,ground_track,snr,rssi,hop_limit,bitfield,payload_b64,ingestor)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
precision_bits,sats_in_view,pdop,ground_speed,ground_track,snr,rssi,hop_limit,bitfield,payload_b64)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
ON CONFLICT(id) DO UPDATE SET
node_id=COALESCE(excluded.node_id,positions.node_id),
node_num=COALESCE(excluded.node_num,positions.node_num),
@@ -668,8 +666,7 @@ module PotatoMesh
rssi=COALESCE(excluded.rssi,positions.rssi),
hop_limit=COALESCE(excluded.hop_limit,positions.hop_limit),
bitfield=COALESCE(excluded.bitfield,positions.bitfield),
payload_b64=COALESCE(excluded.payload_b64,positions.payload_b64),
ingestor=COALESCE(NULLIF(positions.ingestor,''), excluded.ingestor)
payload_b64=COALESCE(excluded.payload_b64,positions.payload_b64)
SQL
end
@@ -724,7 +721,6 @@ module PotatoMesh
touch_node_last_seen(db, node_id || node_num, node_num, rx_time: rx_time, source: :neighborinfo)
neighbor_entries = []
ingestor = string_or_nil(payload["ingestor"])
neighbors_payload = payload["neighbors"]
neighbors_list = neighbors_payload.is_a?(Array) ? neighbors_payload : []
@@ -761,41 +757,21 @@ module PotatoMesh
snr = coerce_float(neighbor["snr"])
ensure_unknown_node(db, neighbor_id || neighbor_num, neighbor_num, heard_time: entry_rx_time)
touch_node_last_seen(db, neighbor_id || neighbor_num, neighbor_num, rx_time: entry_rx_time, source: :neighborinfo)
neighbor_entries << [neighbor_id, snr, entry_rx_time, ingestor]
neighbor_entries << [neighbor_id, snr, entry_rx_time]
end
with_busy_retry do
db.transaction do
if neighbor_entries.empty?
db.execute("DELETE FROM neighbors WHERE node_id = ?", [node_id])
else
expected_neighbors = neighbor_entries.map(&:first).uniq
existing_neighbors = db.execute(
"SELECT neighbor_id FROM neighbors WHERE node_id = ?",
[node_id],
).flatten
stale_neighbors = existing_neighbors - expected_neighbors
stale_neighbors.each_slice(500) do |slice|
placeholders = slice.map { "?" }.join(",")
db.execute(
"DELETE FROM neighbors WHERE node_id = ? AND neighbor_id IN (#{placeholders})",
[node_id] + slice,
)
end
end
neighbor_entries.each do |neighbor_id, snr_value, heard_time, reporter_id|
db.execute("DELETE FROM neighbors WHERE node_id = ?", [node_id])
neighbor_entries.each do |neighbor_id, snr_value, heard_time|
db.execute(
<<~SQL,
INSERT INTO neighbors(node_id, neighbor_id, snr, rx_time, ingestor)
VALUES (?, ?, ?, ?, ?)
ON CONFLICT(node_id, neighbor_id) DO UPDATE SET
snr = excluded.snr,
rx_time = excluded.rx_time,
ingestor = COALESCE(NULLIF(neighbors.ingestor,''), excluded.ingestor)
INSERT OR REPLACE INTO neighbors(node_id, neighbor_id, snr, rx_time)
VALUES (?, ?, ?, ?)
SQL
[node_id, neighbor_id, snr_value, heard_time, reporter_id],
[node_id, neighbor_id, snr_value, heard_time],
)
end
end
@@ -1005,7 +981,6 @@ module PotatoMesh
payload_b64 = string_or_nil(payload["payload_b64"] || payload["payload"])
lora_freq = coerce_integer(payload["lora_freq"] || payload["loraFrequency"])
modem_preset = string_or_nil(payload["modem_preset"] || payload["modemPreset"])
ingestor = string_or_nil(payload["ingestor"])
telemetry_section = normalize_json_object(payload["telemetry"])
device_metrics = normalize_json_object(payload["device_metrics"] || payload["deviceMetrics"])
@@ -1335,7 +1310,6 @@ module PotatoMesh
rainfall_24h,
soil_moisture,
soil_temperature,
ingestor,
]
placeholders = Array.new(row.length, "?").join(",")
@@ -1343,7 +1317,7 @@ module PotatoMesh
with_busy_retry do
db.execute <<~SQL, row
INSERT INTO telemetry(id,node_id,node_num,from_id,to_id,rx_time,rx_iso,telemetry_time,channel,portnum,hop_limit,snr,rssi,bitfield,payload_b64,
battery_level,voltage,channel_utilization,air_util_tx,uptime_seconds,temperature,relative_humidity,barometric_pressure,gas_resistance,current,iaq,distance,lux,white_lux,ir_lux,uv_lux,wind_direction,wind_speed,weight,wind_gust,wind_lull,radiation,rainfall_1h,rainfall_24h,soil_moisture,soil_temperature,ingestor)
battery_level,voltage,channel_utilization,air_util_tx,uptime_seconds,temperature,relative_humidity,barometric_pressure,gas_resistance,current,iaq,distance,lux,white_lux,ir_lux,uv_lux,wind_direction,wind_speed,weight,wind_gust,wind_lull,radiation,rainfall_1h,rainfall_24h,soil_moisture,soil_temperature)
VALUES (#{placeholders})
ON CONFLICT(id) DO UPDATE SET
node_id=COALESCE(excluded.node_id,telemetry.node_id),
@@ -1385,8 +1359,7 @@ module PotatoMesh
rainfall_1h=COALESCE(excluded.rainfall_1h,telemetry.rainfall_1h),
rainfall_24h=COALESCE(excluded.rainfall_24h,telemetry.rainfall_24h),
soil_moisture=COALESCE(excluded.soil_moisture,telemetry.soil_moisture),
soil_temperature=COALESCE(excluded.soil_temperature,telemetry.soil_temperature),
ingestor=COALESCE(NULLIF(telemetry.ingestor,''), excluded.ingestor)
soil_temperature=COALESCE(excluded.soil_temperature,telemetry.soil_temperature)
SQL
end
@@ -1437,7 +1410,6 @@ module PotatoMesh
metrics&.[]("latency_ms") ||
metrics&.[]("latencyMs"),
)
ingestor = string_or_nil(payload["ingestor"])
hops_value = payload.key?("hops") ? payload["hops"] : payload["path"]
hops = normalize_trace_hops(hops_value)
@@ -1449,9 +1421,9 @@ module PotatoMesh
end
with_busy_retry do
db.execute <<~SQL, [trace_identifier, request_id, src, dest, rx_time, rx_iso, rssi, snr, elapsed_ms, ingestor]
INSERT INTO traces(id, request_id, src, dest, rx_time, rx_iso, rssi, snr, elapsed_ms, ingestor)
VALUES(?,?,?,?,?,?,?,?,?,?)
db.execute <<~SQL, [trace_identifier, request_id, src, dest, rx_time, rx_iso, rssi, snr, elapsed_ms]
INSERT INTO traces(id, request_id, src, dest, rx_time, rx_iso, rssi, snr, elapsed_ms)
VALUES(?,?,?,?,?,?,?,?,?)
ON CONFLICT(id) DO UPDATE SET
request_id=COALESCE(excluded.request_id,traces.request_id),
src=COALESCE(excluded.src,traces.src),
@@ -1460,8 +1432,7 @@ module PotatoMesh
rx_iso=excluded.rx_iso,
rssi=COALESCE(excluded.rssi,traces.rssi),
snr=COALESCE(excluded.snr,traces.snr),
elapsed_ms=COALESCE(excluded.elapsed_ms,traces.elapsed_ms),
ingestor=COALESCE(NULLIF(traces.ingestor,''), excluded.ingestor)
elapsed_ms=COALESCE(excluded.elapsed_ms,traces.elapsed_ms)
SQL
trace_id = trace_identifier || db.last_insert_row_id
@@ -1526,6 +1497,7 @@ module PotatoMesh
portnum: data[:portnum],
payload: data[:payload],
channel_name: channel_name,
decryption_confidence: data[:decryption_confidence],
}
end
@@ -1595,10 +1567,13 @@ module PotatoMesh
channel_index = coerce_integer(message["channel"] || message["channel_index"] || message["channelIndex"])
decrypted_payload = nil
decrypted_text = nil
decrypted_portnum = nil
decrypted_flag = false
decryption_confidence = nil
if encrypted && (text.nil? || text.to_s.strip.empty?)
decrypted = decrypt_meshtastic_message(
decrypted_data = decrypt_meshtastic_message(
message,
msg_id,
from_id,
@@ -1606,9 +1581,24 @@ module PotatoMesh
channel_index,
)
if decrypted
decrypted_payload = decrypted
decrypted_portnum = decrypted[:portnum]
if decrypted_data
decrypted_payload = decrypted_data
decrypted_portnum = decrypted_data[:portnum]
if decrypted_data[:text]
text = decrypted_data[:text]
decrypted_text = text
clear_encrypted = true
encrypted = nil
message["text"] = text
message["channel_name"] ||= decrypted_data[:channel_name]
decrypted_flag = true
decryption_confidence = decrypted_data[:decryption_confidence] || 0.0
if portnum.nil? && decrypted_portnum
portnum = decrypted_portnum
message["portnum"] = portnum
end
end
end
end
@@ -1622,7 +1612,6 @@ module PotatoMesh
channel_name = string_or_nil(message["channel_name"] || message["channelName"])
reply_id = coerce_integer(message["reply_id"] || message["replyId"])
emoji = string_or_nil(message["emoji"])
ingestor = string_or_nil(message["ingestor"])
row = [
msg_id,
@@ -1642,12 +1631,13 @@ module PotatoMesh
channel_name,
reply_id,
emoji,
ingestor,
decrypted_flag ? 1 : 0,
decryption_confidence,
]
with_busy_retry do
existing = db.get_first_row(
"SELECT from_id, to_id, text, encrypted, lora_freq, modem_preset, channel_name, reply_id, emoji, portnum, ingestor FROM messages WHERE id = ?",
"SELECT from_id, to_id, text, encrypted, lora_freq, modem_preset, channel_name, reply_id, emoji, portnum, decrypted, decryption_confidence FROM messages WHERE id = ?",
[msg_id],
)
if existing
@@ -1702,6 +1692,11 @@ module PotatoMesh
updates["rx_iso"] = rx_iso if rx_iso
end
if clear_encrypted
updates["decrypted"] = 1
updates["decryption_confidence"] = decryption_confidence
end
if portnum
existing_portnum = existing.is_a?(Hash) ? existing["portnum"] : existing[9]
existing_portnum_str = existing_portnum&.to_s
@@ -1745,12 +1740,6 @@ module PotatoMesh
updates["emoji"] = emoji if should_update
end
if ingestor
existing_ingestor = existing.is_a?(Hash) ? existing["ingestor"] : existing[10]
existing_ingestor = string_or_nil(existing_ingestor)
updates["ingestor"] = ingestor if existing_ingestor.nil?
end
unless updates.empty?
assignments = updates.keys.map { |column| "#{column} = ?" }.join(", ")
db.execute("UPDATE messages SET #{assignments} WHERE id = ?", updates.values + [msg_id])
@@ -1760,12 +1749,12 @@ module PotatoMesh
begin
db.execute <<~SQL, row
INSERT INTO messages(id,rx_time,rx_iso,from_id,to_id,channel,portnum,text,encrypted,snr,rssi,hop_limit,lora_freq,modem_preset,channel_name,reply_id,emoji,ingestor)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
INSERT INTO messages(id,rx_time,rx_iso,from_id,to_id,channel,portnum,text,encrypted,snr,rssi,hop_limit,lora_freq,modem_preset,channel_name,reply_id,emoji,decrypted,decryption_confidence)
VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
SQL
rescue SQLite3::ConstraintException
existing_row = db.get_first_row(
"SELECT text, encrypted, ingestor FROM messages WHERE id = ?",
"SELECT text, encrypted FROM messages WHERE id = ?",
[msg_id],
)
existing_text = existing_row.is_a?(Hash) ? existing_row["text"] : existing_row&.[](0)
@@ -1773,8 +1762,6 @@ module PotatoMesh
allow_encrypted_update = existing_text_str.nil? || existing_text_str.strip.empty?
existing_encrypted = existing_row.is_a?(Hash) ? existing_row["encrypted"] : existing_row&.[](1)
existing_encrypted_str = existing_encrypted&.to_s
existing_ingestor = existing_row.is_a?(Hash) ? existing_row["ingestor"] : existing_row&.[](2)
existing_ingestor = string_or_nil(existing_ingestor)
decrypted_precedence = text && (clear_encrypted || (existing_encrypted_str && !existing_encrypted_str.strip.empty?))
fallback_updates = {}
@@ -1784,6 +1771,10 @@ module PotatoMesh
fallback_updates["encrypted"] = encrypted if encrypted && allow_encrypted_update
fallback_updates["encrypted"] = nil if clear_encrypted
fallback_updates["portnum"] = portnum if portnum
if clear_encrypted
fallback_updates["decrypted"] = 1
fallback_updates["decryption_confidence"] = decryption_confidence
end
if decrypted_precedence
fallback_updates["channel"] = message["channel"] if message.key?("channel")
fallback_updates["snr"] = message["snr"] if message.key?("snr")
@@ -1802,7 +1793,6 @@ module PotatoMesh
end
fallback_updates["reply_id"] = reply_id unless reply_id.nil?
fallback_updates["emoji"] = emoji if emoji
fallback_updates["ingestor"] = ingestor if ingestor && existing_ingestor.nil?
unless fallback_updates.empty?
assignments = fallback_updates.keys.map { |column| "#{column} = ?" }.join(", ")
db.execute("UPDATE messages SET #{assignments} WHERE id = ?", fallback_updates.values + [msg_id])
@@ -1811,7 +1801,7 @@ module PotatoMesh
end
end
if clear_encrypted && text
if clear_encrypted && decrypted_text
debug_log(
"Stored decrypted text message",
context: "data_processing.insert_message",
@@ -1853,7 +1843,7 @@ module PotatoMesh
)
end
should_touch_message = !stored_decrypted
should_touch_message = !stored_decrypted || decrypted_text
if should_touch_message
ensure_unknown_node(db, from_id || raw_from_id, message["from_num"], heard_time: rx_time)
touch_node_last_seen(
@@ -1919,7 +1909,7 @@ module PotatoMesh
return false unless portnum_value
payload_b64 = Base64.strict_encode64(payload_bytes)
supported_ports = [3, 4, 67, 70, 71]
supported_ports = [3, 67, 70, 71]
return false unless supported_ports.include?(portnum_value)
decoded = PotatoMesh::App::Meshtastic::PayloadDecoder.decode(
@@ -1944,7 +1934,6 @@ module PotatoMesh
"lora_freq" => coerce_integer(message["lora_freq"] || message["loraFrequency"]),
"modem_preset" => string_or_nil(message["modem_preset"] || message["modemPreset"]),
"payload_b64" => payload_b64,
"ingestor" => string_or_nil(message["ingestor"]),
}
case decoded["type"]
@@ -1958,33 +1947,6 @@ module PotatoMesh
portnum: portnum_value,
)
true
when "NODEINFO_APP"
node_payload = normalize_decrypted_nodeinfo_payload(decoded["payload"])
return false unless valid_decrypted_nodeinfo_payload?(node_payload)
node_id = string_or_nil(node_payload["id"]) || from_id
node_num = coerce_integer(node_payload["num"]) ||
coerce_integer(message["from_num"]) ||
resolve_node_num(from_id, message)
node_id ||= format("!%08x", node_num & 0xFFFFFFFF) if node_num
return false unless node_id
payload = node_payload.merge(
"num" => node_num,
"lastHeard" => coerce_integer(node_payload["lastHeard"] || node_payload["last_heard"]) || rx_time,
"snr" => node_payload.key?("snr") ? node_payload["snr"] : snr,
"lora_freq" => common_payload["lora_freq"],
"modem_preset" => common_payload["modem_preset"],
)
upsert_node(db, node_id, payload)
debug_log(
"Stored decrypted node payload",
context: "data_processing.store_decrypted_payload",
message_id: packet_id,
portnum: portnum_value,
node_id: node_id,
)
true
when "TELEMETRY_APP"
payload = common_payload.merge("telemetry" => decoded["payload"])
insert_telemetry(db, payload)
@@ -2047,92 +2009,6 @@ module PotatoMesh
end
end
# Validate decoded NodeInfo payloads before upserting node records.
#
# @param payload [Object] decoded payload candidate.
# @return [Boolean] true when the payload resembles a Meshtastic NodeInfo.
def valid_decrypted_nodeinfo_payload?(payload)
return false unless payload.is_a?(Hash)
return false if payload.empty?
return false unless payload["user"].is_a?(Hash)
return false if payload.key?("position") && !payload["position"].is_a?(Hash)
return false if payload.key?("deviceMetrics") && !payload["deviceMetrics"].is_a?(Hash)
return false unless nodeinfo_user_has_identifying_fields?(payload["user"])
true
end
# Normalize decoded NodeInfo payload keys for +upsert_node+ compatibility.
#
# The Python decoder preserves protobuf field names, so nested hashes may
# use +snake_case+ keys that +upsert_node+ does not read.
#
# @param payload [Object] decoded NodeInfo payload.
# @return [Hash] normalized payload hash.
def normalize_decrypted_nodeinfo_payload(payload)
return {} unless payload.is_a?(Hash)
user = payload["user"]
normalized_user = user.is_a?(Hash) ? user.dup : nil
if normalized_user
normalized_user["shortName"] ||= normalized_user["short_name"]
normalized_user["longName"] ||= normalized_user["long_name"]
normalized_user["hwModel"] ||= normalized_user["hw_model"]
normalized_user["publicKey"] ||= normalized_user["public_key"]
normalized_user["isUnmessagable"] = normalized_user["is_unmessagable"] if normalized_user.key?("is_unmessagable")
end
metrics = payload["deviceMetrics"] || payload["device_metrics"]
normalized_metrics = metrics.is_a?(Hash) ? metrics.dup : nil
if normalized_metrics
normalized_metrics["batteryLevel"] ||= normalized_metrics["battery_level"]
normalized_metrics["channelUtilization"] ||= normalized_metrics["channel_utilization"]
normalized_metrics["airUtilTx"] ||= normalized_metrics["air_util_tx"]
normalized_metrics["uptimeSeconds"] ||= normalized_metrics["uptime_seconds"]
end
position = payload["position"]
normalized_position = position.is_a?(Hash) ? position.dup : nil
if normalized_position
normalized_position["precisionBits"] ||= normalized_position["precision_bits"]
normalized_position["locationSource"] ||= normalized_position["location_source"]
end
normalized = payload.dup
normalized["user"] = normalized_user if normalized_user
normalized["deviceMetrics"] = normalized_metrics if normalized_metrics
normalized["position"] = normalized_position if normalized_position
normalized["lastHeard"] ||= normalized["last_heard"]
normalized["hopsAway"] ||= normalized["hops_away"]
normalized["isFavorite"] = normalized["is_favorite"] if normalized.key?("is_favorite")
normalized["hwModel"] ||= normalized["hw_model"]
normalized
end
# Validate that a decoded NodeInfo user section contains identifying data.
#
# @param user [Hash] decoded NodeInfo user payload.
# @return [Boolean] true when at least one identifying field is present.
def nodeinfo_user_has_identifying_fields?(user)
identifying_fields = [
user["id"],
user["shortName"],
user["short_name"],
user["longName"],
user["long_name"],
user["macaddr"],
user["hwModel"],
user["hw_model"],
user["publicKey"],
user["public_key"],
]
identifying_fields.any? do |value|
value.is_a?(String) ? !value.strip.empty? : !value.nil?
end
end
def normalize_node_id(db, node_ref)
return nil if node_ref.nil?
ref_str = node_ref.to_s.strip
+9 -31
View File
@@ -149,8 +149,15 @@ module PotatoMesh
db.execute("ALTER TABLE messages ADD COLUMN emoji TEXT")
message_columns << "emoji"
end
unless message_columns.include?("ingestor")
db.execute("ALTER TABLE messages ADD COLUMN ingestor TEXT")
unless message_columns.include?("decrypted")
db.execute("ALTER TABLE messages ADD COLUMN decrypted INTEGER NOT NULL DEFAULT 0")
message_columns << "decrypted"
end
unless message_columns.include?("decryption_confidence")
db.execute("ALTER TABLE messages ADD COLUMN decryption_confidence REAL")
message_columns << "decryption_confidence"
end
reply_index_exists =
@@ -191,31 +198,6 @@ module PotatoMesh
db.execute("ALTER TABLE telemetry ADD COLUMN #{name} #{type}")
telemetry_columns << name
end
unless telemetry_columns.include?("ingestor")
db.execute("ALTER TABLE telemetry ADD COLUMN ingestor TEXT")
end
position_tables =
db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='positions'").flatten
if position_tables.empty?
positions_schema = File.expand_path("../../../../data/positions.sql", __dir__)
db.execute_batch(File.read(positions_schema))
end
position_columns = db.execute("PRAGMA table_info(positions)").map { |row| row[1] }
unless position_columns.include?("ingestor")
db.execute("ALTER TABLE positions ADD COLUMN ingestor TEXT")
end
neighbor_tables =
db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='neighbors'").flatten
if neighbor_tables.empty?
neighbors_schema = File.expand_path("../../../../data/neighbors.sql", __dir__)
db.execute_batch(File.read(neighbors_schema))
end
neighbor_columns = db.execute("PRAGMA table_info(neighbors)").map { |row| row[1] }
unless neighbor_columns.include?("ingestor")
db.execute("ALTER TABLE neighbors ADD COLUMN ingestor TEXT")
end
trace_tables =
db.execute(
@@ -225,10 +207,6 @@ module PotatoMesh
traces_schema = File.expand_path("../../../../data/traces.sql", __dir__)
db.execute_batch(File.read(traces_schema))
end
trace_columns = db.execute("PRAGMA table_info(traces)").map { |row| row[1] }
unless trace_columns.include?("ingestor")
db.execute("ALTER TABLE traces ADD COLUMN ingestor TEXT")
end
ingestor_tables =
db.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='ingestors'").flatten
+60 -338
View File
@@ -17,8 +17,6 @@
module PotatoMesh
module App
module Federation
FEDERATION_SLEEP_SLICE_SECONDS = 0.2
# Resolve the canonical domain for the running instance.
#
# @return [String, nil] sanitized instance domain or nil outside production.
@@ -172,9 +170,6 @@ module PotatoMesh
# @return [PotatoMesh::App::WorkerPool, nil] active worker pool if created.
def ensure_federation_worker_pool!
return nil unless federation_enabled?
return nil if federation_shutdown_requested?
ensure_federation_shutdown_hook!
existing = settings.respond_to?(:federation_worker_pool) ? settings.federation_worker_pool : nil
return existing if existing&.alive?
@@ -186,77 +181,16 @@ module PotatoMesh
name: "potato-mesh-fed",
)
set(:federation_worker_pool, pool) if respond_to?(:set)
pool
end
# Ensure federation background workers are torn down during process exit.
#
# @return [void]
def ensure_federation_shutdown_hook!
application = is_a?(Class) ? self : self.class
return application.ensure_federation_shutdown_hook! unless application.equal?(self)
installed = if respond_to?(:settings) && settings.respond_to?(:federation_shutdown_hook_installed)
settings.federation_shutdown_hook_installed
else
instance_variable_defined?(:@federation_shutdown_hook_installed) && @federation_shutdown_hook_installed
end
return if installed
if respond_to?(:set) && settings.respond_to?(:federation_shutdown_hook_installed=)
set(:federation_shutdown_hook_installed, true)
else
@federation_shutdown_hook_installed = true
end
at_exit do
begin
application.shutdown_federation_background_work!(timeout: PotatoMesh::Config.federation_shutdown_timeout_seconds)
pool.shutdown(timeout: PotatoMesh::Config.federation_task_timeout_seconds)
rescue StandardError
# Suppress shutdown errors during interpreter teardown.
end
end
end
# Check whether federation workers have received a shutdown request.
#
# @return [Boolean] true when stop has been requested.
def federation_shutdown_requested?
  flag_available =
    respond_to?(:settings) &&
    settings.respond_to?(:federation_shutdown_requested)
  return false unless flag_available

  # Strict comparison: only an explicit +true+ counts as a shutdown request.
  settings.federation_shutdown_requested == true
end
# Mark federation background work as shutting down.
#
# @return [void]
def request_federation_shutdown!
  return unless respond_to?(:set)

  set(:federation_shutdown_requested, true)
end
# Clear any previously requested federation shutdown marker.
#
# @return [void]
def clear_federation_shutdown_request!
  return unless respond_to?(:set)

  set(:federation_shutdown_requested, false)
end
# Sleep in short intervals so federation loops can react to shutdown.
#
# @param seconds [Numeric] target sleep duration.
# @return [Boolean] true when the full delay elapsed without shutdown.
def federation_sleep_with_shutdown(seconds)
remaining = seconds.to_f
while remaining.positive?
return false if federation_shutdown_requested?
slice = [remaining, FEDERATION_SLEEP_SLICE_SECONDS].min
Kernel.sleep(slice)
remaining -= slice
end
!federation_shutdown_requested?
set(:federation_worker_pool, pool) if respond_to?(:set)
pool
end
# Shutdown and clear the federation worker pool if present.
@@ -280,44 +214,6 @@ module PotatoMesh
end
end
# Gracefully terminate federation background loops and worker pool tasks.
#
# @param timeout [Numeric, nil] maximum join time applied per thread.
# @return [void]
def shutdown_federation_background_work!(timeout: nil)
  # Raise the shutdown flag first so sleeping loops that poll
  # federation_shutdown_requested? exit at their next check.
  request_federation_shutdown!
  timeout_value = timeout || PotatoMesh::Config.federation_shutdown_timeout_seconds
  # Stop the one-shot initial announcer before the periodic announcer thread.
  stop_federation_thread!(:initial_federation_thread, timeout: timeout_value)
  stop_federation_thread!(:federation_thread, timeout: timeout_value)
  shutdown_federation_worker_pool!
  # Drop in-flight/cooldown crawl bookkeeping so a later restart begins cleanly.
  clear_federation_crawl_state!
end
# Stop a specific federation thread setting and clear its reference.
#
# @param setting_name [Symbol] settings key storing the thread object.
# @param timeout [Numeric] seconds to wait for clean thread exit.
# @return [void]
def stop_federation_thread!(setting_name, timeout:)
  return unless respond_to?(:settings)
  return unless settings.respond_to?(setting_name)
  thread = settings.public_send(setting_name)
  if thread&.alive?
    begin
      # Interrupt a sleeping thread so it can notice the shutdown request.
      thread.wakeup if thread.respond_to?(:wakeup)
    rescue ThreadError
      # The thread may not currently be sleeping; continue shutdown.
    end
    # Wait for a cooperative exit before resorting to force.
    thread.join(timeout)
    if thread.alive?
      # Last resort: kill the thread and give it a brief moment to terminate.
      thread.kill
      thread.join(0.1)
    end
  end
  # Clear the stored reference so a future start can spawn a fresh thread.
  set(setting_name, nil) if respond_to?(:set)
end
def federation_target_domains(self_domain)
normalized_self = sanitize_instance_domain(self_domain)&.downcase
ordered = []
@@ -369,21 +265,16 @@ module PotatoMesh
def announce_instance_to_domain(domain, payload_json)
return false unless domain && !domain.empty?
return false if federation_shutdown_requested?
https_failures = []
published = instance_uri_candidates(domain, "/api/instances").any? do |uri|
break false if federation_shutdown_requested?
instance_uri_candidates(domain, "/api/instances").each do |uri|
begin
http = build_remote_http_client(uri)
response = Timeout.timeout(PotatoMesh::Config.remote_instance_request_timeout) do
http.start do |connection|
request = build_federation_http_request(Net::HTTP::Post, uri)
request.body = payload_json
connection.request(request)
end
response = http.start do |connection|
request = build_federation_http_request(Net::HTTP::Post, uri)
request.body = payload_json
connection.request(request)
end
if response.is_a?(Net::HTTPSuccess)
debug_log(
@@ -392,16 +283,14 @@ module PotatoMesh
target: uri.to_s,
status: response.code,
)
true
else
debug_log(
"Federation announcement failed",
context: "federation.announce",
target: uri.to_s,
status: response.code,
)
false
return true
end
debug_log(
"Federation announcement failed",
context: "federation.announce",
target: uri.to_s,
status: response.code,
)
rescue StandardError => e
metadata = {
context: "federation.announce",
@@ -416,18 +305,9 @@ module PotatoMesh
**metadata,
)
https_failures << metadata
else
warn_log(
"Federation announcement raised exception",
**metadata,
)
next
end
false
end
end
unless published
https_failures.each do |metadata|
warn_log(
"Federation announcement raised exception",
**metadata,
@@ -435,7 +315,14 @@ module PotatoMesh
end
end
published
https_failures.each do |metadata|
warn_log(
"Federation announcement raised exception",
**metadata,
)
end
false
end
# Determine whether an HTTPS announcement failure should fall back to HTTP.
@@ -455,7 +342,6 @@ module PotatoMesh
def announce_instance_to_all_domains
return unless federation_enabled?
return if federation_shutdown_requested?
attributes, signature = ensure_self_instance_record!
payload_json = JSON.generate(instance_announcement_payload(attributes, signature))
@@ -463,15 +349,13 @@ module PotatoMesh
pool = federation_worker_pool
scheduled = []
domains.each_with_object(scheduled) do |domain, scheduled_tasks|
break if federation_shutdown_requested?
domains.each do |domain|
if pool
begin
task = pool.schedule do
announce_instance_to_domain(domain, payload_json)
end
scheduled_tasks << [domain, task]
scheduled << [domain, task]
next
rescue PotatoMesh::App::WorkerPool::QueueFullError
warn_log(
@@ -512,9 +396,7 @@ module PotatoMesh
return if scheduled.empty?
timeout = PotatoMesh::Config.federation_task_timeout_seconds
scheduled.all? do |domain, task|
break false if federation_shutdown_requested?
scheduled.each do |domain, task|
begin
task.wait(timeout: timeout)
rescue PotatoMesh::App::WorkerPool::TaskTimeoutError => e
@@ -535,23 +417,19 @@ module PotatoMesh
error_message: e.message,
)
end
true
end
end
def start_federation_announcer!
# Federation broadcasts must not execute when federation support is disabled.
return nil unless federation_enabled?
clear_federation_shutdown_request!
ensure_federation_shutdown_hook!
existing = settings.federation_thread
return existing if existing&.alive?
thread = Thread.new do
loop do
break unless federation_sleep_with_shutdown(PotatoMesh::Config.federation_announcement_interval)
sleep PotatoMesh::Config.federation_announcement_interval
begin
announce_instance_to_all_domains
rescue StandardError => e
@@ -577,8 +455,6 @@ module PotatoMesh
def start_initial_federation_announcement!
# Skip the initial broadcast entirely when federation is disabled.
return nil unless federation_enabled?
clear_federation_shutdown_request!
ensure_federation_shutdown_hook!
existing = settings.respond_to?(:initial_federation_thread) ? settings.initial_federation_thread : nil
return existing if existing&.alive?
@@ -586,12 +462,7 @@ module PotatoMesh
thread = Thread.new do
begin
delay = PotatoMesh::Config.initial_federation_delay_seconds
if delay.positive?
completed = federation_sleep_with_shutdown(delay)
next unless completed
end
next if federation_shutdown_requested?
Kernel.sleep(delay) if delay.positive?
announce_instance_to_all_domains
rescue StandardError => e
warn_log(
@@ -652,19 +523,15 @@ module PotatoMesh
end
def perform_instance_http_request(uri)
raise InstanceFetchError, "federation shutdown requested" if federation_shutdown_requested?
http = build_remote_http_client(uri)
Timeout.timeout(PotatoMesh::Config.remote_instance_request_timeout) do
http.start do |connection|
request = build_federation_http_request(Net::HTTP::Get, uri)
response = connection.request(request)
case response
when Net::HTTPSuccess
response.body
else
raise InstanceFetchError, "unexpected response #{response.code}"
end
http.start do |connection|
request = build_federation_http_request(Net::HTTP::Get, uri)
response = connection.request(request)
case response
when Net::HTTPSuccess
response.body
else
raise InstanceFetchError, "unexpected response #{response.code}"
end
end
rescue StandardError => e
@@ -721,12 +588,8 @@ module PotatoMesh
end
def fetch_instance_json(domain, path)
return [nil, ["federation shutdown requested"]] if federation_shutdown_requested?
errors = []
instance_uri_candidates(domain, path).each do |uri|
break if federation_shutdown_requested?
begin
body = perform_instance_http_request(uri)
return [JSON.parse(body), uri] if body
@@ -739,34 +602,6 @@ module PotatoMesh
[nil, errors]
end
# Resolve the best matching active-node count from a remote /api/stats payload.
#
# @param payload [Hash, nil] decoded JSON payload from /api/stats.
# @param max_age_seconds [Integer] activity window currently expected for federation freshness.
# @return [Integer, nil] selected active-node count when available.
def remote_active_node_count_from_stats(payload, max_age_seconds:)
  active_nodes = payload.is_a?(Hash) ? payload["active_nodes"] : nil
  return nil unless active_nodes.is_a?(Hash)

  window_seconds = coerce_integer(max_age_seconds) || 0
  # Map the freshness window onto the coarsest matching stats bucket.
  bucket =
    if window_seconds <= 3600
      "hour"
    elsif window_seconds <= 86_400
      "day"
    elsif window_seconds <= PotatoMesh::Config.week_seconds
      "week"
    else
      "month"
    end
  count = coerce_integer(active_nodes[bucket])
  # Negative remote counts are clamped to zero; missing values yield nil.
  count && [count, 0].max
end
# Parse a remote federation instance payload into canonical attributes.
#
# @param payload [Hash] JSON object describing a remote instance.
@@ -827,147 +662,49 @@ module PotatoMesh
# @param overall_limit [Integer, nil] maximum unique domains visited.
# @return [Boolean] true when the crawl was scheduled successfully.
def enqueue_federation_crawl(domain, per_response_limit:, overall_limit:)
sanitized_domain = sanitize_instance_domain(domain)
unless sanitized_domain
warn_log(
"Skipped remote instance crawl",
context: "federation.instances",
domain: domain,
reason: "invalid domain",
)
return false
end
return false if federation_shutdown_requested?
application = is_a?(Class) ? self : self.class
pool = application.federation_worker_pool
pool = federation_worker_pool
unless pool
debug_log(
"Skipped remote instance crawl",
context: "federation.instances",
domain: sanitized_domain,
domain: domain,
reason: "federation disabled",
)
return false
end
claim_result = application.claim_federation_crawl_slot(sanitized_domain)
unless claim_result == :claimed
debug_log(
"Skipped remote instance crawl",
context: "federation.instances",
domain: sanitized_domain,
reason: claim_result == :in_flight ? "crawl already in flight" : "recent crawl completed",
)
return false
end
application = is_a?(Class) ? self : self.class
pool.schedule do
db = nil
db = application.open_database
begin
db = application.open_database
application.ingest_known_instances_from!(
db,
sanitized_domain,
domain,
per_response_limit: per_response_limit,
overall_limit: overall_limit,
)
ensure
db&.close
application.release_federation_crawl_slot(sanitized_domain)
end
end
true
rescue PotatoMesh::App::WorkerPool::QueueFullError
application.handle_failed_federation_crawl_schedule(sanitized_domain, "worker queue saturated")
rescue PotatoMesh::App::WorkerPool::ShutdownError
application.handle_failed_federation_crawl_schedule(sanitized_domain, "worker pool shut down")
end
# Handle a failed crawl schedule attempt without applying cooldown.
#
# @param domain [String] canonical domain that failed to schedule.
# @param reason [String] human-readable failure reason.
# @return [Boolean] always false because scheduling did not succeed.
def handle_failed_federation_crawl_schedule(domain, reason)
release_federation_crawl_slot(domain, record_completion: false)
warn_log(
"Skipped remote instance crawl",
context: "federation.instances",
domain: domain,
reason: reason,
reason: "worker queue saturated",
)
false
rescue PotatoMesh::App::WorkerPool::ShutdownError
warn_log(
"Skipped remote instance crawl",
context: "federation.instances",
domain: domain,
reason: "worker pool shut down",
)
false
end
# Initialize shared in-memory state used to deduplicate crawl scheduling.
#
# Uses double-checked locking: the cheap instance-variable test runs outside
# the init mutex and is repeated inside it before assigning the shared state.
# NOTE(review): the `||=` that creates the init mutex is itself not atomic;
# presumably the first call happens before worker threads start — confirm.
#
# @return [void]
def initialize_federation_crawl_state!
  @federation_crawl_init_mutex ||= Mutex.new
  return if instance_variable_defined?(:@federation_crawl_mutex) && @federation_crawl_mutex
  @federation_crawl_init_mutex.synchronize do
    # Re-check under the lock: another thread may have initialized first.
    return if instance_variable_defined?(:@federation_crawl_mutex) && @federation_crawl_mutex
    @federation_crawl_mutex = Mutex.new
    @federation_crawl_in_flight = Set.new
    @federation_crawl_last_completed_at = {}
  end
end
# Retrieve the cooldown period used for duplicate crawl suppression.
#
# Thin delegator so callers resolve the configured value lazily at call time.
#
# @return [Integer] seconds a domain remains in cooldown after completion.
def federation_crawl_cooldown_seconds
  PotatoMesh::Config.federation_crawl_cooldown_seconds
end
# Mark a domain crawl as claimed if no active or recent crawl exists.
#
# @param domain [String] canonical domain name.
# @return [Symbol] +:claimed+, +:in_flight+, or +:cooldown+.
def claim_federation_crawl_slot(domain)
  initialize_federation_crawl_state!
  now = Time.now.to_i
  @federation_crawl_mutex.synchronize do
    # The same domain is already being crawled by another worker.
    return :in_flight if @federation_crawl_in_flight.include?(domain)
    last_completed = @federation_crawl_last_completed_at[domain]
    # Suppress re-crawls that completed within the cooldown window.
    if last_completed && now - last_completed < federation_crawl_cooldown_seconds
      return :cooldown
    end
    # Record the claim under the lock so concurrent callers see it in flight.
    @federation_crawl_in_flight << domain
    :claimed
  end
end
# Release an in-flight crawl claim and record completion timestamp.
#
# @param domain [String] canonical domain name.
# @param record_completion [Boolean] true to apply cooldown tracking.
# @return [void]
def release_federation_crawl_slot(domain, record_completion: true)
  return unless domain
  # Lazily create the shared state in case release runs before any claim.
  initialize_federation_crawl_state!
  @federation_crawl_mutex.synchronize do
    @federation_crawl_in_flight.delete(domain)
    # The completion timestamp starts the cooldown window; callers pass
    # record_completion: false when the crawl never actually ran.
    @federation_crawl_last_completed_at[domain] = Time.now.to_i if record_completion
  end
end
# Clear all in-memory crawl scheduling state.
#
# @return [void]
def clear_federation_crawl_state!
  initialize_federation_crawl_state!
  @federation_crawl_mutex.synchronize do
    # Forget both active claims and cooldown timestamps so the next
    # scheduling pass starts from a blank slate.
    @federation_crawl_in_flight.clear
    @federation_crawl_last_completed_at.clear
  end
end
# Recursively ingest federation records exposed by the supplied domain.
@@ -987,7 +724,6 @@ module PotatoMesh
)
sanitized = sanitize_instance_domain(domain)
return visited || Set.new unless sanitized
return visited || Set.new if federation_shutdown_requested?
visited ||= Set.new
@@ -1022,8 +758,6 @@ module PotatoMesh
processed_entries = 0
recent_cutoff = Time.now.to_i - PotatoMesh::Config.remote_instance_max_node_age
payload.each do |entry|
break if federation_shutdown_requested?
if per_response_limit && per_response_limit.positive? && processed_entries >= per_response_limit
debug_log(
"Skipped remote instance entry due to response limit",
@@ -1077,33 +811,21 @@ module PotatoMesh
attributes[:is_private] = false if attributes[:is_private].nil?
stats_payload, stats_metadata = fetch_instance_json(attributes[:domain], "/api/stats")
stats_count = remote_active_node_count_from_stats(
stats_payload,
max_age_seconds: PotatoMesh::Config.remote_instance_max_node_age,
)
attributes[:nodes_count] = stats_count if stats_count
nodes_since_path = "/api/nodes?since=#{recent_cutoff}&limit=1000"
nodes_since_window, nodes_since_metadata = fetch_instance_json(attributes[:domain], nodes_since_path)
if stats_count.nil? && attributes[:nodes_count].nil? && nodes_since_window.is_a?(Array)
if nodes_since_window.is_a?(Array)
attributes[:nodes_count] = nodes_since_window.length
elsif nodes_since_metadata
warn_log(
"Failed to load remote node window",
context: "federation.instances",
domain: attributes[:domain],
reason: Array(nodes_since_metadata).map(&:to_s).join("; "),
)
end
remote_nodes, node_metadata = fetch_instance_json(attributes[:domain], "/api/nodes")
remote_nodes = nodes_since_window if remote_nodes.nil? && nodes_since_window.is_a?(Array)
if attributes[:nodes_count].nil? && remote_nodes.is_a?(Array)
attributes[:nodes_count] = remote_nodes.length
end
if stats_count.nil? && Array(stats_metadata).any?
debug_log(
"Remote instance /api/stats unavailable; using node list fallback",
context: "federation.instances",
domain: attributes[:domain],
reason: Array(stats_metadata).map(&:to_s).join("; "),
)
end
remote_nodes ||= nodes_since_window if nodes_since_window.is_a?(Array)
unless remote_nodes
warn_log(
"Failed to load remote node data",
@@ -29,6 +29,8 @@ module PotatoMesh
DEFAULT_PSK_B64 = "AQ=="
TEXT_MESSAGE_PORTNUM = 1
# Number of characters required for full confidence scoring.
CONFIDENCE_LENGTH_TARGET = 8.0
# Decrypt an encrypted Meshtastic payload into UTF-8 text.
#
@@ -78,12 +80,21 @@ module PotatoMesh
return nil unless data
text = nil
decryption_confidence = nil
if data[:portnum] == TEXT_MESSAGE_PORTNUM
candidate = data[:payload].dup.force_encoding("UTF-8")
text = candidate if candidate.valid_encoding? && !candidate.empty?
if candidate.valid_encoding? && !candidate.empty?
text = candidate
decryption_confidence = text_confidence(text)
end
end
{ portnum: data[:portnum], payload: data[:payload], text: text }
{
portnum: data[:portnum],
payload: data[:payload],
text: text,
decryption_confidence: decryption_confidence,
}
rescue ArgumentError, OpenSSL::Cipher::CipherError
nil
end
@@ -154,6 +165,25 @@ module PotatoMesh
nil
end
# Score the plausibility of decrypted text content.
#
# Combines three multiplicative signals: a length score that saturates at
# CONFIDENCE_LENGTH_TARGET characters, the fraction of characters drawn from
# acceptable classes (letters, numbers, punctuation, symbols, spaces, and the
# whitespace characters \t \n \r), and a penalty for control/surrogate
# characters. Fix: \t, \n, and \r are Unicode Cc characters, so the original
# control scan double-penalized text containing the very whitespace that the
# acceptable class whitelists; they are now excluded from the control count
# via character-class intersection.
#
# @param text [String] decrypted text candidate.
# @return [Float] confidence score between 0.0 and 1.0.
def text_confidence(text)
  return 0.0 unless text.is_a?(String)
  return 0.0 if text.empty?
  total = text.length.to_f
  # Longer samples provide more evidence; cap the contribution at 1.0.
  length_score = [total / CONFIDENCE_LENGTH_TARGET, 1.0].min
  # Control (Cc) and surrogate (Cs) characters, excluding whitelisted \t \n \r.
  control_count = text.scan(/[\p{Cc}\p{Cs}&&[^\t\n\r]]/).length
  control_ratio = control_count / total
  acceptable_count = text.scan(/[\p{L}\p{N}\p{P}\p{S}\p{Zs}\t\n\r]/).length
  acceptable_ratio = acceptable_count / total
  score = length_score * acceptable_ratio * (1.0 - control_ratio)
  score.clamp(0.0, 1.0)
end
# Resolve the node number from any of the supported identifiers.
#
# @param from_id [String, nil] Meshtastic node identifier.
+22 -38
View File
@@ -127,43 +127,6 @@ module PotatoMesh
[threshold, floor].max
end
# Return exact active-node counts across common activity windows.
#
# Counts are resolved directly in SQL with COUNT(*) thresholds against
# +nodes.last_heard+ to avoid sampling bias from list endpoint limits.
#
# @param now [Integer] reference unix timestamp in seconds.
# @param db [SQLite3::Database, nil] optional open database handle to reuse.
# @return [Hash{String => Integer}] counts keyed by hour/day/week/month.
def query_active_node_stats(now: Time.now.to_i, db: nil)
handle = db || open_database(readonly: true)
handle.results_as_hash = true
reference_now = coerce_integer(now) || Time.now.to_i
hour_cutoff = reference_now - 3600
day_cutoff = reference_now - 86_400
week_cutoff = reference_now - PotatoMesh::Config.week_seconds
month_cutoff = reference_now - (30 * 24 * 60 * 60)
private_filter = private_mode? ? " AND (role IS NULL OR role <> 'CLIENT_HIDDEN')" : ""
sql = <<~SQL
SELECT
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{private_filter}) AS hour_count,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{private_filter}) AS day_count,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{private_filter}) AS week_count,
(SELECT COUNT(*) FROM nodes WHERE last_heard >= ?#{private_filter}) AS month_count
SQL
row = with_busy_retry do
handle.get_first_row(sql, [hour_cutoff, day_cutoff, week_cutoff, month_cutoff])
end || {}
{
"hour" => row["hour_count"].to_i,
"day" => row["day_count"].to_i,
"week" => row["week_count"].to_i,
"month" => row["month_count"].to_i,
}
ensure
handle&.close unless db
end
def node_reference_tokens(node_ref)
parts = canonical_node_parts(node_ref)
canonical_id, numeric_id = parts ? parts[0, 2] : [nil, nil]
@@ -394,7 +357,7 @@ module PotatoMesh
SELECT m.id, m.rx_time, m.rx_iso, m.from_id, m.to_id, m.channel,
m.portnum, m.text, m.encrypted, m.rssi, m.hop_limit,
m.lora_freq, m.modem_preset, m.channel_name, m.snr,
m.reply_id, m.emoji, m.ingestor
m.reply_id, m.emoji, m.decrypted, m.decryption_confidence
FROM messages m
SQL
sql += " WHERE #{where_clauses.join(" AND ")}\n"
@@ -411,6 +374,27 @@ module PotatoMesh
if string_or_nil(r["encrypted"])
r.delete("portnum")
end
if r.key?("decrypted")
decrypted_raw = r["decrypted"]
decrypted = case decrypted_raw
when true, false
decrypted_raw
when Integer
!decrypted_raw.zero?
when String
trimmed = decrypted_raw.strip
!trimmed.empty? && trimmed != "0" && trimmed.casecmp("false") != 0
else
!!decrypted_raw
end
r["decrypted"] = decrypted
r.delete("decryption_confidence") unless decrypted
end
if r.key?("decryption_confidence") && !r["decryption_confidence"].nil?
r["decryption_confidence"] = r["decryption_confidence"].to_f
end
if PotatoMesh::Config.debug? && (r["from_id"].nil? || r["from_id"].to_s.strip.empty?)
raw = db.execute("SELECT * FROM messages WHERE id = ?", [r["id"]]).first
debug_log(
@@ -67,14 +67,6 @@ module PotatoMesh
query_nodes(limit, since: params["since"]).to_json
end
app.get "/api/stats" do
content_type :json
{
active_nodes: query_active_node_stats,
sampled: false,
}.to_json
end
app.get "/api/nodes/:id" do
content_type :json
node_ref = string_or_nil(params["id"])
+1 -34
View File
@@ -37,14 +37,11 @@ module PotatoMesh
DEFAULT_MAX_DISTANCE_KM = 42.0
DEFAULT_REMOTE_INSTANCE_CONNECT_TIMEOUT = 15
DEFAULT_REMOTE_INSTANCE_READ_TIMEOUT = 60
DEFAULT_REMOTE_INSTANCE_REQUEST_TIMEOUT = 30
DEFAULT_FEDERATION_MAX_INSTANCES_PER_RESPONSE = 64
DEFAULT_FEDERATION_MAX_DOMAINS_PER_CRAWL = 256
DEFAULT_FEDERATION_WORKER_POOL_SIZE = 4
DEFAULT_FEDERATION_WORKER_QUEUE_CAPACITY = 128
DEFAULT_FEDERATION_TASK_TIMEOUT_SECONDS = 120
DEFAULT_FEDERATION_SHUTDOWN_TIMEOUT_SECONDS = 3
DEFAULT_FEDERATION_CRAWL_COOLDOWN_SECONDS = 300
DEFAULT_INITIAL_FEDERATION_DELAY_SECONDS = 2
DEFAULT_FEDERATION_SEED_DOMAINS = %w[potatomesh.net potatomesh.jmrp.io mesh.qrp.ro].freeze
@@ -187,7 +184,7 @@ module PotatoMesh
#
# @return [String] semantic version identifier.
def version_fallback
"0.5.11"
"0.5.10"
end
# Default refresh interval for frontend polling routines.
@@ -353,16 +350,6 @@ module PotatoMesh
)
end
# End-to-end timeout applied to each outbound federation HTTP request.
#
# @return [Integer] maximum request duration in seconds.
def remote_instance_request_timeout
fetch_positive_integer(
"REMOTE_INSTANCE_REQUEST_TIMEOUT",
DEFAULT_REMOTE_INSTANCE_REQUEST_TIMEOUT,
)
end
# Limit the number of remote instances processed from a single response.
#
# @return [Integer] maximum entries processed per /api/instances payload.
@@ -413,26 +400,6 @@ module PotatoMesh
)
end
# Determine how long shutdown waits before forcing federation thread exit.
#
# @return [Integer] per-thread shutdown timeout in seconds.
def federation_shutdown_timeout_seconds
fetch_positive_integer(
"FEDERATION_SHUTDOWN_TIMEOUT",
DEFAULT_FEDERATION_SHUTDOWN_TIMEOUT_SECONDS,
)
end
# Define how long finished crawl domains remain on cooldown.
#
# @return [Integer] cooldown window in seconds.
def federation_crawl_cooldown_seconds
fetch_positive_integer(
"FEDERATION_CRAWL_COOLDOWN",
DEFAULT_FEDERATION_CRAWL_COOLDOWN_SECONDS,
)
end
# Maximum acceptable age for remote node data.
#
# @return [Integer] seconds before remote nodes are considered stale.
+2 -2
View File
@@ -1,12 +1,12 @@
{
"name": "potato-mesh",
"version": "0.5.11",
"version": "0.5.10",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "potato-mesh",
"version": "0.5.11",
"version": "0.5.10",
"devDependencies": {
"istanbul-lib-coverage": "^3.2.2",
"istanbul-lib-report": "^3.0.1",
+1 -1
View File
@@ -1,6 +1,6 @@
{
"name": "potato-mesh",
"version": "0.5.11",
"version": "0.5.10",
"type": "module",
"private": true,
"scripts": {
@@ -62,22 +62,6 @@ function buildModel(overrides = {}) {
});
}
function findChannelByLabel(model, label) {
return model.channels.find(channel => channel.label === label);
}
function assertChannelMessages(model, { label, id, index, messageIds }) {
const channel = findChannelByLabel(model, label);
assert.ok(channel);
if (id instanceof RegExp) {
assert.match(channel.id, id);
} else {
assert.equal(channel.id, id);
}
assert.equal(channel.index, index);
assert.deepEqual(channel.entries.map(entry => entry.message.id), messageIds);
}
test('buildChatTabModel returns sorted nodes and channel buckets', () => {
const model = buildModel();
assert.equal(model.logEntries.length, 3);
@@ -91,13 +75,12 @@ test('buildChatTabModel returns sorted nodes and channel buckets', () => {
['recent-node', 'iso-node', 'encrypted']
);
assert.equal(model.channels.length, 6);
assert.equal(model.channels.length, 5);
assert.deepEqual(model.channels.map(channel => channel.label), [
'EnvDefault',
'Fallback',
'MediumFast',
'ShortFast',
'1',
'BerlinMesh'
]);
@@ -123,16 +106,11 @@ test('buildChatTabModel returns sorted nodes and channel buckets', () => {
assert.equal(presetChannel.id, 'channel-0-shortfast');
assert.deepEqual(presetChannel.entries.map(entry => entry.message.id), ['primary-preset']);
const unnamedSecondaryChannel = channelByLabel['1'];
assert.equal(unnamedSecondaryChannel.index, 1);
assert.equal(unnamedSecondaryChannel.id, 'channel-1');
assert.deepEqual(unnamedSecondaryChannel.entries.map(entry => entry.message.id), ['iso-ts']);
const secondaryChannel = channelByLabel.BerlinMesh;
assert.equal(secondaryChannel.index, 1);
assert.match(secondaryChannel.id, /^channel-secondary-name-berlinmesh-[a-z0-9]+$/);
assert.equal(secondaryChannel.entries.length, 1);
assert.deepEqual(secondaryChannel.entries.map(entry => entry.message.id), ['recent-alt']);
assert.equal(secondaryChannel.id, 'channel-secondary-berlinmesh');
assert.equal(secondaryChannel.entries.length, 2);
assert.deepEqual(secondaryChannel.entries.map(entry => entry.message.id), ['iso-ts', 'recent-alt']);
});
test('buildChatTabModel skips channel buckets when there are no messages', () => {
@@ -294,7 +272,7 @@ test('buildChatTabModel ignores plaintext log-only entries', () => {
assert.equal(encryptedEntries[0]?.message?.id, 'enc');
});
test('buildChatTabModel merges secondary channels with matching labels across indexes', () => {
test('buildChatTabModel merges secondary channels with matching labels regardless of index', () => {
const primaryId = 'primary';
const secondaryFirstId = 'secondary-one';
const secondarySecondId = 'secondary-two';
@@ -318,139 +296,55 @@ test('buildChatTabModel merges secondary channels with matching labels across in
assert.equal(primaryChannel.entries.length, 1);
assert.equal(primaryChannel.entries[0]?.message?.id, primaryId);
const mergedSecondaryChannel = meshChannels.find(channel => channel.index === 3);
assert.ok(mergedSecondaryChannel);
assert.match(mergedSecondaryChannel.id, /^channel-secondary-name-meshtown-[a-z0-9]+$/);
assert.deepEqual(
mergedSecondaryChannel.entries.map(entry => entry.message.id),
[secondaryFirstId, secondarySecondId]
);
const secondaryChannel = meshChannels.find(channel => channel.index > 0);
assert.ok(secondaryChannel);
assert.equal(secondaryChannel.id, 'channel-secondary-meshtown');
assert.equal(secondaryChannel.index, 3);
assert.deepEqual(secondaryChannel.entries.map(entry => entry.message.id), [secondaryFirstId, secondarySecondId]);
});
test('buildChatTabModel keeps unnamed secondary buckets separate when a label later arrives', () => {
const scenarios = [
{
index: 4,
label: 'SideMesh',
messages: [
{ id: 'unnamed', rx_time: NOW - 15, channel: 4 },
{ id: 'named', rx_time: NOW - 10, channel: 4, channel_name: 'SideMesh' }
],
namedId: /^channel-secondary-name-sidemesh-[a-z0-9]+$/,
namedMessages: ['named'],
unnamedMessages: ['unnamed']
},
{
index: 5,
label: 'MeshNorth',
messages: [
{ id: 'named', rx_time: NOW - 12, channel: 5, channel_name: 'MeshNorth' },
{ id: 'unlabeled', rx_time: NOW - 8, channel: 5 }
],
namedId: /^channel-secondary-name-meshnorth-[a-z0-9]+$/,
namedMessages: ['named'],
unnamedMessages: ['unlabeled']
}
];
for (const scenario of scenarios) {
const model = buildChatTabModel({
nodes: [],
messages: scenario.messages,
nowSeconds: NOW,
windowSeconds: WINDOW
});
const secondaryChannels = model.channels.filter(channel => channel.index === scenario.index);
assert.equal(secondaryChannels.length, 2);
assertChannelMessages(model, {
label: scenario.label,
id: scenario.namedId,
index: scenario.index,
messageIds: scenario.namedMessages
});
assertChannelMessages(model, {
label: String(scenario.index),
id: `channel-${scenario.index}`,
index: scenario.index,
messageIds: scenario.unnamedMessages
});
}
});
test('buildChatTabModel keeps same-index channels with different names in separate tabs', () => {
test('buildChatTabModel rekeys unnamed secondary buckets when a label later arrives', () => {
const unnamedId = 'unnamed';
const namedId = 'named';
const label = 'SideMesh';
const index = 4;
const model = buildChatTabModel({
nodes: [],
messages: [
{ id: 'public-msg', rx_time: NOW - 12, channel: 1, channel_name: 'PUBLIC' },
{ id: 'berlin-msg', rx_time: NOW - 8, channel: 1, channel_name: 'BerlinMesh' }
{ id: unnamedId, rx_time: NOW - 15, channel: index },
{ id: namedId, rx_time: NOW - 10, channel: index, channel_name: label }
],
nowSeconds: NOW,
windowSeconds: WINDOW
});
assertChannelMessages(model, {
label: 'PUBLIC',
id: /^channel-secondary-name-public-[a-z0-9]+$/,
index: 1,
messageIds: ['public-msg']
});
assertChannelMessages(model, {
label: 'BerlinMesh',
id: /^channel-secondary-name-berlinmesh-[a-z0-9]+$/,
index: 1,
messageIds: ['berlin-msg']
});
const secondaryChannels = model.channels.filter(channel => channel.index === index);
assert.equal(secondaryChannels.length, 1);
const [secondaryChannel] = secondaryChannels;
assert.equal(secondaryChannel.id, 'channel-secondary-sidemesh');
assert.equal(secondaryChannel.label, label);
assert.deepEqual(secondaryChannel.entries.map(entry => entry.message.id), [unnamedId, namedId]);
});
test('buildChatTabModel merges same-name channels even when indexes differ', () => {
test('buildChatTabModel merges unlabeled secondary messages into existing named buckets by index', () => {
const namedId = 'named';
const unlabeledId = 'unlabeled';
const label = 'MeshNorth';
const index = 5;
const model = buildChatTabModel({
nodes: [],
messages: [
{ id: 'test-1', rx_time: NOW - 12, channel: 1, channel_name: 'TEST' },
{ id: 'test-2', rx_time: NOW - 8, channel: 2, channel_name: 'TEST' }
{ id: namedId, rx_time: NOW - 12, channel: index, channel_name: label },
{ id: unlabeledId, rx_time: NOW - 8, channel: index }
],
nowSeconds: NOW,
windowSeconds: WINDOW
});
assertChannelMessages(model, {
label: 'TEST',
id: /^channel-secondary-name-test-[a-z0-9]+$/,
index: 1,
messageIds: ['test-1', 'test-2']
});
});
test('buildChatTabModel keeps same-index slug-colliding labels on distinct tab ids', () => {
const model = buildChatTabModel({
nodes: [],
messages: [
{ id: 'foo-space', rx_time: NOW - 10, channel: 1, channel_name: 'Foo Bar' },
{ id: 'foo-dash', rx_time: NOW - 8, channel: 1, channel_name: 'Foo-Bar' }
],
nowSeconds: NOW,
windowSeconds: WINDOW
});
const fooSpaceChannel = findChannelByLabel(model, 'Foo Bar');
const fooDashChannel = findChannelByLabel(model, 'Foo-Bar');
assert.ok(fooSpaceChannel);
assert.ok(fooDashChannel);
assert.match(fooSpaceChannel.id, /^channel-secondary-name-foo-bar-[a-z0-9]+$/);
assert.match(fooDashChannel.id, /^channel-secondary-name-foo-bar-[a-z0-9]+$/);
assert.notEqual(fooSpaceChannel.id, fooDashChannel.id);
});
test('buildChatTabModel falls back to hashed id for unsluggable secondary labels', () => {
const model = buildChatTabModel({
nodes: [],
messages: [{ id: 'hash-fallback', rx_time: NOW - 5, channel: 2, channel_name: '###' }],
nowSeconds: NOW,
windowSeconds: WINDOW
});
const channel = findChannelByLabel(model, '###');
assert.ok(channel);
assert.equal(channel.index, 2);
assert.ok(channel.id.startsWith('channel-secondary-name-'));
assert.ok(channel.id.length > 'channel-secondary-name-'.length);
const secondaryChannels = model.channels.filter(channel => channel.index === index);
assert.equal(secondaryChannels.length, 1);
const [secondaryChannel] = secondaryChannels;
assert.equal(secondaryChannel.id, 'channel-secondary-meshnorth');
assert.equal(secondaryChannel.label, label);
assert.deepEqual(secondaryChannel.entries.map(entry => entry.message.id), [namedId, unlabeledId]);
});
@@ -1,64 +0,0 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import {
filterDisplayableFederationInstances,
isSuppressedFederationSiteName,
resolveFederationInstanceLabel,
resolveFederationInstanceSortValue,
resolveFederationSiteNameForDisplay,
shouldDisplayFederationInstance,
truncateFederationSiteName
} from '../federation-instance-display.js';
// URL-like tokens (http://, https://, www.) mark a site name as advertising
// and suppress it; ordinary names, empty strings and null pass through.
test('isSuppressedFederationSiteName detects URL-like advertising names', () => {
assert.equal(isSuppressedFederationSiteName('http://spam.example offer'), true);
assert.equal(isSuppressedFederationSiteName('Visit www.spam.example today'), true);
assert.equal(isSuppressedFederationSiteName('Mesh Collective'), false);
assert.equal(isSuppressedFederationSiteName(''), false);
assert.equal(isSuppressedFederationSiteName(null), false);
});
// Names over 32 characters are cut to 29 characters plus a '...' suffix so
// the rendered label stays within the 32-character budget; shorter names and
// non-strings are left alone / mapped to ''.
test('truncateFederationSiteName shortens names longer than 32 characters', () => {
assert.equal(truncateFederationSiteName('Short Mesh'), 'Short Mesh');
assert.equal(
truncateFederationSiteName('abcdefghijklmnopqrstuvwxyz1234567890'),
'abcdefghijklmnopqrstuvwxyz123...'
);
// Exactly-32-character input stays 32 characters long (no truncation).
assert.equal(truncateFederationSiteName('abcdefghijklmnopqrstuvwxyz123456').length, 32);
assert.equal(truncateFederationSiteName(null), '');
});
// End-to-end pass over the display helpers: suppressed entries are hidden,
// unnamed entries fall back to their domain, and sorting uses the full name.
test('display helpers filter suppressed names and preserve original domains', () => {
const entries = [
{ name: 'Normal Mesh', domain: 'normal.mesh' },
{ name: 'https://spam.example promo', domain: 'spam.mesh' },
{ domain: 'unnamed.mesh' }
];
assert.equal(shouldDisplayFederationInstance(entries[0]), true);
assert.equal(shouldDisplayFederationInstance(entries[1]), false);
// Filtering keeps original entry objects (including the name-less one).
assert.deepEqual(filterDisplayableFederationInstances(entries), [
{ name: 'Normal Mesh', domain: 'normal.mesh' },
{ domain: 'unnamed.mesh' }
]);
assert.equal(resolveFederationSiteNameForDisplay(entries[0]), 'Normal Mesh');
assert.equal(resolveFederationInstanceLabel(entries[2]), 'unnamed.mesh');
assert.equal(resolveFederationInstanceSortValue(entries[0]), 'Normal Mesh');
});
@@ -21,74 +21,6 @@ import { createDomEnvironment } from './dom-environment.js';
import { initializeFederationPage } from '../federation-page.js';
import { roleColors } from '../role-helpers.js';
// Build a minimal DOM harness for federation-page tests: a map container, a
// status element, and an instances table with a tbody, plus an embedded
// [data-app-config] element resolvable through document.querySelector.
// Returns the dom-environment object extended with direct handles to the
// status element and table body for assertions.
function createBasicFederationPageHarness() {
const env = createDomEnvironment({ includeBody: true, bodyHasDarkClass: false });
const { document, createElement, registerElement } = env;
const mapEl = createElement('div', 'map');
registerElement('map', mapEl);
const statusEl = createElement('div', 'status');
registerElement('status', statusEl);
const tableEl = createElement('table', 'instances');
const tbodyEl = createElement('tbody');
registerElement('instances', tableEl);
tableEl.appendChild(tbodyEl);
const configEl = createElement('div');
configEl.setAttribute('data-app-config', JSON.stringify({ mapCenter: { lat: 0, lon: 0 }, mapZoom: 3 }));
// Only the two selectors the page code actually queries are stubbed;
// everything else deliberately resolves to null.
document.querySelector = selector => {
if (selector === '[data-app-config]') return configEl;
if (selector === '#instances tbody') return tbodyEl;
return null;
};
return { ...env, statusEl, tbodyEl };
}
// Build a minimal Leaflet stand-in exposing only the factory methods the
// federation page calls: map, tileLayer, layerGroup and circleMarker.
// When a markerPopups array is supplied, every bindPopup html payload is
// recorded there; when fitBounds is true the map object exposes a no-op
// fitBounds function (otherwise the property is undefined).
function createBasicLeafletStub(options = {}) {
const { markerPopups = null, fitBounds = false } = options;
return {
map: () => ({
setView() {},
on() {},
fitBounds: fitBounds ? () => {} : undefined,
getPane() {
return null;
}
}),
tileLayer: () => ({
addTo() {
return this;
},
getContainer() {
return null;
},
on() {}
}),
layerGroup: () => ({
addLayer() {},
addTo() {
return this;
}
}),
circleMarker: () => ({
bindPopup(html) {
if (markerPopups) {
markerPopups.push(html);
}
return this;
}
})
};
}
test('federation map centers on configured coordinates and follows theme filters', async () => {
const env = createDomEnvironment({ includeBody: true, bodyHasDarkClass: true });
const { document, window, createElement, registerElement, cleanup } = env;
@@ -671,141 +603,57 @@ test('federation legend toggle respects media query changes', async () => {
});
test('federation page tolerates fetch failures', async () => {
const { cleanup } = createBasicFederationPageHarness();
const fetchImpl = async () => {
throw new Error('boom');
};
const leafletStub = createBasicLeafletStub();
await initializeFederationPage({ config: {}, fetchImpl, leaflet: leafletStub });
cleanup();
});
test('federation page suppresses spammy site names and truncates long names in visible UI', async () => {
const { cleanup, statusEl, tbodyEl } = createBasicFederationPageHarness();
const markerPopups = [];
const leafletStub = createBasicLeafletStub({ markerPopups, fitBounds: true });
const fetchImpl = async () => ({
ok: true,
json: async () => [
{
domain: 'visible.mesh',
name: 'abcdefghijklmnopqrstuvwxyz1234567890',
latitude: 1,
longitude: 1,
lastUpdateTime: Math.floor(Date.now() / 1000) - 30
},
{
domain: 'spam.mesh',
name: 'www.spam.example buy now',
latitude: 2,
longitude: 2,
lastUpdateTime: Math.floor(Date.now() / 1000) - 60
}
]
});
try {
await initializeFederationPage({ config: {}, fetchImpl, leaflet: leafletStub });
assert.equal(statusEl.textContent, '1 instances');
assert.equal(tbodyEl.childNodes.length, 1);
assert.match(tbodyEl.childNodes[0].innerHTML, /abcdefghijklmnopqrstuvwxyz123\.\.\./);
assert.doesNotMatch(tbodyEl.childNodes[0].innerHTML, /spam\.mesh/);
assert.equal(markerPopups.length, 1);
assert.match(markerPopups[0], /abcdefghijklmnopqrstuvwxyz123\.\.\./);
assert.doesNotMatch(markerPopups[0], /www\.spam\.example/);
} finally {
cleanup();
}
});
test('federation page sorts by full site names before truncating visible labels', async () => {
const env = createDomEnvironment({ includeBody: true, bodyHasDarkClass: false });
const { document, createElement, registerElement, cleanup } = env;
const sharedPrefix = 'abcdefghijklmnopqrstuvwxyz123';
const mapEl = createElement('div', 'map');
registerElement('map', mapEl);
const statusEl = createElement('div', 'status');
registerElement('status', statusEl);
const tableEl = createElement('table', 'instances');
const tbodyEl = createElement('tbody');
registerElement('instances', tableEl);
tableEl.appendChild(tbodyEl);
const headerNameTh = createElement('th');
const headerName = createElement('span');
headerName.classList.add('sort-header');
headerName.dataset.sortKey = 'name';
headerName.dataset.sortLabel = 'Name';
headerNameTh.appendChild(headerName);
const ths = [headerNameTh];
const headers = [headerName];
const headerHandlers = new Map();
headers.forEach(header => {
header.addEventListener = (event, handler) => {
const existing = headerHandlers.get(header) || {};
existing[event] = handler;
headerHandlers.set(header, existing);
};
header.closest = () => ths.find(th => th.childNodes.includes(header));
header.querySelector = () => null;
});
tableEl.querySelectorAll = selector => {
if (selector === 'thead .sort-header[data-sort-key]') return headers;
if (selector === 'thead th') return ths;
return [];
};
const configEl = createElement('div');
configEl.setAttribute('data-app-config', JSON.stringify({ mapCenter: { lat: 0, lon: 0 }, mapZoom: 3 }));
configEl.setAttribute('data-app-config', JSON.stringify({}));
document.querySelector = selector => {
if (selector === '[data-app-config]') return configEl;
if (selector === '#instances tbody') return tbodyEl;
return null;
};
const fetchImpl = async () => ({
ok: true,
json: async () => [
{
domain: 'zeta.mesh',
name: `${sharedPrefix}zeta suffix`,
latitude: 1,
longitude: 1,
lastUpdateTime: Math.floor(Date.now() / 1000) - 30
},
{
domain: 'alpha.mesh',
name: `${sharedPrefix}alpha suffix`,
latitude: 2,
longitude: 2,
lastUpdateTime: Math.floor(Date.now() / 1000) - 60
}
]
});
const leafletStub = {
map() {
return {
setView() {},
on() {},
getPane() {
return null;
}
};
},
tileLayer() {
return {
addTo() {
return this;
},
getContainer() {
return null;
},
on() {}
};
},
layerGroup() {
return { addLayer() {}, addTo() { return this; } };
},
circleMarker() {
return { bindPopup() { return this; } };
}
};
try {
await initializeFederationPage({
config: {},
fetchImpl,
leaflet: createBasicLeafletStub({ fitBounds: true })
});
const fetchImpl = async () => {
throw new Error('boom');
};
const nameHandlers = headerHandlers.get(headerName);
nameHandlers.click();
assert.match(tbodyEl.childNodes[0].innerHTML, /alpha\.mesh/);
assert.match(tbodyEl.childNodes[1].innerHTML, /zeta\.mesh/);
assert.match(tbodyEl.childNodes[0].innerHTML, /abcdefghijklmnopqrstuvwxyz123\.\.\./);
assert.match(tbodyEl.childNodes[1].innerHTML, /abcdefghijklmnopqrstuvwxyz123\.\.\./);
} finally {
cleanup();
}
await initializeFederationPage({ config: {}, fetchImpl, leaflet: leafletStub });
cleanup();
});
@@ -154,75 +154,6 @@ test('initializeInstanceSelector populates options alphabetically and selects th
}
});
test('initializeInstanceSelector hides suppressed names and truncates long labels', async () => {
const env = createDomEnvironment();
const select = setupSelectElement(env.document);
const navLink = env.document.createElement('a');
navLink.classList.add('js-federation-nav');
navLink.textContent = 'Federation';
env.document.body.appendChild(navLink);
const fetchImpl = async () => ({
ok: true,
async json() {
return [
{ name: 'Visit https://spam.example now', domain: 'spam.mesh' },
{ name: 'abcdefghijklmnopqrstuvwxyz1234567890', domain: 'long.mesh' },
{ name: 'Alpha Mesh', domain: 'alpha.mesh' }
];
}
});
try {
await initializeInstanceSelector({
selectElement: select,
fetchImpl,
windowObject: env.window,
documentObject: env.document
});
assert.equal(select.options.length, 3);
assert.equal(select.options[1].textContent, 'abcdefghijklmnopqrstuvwxyz123...');
assert.equal(select.options[2].textContent, 'Alpha Mesh');
assert.equal(navLink.textContent, 'Federation (2)');
assert.equal(select.options.some(option => option.value === 'spam.mesh'), false);
} finally {
env.cleanup();
}
});
test('initializeInstanceSelector sorts by full site names before truncating labels', async () => {
const env = createDomEnvironment();
const select = setupSelectElement(env.document);
const sharedPrefix = 'abcdefghijklmnopqrstuvwxyz123';
const fetchImpl = async () => ({
ok: true,
async json() {
return [
{ name: `${sharedPrefix}zeta suffix`, domain: 'zeta.mesh' },
{ name: `${sharedPrefix}alpha suffix`, domain: 'alpha.mesh' }
];
}
});
try {
await initializeInstanceSelector({
selectElement: select,
fetchImpl,
windowObject: env.window,
documentObject: env.document
});
assert.equal(select.options[1].value, 'alpha.mesh');
assert.equal(select.options[2].value, 'zeta.mesh');
assert.equal(select.options[1].textContent, 'abcdefghijklmnopqrstuvwxyz123...');
assert.equal(select.options[2].textContent, 'abcdefghijklmnopqrstuvwxyz123...');
} finally {
env.cleanup();
}
});
test('initializeInstanceSelector navigates to the chosen instance domain', async () => {
const env = createDomEnvironment();
const select = setupSelectElement(env.document);
@@ -1,210 +0,0 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import test from 'node:test';
import assert from 'node:assert/strict';
import {
computeLocalActiveNodeStats,
fetchActiveNodeStats,
formatActiveNodeStatsText,
normaliseActiveNodeStatsPayload,
} from '../main.js';
const NOW = 1_700_000_000;
// Nodes heard within the last hour/day/week/month are counted cumulatively
// (each window includes the smaller ones); the result is flagged sampled:true
// because it was computed locally rather than fetched from /api/stats.
test('computeLocalActiveNodeStats calculates local hour/day/week/month counts', () => {
const nodes = [
{ last_heard: NOW - 60 },
{ last_heard: NOW - 4_000 },
{ last_heard: NOW - 90_000 },
{ last_heard: NOW - (8 * 86_400) },
{ last_heard: NOW - (20 * 86_400) },
];
const stats = computeLocalActiveNodeStats(nodes, NOW);
assert.deepEqual(stats, {
hour: 1,
day: 2,
week: 3,
month: 5,
sampled: true,
});
});
test('normaliseActiveNodeStatsPayload validates and normalizes API payload', () => {
const payload = {
active_nodes: {
hour: '11',
day: 22,
week: 33,
month: 44,
},
sampled: false,
};
assert.deepEqual(normaliseActiveNodeStatsPayload(payload), {
hour: 11,
day: 22,
week: 33,
month: 44,
sampled: false,
});
assert.equal(normaliseActiveNodeStatsPayload({}), null);
});
test('normaliseActiveNodeStatsPayload rejects malformed stat values', () => {
assert.equal(
normaliseActiveNodeStatsPayload({ active_nodes: { hour: 'x', day: 1, week: 1, month: 1 } }),
null
);
assert.equal(
normaliseActiveNodeStatsPayload({ active_nodes: null }),
null
);
});
test('normaliseActiveNodeStatsPayload clamps negatives and truncates floats', () => {
assert.deepEqual(
normaliseActiveNodeStatsPayload({
active_nodes: { hour: -1.9, day: 2.8, week: 3.1, month: 4.9 },
sampled: 1
}),
{ hour: 0, day: 2, week: 3, month: 4, sampled: true }
);
});
test('fetchActiveNodeStats uses /api/stats when available', async () => {
const calls = [];
const fetchImpl = async (url) => {
calls.push(url);
return {
ok: true,
async json() {
return {
active_nodes: { hour: 5, day: 15, week: 25, month: 35 },
sampled: false,
};
},
};
};
const stats = await fetchActiveNodeStats({ nodes: [], nowSeconds: NOW, fetchImpl });
assert.equal(calls[0], '/api/stats');
assert.deepEqual(stats, {
hour: 5,
day: 15,
week: 25,
month: 35,
sampled: false,
});
});
test('fetchActiveNodeStats reuses cached /api/stats response for repeated calls', async () => {
const calls = [];
const fetchImpl = async (url) => {
calls.push(url);
return {
ok: true,
async json() {
return {
active_nodes: { hour: 2, day: 4, week: 6, month: 8 },
sampled: false,
};
},
};
};
const first = await fetchActiveNodeStats({ nodes: [], nowSeconds: NOW, fetchImpl });
const second = await fetchActiveNodeStats({ nodes: [], nowSeconds: NOW, fetchImpl });
assert.equal(calls.length, 1);
assert.deepEqual(first, second);
});
test('fetchActiveNodeStats falls back to local counts when stats fetch fails', async () => {
const nodes = [
{ last_heard: NOW - 120 },
{ last_heard: NOW - (10 * 86_400) },
];
const fetchImpl = async () => {
throw new Error('network down');
};
const stats = await fetchActiveNodeStats({ nodes, nowSeconds: NOW, fetchImpl });
assert.deepEqual(stats, {
hour: 1,
day: 1,
week: 1,
month: 2,
sampled: true,
});
});
test('fetchActiveNodeStats falls back to local counts on non-OK HTTP responses', async () => {
const stats = await fetchActiveNodeStats({
nodes: [{ last_heard: NOW - 10 }],
nowSeconds: NOW,
fetchImpl: async () => ({ ok: false, status: 503 })
});
assert.equal(stats.sampled, true);
assert.equal(stats.hour, 1);
});
test('fetchActiveNodeStats falls back to local counts on invalid payloads', async () => {
const stats = await fetchActiveNodeStats({
nodes: [{ last_heard: NOW - (31 * 86_400) }],
nowSeconds: NOW,
fetchImpl: async () => ({
ok: true,
async json() {
return { active_nodes: { hour: 'bad' } };
}
})
});
assert.equal(stats.sampled, true);
assert.equal(stats.month, 0);
});
test('formatActiveNodeStatsText emits expected dashboard string', () => {
const text = formatActiveNodeStatsText({
channel: 'LongFast',
frequency: '868MHz',
stats: { hour: 1, day: 2, week: 3, month: 4, sampled: false },
});
assert.equal(
text,
'LongFast (868MHz) — active nodes: 1/hour, 2/day, 3/week, 4/month.'
);
});
test('formatActiveNodeStatsText appends sampled marker when local fallback is used', () => {
const text = formatActiveNodeStatsText({
channel: 'LongFast',
frequency: '868MHz',
stats: { hour: 9, day: 8, week: 7, month: 6, sampled: true },
});
assert.equal(
text,
'LongFast (868MHz) — active nodes: 9/hour, 8/day, 7/week, 6/month (sampled).'
);
});
@@ -154,14 +154,12 @@ test('additional format helpers provide table friendly output', () => {
channel_name: 'Primary',
node: { short_name: 'SRCE', role: 'ROUTER', node_id: '!src' },
},
{ text: ' GAA= ', encrypted: true, rx_time: 1_700_000_405 },
{ emoji: '😊', rx_time: 1_700_000_401 },
],
renderShortHtml,
nodeContext,
);
assert.equal(messagesHtml.includes('hello'), true);
assert.equal(messagesHtml.includes('GAA='), false);
assert.equal(messagesHtml.includes('😊'), true);
assert.match(messagesHtml, /\[\d{4}-\d{2}-\d{2} \d{2}:\d{2}\]\[868\]/);
assert.equal(messagesHtml.includes('[868]'), true);
+42 -25
View File
@@ -20,7 +20,7 @@ import { extractModemMetadata } from './node-modem-metadata.js';
* Highest channel index that should be represented within the tab view.
* @type {number}
*/
export const MAX_CHANNEL_INDEX = 255;
export const MAX_CHANNEL_INDEX = 9;
/**
* Discrete event types that can appear in the chat activity log.
@@ -245,12 +245,28 @@ export function buildChatTabModel({
modemPreset,
envFallbackLabel: primaryChannelEnvLabel
});
const nameBucketKey = safeIndex > 0 ? buildSecondaryNameBucketKey(safeIndex, labelInfo) : null;
const nameBucketKey = safeIndex > 0 ? buildSecondaryNameBucketKey(labelInfo) : null;
const primaryBucketKey = safeIndex === 0 && labelInfo.label !== '0' ? buildPrimaryBucketKey(labelInfo.label) : '0';
const bucketKey = safeIndex === 0 ? primaryBucketKey : nameBucketKey ?? String(safeIndex);
let bucketKey = safeIndex === 0 ? primaryBucketKey : nameBucketKey ?? String(safeIndex);
let bucket = channelBuckets.get(bucketKey);
if (!bucket && safeIndex > 0) {
const existingBucketKey = findExistingBucketKeyByIndex(channelBuckets, safeIndex);
if (existingBucketKey) {
bucketKey = existingBucketKey;
bucket = channelBuckets.get(existingBucketKey);
}
}
if (bucket && nameBucketKey && bucket.key !== nameBucketKey) {
channelBuckets.delete(bucket.key);
bucket.key = nameBucketKey;
bucket.id = buildChannelTabId(nameBucketKey);
channelBuckets.set(nameBucketKey, bucket);
bucketKey = nameBucketKey;
}
if (!bucket) {
bucket = {
key: bucketKey,
@@ -553,42 +569,43 @@ function buildPrimaryBucketKey(primaryChannelLabel) {
return '0';
}
function buildSecondaryNameBucketKey(index, labelInfo) {
function buildSecondaryNameBucketKey(labelInfo) {
const label = labelInfo?.label ?? null;
const priority = labelInfo?.priority ?? CHANNEL_LABEL_PRIORITY.INDEX;
if (!Number.isFinite(index) || index <= 0 || priority !== CHANNEL_LABEL_PRIORITY.NAME || !label) {
if (priority !== CHANNEL_LABEL_PRIORITY.NAME || !label) {
return null;
}
const trimmedLabel = label.trim().toLowerCase();
if (!trimmedLabel.length) {
return null;
}
return `secondary-name::${trimmedLabel}`;
return `secondary::${trimmedLabel}`;
}
// Locate the key of an existing secondary channel bucket whose truncated
// index matches targetIndex. Returns null for a missing/empty map, a
// non-finite or non-positive target, or when no bucket matches. Buckets
// whose index is exactly 0 (the primary channel) are never returned.
function findExistingBucketKeyByIndex(channelBuckets, targetIndex) {
if (!channelBuckets || !Number.isFinite(targetIndex) || targetIndex <= 0) {
return null;
}
const wantedIndex = Math.trunc(targetIndex);
for (const [bucketKey, bucket] of channelBuckets.entries()) {
const bucketIndex = bucket?.index;
if (!Number.isFinite(bucketIndex)) {
continue;
}
if (Math.trunc(bucketIndex) !== wantedIndex) {
continue;
}
// Never hand back the primary (index 0) bucket.
if (bucketIndex === 0) {
continue;
}
return bucketKey;
}
return null;
}
function buildChannelTabId(bucketKey) {
if (bucketKey === '0') {
return 'channel-0';
}
const secondaryNameParts = /^secondary-name::(.+)$/.exec(String(bucketKey));
if (secondaryNameParts) {
const secondaryLabelSlug = slugify(secondaryNameParts[1]);
const secondaryHash = hashChannelKey(bucketKey);
if (secondaryLabelSlug) {
return `channel-secondary-name-${secondaryLabelSlug}-${secondaryHash}`;
}
return `channel-secondary-name-${secondaryHash}`;
}
const secondaryParts = /^secondary::(\d+)::(.+)$/.exec(String(bucketKey));
if (secondaryParts) {
const secondaryIndex = secondaryParts[1];
const secondaryLabelSlug = slugify(secondaryParts[2]);
const secondaryHash = hashChannelKey(bucketKey);
if (secondaryLabelSlug) {
return `channel-secondary-${secondaryIndex}-${secondaryLabelSlug}-${secondaryHash}`;
}
return `channel-secondary-${secondaryIndex}-${secondaryHash}`;
}
const slug = slugify(bucketKey);
if (slug) {
if (slug !== '0') {
@@ -1,172 +0,0 @@
/*
* Copyright © 2025-26 l5yth & contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const MAX_VISIBLE_SITE_NAME_LENGTH = 32;
const TRUNCATION_SUFFIX = '...';
const TRUNCATED_SITE_NAME_LENGTH = MAX_VISIBLE_SITE_NAME_LENGTH - TRUNCATION_SUFFIX.length;
const SUPPRESSED_SITE_NAME_PATTERN = /(?:^|[^a-z0-9])(?:https?:\/\/|www\.)\S+/i;
/**
 * Extract the site name of a federation instance entry as a trimmed string.
 *
 * @param {{ name?: string } | null | undefined} entry Federation instance payload entry.
 * @returns {string} Trimmed site name, or '' when the entry or name is absent.
 */
function readSiteName(entry) {
  const isObjectEntry = Boolean(entry) && typeof entry === 'object';
  const rawName = isObjectEntry ? entry.name : undefined;
  return typeof rawName === 'string' ? rawName.trim() : '';
}
/**
 * Extract the domain of a federation instance entry as a trimmed string.
 *
 * @param {{ domain?: string } | null | undefined} entry Federation instance payload entry.
 * @returns {string} Trimmed domain, or '' when the entry or domain is absent.
 */
function readDomain(entry) {
  const isObjectEntry = Boolean(entry) && typeof entry === 'object';
  const rawDomain = isObjectEntry ? entry.domain : undefined;
  return typeof rawDomain === 'string' ? rawDomain.trim() : '';
}
/**
 * Decide whether a remote site name should be hidden from frontend displays.
 *
 * A name is suppressed when it contains a URL-like advertising token as
 * matched by SUPPRESSED_SITE_NAME_PATTERN (http://, https:// or www. links).
 *
 * @param {string} name Remote site name.
 * @returns {boolean} true when the name looks like link spam.
 */
export function isSuppressedFederationSiteName(name) {
  if (typeof name !== 'string') {
    return false;
  }
  const candidate = name.trim();
  return candidate.length > 0 && SUPPRESSED_SITE_NAME_PATTERN.test(candidate);
}
/**
 * Shorten an instance site name for frontend display without mutating the
 * source data.
 *
 * Names longer than MAX_VISIBLE_SITE_NAME_LENGTH (32) characters are cut so
 * the result, including the trailing '...', fits inside that budget.
 *
 * @param {string} name Remote site name.
 * @returns {string} Display-ready site name ('' for non-string input).
 */
export function truncateFederationSiteName(name) {
  if (typeof name !== 'string') {
    return '';
  }
  const trimmed = name.trim();
  return trimmed.length > MAX_VISIBLE_SITE_NAME_LENGTH
    ? trimmed.slice(0, TRUNCATED_SITE_NAME_LENGTH) + TRUNCATION_SUFFIX
    : trimmed;
}
/**
 * Decide whether an instance should remain visible in frontend federation
 * views: visible unless its site name is suppressed as link spam.
 *
 * @param {{ name?: string } | null | undefined} entry Federation instance payload entry.
 * @returns {boolean} true when the entry should be shown to users.
 */
export function shouldDisplayFederationInstance(entry) {
  const siteName = readSiteName(entry);
  return isSuppressedFederationSiteName(siteName) === false;
}
/**
 * Resolve a frontend display name for a federation instance, truncating long
 * names to the visible budget.
 *
 * @param {{ name?: string } | null | undefined} entry Federation instance payload entry.
 * @returns {string} Display-ready site name, or '' when the entry has none.
 */
export function resolveFederationSiteNameForDisplay(entry) {
  const siteName = readSiteName(entry);
  if (!siteName) {
    return '';
  }
  return truncateFederationSiteName(siteName);
}
/**
 * Resolve the original trimmed site name for a federation instance.
 *
 * Unlike resolveFederationSiteNameForDisplay, this never truncates — callers
 * use it when the full name matters (e.g. as a sort key).
 *
 * @param {{ name?: string } | null | undefined} entry Federation instance payload entry.
 * @returns {string} Full trimmed site name or an empty string when absent.
 */
export function resolveFederationSiteName(entry) {
return readSiteName(entry);
}
/**
 * Determine the sort value for an instance selector entry.
 *
 * Sorting must use the full trimmed site name — not the truncated display
 * form — so truncation cannot collapse distinct entries into one comparison
 * key. Falls back to the domain when no site name is present.
 *
 * @param {{ name?: string, domain?: string } | null | undefined} entry Federation instance payload entry.
 * @returns {string} Full trimmed site name, or the domain as a fallback.
 */
export function resolveFederationInstanceSortValue(entry) {
  return resolveFederationSiteName(entry) || readDomain(entry);
}
/**
 * Determine the most suitable display label for an instance list entry:
 * the (possibly truncated) site name when present, otherwise the domain.
 *
 * @param {{ name?: string, domain?: string } | null | undefined} entry Federation instance payload entry.
 * @returns {string} Display label falling back to the domain.
 */
export function resolveFederationInstanceLabel(entry) {
  return resolveFederationSiteNameForDisplay(entry) || readDomain(entry);
}
/**
 * Filter a federation payload down to the instances that should stay visible
 * (i.e. whose site names are not suppressed as link spam).
 *
 * @param {Array<object>} entries Federation payload from the API.
 * @returns {Array<object>} Visible instances for frontend rendering; [] for non-array input.
 */
export function filterDisplayableFederationInstances(entries) {
  if (!Array.isArray(entries)) {
    return [];
  }
  const visibleEntries = [];
  for (const entry of entries) {
    if (shouldDisplayFederationInstance(entry)) {
      visibleEntries.push(entry);
    }
  }
  return visibleEntries;
}
export const __test__ = {
MAX_VISIBLE_SITE_NAME_LENGTH,
TRUNCATION_SUFFIX,
TRUNCATED_SITE_NAME_LENGTH,
readDomain,
readSiteName,
SUPPRESSED_SITE_NAME_PATTERN
};
+4 -16
View File
@@ -15,11 +15,6 @@
*/
import { readAppConfig } from './config.js';
import {
filterDisplayableFederationInstances,
resolveFederationSiteName,
resolveFederationSiteNameForDisplay
} from './federation-instance-display.js';
import { resolveLegendVisibility } from './map-legend-visibility.js';
import { mergeConfig } from './settings.js';
import { roleColors } from './role-helpers.js';
@@ -279,12 +274,7 @@ export async function initializeFederationPage(options = {}) {
? true
: legendCollapsedValue.trim() !== 'false';
const tableSorters = {
name: {
getValue: inst => resolveFederationSiteName(inst),
compare: compareString,
hasValue: hasStringValue,
defaultDirection: 'asc'
},
name: { getValue: inst => inst.name ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
domain: { getValue: inst => inst.domain ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
contact: { getValue: inst => inst.contactLink ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
version: { getValue: inst => inst.version ?? '', compare: compareString, hasValue: hasStringValue, defaultDirection: 'asc' },
@@ -373,8 +363,7 @@ export async function initializeFederationPage(options = {}) {
for (const instance of sorted) {
const tr = document.createElement('tr');
const url = buildInstanceUrl(instance.domain);
const displayName = resolveFederationSiteNameForDisplay(instance);
const nameHtml = displayName ? escapeHtml(displayName) : '<em>—</em>';
const nameHtml = instance.name ? escapeHtml(instance.name) : '<em>—</em>';
const domainHtml = url
? `<a href="${escapeHtml(url)}" target="_blank" rel="noopener">${escapeHtml(instance.domain || '')}</a>`
: escapeHtml(instance.domain || '');
@@ -540,7 +529,7 @@ export async function initializeFederationPage(options = {}) {
credentials: 'omit'
});
if (response.ok) {
instances = filterDisplayableFederationInstances(await response.json());
instances = await response.json();
}
} catch (err) {
console.warn('Failed to fetch federation instances', err);
@@ -647,8 +636,7 @@ export async function initializeFederationPage(options = {}) {
bounds.push([lat, lon]);
const displayName = resolveFederationSiteNameForDisplay(instance);
const name = displayName || instance.domain || 'Unknown';
const name = instance.name || instance.domain || 'Unknown';
const url = buildInstanceUrl(instance.domain);
const nodeCountValue = toFiniteNumber(instance.nodesCount ?? instance.nodes_count);
const popupLines = [
+19 -13
View File
@@ -14,12 +14,6 @@
* limitations under the License.
*/
import {
filterDisplayableFederationInstances,
resolveFederationInstanceLabel,
resolveFederationInstanceSortValue
} from './federation-instance-display.js';
/**
* Determine the most suitable label for an instance list entry.
*
@@ -27,7 +21,17 @@ import {
* @returns {string} Preferred display label falling back to the domain.
*/
function resolveInstanceLabel(entry) {
return resolveFederationInstanceLabel(entry);
if (!entry || typeof entry !== 'object') {
return '';
}
const name = typeof entry.name === 'string' ? entry.name.trim() : '';
if (name.length > 0) {
return name;
}
const domain = typeof entry.domain === 'string' ? entry.domain.trim() : '';
return domain;
}
/**
@@ -202,21 +206,23 @@ export async function initializeInstanceSelector(options) {
return;
}
const visibleEntries = filterDisplayableFederationInstances(payload);
updateFederationNavCount({ documentObject: doc, count: visibleEntries.length });
if (!Array.isArray(payload)) {
return;
}
updateFederationNavCount({ documentObject: doc, count: payload.length });
const sanitizedDomain = typeof instanceDomain === 'string' ? instanceDomain.trim().toLowerCase() : null;
const sortedEntries = visibleEntries
const sortedEntries = payload
.filter(entry => entry && typeof entry.domain === 'string' && entry.domain.trim() !== '')
.map(entry => ({
domain: entry.domain.trim(),
label: resolveInstanceLabel(entry),
sortLabel: resolveFederationInstanceSortValue(entry),
}))
.sort((a, b) => {
const labelA = a.sortLabel || a.domain;
const labelB = b.sortLabel || b.domain;
const labelA = a.label || a.domain;
const labelB = b.label || b.domain;
return labelA.localeCompare(labelB, undefined, { sensitivity: 'base' });
});
+16 -174
View File
@@ -69,144 +69,6 @@ import {
roleRenderOrder,
} from './role-helpers.js';
/**
* Compute active-node counts from a local node array.
*
* @param {Array<Object>} nodes Node payloads.
* @param {number} nowSeconds Reference timestamp.
* @returns {{hour: number, day: number, week: number, month: number, sampled: boolean}} Local count snapshot.
*/
export function computeLocalActiveNodeStats(nodes, nowSeconds) {
const safeNodes = Array.isArray(nodes) ? nodes : [];
const referenceNow = Number.isFinite(nowSeconds) ? nowSeconds : Date.now() / 1000;
const windows = [
{ key: 'hour', secs: 3600 },
{ key: 'day', secs: 86_400 },
{ key: 'week', secs: 7 * 86_400 },
{ key: 'month', secs: 30 * 86_400 }
];
const counts = { sampled: true };
for (const window of windows) {
counts[window.key] = safeNodes.filter(node => {
const lastHeard = Number(node?.last_heard);
return Number.isFinite(lastHeard) && referenceNow - lastHeard <= window.secs;
}).length;
}
return counts;
}
/**
* Parse and validate the `/api/stats` payload.
*
* @param {*} payload Candidate JSON object from the stats endpoint.
* @returns {{hour: number, day: number, week: number, month: number, sampled: boolean}|null} Normalized stats or null.
*/
export function normaliseActiveNodeStatsPayload(payload) {
const activeNodes = payload && typeof payload === 'object' ? payload.active_nodes : null;
if (!activeNodes || typeof activeNodes !== 'object') {
return null;
}
const hour = Number(activeNodes.hour);
const day = Number(activeNodes.day);
const week = Number(activeNodes.week);
const month = Number(activeNodes.month);
if (![hour, day, week, month].every(Number.isFinite)) {
return null;
}
return {
hour: Math.max(0, Math.trunc(hour)),
day: Math.max(0, Math.trunc(day)),
week: Math.max(0, Math.trunc(week)),
month: Math.max(0, Math.trunc(month)),
sampled: Boolean(payload.sampled)
};
}
const ACTIVE_NODE_STATS_CACHE_TTL_MS = 30_000;
let activeNodeStatsCache = null;
let activeNodeStatsFetchPromise = null;
let activeNodeStatsFetchImpl = null;
/**
* Fetch active-node stats from the dedicated API endpoint with short-lived caching.
*
* @param {Function} fetchImpl Fetch implementation.
* @returns {Promise<{hour: number, day: number, week: number, month: number, sampled: boolean} | null>} Normalized stats or null.
*/
async function fetchRemoteActiveNodeStats(fetchImpl) {
const nowMs = Date.now();
if (activeNodeStatsCache?.fetchImpl === fetchImpl && activeNodeStatsCache.expiresAt > nowMs) {
return activeNodeStatsCache.stats;
}
if (activeNodeStatsFetchPromise && activeNodeStatsFetchImpl === fetchImpl) {
return activeNodeStatsFetchPromise;
}
activeNodeStatsFetchImpl = fetchImpl;
activeNodeStatsFetchPromise = (async () => {
const response = await fetchImpl('/api/stats', { cache: 'no-store' });
if (!response?.ok) {
throw new Error(`stats HTTP ${response?.status ?? 'unknown'}`);
}
const payload = await response.json();
const normalized = normaliseActiveNodeStatsPayload(payload);
if (!normalized) {
throw new Error('invalid stats payload');
}
activeNodeStatsCache = {
fetchImpl,
expiresAt: Date.now() + ACTIVE_NODE_STATS_CACHE_TTL_MS,
stats: normalized
};
return normalized;
})();
try {
return await activeNodeStatsFetchPromise;
} finally {
activeNodeStatsFetchPromise = null;
activeNodeStatsFetchImpl = null;
}
}
/**
* Fetch active-node stats from the dedicated API endpoint with local fallback.
*
* @param {{
* nodes: Array<Object>,
* nowSeconds: number,
* fetchImpl?: Function
* }} params Fetch parameters.
* @returns {Promise<{hour: number, day: number, week: number, month: number, sampled: boolean}>} Stats snapshot.
*/
export async function fetchActiveNodeStats({ nodes, nowSeconds, fetchImpl = fetch }) {
try {
const normalized = await fetchRemoteActiveNodeStats(fetchImpl);
if (normalized) return normalized;
throw new Error('invalid stats payload');
} catch (error) {
console.debug('Failed to fetch /api/stats; using local active-node counts.', error);
return computeLocalActiveNodeStats(nodes, nowSeconds);
}
}
/**
* Format the dashboard refresh-info sentence for active-node counts.
*
* @param {{channel: string, frequency: string, stats: {hour:number,day:number,week:number,month:number,sampled:boolean}}} params Formatting data.
* @returns {string} User-visible sentence for the dashboard header.
*/
export function formatActiveNodeStatsText({ channel, frequency, stats }) {
const parts = [
`${Number(stats?.hour) || 0}/hour`,
`${Number(stats?.day) || 0}/day`,
`${Number(stats?.week) || 0}/week`,
`${Number(stats?.month) || 0}/month`
];
const suffix = stats?.sampled ? ' (sampled)' : '';
return `${channel} (${frequency}) — active nodes: ${parts.join(', ')}${suffix}.`;
}
/**
* Entry point for the interactive dashboard. Wires up event listeners,
* initializes the map, and triggers the first data refresh cycle.
@@ -360,7 +222,6 @@ export function initializeApp(config) {
/** @type {ReturnType<typeof setTimeout>|null} */
let refreshTimer = null;
let refreshInfoRequestId = 0;
/**
* Close any open short-info overlays that do not contain the provided anchor.
@@ -3049,14 +2910,6 @@ export function initializeApp(config) {
* @returns {HTMLElement} Chat log element.
*/
function createMessageChatEntry(m) {
let plainText = '';
if (m?.text != null) {
plainText = String(m.text).trim();
}
if (m?.encrypted && plainText === 'GAA=') {
return null;
}
const div = document.createElement('div');
const tsSeconds = resolveTimestampSeconds(
m.rx_time ?? m.rxTime,
@@ -3288,28 +3141,18 @@ export function initializeApp(config) {
}
const getDivider = createDateDividerFactory();
const limitedEntries = entries.slice(Math.max(entries.length - CHAT_LIMIT, 0));
let renderedEntries = 0;
for (const entry of limitedEntries) {
if (!entry || typeof entry.ts !== 'number') {
continue;
}
if (typeof renderEntry !== 'function') {
continue;
}
const node = renderEntry(entry);
if (!node) {
continue;
}
const divider = getDivider(entry.ts);
if (divider) fragment.appendChild(divider);
fragment.appendChild(node);
renderedEntries += 1;
}
if (renderedEntries === 0 && emptyLabel) {
const empty = document.createElement('p');
empty.className = 'chat-empty';
empty.textContent = emptyLabel;
fragment.appendChild(empty);
if (typeof renderEntry === 'function') {
const node = renderEntry(entry);
if (node) {
fragment.appendChild(node);
}
}
}
return fragment;
}
@@ -4534,16 +4377,15 @@ export function initializeApp(config) {
if (!refreshInfo || !isDashboardView) {
return;
}
const requestId = ++refreshInfoRequestId;
void fetchActiveNodeStats({ nodes, nowSeconds: nowSec }).then(stats => {
if (requestId !== refreshInfoRequestId) {
return;
}
refreshInfo.textContent = formatActiveNodeStatsText({
channel: config.channel,
frequency: config.frequency,
stats
});
});
const windows = [
{ label: 'hour', secs: 3600 },
{ label: 'day', secs: 86400 },
{ label: 'week', secs: 7 * 86400 },
];
const counts = windows.map(w => {
const c = nodes.filter(n => n.last_heard && nowSec - Number(n.last_heard) <= w.secs).length;
return `${c}/${w.label}`;
}).join(', ');
refreshInfo.textContent = `${config.channel} (${config.frequency}) — active nodes: ${counts}.`;
}
}
-1
View File
@@ -2056,7 +2056,6 @@ function renderMessages(messages, renderShortHtml, node) {
if (!message || typeof message !== 'object') return null;
const text = stringOrNull(message.text) || stringOrNull(message.emoji);
if (!text) return null;
if (message.encrypted && String(text).trim() === 'GAA=') return null;
const timestamp = formatMessageTimestamp(message.rx_time, message.rx_iso);
const metadata = extractChatMessageMetadata(message);
+26 -576
View File
@@ -26,9 +26,6 @@ require "socket"
RSpec.describe "Potato Mesh Sinatra app" do
let(:app) { Sinatra::Application }
let(:application_class) { PotatoMesh::Application }
INSERT_NODE_WITH_LAST_HEARD_SQL = "INSERT INTO nodes(node_id, num, last_heard, first_heard) VALUES (?,?,?,?)".freeze
INSERT_NODE_WITH_METADATA_SQL = "INSERT INTO nodes(node_id, num, short_name, long_name, hw_model, role, last_heard, first_heard) VALUES(?,?,?,?,?,?,?,?)".freeze
SELECT_NODE_LAST_HEARD_SQL = "SELECT last_heard FROM nodes WHERE node_id = ?".freeze
describe "configuration" do
it "sets the default HTTP port to the baked-in value" do
@@ -409,7 +406,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
it "stores and clears the initial federation thread" do
delay = 3
allow(PotatoMesh::Config).to receive(:initial_federation_delay_seconds).and_return(delay)
expect(app).to receive(:federation_sleep_with_shutdown).with(delay).and_return(true)
expect(Kernel).to receive(:sleep).with(delay)
expect(app).to receive(:announce_instance_to_all_domains)
allow(Thread).to receive(:new) do |&block|
dummy_thread.block = block
@@ -929,7 +926,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
with_db do |db|
db.execute(
INSERT_NODE_WITH_LAST_HEARD_SQL,
"INSERT INTO nodes(node_id, num, last_heard, first_heard) VALUES (?,?,?,?)",
[node_id, node_num, rx_time - 120, rx_time - 180],
)
@@ -2868,7 +2865,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
count = db.get_first_value("SELECT COUNT(*) FROM nodes WHERE node_id = ?", [node["node_id"]])
expect(count).to eq(1)
last_heard = db.get_first_value(SELECT_NODE_LAST_HEARD_SQL, [node["node_id"]])
last_heard = db.get_first_value("SELECT last_heard FROM nodes WHERE node_id = ?", [node["node_id"]])
expect(last_heard).to eq(expected_last_heard(node))
end
end
@@ -2931,26 +2928,6 @@ RSpec.describe "Potato Mesh Sinatra app" do
end
describe "POST /api/messages" do
SELECT_MESSAGE_ENCRYPTED_SQL = "SELECT encrypted FROM messages WHERE id = ?".freeze
SELECT_NEIGHBOR_COUNT_BY_NODE_SQL = "SELECT COUNT(*) FROM neighbors WHERE node_id = ?".freeze
NODE_INFO_LONG_NAME = "Node Info".freeze
FIRST_MESSAGE_INGESTOR_ID = "!1111aaaa".freeze
SHARED_TEST_INGESTOR_ID = "!aaaa1111".freeze
DEADBEEF_NODE_ID = "!deadbeef".freeze
NEIGHBOR_EMPTY_UPDATE_ROOT_ID = "!cafed00d".freeze
NEIGHBOR_ROOT_ID = "!1a2b3c01".freeze
NEIGHBOR_PRIMARY_ID = "!1a2b3c02".freeze
NEIGHBOR_SNR_CLEAR_ROOT_ID = "!1a2b3c10".freeze
NEIGHBOR_SNR_CLEAR_PEER_ID = "!1a2b3c11".freeze
NEIGHBOR_CHUNK_ROOT_ID = "!1a2b3c30".freeze
def post_twice_for_ingestor(endpoint, first_payload, second_payload)
post endpoint, first_payload.to_json, auth_headers
expect(last_response).to be_ok
post endpoint, second_payload.to_json, auth_headers
expect(last_response).to be_ok
end
it "persists messages from fixture data" do
import_nodes_fixture
import_messages_fixture
@@ -3034,36 +3011,6 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(reaction_row["emoji"]).to eq("🔥")
end
it "stores message ingestor and preserves the first reporter" do
first_payload = {
"id" => 77_001,
"rx_time" => reference_time.to_i - 10,
"from_id" => "!ingmsg01",
"channel" => 0,
"portnum" => "TEXT_MESSAGE_APP",
"text" => "first reporter",
"ingestor" => FIRST_MESSAGE_INGESTOR_ID,
}
second_payload = first_payload.merge(
"text" => "updated text",
"ingestor" => "!2222bbbb",
)
post_twice_for_ingestor("/api/messages", first_payload, second_payload)
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row("SELECT text, ingestor FROM messages WHERE id = ?", [first_payload["id"]])
expect(row["text"]).to eq("updated text")
expect(row["ingestor"]).to eq(FIRST_MESSAGE_INGESTOR_ID)
end
get "/api/messages?limit=10"
expect(last_response).to be_ok
row = JSON.parse(last_response.body).find { |entry| entry["id"] == first_payload["id"] }
expect(row["ingestor"]).to eq(FIRST_MESSAGE_INGESTOR_ID)
end
it "creates hidden nodes for unknown message senders" do
payload = {
"id" => 9_999,
@@ -3128,7 +3075,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
"id" => 10_001,
"rx_time" => reference_time.to_i,
"from_id" => "!cafef00d",
"to_id" => DEADBEEF_NODE_ID,
"to_id" => "!deadbeef",
"channel" => 0,
"portnum" => "TEXT_MESSAGE_APP",
"text" => "Spec participant placeholder",
@@ -3145,12 +3092,12 @@ RSpec.describe "Potato Mesh Sinatra app" do
<<~SQL,
SELECT node_id, num, short_name, long_name, role, last_heard, first_heard
FROM nodes
WHERE node_id IN ("!cafef00d", "#{DEADBEEF_NODE_ID}")
WHERE node_id IN ("!cafef00d", "!deadbeef")
ORDER BY node_id
SQL
)
expect(rows.map { |row| row["node_id"] }).to contain_exactly("!cafef00d", DEADBEEF_NODE_ID)
expect(rows.map { |row| row["node_id"] }).to contain_exactly("!cafef00d", "!deadbeef")
rows.each do |row|
expect(row["num"]).to be_an(Integer)
expect(row["role"]).to eq("CLIENT_HIDDEN")
@@ -3349,30 +3296,6 @@ RSpec.describe "Potato Mesh Sinatra app" do
end
end
it "stores position ingestor and preserves the first reporter" do
first_payload = {
"id" => 19_001,
"node_id" => "!ingpos01",
"rx_time" => reference_time.to_i - 80,
"latitude" => 52.1,
"longitude" => 13.2,
"ingestor" => SHARED_TEST_INGESTOR_ID,
}
second_payload = first_payload.merge(
"latitude" => 53.3,
"ingestor" => "!bbbb2222",
)
post_twice_for_ingestor("/api/positions", first_payload, second_payload)
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row("SELECT latitude, ingestor FROM positions WHERE id = ?", [first_payload["id"]])
expect_same_value(row["latitude"], 53.3)
expect(row["ingestor"]).to eq(SHARED_TEST_INGESTOR_ID)
end
end
it "fills first_heard when updating an existing node without one" do
node_id = "!specposfh"
rx_time = reference_time.to_i - 90
@@ -3515,48 +3438,6 @@ RSpec.describe "Potato Mesh Sinatra app" do
end
end
it "does not update existing neighbor last_heard from third-party neighbor reports" do
reporter_id = "!abc123ef"
existing_neighbor_id = "!00ff0011"
prior_last_heard = reference_time.to_i - 4 * 60 * 60
rx_time = reference_time.to_i - 60 * 60
neighbor_rx_time = rx_time - 120
with_db do |db|
db.execute(
INSERT_NODE_WITH_LAST_HEARD_SQL,
[reporter_id, 0xabc123ef, prior_last_heard, prior_last_heard],
)
db.execute(
INSERT_NODE_WITH_LAST_HEARD_SQL,
[existing_neighbor_id, 0x00ff0011, prior_last_heard, prior_last_heard],
)
end
payload = {
"node_id" => reporter_id,
"node_num" => 0xabc123ef,
"rx_time" => rx_time,
"neighbors" => [
{ "node_id" => existing_neighbor_id, "snr" => -7.5, "rx_time" => neighbor_rx_time },
],
}
post "/api/neighbors", payload.to_json, auth_headers
expect(last_response).to be_ok
expect(JSON.parse(last_response.body)).to eq("status" => "ok")
with_db(readonly: true) do |db|
db.results_as_hash = true
reporter_row = db.get_first_row(SELECT_NODE_LAST_HEARD_SQL, [reporter_id])
neighbor_row = db.get_first_row(SELECT_NODE_LAST_HEARD_SQL, [existing_neighbor_id])
expect(reporter_row["last_heard"]).to eq(rx_time)
expect(neighbor_row["last_heard"]).to eq(prior_last_heard)
end
end
it "handles broadcasts with no neighbors" do
rx_time = reference_time.to_i - 60
payload = {
@@ -3588,127 +3469,6 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(JSON.parse(last_response.body)).to be_empty
end
it "removes stored neighbors when a later packet contains no neighbors" do
seed_payload = {
"node_id" => NEIGHBOR_EMPTY_UPDATE_ROOT_ID,
"rx_time" => reference_time.to_i - 50,
"neighbors" => [
{ "node_id" => DEADBEEF_NODE_ID, "snr" => -2.0 },
],
"ingestor" => SHARED_TEST_INGESTOR_ID,
}
empty_payload = {
"node_id" => NEIGHBOR_EMPTY_UPDATE_ROOT_ID,
"rx_time" => reference_time.to_i - 10,
"neighbors" => [],
"ingestor" => "!bbbb2222",
}
post "/api/neighbors", seed_payload.to_json, auth_headers
expect(last_response).to be_ok
post "/api/neighbors", empty_payload.to_json, auth_headers
expect(last_response).to be_ok
with_db(readonly: true) do |db|
remaining = db.get_first_value(SELECT_NEIGHBOR_COUNT_BY_NODE_SQL, [NEIGHBOR_EMPTY_UPDATE_ROOT_ID])
expect(remaining).to eq(0)
end
end
it "stores neighbor ingestor and preserves the first reporter per tuple" do
base = {
"node_id" => NEIGHBOR_ROOT_ID,
"rx_time" => reference_time.to_i - 45,
"neighbors" => [
{ "node_id" => NEIGHBOR_PRIMARY_ID, "snr" => -1.5 },
{ "node_id" => "!1a2b3c03", "snr" => -2.5 },
],
"ingestor" => "!aaaa9999",
}
update = {
"node_id" => NEIGHBOR_ROOT_ID,
"rx_time" => reference_time.to_i - 30,
"neighbors" => [
{ "node_id" => NEIGHBOR_PRIMARY_ID, "snr" => -0.5 },
],
"ingestor" => "!bbbb8888",
}
post_twice_for_ingestor("/api/neighbors", base, update)
with_db(readonly: true) do |db|
db.results_as_hash = true
rows = db.execute("SELECT neighbor_id, snr, ingestor FROM neighbors WHERE node_id = ? ORDER BY neighbor_id", [NEIGHBOR_ROOT_ID])
expect(rows.size).to eq(1)
expect(rows.first["neighbor_id"]).to eq(NEIGHBOR_PRIMARY_ID)
expect_same_value(rows.first["snr"], -0.5)
expect(rows.first["ingestor"]).to eq("!aaaa9999")
end
end
it "clears stored neighbor snr when an updated entry omits snr" do
initial = {
"node_id" => NEIGHBOR_SNR_CLEAR_ROOT_ID,
"rx_time" => reference_time.to_i - 40,
"neighbors" => [
{ "node_id" => NEIGHBOR_SNR_CLEAR_PEER_ID, "snr" => -3.25 },
],
}
update = {
"node_id" => NEIGHBOR_SNR_CLEAR_ROOT_ID,
"rx_time" => reference_time.to_i - 20,
"neighbors" => [
{ "node_id" => NEIGHBOR_SNR_CLEAR_PEER_ID },
],
}
post "/api/neighbors", initial.to_json, auth_headers
expect(last_response).to be_ok
post "/api/neighbors", update.to_json, auth_headers
expect(last_response).to be_ok
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row(
"SELECT snr, rx_time FROM neighbors WHERE node_id = ? AND neighbor_id = ?",
[NEIGHBOR_SNR_CLEAR_ROOT_ID, NEIGHBOR_SNR_CLEAR_PEER_ID],
)
expect(row["snr"]).to be_nil
expect(row["rx_time"]).to eq(update["rx_time"])
end
end
it "removes stale neighbors in chunked deletes" do
initial_neighbors = Array.new(1_100) do |i|
{ "node_id" => format("!%08x", 0x2000_0000 + i), "snr" => -2.0 }
end
initial = {
"node_id" => NEIGHBOR_CHUNK_ROOT_ID,
"rx_time" => reference_time.to_i - 35,
"neighbors" => initial_neighbors,
}
update = {
"node_id" => NEIGHBOR_CHUNK_ROOT_ID,
"rx_time" => reference_time.to_i - 25,
"neighbors" => [
{ "node_id" => "!20000000", "snr" => -1.0 },
],
}
post "/api/neighbors", initial.to_json, auth_headers
expect(last_response).to be_ok
post "/api/neighbors", update.to_json, auth_headers
expect(last_response).to be_ok
with_db(readonly: true) do |db|
count = db.get_first_value(
SELECT_NEIGHBOR_COUNT_BY_NODE_SQL,
[NEIGHBOR_CHUNK_ROOT_ID],
)
expect(count).to eq(1)
end
end
it "returns 400 when more than 1000 neighbor packets are provided" do
payload = Array.new(1001) do |i|
{ "node_id" => format("!%08x", i), "rx_time" => reference_time.to_i - i }
@@ -3724,30 +3484,6 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(count).to eq(0)
end
end
it "handles large neighbor lists without SQLite bind overflows" do
neighbors = Array.new(1_100) do |i|
{ "node_id" => format("!%08x", 0x1000_0000 + i), "snr" => -1.0 }
end
payload = {
"node_id" => "!1a2b3c20",
"rx_time" => reference_time.to_i - 15,
"neighbors" => neighbors,
}
post "/api/neighbors", payload.to_json, auth_headers
expect(last_response).to be_ok
expect(JSON.parse(last_response.body)).to eq("status" => "ok")
with_db(readonly: true) do |db|
count = db.get_first_value(
SELECT_NEIGHBOR_COUNT_BY_NODE_SQL,
["!1a2b3c20"],
)
expect(count).to eq(1_100)
end
end
end
describe "POST /api/telemetry" do
@@ -3905,27 +3641,6 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(JSON.parse(last_response.body)).to eq("error" => "invalid JSON")
end
it "stores telemetry ingestor and preserves the first reporter" do
payload = {
"id" => 23_001,
"node_id" => "!ingtel01",
"rx_time" => reference_time.to_i - 70,
"telemetry" => { "deviceMetrics" => { "batteryLevel" => 90 } },
"battery_level" => 90,
"ingestor" => "!1111bbbb",
}
updated = payload.merge("battery_level" => 80, "ingestor" => "!2222cccc")
post_twice_for_ingestor("/api/telemetry", payload, updated)
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row("SELECT battery_level, ingestor FROM telemetry WHERE id = ?", [payload["id"]])
expect_same_value(row["battery_level"], 80.0)
expect(row["ingestor"]).to eq("!1111bbbb")
end
end
it "returns 400 when more than 1000 telemetry packets are provided" do
payload = Array.new(1001) { |i| { "id" => i + 1, "rx_time" => reference_time.to_i - i } }
@@ -4045,28 +3760,6 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(JSON.parse(last_response.body)).to eq("error" => "invalid JSON")
end
it "stores trace ingestor and preserves the first reporter" do
payload = {
"id" => 31_001,
"request_id" => 77,
"src" => 0x10000001,
"dest" => 0x10000002,
"rx_time" => reference_time.to_i - 50,
"hops" => [0x10000001, 0x10000002],
"ingestor" => "!aaaa0001",
}
update = payload.merge("snr" => 7.5, "ingestor" => "!bbbb0002")
post_twice_for_ingestor("/api/traces", payload, update)
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row("SELECT snr, ingestor FROM traces WHERE id = ?", [payload["id"]])
expect_same_value(row["snr"], 7.5)
expect(row["ingestor"]).to eq("!aaaa0001")
end
end
it "returns 400 when more than 1000 traces are provided" do
payload = Array.new(1001) { |i| { "id" => i + 1, "rx_time" => reference_time.to_i - i } }
@@ -4245,7 +3938,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(row["encrypted"]).to eq(encrypted_b64)
node_row = db.get_first_row(
SELECT_NODE_LAST_HEARD_SQL,
"SELECT last_heard FROM nodes WHERE node_id = ?",
[sender_id],
)
@@ -4290,7 +3983,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
expect(node_entry["to_id"]).to eq(receiver_id)
end
it "keeps encrypted text-port messages even when the PSK is configured" do
it "decrypts encrypted messages when the PSK is configured" do
psk_b64 = "Nmh7EooP2Tsc+7pvPwXLcEDDuYhk+fBo2GLnbA1Y1sg="
previous_psk = ENV["MESHTASTIC_PSK_B64"]
ENV["MESHTASTIC_PSK_B64"] = psk_b64
@@ -4314,13 +4007,16 @@ RSpec.describe "Potato Mesh Sinatra app" do
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row(
"SELECT text, encrypted, channel_name FROM messages WHERE id = ?",
"SELECT text, encrypted, channel_name, decrypted, decryption_confidence FROM messages WHERE id = ?",
[payload["packet_id"]],
)
expect(row["text"]).to be_nil
expect(row["encrypted"]).to eq("Q1R7tgI5yXzMXu/3")
expect(row["channel_name"]).to be_nil
expect(row["text"]).to eq("Nabend")
expect(row["encrypted"]).to be_nil
expect(row["channel_name"]).to eq("BerlinMesh")
expect(row["decrypted"]).to eq(1)
expect(row["decryption_confidence"]).to be > 0.0
expect(row["decryption_confidence"]).to be <= 1.0
end
ensure
if previous_psk.nil?
@@ -4394,13 +4090,15 @@ RSpec.describe "Potato Mesh Sinatra app" do
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row(
"SELECT text, encrypted, portnum FROM messages WHERE id = ?",
"SELECT text, encrypted, portnum, decrypted, decryption_confidence FROM messages WHERE id = ?",
[payload["packet_id"]],
)
expect(row["text"]).to be_nil
expect(row["encrypted"]).to eq(encrypted_payload)
expect(row["portnum"]).to be_nil
expect(row["decrypted"]).to eq(0)
expect(row["decryption_confidence"]).to be_nil
end
ensure
if previous_psk.nil?
@@ -4695,7 +4393,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row(
SELECT_MESSAGE_ENCRYPTED_SQL,
"SELECT encrypted FROM messages WHERE id = ?",
[900_005],
)
@@ -4759,7 +4457,7 @@ RSpec.describe "Potato Mesh Sinatra app" do
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row(
SELECT_MESSAGE_ENCRYPTED_SQL,
"SELECT encrypted FROM messages WHERE id = ?",
[900_006],
)
@@ -4943,28 +4641,22 @@ RSpec.describe "Potato Mesh Sinatra app" do
"SELECT hop_index, node_id FROM trace_hops WHERE trace_id = ? ORDER BY hop_index",
[900_004],
)
hop_node_ids = hop_rows.map do |row|
if row.is_a?(Hash)
row["node_id"] || row[:node_id] || row[1]
else
row[1]
end
end
expect(trace_row["id"]).to eq(900_004)
expect(trace_row["dest"]).to eq(4)
expect(hop_node_ids).to eq([1, 2, 3, 4])
expect(hop_rows.map { |row| row[1] }).to eq([1, 2, 3, 4])
end
end
it "keeps encrypted messages when decrypted payload resolves to text portnum" do
it "overwrites encrypted messages when decrypted text arrives" do
encrypted_payload = Base64.strict_encode64("cipher".b)
decrypted_text = "decoded"
allow(PotatoMesh::Application).to receive(:decrypt_meshtastic_message).and_return(
nil,
{
portnum: 1,
payload: "plain".b,
text: "decoded",
text: decrypted_text,
channel_name: nil,
},
)
@@ -5000,212 +4692,11 @@ RSpec.describe "Potato Mesh Sinatra app" do
[910_001],
)
expect(row["text"]).to be_nil
expect(row["encrypted"]).to eq(encrypted_payload)
expect(row["text"]).to eq(decrypted_text)
expect(row["encrypted"]).to be_nil
end
end
it "stores decoded node data when decrypting nodeinfo payloads" do
payload_bytes = "nodeinfo".b
encoded_payload = Base64.strict_encode64(payload_bytes)
node_payload = {
"id" => "!7c5b0920",
"num" => 2_085_057_824,
"last_heard" => reference_time.to_i,
"user" => {
"short_name" => "NODE",
"long_name" => NODE_INFO_LONG_NAME,
"hw_model" => "TBEAM",
"role" => "CLIENT",
},
}
allow(PotatoMesh::Application).to receive(:decrypt_meshtastic_message).and_return(
{
portnum: 4,
payload: payload_bytes,
text: nil,
channel_name: nil,
},
)
allow(PotatoMesh::App::Meshtastic::PayloadDecoder).to receive(:decode).and_return(
{
"type" => "NODEINFO_APP",
"payload" => node_payload,
},
)
with_db do |db|
PotatoMesh::Application.insert_message(
db,
{
"packet_id" => 900_008,
"rx_time" => reference_time.to_i,
"rx_iso" => reference_time.utc.iso8601,
"from_id" => "!7c5b0920",
"encrypted" => encoded_payload,
},
)
end
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row(
"SELECT node_id, short_name, long_name, hw_model FROM nodes WHERE node_id = ?",
["!7c5b0920"],
)
expect(row["node_id"]).to eq("!7c5b0920")
expect(row["short_name"]).to eq("NODE")
expect(row["long_name"]).to eq(NODE_INFO_LONG_NAME)
expect(row["hw_model"]).to eq("TBEAM")
end
end
it "keeps encrypted payloads when decoded nodeinfo payload is invalid" do
payload_bytes = "nodeinfo".b
encoded_payload = Base64.strict_encode64(payload_bytes)
allow(PotatoMesh::Application).to receive(:decrypt_meshtastic_message).and_return(
{
portnum: 4,
payload: payload_bytes,
text: nil,
channel_name: nil,
},
)
allow(PotatoMesh::App::Meshtastic::PayloadDecoder).to receive(:decode).and_return(
{
"type" => "NODEINFO_APP",
"payload" => {},
},
)
with_db do |db|
PotatoMesh::Application.insert_message(
db,
{
"packet_id" => 900_009,
"rx_time" => reference_time.to_i,
"rx_iso" => reference_time.utc.iso8601,
"from_id" => "!7c5b0920",
"encrypted" => encoded_payload,
},
)
end
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row(
SELECT_MESSAGE_ENCRYPTED_SQL,
[900_009],
)
expect(row["encrypted"]).to eq(encoded_payload)
end
end
it "keeps encrypted payloads when decoded nodeinfo payload lacks identifying user fields" do
payload_bytes = "nodeinfo".b
encoded_payload = Base64.strict_encode64(payload_bytes)
allow(PotatoMesh::Application).to receive(:decrypt_meshtastic_message).and_return(
{
portnum: 4,
payload: payload_bytes,
text: nil,
channel_name: nil,
},
)
allow(PotatoMesh::App::Meshtastic::PayloadDecoder).to receive(:decode).and_return(
{
"type" => "NODEINFO_APP",
"payload" => { "id" => "!7c5b0920", "num" => 2_085_057_824 },
},
)
with_db do |db|
PotatoMesh::Application.insert_message(
db,
{
"packet_id" => 900_010,
"rx_time" => reference_time.to_i,
"rx_iso" => reference_time.utc.iso8601,
"from_id" => "!7c5b0920",
"encrypted" => encoded_payload,
},
)
end
with_db(readonly: true) do |db|
db.results_as_hash = true
row = db.get_first_row(
SELECT_MESSAGE_ENCRYPTED_SQL,
[900_010],
)
expect(row["encrypted"]).to eq(encoded_payload)
end
end
it "normalizes snake_case decoded nodeinfo payloads" do
payload = {
"user" => {
"short_name" => "NODE",
"long_name" => NODE_INFO_LONG_NAME,
"hw_model" => "TBEAM",
"public_key" => "pk",
"is_unmessagable" => true,
},
"device_metrics" => {
"battery_level" => 87,
"channel_utilization" => 12.5,
"air_util_tx" => 2.75,
"uptime_seconds" => 1200,
},
"position" => {
"precision_bits" => 13,
"location_source" => "LOC_MANUAL",
},
"last_heard" => reference_time.to_i,
"hops_away" => 2,
"is_favorite" => true,
"hw_model" => "TBEAM",
}
normalized = PotatoMesh::Application.send(:normalize_decrypted_nodeinfo_payload, payload)
expect(normalized.dig("user", "shortName")).to eq("NODE")
expect(normalized.dig("user", "longName")).to eq(NODE_INFO_LONG_NAME)
expect(normalized.dig("user", "hwModel")).to eq("TBEAM")
expect(normalized.dig("user", "publicKey")).to eq("pk")
expect(normalized.dig("user", "isUnmessagable")).to be(true)
expect(normalized.dig("deviceMetrics", "batteryLevel")).to eq(87)
expect(normalized.dig("deviceMetrics", "channelUtilization")).to eq(12.5)
expect(normalized.dig("deviceMetrics", "airUtilTx")).to eq(2.75)
expect(normalized.dig("deviceMetrics", "uptimeSeconds")).to eq(1200)
expect(normalized.dig("position", "precisionBits")).to eq(13)
expect(normalized.dig("position", "locationSource")).to eq("LOC_MANUAL")
expect(normalized["lastHeard"]).to eq(reference_time.to_i)
expect(normalized["hopsAway"]).to eq(2)
expect(normalized["isFavorite"]).to be(true)
expect(normalized["hwModel"]).to eq("TBEAM")
end
it "rejects malformed normalized nodeinfo payloads" do
invalid_payload = {
"user" => { "shortName" => "NODE" },
"deviceMetrics" => "invalid",
"position" => "invalid",
}
valid = PotatoMesh::Application.send(:valid_decrypted_nodeinfo_payload?, invalid_payload)
normalized = PotatoMesh::Application.send(:normalize_decrypted_nodeinfo_payload, nil)
expect(valid).to be(false)
expect(normalized).to eq({})
end
it "prefers decrypted message fields over encrypted ones" do
encrypted_payload = Base64.strict_encode64("cipher".b)
encrypted_message = {
@@ -5665,47 +5156,6 @@ RSpec.describe "Potato Mesh Sinatra app" do
end
end
describe "GET /api/stats" do
it "returns exact SQL-backed activity counts without list-endpoint sampling" do
clear_database
now = reference_time.to_i
allow(Time).to receive(:now).and_return(reference_time)
with_db do |db|
db.transaction
1005.times do |index|
heard = now - (index % 1800)
node_id = format("!%08x", index + 1)
db.execute(
INSERT_NODE_WITH_METADATA_SQL,
[node_id, index + 1, "n#{index}", "Node #{index}", "TBEAM", "CLIENT", heard, heard],
)
end
db.execute(
INSERT_NODE_WITH_METADATA_SQL,
["!week0001", 200_001, "week", "Week Node", "TBEAM", "CLIENT", now - (2 * 86_400), now - (2 * 86_400)],
)
db.execute(
INSERT_NODE_WITH_METADATA_SQL,
["!month001", 200_002, "month", "Month Node", "TBEAM", "CLIENT", now - (20 * 86_400), now - (20 * 86_400)],
)
db.commit
end
get "/api/stats"
expect(last_response).to be_ok
payload = JSON.parse(last_response.body)
expect(payload["sampled"]).to eq(false)
expect(payload["active_nodes"]).to include(
"hour" => 1005,
"day" => 1005,
"week" => 1006,
"month" => 1007,
)
end
end
describe "GET /api/messages" do
it "returns the stored messages with canonical node references when encrypted messages are included" do
import_nodes_fixture
-72
View File
@@ -239,30 +239,6 @@ RSpec.describe PotatoMesh::Config do
end
end
# Env-var override handling for the per-request timeout used when talking to
# remote federation instances.
describe ".remote_instance_request_timeout" do
# Unset env var falls back to the compiled-in default.
it "returns the baked-in request timeout when unset" do
within_env("REMOTE_INSTANCE_REQUEST_TIMEOUT" => nil) do
expect(described_class.remote_instance_request_timeout).to eq(
PotatoMesh::Config::DEFAULT_REMOTE_INSTANCE_REQUEST_TIMEOUT,
)
end
end
# A positive integer override is honoured verbatim.
it "accepts positive overrides" do
within_env("REMOTE_INSTANCE_REQUEST_TIMEOUT" => "19") do
expect(described_class.remote_instance_request_timeout).to eq(19)
end
end
# Non-positive values are rejected and the default is used instead.
it "rejects invalid overrides" do
within_env("REMOTE_INSTANCE_REQUEST_TIMEOUT" => "0") do
expect(described_class.remote_instance_request_timeout).to eq(
PotatoMesh::Config::DEFAULT_REMOTE_INSTANCE_REQUEST_TIMEOUT,
)
end
end
end
describe ".federation_max_instances_per_response" do
it "returns the baked-in response limit when unset" do
within_env("FEDERATION_MAX_INSTANCES_PER_RESPONSE" => nil) do
@@ -383,54 +359,6 @@ RSpec.describe PotatoMesh::Config do
end
end
# Env-var override handling for how long federation shutdown waits on
# background work before giving up.
describe ".federation_shutdown_timeout_seconds" do
# Unset env var falls back to the compiled-in default.
it "returns the default shutdown timeout when unset" do
within_env("FEDERATION_SHUTDOWN_TIMEOUT" => nil) do
expect(described_class.federation_shutdown_timeout_seconds).to eq(
PotatoMesh::Config::DEFAULT_FEDERATION_SHUTDOWN_TIMEOUT_SECONDS,
)
end
end
# A positive integer override is honoured verbatim.
it "accepts positive overrides" do
within_env("FEDERATION_SHUTDOWN_TIMEOUT" => "9") do
expect(described_class.federation_shutdown_timeout_seconds).to eq(9)
end
end
# Negative values are rejected and the default is used instead.
it "rejects invalid overrides" do
within_env("FEDERATION_SHUTDOWN_TIMEOUT" => "-1") do
expect(described_class.federation_shutdown_timeout_seconds).to eq(
PotatoMesh::Config::DEFAULT_FEDERATION_SHUTDOWN_TIMEOUT_SECONDS,
)
end
end
end
# Env-var override handling for the cooldown between repeat crawls of the
# same federation domain.
describe ".federation_crawl_cooldown_seconds" do
# Unset env var falls back to the compiled-in default.
it "returns the default crawl cooldown when unset" do
within_env("FEDERATION_CRAWL_COOLDOWN" => nil) do
expect(described_class.federation_crawl_cooldown_seconds).to eq(
PotatoMesh::Config::DEFAULT_FEDERATION_CRAWL_COOLDOWN_SECONDS,
)
end
end
# A positive integer override is honoured verbatim.
it "accepts positive overrides" do
within_env("FEDERATION_CRAWL_COOLDOWN" => "17") do
expect(described_class.federation_crawl_cooldown_seconds).to eq(17)
end
end
# Non-positive values are rejected and the default is used instead.
it "rejects invalid overrides" do
within_env("FEDERATION_CRAWL_COOLDOWN" => "0") do
expect(described_class.federation_crawl_cooldown_seconds).to eq(
PotatoMesh::Config::DEFAULT_FEDERATION_CRAWL_COOLDOWN_SECONDS,
)
end
end
end
describe ".db_path" do
it "returns the default path inside the data directory" do
expect(described_class.db_path).to eq(described_class.default_db_path)
+14 -59
View File
@@ -166,6 +166,20 @@ RSpec.describe PotatoMesh::App::Database do
expect(telemetry_columns).to include("rx_time", "battery_level")
end
# The schema upgrader must backfill decryption metadata onto a legacy
# messages table that predates those columns.
it "adds decryption metadata columns to existing messages tables" do
  SQLite3::Database.new(PotatoMesh::Config.db_path) do |db|
    db.execute("CREATE TABLE nodes(node_id TEXT)")
    db.execute("CREATE TABLE messages(id INTEGER PRIMARY KEY)")
  end
  metadata_columns = %w[decrypted decryption_confidence]
  expect(column_names_for("messages")).not_to include(*metadata_columns)
  harness_class.ensure_schema_upgrades
  expect(column_names_for("messages")).to include(*metadata_columns)
end
it "creates trace tables when absent" do
SQLite3::Database.new(PotatoMesh::Config.db_path) do |db|
db.execute("CREATE TABLE nodes(node_id TEXT)")
@@ -184,65 +198,6 @@ RSpec.describe PotatoMesh::App::Database do
expect(hop_columns).to include("trace_id", "hop_index", "node_id")
end
# A database seeded without positions/neighbors must gain both tables
# (including the ingestor column) during schema upgrade.
it "creates positions and neighbors tables when absent" do
  SQLite3::Database.new(PotatoMesh::Config.db_path) do |db|
    db.execute("CREATE TABLE nodes(node_id TEXT)")
    db.execute("CREATE TABLE messages(id INTEGER PRIMARY KEY)")
    db.execute("CREATE TABLE telemetry(id INTEGER PRIMARY KEY, rx_time INTEGER, rx_iso TEXT)")
  end
  %w[positions neighbors].each do |table|
    expect(column_names_for(table)).to be_empty
  end
  harness_class.ensure_schema_upgrades
  expect(column_names_for("positions")).to include("id", "node_id", "rx_time", "ingestor")
  expect(column_names_for("neighbors")).to include("node_id", "neighbor_id", "rx_time", "ingestor")
end
# Databases created before multi-ingestor support lack the ingestor column on
# positions, neighbors, and traces; the upgrader must add it to all three.
it "adds ingestor columns to legacy positions neighbors and traces tables" do
SQLite3::Database.new(PotatoMesh::Config.db_path) do |db|
db.execute("CREATE TABLE nodes(node_id TEXT)")
db.execute("CREATE TABLE messages(id INTEGER PRIMARY KEY)")
db.execute("CREATE TABLE telemetry(id INTEGER PRIMARY KEY, rx_time INTEGER, rx_iso TEXT)")
# Pre-ingestor table shapes, reproduced verbatim from the legacy layout.
db.execute <<~SQL
CREATE TABLE positions (
id INTEGER PRIMARY KEY,
rx_time INTEGER,
rx_iso TEXT,
node_id TEXT
)
SQL
db.execute <<~SQL
CREATE TABLE neighbors (
node_id TEXT,
neighbor_id TEXT,
rx_time INTEGER
)
SQL
db.execute <<~SQL
CREATE TABLE traces (
id INTEGER PRIMARY KEY,
request_id INTEGER,
src TEXT,
dest TEXT,
rx_time INTEGER,
rx_iso TEXT
)
SQL
db.execute("CREATE TABLE trace_hops(trace_id INTEGER, hop_index INTEGER, node_id TEXT)")
end
harness_class.ensure_schema_upgrades
expect(column_names_for("positions")).to include("ingestor")
expect(column_names_for("neighbors")).to include("ingestor")
expect(column_names_for("traces")).to include("ingestor")
end
it "adds the contact_link column to existing instances tables" do
SQLite3::Database.new(PotatoMesh::Config.db_path) do |db|
db.execute("CREATE TABLE nodes(node_id TEXT)")
+26 -589
View File
@@ -23,11 +23,6 @@ require "uri"
require "socket"
RSpec.describe PotatoMesh::App::Federation do
NODES_API_PATH = "/api/nodes".freeze
STATS_API_PATH = "/api/stats".freeze
FULL_DATA_UNAVAILABLE_REASON = "full data unavailable".freeze
HTTP_CONNECTION_DOUBLE = "Net::HTTPConnection".freeze
subject(:federation_helpers) do
Class.new do
extend PotatoMesh::App::Federation
@@ -62,8 +57,6 @@ RSpec.describe PotatoMesh::App::Federation do
:federation_thread,
:initial_federation_thread,
:federation_worker_pool,
:federation_shutdown_requested,
:federation_shutdown_hook_installed,
).new
end
@@ -84,12 +77,10 @@ RSpec.describe PotatoMesh::App::Federation do
federation_helpers.instance_variable_set(:@remote_instance_verify_callback, nil)
federation_helpers.reset_debug_messages
federation_helpers.reset_warn_messages
federation_helpers.clear_federation_crawl_state!
federation_helpers.shutdown_federation_worker_pool!
end
after do
federation_helpers.clear_federation_crawl_state!
federation_helpers.shutdown_federation_worker_pool!
end
@@ -279,7 +270,7 @@ RSpec.describe PotatoMesh::App::Federation do
let(:response_map) do
mapping = { [seed_domain, "/api/instances"] => [payload_entries, :instances] }
attributes_list.each do |attributes|
mapping[[attributes[:domain], NODES_API_PATH]] = [node_payload, :nodes]
mapping[[attributes[:domain], "/api/nodes"]] = [node_payload, :nodes]
mapping[[attributes[:domain], "/api/instances"]] = [[], :instances]
end
mapping
@@ -296,37 +287,6 @@ RSpec.describe PotatoMesh::App::Federation do
end
end
# Freezes the clock at +now+ and pins the remote-node freshness window to 900
# seconds so specs can compute the recent-nodes cutoff deterministically.
def configure_remote_node_window(now)
allow(Time).to receive(:now).and_return(now)
allow(PotatoMesh::Config).to receive(:remote_instance_max_node_age).and_return(900)
end
# Builds the [host, path] => response mapping consumed by ingest specs: every
# ally domain serves /api/stats, the full /api/nodes feed, and an empty
# /api/instances list; the windowed /api/nodes?since=…&limit=1000 entry is
# added only when a window response is supplied.
def stats_mapping(now:, stats_response:, full_nodes_response:, window_nodes_response: nil)
# Matches the 900-second window pinned by configure_remote_node_window.
recent_cutoff = now.to_i - 900
mapping = { [seed_domain, "/api/instances"] => [payload_entries, :instances] }
attributes_list.each do |attributes|
mapping[[attributes[:domain], STATS_API_PATH]] = stats_response
mapping[[attributes[:domain], NODES_API_PATH]] = full_nodes_response
mapping[[attributes[:domain], "/api/instances"]] = [[], :instances]
next unless window_nodes_response
mapping[[attributes[:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"]] = window_nodes_response
end
mapping
end
# Stubs the federation fetch/verify/validate/upsert collaborators from a
# [host, path] => response mapping. Returns the array that accumulates the
# [host, path] pairs observed when capture_paths is true.
def stub_ingest_fetches(mapping, capture_paths: false)
  seen = []
  allow(federation_helpers).to receive(:fetch_instance_json) do |host, path|
    key = [host, path]
    seen << key if capture_paths
    # Unknown endpoints fall back to "no payload, no metadata".
    mapping.fetch(key) { [nil, []] }
  end
  allow(federation_helpers).to receive(:verify_instance_signature).and_return(true)
  allow(federation_helpers).to receive(:validate_remote_nodes).and_return([true, nil])
  allow(federation_helpers).to receive(:upsert_instance_record)
  seen
end
it "stops processing once the per-response limit is exceeded" do
processed_domains = []
allow(federation_helpers).to receive(:upsert_instance_record) do |_db, attrs, _signature|
@@ -362,162 +322,37 @@ RSpec.describe PotatoMesh::App::Federation do
expect(federation_helpers.debug_messages).to include(a_string_including("crawl limit"))
end
it "prefers /api/stats when counting remote activity" do
it "requests an expanded recent node window when counting remote activity" do
now = Time.at(1_700_000_000)
configure_remote_node_window(now)
allow(Time).to receive(:now).and_return(now)
allow(PotatoMesh::Config).to receive(:remote_instance_max_node_age).and_return(900)
recent_cutoff = now.to_i - 900
mapping = stats_mapping(
now:,
stats_response: [{ "active_nodes" => { "hour" => 5, "day" => 7, "week" => 9, "month" => 11 }, "sampled" => false }, :stats],
full_nodes_response: [node_payload, :nodes],
)
captured_paths = stub_ingest_fetches(mapping, capture_paths: true)
federation_helpers.ingest_known_instances_from!(db, seed_domain)
expect(captured_paths).to include(
[attributes_list[0][:domain], STATS_API_PATH],
[attributes_list[1][:domain], STATS_API_PATH],
[attributes_list[2][:domain], STATS_API_PATH],
)
expect(captured_paths).to include(
[attributes_list[0][:domain], NODES_API_PATH],
[attributes_list[1][:domain], NODES_API_PATH],
[attributes_list[2][:domain], NODES_API_PATH],
)
expect(attributes_list.map { |attrs| attrs[:nodes_count] }).to all(eq(5))
end
it "prefers recent node window counts when /api/stats is unavailable" do
now = Time.at(1_700_000_000)
configure_remote_node_window(now)
full_nodes_payload = node_payload.take(2)
recent_window_payload = node_payload
recent_path = "/api/nodes?since=#{now.to_i - 900}&limit=1000"
mapping = stats_mapping(
now:,
stats_response: [nil, ["stats unavailable"]],
full_nodes_response: [full_nodes_payload, :nodes],
window_nodes_response: [recent_window_payload, :nodes],
)
captured_paths = stub_ingest_fetches(mapping, capture_paths: true)
federation_helpers.ingest_known_instances_from!(db, seed_domain)
expect(captured_paths).to include(
[attributes_list[0][:domain], STATS_API_PATH],
[attributes_list[1][:domain], STATS_API_PATH],
[attributes_list[2][:domain], STATS_API_PATH],
)
expect(captured_paths).to include(
[attributes_list[0][:domain], NODES_API_PATH],
[attributes_list[1][:domain], NODES_API_PATH],
[attributes_list[2][:domain], NODES_API_PATH],
)
expect(captured_paths).to include(
[attributes_list[0][:domain], recent_path],
[attributes_list[1][:domain], recent_path],
[attributes_list[2][:domain], recent_path],
)
expect(attributes_list.map { |attrs| attrs[:nodes_count] }).to all(eq(recent_window_payload.length))
end
it "falls back to recent node window when full node data is unavailable" do
now = Time.at(1_700_000_000)
configure_remote_node_window(now)
mapping = stats_mapping(
now:,
stats_response: [nil, ["stats unavailable"]],
full_nodes_response: [nil, [FULL_DATA_UNAVAILABLE_REASON]],
window_nodes_response: [node_payload, :nodes],
)
stub_ingest_fetches(mapping)
federation_helpers.ingest_known_instances_from!(db, seed_domain)
expect(attributes_list.map { |attrs| attrs[:nodes_count] }).to all(eq(node_payload.length))
end
it "uses recent node window fallback when stats succeed but full node data is unavailable" do
now = Time.at(1_700_000_000)
configure_remote_node_window(now)
recent_path = "/api/nodes?since=#{now.to_i - 900}&limit=1000"
mapping = stats_mapping(
now:,
stats_response: [{ "active_nodes" => { "hour" => 9, "day" => 10, "week" => 11, "month" => 12 }, "sampled" => false }, :stats],
full_nodes_response: [nil, [FULL_DATA_UNAVAILABLE_REASON]],
window_nodes_response: [node_payload, :nodes],
)
captured_paths = stub_ingest_fetches(mapping, capture_paths: true)
federation_helpers.ingest_known_instances_from!(db, seed_domain)
expect(captured_paths).to include(
[attributes_list[0][:domain], STATS_API_PATH],
[attributes_list[1][:domain], STATS_API_PATH],
[attributes_list[2][:domain], STATS_API_PATH],
)
expect(captured_paths).to include(
[attributes_list[0][:domain], recent_path],
[attributes_list[1][:domain], recent_path],
[attributes_list[2][:domain], recent_path],
)
expect(attributes_list.map { |attrs| attrs[:nodes_count] }).to all(eq(9))
end
it "handles URI metadata from malformed /api/stats payloads without crashing" do
now = Time.at(1_700_000_000)
configure_remote_node_window(now)
mapping = stats_mapping(
now:,
stats_response: [{ "unexpected" => "shape" }, URI.parse("https://ally-0.mesh/api/stats")],
full_nodes_response: [node_payload.take(2), :nodes],
window_nodes_response: [node_payload, :nodes],
)
stub_ingest_fetches(mapping)
expect do
federation_helpers.ingest_known_instances_from!(db, seed_domain)
end.not_to raise_error
expect(attributes_list.map { |attrs| attrs[:nodes_count] }).to all(eq(node_payload.length))
end
it "skips remote entries when both full and window node feeds are unavailable" do
now = Time.at(1_700_000_000)
configure_remote_node_window(now)
recent_path = "/api/nodes?since=#{now.to_i - 900}&limit=1000"
mapping = stats_mapping(
now:,
stats_response: [{ "active_nodes" => { "hour" => 3, "day" => 3, "week" => 3, "month" => 3 }, "sampled" => false }, :stats],
full_nodes_response: [nil, [FULL_DATA_UNAVAILABLE_REASON]],
window_nodes_response: [nil, ["window unavailable"]],
)
captured_paths = stub_ingest_fetches(mapping, capture_paths: true)
upserted = []
allow(federation_helpers).to receive(:upsert_instance_record) do |_db, attrs, _signature|
upserted << attrs
mapping = { [seed_domain, "/api/instances"] => [payload_entries, :instances] }
attributes_list.each_with_index do |attributes, index|
mapping[[attributes[:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"]] = [node_payload, :nodes]
mapping[[attributes[:domain], "/api/nodes"]] = [node_payload, :nodes]
mapping[[attributes[:domain], "/api/instances"]] = [[], :instances]
allow(federation_helpers).to receive(:remote_instance_attributes_from_payload).with(payload_entries[index]).and_return([attributes, "signature-#{index}", nil])
end
captured_paths = []
allow(federation_helpers).to receive(:fetch_instance_json) do |host, path|
captured_paths << [host, path]
mapping.fetch([host, path]) { [nil, []] }
end
allow(federation_helpers).to receive(:verify_instance_signature).and_return(true)
allow(federation_helpers).to receive(:validate_remote_nodes).and_return([true, nil])
allow(federation_helpers).to receive(:upsert_instance_record)
federation_helpers.ingest_known_instances_from!(db, seed_domain)
expect(captured_paths).to include(
[attributes_list[0][:domain], NODES_API_PATH],
[attributes_list[1][:domain], NODES_API_PATH],
[attributes_list[2][:domain], NODES_API_PATH],
[attributes_list[0][:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"],
[attributes_list[1][:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"],
[attributes_list[2][:domain], "/api/nodes?since=#{recent_cutoff}&limit=1000"],
)
expect(captured_paths).to include(
[attributes_list[0][:domain], recent_path],
[attributes_list[1][:domain], recent_path],
[attributes_list[2][:domain], recent_path],
)
expect(upserted).to be_empty
expect(federation_helpers.warn_messages).to include("Failed to load remote node data")
expect(attributes_list.map { |attrs| attrs[:nodes_count] }).to all(eq(3))
expect(attributes_list.map { |attrs| attrs[:nodes_count] }).to all(eq(node_payload.length))
end
end
@@ -714,7 +549,7 @@ RSpec.describe PotatoMesh::App::Federation do
end
it "applies federation headers to instance fetch requests" do
connection = instance_double(HTTP_CONNECTION_DOUBLE)
connection = instance_double("Net::HTTPConnection")
success_response = Net::HTTPOK.new("1.1", "200", "OK")
allow(success_response).to receive(:body).and_return("{}")
allow(success_response).to receive(:code).and_return("200")
@@ -736,56 +571,13 @@ RSpec.describe PotatoMesh::App::Federation do
expect(captured_request["User-Agent"]).to eq(federation_helpers.send(:federation_user_agent_header))
expect(captured_request["Content-Type"]).to be_nil
end
# Non-2xx responses must surface as InstanceFetchError so callers can treat
# all remote fetch failures uniformly.
it "wraps non-success HTTP responses" do
connection = instance_double(HTTP_CONNECTION_DOUBLE)
failure_response = Net::HTTPBadGateway.new("1.1", "502", "Bad Gateway")
allow(failure_response).to receive(:code).and_return("502")
# Yield the stubbed connection in place of a real Net::HTTP session.
allow(http_client).to receive(:start) do |&block|
block.call(connection)
end
allow(connection).to receive(:request).and_return(failure_response)
expect do
federation_helpers.send(:perform_instance_http_request, uri)
end.to raise_error(
PotatoMesh::App::InstanceFetchError,
a_string_including("unexpected response 502"),
)
end
end
# Interruptible sleep helper: sleeps in slices, polling for shutdown between
# slices, and reports via its return value whether the full delay elapsed.
describe ".federation_sleep_with_shutdown" do
# The first shutdown poll passes and the second returns true, so the helper
# must abort the remaining delay and return false.
it "returns false when shutdown is requested during sleep" do
allow(Kernel).to receive(:sleep)
call_count = 0
allow(federation_helpers).to receive(:federation_shutdown_requested?) do
call_count += 1
call_count > 1
end
result = federation_helpers.federation_sleep_with_shutdown(1.0)
expect(result).to be(false)
expect(Kernel).to have_received(:sleep).at_least(:once)
end
# With shutdown never requested the full delay elapses and the helper
# signals completion by returning true.
it "returns true when the full delay elapses without shutdown" do
allow(Kernel).to receive(:sleep)
allow(federation_helpers).to receive(:federation_shutdown_requested?).and_return(false)
result = federation_helpers.federation_sleep_with_shutdown(0.01)
expect(result).to be(true)
end
end
describe ".announce_instance_to_domain" do
let(:payload) { "{}" }
let(:https_uri) { URI.parse("https://remote.mesh/api/instances") }
let(:http_uri) { URI.parse("http://remote.mesh/api/instances") }
let(:http_connection) { instance_double(HTTP_CONNECTION_DOUBLE) }
let(:http_connection) { instance_double("Net::HTTPConnection") }
let(:success_response) { Net::HTTPOK.new("1.1", "200", "OK") }
before do
@@ -863,14 +655,6 @@ RSpec.describe PotatoMesh::App::Federation do
expect(federation_helpers.ensure_federation_worker_pool!).to be_nil
end
it "returns nil when federation shutdown has been requested" do
allow(federation_helpers).to receive(:federation_enabled?).and_return(true)
federation_helpers.request_federation_shutdown!
expect(federation_helpers.ensure_federation_worker_pool!).to be_nil
expect(federation_helpers.send(:settings).federation_worker_pool).to be_nil
end
it "creates and memoizes the worker pool" do
allow(federation_helpers).to receive(:federation_enabled?).and_return(true)
@@ -883,69 +667,6 @@ RSpec.describe PotatoMesh::App::Federation do
end
end
describe ".ensure_federation_shutdown_hook!" do
it "registers a single at_exit hook when called repeatedly" do
allow(federation_helpers).to receive(:at_exit)
federation_helpers.ensure_federation_shutdown_hook!
federation_helpers.ensure_federation_shutdown_hook!
expect(federation_helpers).to have_received(:at_exit).once
expect(federation_helpers.send(:settings).federation_shutdown_hook_installed).to be(true)
end
it "delegates hook installation from instances to the application class" do
class_with_instance = Class.new do
include PotatoMesh::App::Federation
end
expect(class_with_instance).to receive(:ensure_federation_shutdown_hook!).once
class_with_instance.new.ensure_federation_shutdown_hook!
end
it "uses ivar guard when hook-installed setting is unavailable" do
helper_without_hook_setting = Class.new do
extend PotatoMesh::App::Federation
class << self
def settings
@settings ||= Struct.new(:federation_thread, :initial_federation_thread, :federation_worker_pool, :federation_shutdown_requested).new
end
# No-op in this helper because tests only assert hook registration behavior.
def shutdown_federation_background_work!(timeout: nil); end
end
end
allow(helper_without_hook_setting).to receive(:at_exit)
helper_without_hook_setting.ensure_federation_shutdown_hook!
helper_without_hook_setting.ensure_federation_shutdown_hook!
expect(helper_without_hook_setting).to have_received(:at_exit).once
expect(
helper_without_hook_setting.instance_variable_get(:@federation_shutdown_hook_installed),
).to be(true)
end
end
describe ".stop_federation_thread!" do
it "wakes, joins, and kills a stubborn live thread" do
thread = instance_double(Thread)
allow(thread).to receive(:alive?).and_return(true, true, false)
allow(thread).to receive(:respond_to?).with(:wakeup).and_return(true)
allow(thread).to receive(:wakeup).and_raise(ThreadError, "not asleep")
allow(thread).to receive(:join)
allow(thread).to receive(:kill)
federation_helpers.set(:federation_thread, thread)
federation_helpers.stop_federation_thread!(:federation_thread, timeout: 0.01)
expect(thread).to have_received(:join).with(0.01)
expect(thread).to have_received(:kill)
expect(federation_helpers.send(:settings).federation_thread).to be_nil
end
end
describe ".shutdown_federation_worker_pool!" do
it "logs an error when shutdown fails" do
pool = instance_double(PotatoMesh::App::WorkerPool)
@@ -962,10 +683,6 @@ RSpec.describe PotatoMesh::App::Federation do
describe ".enqueue_federation_crawl" do
let(:pool) { instance_double(PotatoMesh::App::WorkerPool) }
before do
allow(PotatoMesh::Config).to receive(:federation_crawl_cooldown_seconds).and_return(300)
end
it "returns false and logs when the pool is unavailable" do
allow(federation_helpers).to receive(:federation_worker_pool).and_return(nil)
@@ -979,17 +696,6 @@ RSpec.describe PotatoMesh::App::Federation do
expect(federation_helpers.debug_messages.last).to include("Skipped remote instance crawl")
end
it "returns false and logs when the domain is invalid" do
result = federation_helpers.enqueue_federation_crawl(
"https://bad domain",
per_response_limit: 5,
overall_limit: 9,
)
expect(result).to be(false)
expect(federation_helpers.warn_messages.last).to include("Skipped remote instance crawl")
end
it "schedules ingestion work on the pool" do
allow(federation_helpers).to receive(:federation_worker_pool).and_return(pool)
db = instance_double(SQLite3::Database)
@@ -1040,29 +746,6 @@ RSpec.describe PotatoMesh::App::Federation do
expect(result).to be(false)
end
it "does not apply cooldown when scheduling fails due to queue saturation" do
allow(PotatoMesh::Config).to receive(:federation_crawl_cooldown_seconds).and_return(300)
allow(federation_helpers).to receive(:federation_worker_pool).and_return(pool)
allow(pool).to receive(:schedule).and_raise(PotatoMesh::App::WorkerPool::QueueFullError, "full")
first = federation_helpers.enqueue_federation_crawl(
"remote.mesh",
per_response_limit: 1,
overall_limit: 2,
)
second = federation_helpers.enqueue_federation_crawl(
"remote.mesh",
per_response_limit: 1,
overall_limit: 2,
)
expect(first).to be(false)
expect(second).to be(false)
expect(federation_helpers.debug_messages).not_to include(
a_string_including("recent crawl completed"),
)
end
it "logs when the worker pool is shutting down" do
allow(federation_helpers).to receive(:federation_worker_pool).and_return(pool)
allow(pool).to receive(:schedule).and_raise(PotatoMesh::App::WorkerPool::ShutdownError, "closed")
@@ -1083,224 +766,6 @@ RSpec.describe PotatoMesh::App::Federation do
expect(result).to be(false)
end
it "deduplicates crawls while a domain crawl is already in flight" do
db = instance_double(SQLite3::Database)
allow(db).to receive(:close)
captured_job = nil
allow(federation_helpers).to receive(:federation_worker_pool).and_return(pool)
allow(pool).to receive(:schedule) do |&block|
captured_job = block
instance_double(PotatoMesh::App::WorkerPool::Task)
end
allow(federation_helpers).to receive(:open_database).and_return(db)
allow(federation_helpers).to receive(:ingest_known_instances_from!)
first = federation_helpers.enqueue_federation_crawl(
"remote.mesh",
per_response_limit: 5,
overall_limit: 9,
)
second = federation_helpers.enqueue_federation_crawl(
"remote.mesh",
per_response_limit: 5,
overall_limit: 9,
)
expect(first).to be(true)
expect(second).to be(false)
expect(captured_job).not_to be_nil
captured_job.call
expect(db).to have_received(:close)
end
it "releases the crawl slot when opening the database fails" do
allow(federation_helpers).to receive(:federation_crawl_cooldown_seconds).and_return(0)
captured_job = nil
allow(federation_helpers).to receive(:federation_worker_pool).and_return(pool)
allow(pool).to receive(:schedule) do |&block|
captured_job = block
instance_double(PotatoMesh::App::WorkerPool::Task)
end
allow(federation_helpers).to receive(:open_database).and_raise(SQLite3::Exception, "db unavailable")
allow(federation_helpers).to receive(:ingest_known_instances_from!)
first = federation_helpers.enqueue_federation_crawl(
"remote.mesh",
per_response_limit: 5,
overall_limit: 9,
)
expect(first).to be(true)
expect(captured_job).not_to be_nil
expect { captured_job.call }.to raise_error(SQLite3::Exception, "db unavailable")
second = federation_helpers.enqueue_federation_crawl(
"remote.mesh",
per_response_limit: 5,
overall_limit: 9,
)
expect(second).to be(true)
end
it "deduplicates crawls across instance receivers using shared class state" do
helper_class = Class.new do
include PotatoMesh::App::Federation
class << self
attr_accessor :pool
def settings
@settings ||= Struct.new(:federation_shutdown_requested).new(false)
end
def set(key, value)
settings.public_send("#{key}=", value)
end
def federation_worker_pool
pool
end
# No-op to keep the test helper minimal while satisfying federation logging calls.
def debug_log(*); end
# No-op to keep the test helper minimal while satisfying federation logging calls.
def warn_log(*); end
end
def settings
self.class.settings
end
def set(key, value)
self.class.set(key, value)
end
def debug_log(...)
self.class.debug_log(...)
end
def warn_log(...)
self.class.warn_log(...)
end
end
pool_double = instance_double(PotatoMesh::App::WorkerPool)
allow(pool_double).to receive(:schedule).and_return(instance_double(PotatoMesh::App::WorkerPool::Task))
helper_class.pool = pool_double
first_receiver = helper_class.new
second_receiver = helper_class.new
first = first_receiver.enqueue_federation_crawl(
"remote.mesh",
per_response_limit: 1,
overall_limit: 2,
)
second = second_receiver.enqueue_federation_crawl(
"remote.mesh",
per_response_limit: 1,
overall_limit: 2,
)
expect(first).to be(true)
expect(second).to be(false)
expect(pool_double).to have_received(:schedule).once
end
end
describe ".fetch_instance_json" do
it "short-circuits when shutdown has been requested" do
federation_helpers.request_federation_shutdown!
payload, metadata = federation_helpers.fetch_instance_json("remote.mesh", NODES_API_PATH)
expect(payload).to be_nil
expect(metadata).to eq(["federation shutdown requested"])
end
it "stops iterating URI candidates after shutdown is requested mid-loop" do
calls = 0
allow(federation_helpers).to receive(:instance_uri_candidates).and_return([
URI.parse("https://remote.mesh/api/nodes"),
URI.parse("http://remote.mesh/api/nodes"),
])
allow(federation_helpers).to receive(:perform_instance_http_request) do |_uri|
calls += 1
federation_helpers.request_federation_shutdown!
raise PotatoMesh::App::InstanceFetchError, "boom"
end
payload, metadata = federation_helpers.fetch_instance_json("remote.mesh", NODES_API_PATH)
expect(payload).to be_nil
expect(calls).to eq(1)
expect(metadata.first).to include("boom")
end
end
describe ".claim_federation_crawl_slot" do
it "initializes crawl dedupe state safely under concurrent access" do
federation_helpers.instance_variable_set(:@federation_crawl_mutex, nil)
federation_helpers.instance_variable_set(:@federation_crawl_in_flight, nil)
federation_helpers.instance_variable_set(:@federation_crawl_last_completed_at, nil)
federation_helpers.instance_variable_set(:@federation_crawl_init_mutex, nil)
threads = Array.new(12) do
Thread.new do
federation_helpers.initialize_federation_crawl_state!
end
end
threads.each(&:join)
mutex = federation_helpers.instance_variable_get(:@federation_crawl_mutex)
in_flight = federation_helpers.instance_variable_get(:@federation_crawl_in_flight)
last_completed = federation_helpers.instance_variable_get(:@federation_crawl_last_completed_at)
expect(mutex).to be_a(Mutex)
expect(in_flight).to be_a(Set)
expect(last_completed).to be_a(Hash)
expect(in_flight).to be_empty
expect(last_completed).to be_empty
end
it "returns cooldown when the domain completed recently" do
allow(PotatoMesh::Config).to receive(:federation_crawl_cooldown_seconds).and_return(300)
federation_helpers.clear_federation_crawl_state!
federation_helpers.release_federation_crawl_slot("remote.mesh")
result = federation_helpers.claim_federation_crawl_slot("remote.mesh")
expect(result).to eq(:cooldown)
end
end
describe ".shutdown_federation_background_work!" do
it "marks shutdown and clears announcer references" do
initial_thread = instance_double(Thread)
recurring_thread = instance_double(Thread)
pool = instance_double(PotatoMesh::App::WorkerPool)
allow(PotatoMesh::Config).to receive(:federation_shutdown_timeout_seconds).and_return(0.05)
allow(PotatoMesh::Config).to receive(:federation_task_timeout_seconds).and_return(0.05)
[initial_thread, recurring_thread].each do |thread|
allow(thread).to receive(:alive?).and_return(false)
end
allow(pool).to receive(:shutdown)
federation_helpers.set(:initial_federation_thread, initial_thread)
federation_helpers.set(:federation_thread, recurring_thread)
federation_helpers.set(:federation_worker_pool, pool)
federation_helpers.shutdown_federation_background_work!
expect(federation_helpers.federation_shutdown_requested?).to be(true)
expect(federation_helpers.send(:settings).initial_federation_thread).to be_nil
expect(federation_helpers.send(:settings).federation_thread).to be_nil
expect(federation_helpers.send(:settings).federation_worker_pool).to be_nil
end
end
describe ".wait_for_federation_tasks" do
@@ -1371,32 +836,4 @@ RSpec.describe PotatoMesh::App::Federation do
federation_helpers.announce_instance_to_all_domains
end
end
describe ".start_federation_announcer!" do
it "clears shutdown, installs hook, and exits loop when sleep aborts" do
thread_double = instance_double(Thread)
captured = nil
allow(federation_helpers).to receive(:federation_enabled?).and_return(true)
allow(federation_helpers).to receive(:clear_federation_shutdown_request!)
allow(federation_helpers).to receive(:ensure_federation_shutdown_hook!)
allow(federation_helpers).to receive(:federation_sleep_with_shutdown).and_return(false)
allow(Thread).to receive(:new) do |&block|
captured = block
thread_double
end
allow(thread_double).to receive(:respond_to?).with(:name=).and_return(false)
allow(thread_double).to receive(:respond_to?).with(:daemon=).and_return(false)
allow(federation_helpers).to receive(:set)
result = federation_helpers.start_federation_announcer!
expect(result).to eq(thread_double)
expect(captured).to be_a(Proc)
captured.call
expect(federation_helpers).to have_received(:clear_federation_shutdown_request!)
expect(federation_helpers).to have_received(:ensure_federation_shutdown_hook!)
expect(federation_helpers).to have_received(:federation_sleep_with_shutdown)
end
end
end
+4 -4
View File
@@ -61,7 +61,7 @@ RSpec.describe "Ingestor endpoints" do
node_id: "!abc12345",
start_time: now - 120,
last_seen_time: now - 60,
version: "0.5.11",
version: "0.5.10",
lora_freq: 915,
modem_preset: "LongFast",
}.merge(overrides)
@@ -133,7 +133,7 @@ RSpec.describe "Ingestor endpoints" do
with_db do |db|
db.execute(
"INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
["!fresh000", now - 100, now - 10, "0.5.11"],
["!fresh000", now - 100, now - 10, "0.5.10"],
)
db.execute(
"INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
@@ -141,7 +141,7 @@ RSpec.describe "Ingestor endpoints" do
)
db.execute(
"INSERT INTO ingestors(node_id, start_time, last_seen_time, version, lora_freq, modem_preset) VALUES(?,?,?,?,?,?)",
["!rich000", now - 200, now - 100, "0.5.11", 915, "MediumFast"],
["!rich000", now - 200, now - 100, "0.5.10", 915, "MediumFast"],
)
end
@@ -173,7 +173,7 @@ RSpec.describe "Ingestor endpoints" do
)
db.execute(
"INSERT INTO ingestors(node_id, start_time, last_seen_time, version) VALUES(?,?,?,?)",
["!new-ingestor", now - 60, now - 30, "0.5.11"],
["!new-ingestor", now - 60, now - 30, "0.5.10"],
)
end
+21 -1
View File
@@ -143,6 +143,18 @@ RSpec.describe PotatoMesh::App::Meshtastic::Cipher do
expect(text).to eq("Nabend")
end
# decrypt_data must report both the plaintext and a confidence score in the
# closed unit interval for successfully decrypted payloads.
it "captures a confidence score for decrypted text" do
  result = described_class.decrypt_data(
    cipher_b64: cipher_b64,
    packet_id: packet_id,
    from_id: from_id,
    psk_b64: psk_b64,
  )
  expect(result[:text]).to eq("Nabend")
  expect(result[:decryption_confidence]).to be_between(0.0, 1.0)
end
it "decrypts the public PSK alias sample payload" do
text = described_class.decrypt_text(
cipher_b64: "otu3OyMrTIUlcaisLVDyAnLW",
@@ -196,7 +208,7 @@ RSpec.describe PotatoMesh::App::Meshtastic::Cipher do
)
expect(text).to be_nil
expect(data).to eq({ portnum: 3, payload: payload, text: nil })
expect(data).to eq({ portnum: 3, payload: payload, text: nil, decryption_confidence: nil })
end
it "normalizes packet ids from numeric strings" do
@@ -277,4 +289,12 @@ RSpec.describe PotatoMesh::App::Meshtastic::Cipher do
expect(data).to be_nil
end
# Longer printable, sentence-like text must score strictly higher than a
# short fragment, while staying within the closed unit interval.
it "scores text confidence higher for longer printable content" do
  short_score = described_class.text_confidence("AC")
  sentence_score = described_class.text_confidence("This looks like a sentence.")
  expect(short_score).to be < sentence_score
  expect(sentence_score).to be_between(0.0, 1.0)
end
end